ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3-1.04M)
---|---|---|
py | b40872887b265d235c6ef5e3c1edb091deed6258 | #!/usr/bin/env python3
from common.op_params import opParams
import time
import ast
import difflib
class opEdit:  # used by running `python /data/openpilot/op_edit.py`
def __init__(self):
self.op_params = opParams()
self.params = None
self.sleep_time = 1.0
self.live_tuning = self.op_params.get('op_edit_live_mode', False)
self.run_init()
def run_init(self):
self.run_loop()
def run_loop(self):
while True:
if not self.live_tuning:
print('Parameters:\n')
else:
print('Live Parameters:\n')
self.params = self.op_params.get(force_update=True)
self.params = dict(sorted(self.params.items(), key=lambda x:x[0].lower()))
if self.live_tuning: # only display live tunable params
self.params = {k: v for k, v in self.params.items() if self.op_params.key_info(k).live}
values_list = [self.params[i] if len(str(self.params[i])) < 20 else '{} ... {}'.format(str(self.params[i])[:30], str(self.params[i])[-15:]) for i in self.params]
live = ['(live!)' if self.op_params.key_info(i).live else '' for i in self.params]
to_print = ['{}. {}: {} {}'.format(idx + 1, i, values_list[idx], live[idx]) for idx, i in enumerate(self.params)]
extras = ['---\na. Add new parameter',
'd. Delete parameter',
'l. Toggle live tuning']
to_print += extras
print('\n'.join(to_print))
print('\nChoose a parameter to edit (by index or name):')
choice = input('>> ').strip()
parsed, choice = self.parse_choice(choice, len(to_print) - len(extras))
if parsed == 'continue':
continue
elif parsed == 'add':
self.add_parameter()
elif parsed == 'change':
self.change_parameter(choice)
elif parsed == 'delete':
self.delete_parameter()
elif parsed == 'live':
self.live_tuning = not self.live_tuning
self.op_params.put('op_edit_live_mode', self.live_tuning) # for next opEdit startup
elif parsed == 'exit':
return
def parse_choice(self, choice, opt_len):
if choice.isdigit():
choice = int(choice)
choice -= 1
if choice not in range(opt_len): # number of options to choose from
self.message('Not in range!')
return 'continue', choice
return 'change', choice
if choice == '':
print('Exiting opEdit!')
return 'exit', choice
if choice in ['a', 'add']: # add new parameter
return 'add', choice
elif choice in ['d', 'delete', 'del']: # delete parameter
return 'delete', choice
elif choice in ['l', 'live']: # live tuning mode
return 'live', choice
else: # find most similar param to user's input
param_sims = [(idx, self.str_sim(choice, param)) for idx, param in enumerate(self.params)]
param_sims = [param for param in param_sims if param[1] > 0.5]
if len(param_sims) > 0:
chosen_param = sorted(param_sims, key=lambda param: param[1], reverse=True)[0]
return 'change', chosen_param[0] # return idx
self.message('Invalid choice!')
return 'continue', choice
def str_sim(self, a, b):
return difflib.SequenceMatcher(a=a, b=b).ratio()
def change_parameter(self, choice):
while True:
chosen_key = list(self.params)[choice]
key_info = self.op_params.key_info(chosen_key)
old_value = self.params[chosen_key]
print('Chosen parameter: {}'.format(chosen_key))
to_print = []
if key_info.has_description:
to_print.append('>> Description: {}'.format(self.op_params.default_params[chosen_key]['description'].replace('\n', '\n > ')))
if key_info.has_allowed_types:
to_print.append('>> Allowed types: {}'.format(', '.join([str(i).split("'")[1] for i in key_info.allowed_types])))
if key_info.live:
to_print.append('>> Live tunable! Up to 5 second delay')
if to_print:
print('\n{}\n'.format('\n'.join(to_print)))
print('Current value: {} (type: {})'.format(old_value, str(type(old_value)).split("'")[1]))
if key_info.has_clip:
print('Clipped value: {}, [{},{}]'.format(self.op_params.get(chosen_key), key_info.min, key_info.max))
while True:
print('\nEnter your new value:')
new_value = input('>> ').strip()
if new_value == '':
self.message('Exiting this parameter...', 0.5)
return
new_value = self.parse_input(new_value)
if key_info.has_allowed_types and type(new_value) not in key_info.allowed_types:
self.message('The type of data you entered ({}) is not allowed with this parameter!'.format(str(type(new_value)).split("'")[1]))
continue
if key_info.live: # stay in live tuning interface
self.op_params.put(chosen_key, new_value)
print('Saved {} with value: {}! (type: {})'.format(chosen_key, new_value, str(type(new_value)).split("'")[1]))
else: # else ask to save and break
print('\nOld value: {} (type: {})'.format(old_value, str(type(old_value)).split("'")[1]))
print('New value: {} (type: {})'.format(new_value, str(type(new_value)).split("'")[1]))
print('\nDo you want to save this?')
if self.input_with_options(['Y', 'n'], 'n')[0] == 0:
self.op_params.put(chosen_key, new_value)
self.message('Saved!')
else:
self.message('Not saved!')
return
def input_with_options(self, options, default=None):
"""
Takes in a list of options and asks user to make a choice.
The most similar option list index is returned along with the similarity percentage from 0 to 1
"""
user_input = input('[{}]: '.format('/'.join(options))).lower().strip()
if not user_input:
return default, 0.0
sims = [self.str_sim(i.lower().strip(), user_input) for i in options]
argmax = sims.index(max(sims))
return argmax, sims[argmax]
def parse_input(self, dat):
dat = dat.strip()
try:
dat = ast.literal_eval(dat)
except Exception as e:
print(e)
if dat.lower() == 'none':
dat = None
elif dat.lower() == 'false':
dat = False
elif dat.lower() == 'true': # else, assume string
dat = True
return dat
def delete_parameter(self):
while True:
print('Enter the name of the parameter to delete:')
key = self.parse_input(input('>> '))
if key == '':
return
if not isinstance(key, str):
self.message('Input must be a string!')
continue
if key not in self.params:
self.message("Parameter doesn't exist!")
continue
value = self.params.get(key)
print('Parameter name: {}'.format(key))
print('Parameter value: {} (type: {})'.format(value, str(type(value)).split("'")[1]))
print('Do you want to delete this?')
if self.input_with_options(['Y', 'n'], default='n')[0] == 0:
self.op_params.delete(key)
self.message('Deleted!')
else:
self.message('Not saved!')
return
def add_parameter(self):
while True:
print('Type the name of your new parameter:')
key = self.parse_input(input('>> '))
if key == '':
return
if not isinstance(key, str):
self.message('Input must be a string!')
continue
print("Enter the data you'd like to save with this parameter:")
value = input('>> ').strip()
value = self.parse_input(value)
print('Parameter name: {}'.format(key))
print('Parameter value: {} (type: {})'.format(value, str(type(value)).split("'")[1]))
print('Do you want to save this?')
if self.input_with_options(['Y', 'n'], default='n')[0] == 0:
self.op_params.put(key, value)
self.message('Saved!')
else:
self.message('Not saved!')
return
def message(self, msg, sleep_time=None):
if sleep_time is None:
sleep_time = self.sleep_time
print('--------\n{}\n--------'.format(msg), flush=True)
time.sleep(sleep_time)
print()
opEdit()
|
py | b40872cb51516829bb24e09a60eea50001ad0fed | import numpy as np
import torch
import os
from torch.autograd import Variable
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
class Pix2PixHDModel(BaseModel):
def name(self):
return 'Pix2PixHDModel'
def init_loss_filter(self, use_gan_feat_loss, use_vgg_loss):
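        # flags select which of (G_GAN, G_GAN_Feat, G_VGG, D_real, D_fake) losses are returned by loss_filter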
flags = (True, use_gan_feat_loss, use_vgg_loss, True, True)
def loss_filter(g_gan, g_gan_feat, g_vgg, d_real, d_fake):
return [l for (l,f) in zip((g_gan,g_gan_feat,g_vgg,d_real,d_fake),flags) if f]
return loss_filter
def initialize(self, opt):
BaseModel.initialize(self, opt)
if opt.resize_or_crop != 'none' or not opt.isTrain: # when training at full res this causes OOM
torch.backends.cudnn.benchmark = True
self.isTrain = opt.isTrain
self.use_features = opt.instance_feat or opt.label_feat
self.gen_features = self.use_features and not self.opt.load_features
input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc
##### define networks
# Generator network
netG_input_nc = input_nc
if not opt.no_instance:
netG_input_nc += 1
if self.use_features:
netG_input_nc += opt.feat_num
self.netG = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,
opt.n_blocks_local, opt.norm, gpu_ids=self.gpu_ids)
# Discriminator network
if self.isTrain:
use_sigmoid = opt.no_lsgan
netD_input_nc = input_nc + opt.output_nc
if not opt.no_instance:
netD_input_nc += 1
self.netD = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm, use_sigmoid,
opt.num_D, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)
### Encoder network
if self.gen_features:
self.netE = networks.define_G(opt.output_nc, opt.feat_num, opt.nef, 'encoder',
opt.n_downsample_E, norm=opt.norm, gpu_ids=self.gpu_ids)
if self.opt.verbose:
print('---------- Networks initialized -------------')
# load networks
if not self.isTrain or opt.continue_train or opt.load_pretrain:
pretrained_path = '' if not self.isTrain else opt.load_pretrain
self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path)
if self.isTrain:
self.load_network(self.netD, 'D', opt.which_epoch, pretrained_path)
if self.gen_features:
self.load_network(self.netE, 'E', opt.which_epoch, pretrained_path)
# set loss functions and optimizers
if self.isTrain:
if opt.pool_size > 0 and (len(self.gpu_ids)) > 1:
raise NotImplementedError("Fake Pool Not Implemented for MultiGPU")
self.fake_pool = ImagePool(opt.pool_size)
self.old_lr = opt.lr
# define loss functions
self.loss_filter = self.init_loss_filter(not opt.no_ganFeat_loss, not opt.no_vgg_loss)
self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)
self.criterionFeat = torch.nn.L1Loss()
if not opt.no_vgg_loss:
self.criterionVGG = networks.VGGLoss(self.gpu_ids)
# Names so we can breakout loss
self.loss_names = self.loss_filter('G_GAN','G_GAN_Feat','G_VGG','D_real', 'D_fake')
# initialize optimizers
# optimizer G
if opt.niter_fix_global > 0:
import sys
if sys.version_info >= (3,0):
finetune_list = set()
else:
from sets import Set
finetune_list = Set()
params_dict = dict(self.netG.named_parameters())
params = []
for key, value in params_dict.items():
if key.startswith('model' + str(opt.n_local_enhancers)):
params += [value]
finetune_list.add(key.split('.')[0])
print('------------- Only training the local enhancer network (for %d epochs) ------------' % opt.niter_fix_global)
print('The layers that are finetuned are ', sorted(finetune_list))
else:
params = list(self.netG.parameters())
if self.gen_features:
params += list(self.netE.parameters())
self.optimizer_G = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
# optimizer D
params = list(self.netD.parameters())
self.optimizer_D = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
def encode_input(self, label_map, inst_map=None, real_image=None, feat_map=None, infer=False):
if self.opt.label_nc == 0:
input_label = label_map.data.cuda()
else:
# create one-hot vector for label map
size = label_map.size()
oneHot_size = (size[0], self.opt.label_nc, size[2], size[3])
input_label = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_()
input_label = input_label.scatter_(1, label_map.data.long().cuda(), 1.0)
if self.opt.data_type == 16:
input_label = input_label.half()
# get edges from instance map
if not self.opt.no_instance:
inst_map = inst_map.data.cuda()
edge_map = self.get_edges(inst_map)
input_label = torch.cat((input_label, edge_map), dim=1)
input_label = Variable(input_label, volatile=infer)
# real images for training
if real_image is not None:
real_image = Variable(real_image.data.cuda())
# instance map for feature encoding
if self.use_features:
# get precomputed feature maps
if self.opt.load_features:
feat_map = Variable(feat_map.data.cuda())
if self.opt.label_feat:
inst_map = label_map.cuda()
return input_label, inst_map, real_image, feat_map
def discriminate(self, input_label, test_image, use_pool=False):
input_concat = torch.cat((input_label, test_image.detach()), dim=1)
if use_pool:
fake_query = self.fake_pool.query(input_concat)
return self.netD.forward(fake_query)
else:
return self.netD.forward(input_concat)
def forward(self, label, inst, image, feat, infer=False, is_OC=False):
# Encode Inputs
input_label, inst_map, real_image, feat_map = self.encode_input(label, inst, image, feat, infer=is_OC)
# Fake Generation
if self.use_features:
if not self.opt.load_features:
feat_map = self.netE.forward(real_image, inst_map)
input_concat = torch.cat((input_label, feat_map), dim=1)
else:
input_concat = input_label
fake_image = self.netG.forward(input_concat)
if is_OC:
return fake_image
# Fake Detection and Loss
pred_fake_pool = self.discriminate(input_label, fake_image, use_pool=True)
loss_D_fake = self.criterionGAN(pred_fake_pool, False)
# Real Detection and Loss
pred_real = self.discriminate(input_label, real_image)
loss_D_real = self.criterionGAN(pred_real, True)
# GAN loss (Fake Passability Loss)
pred_fake = self.netD.forward(torch.cat((input_label, fake_image), dim=1))
loss_G_GAN = self.criterionGAN(pred_fake, True)
# GAN feature matching loss
loss_G_GAN_Feat = 0
if not self.opt.no_ganFeat_loss:
feat_weights = 4.0 / (self.opt.n_layers_D + 1)
D_weights = 1.0 / self.opt.num_D
for i in range(self.opt.num_D):
for j in range(len(pred_fake[i])-1):
loss_G_GAN_Feat += D_weights * feat_weights * \
self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach()) * self.opt.lambda_feat
# VGG feature matching loss
loss_G_VGG = 0
if not self.opt.no_vgg_loss:
loss_G_VGG = self.criterionVGG(fake_image, real_image) * self.opt.lambda_feat
# Only return the fake_B image if necessary to save BW
return [ self.loss_filter( loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_D_real, loss_D_fake ), None if not infer else fake_image ]
def inference(self, label, inst, image=None):
# Encode Inputs
image = Variable(image) if image is not None else None
input_label, inst_map, real_image, _ = self.encode_input(Variable(label), Variable(inst), image, infer=True)
# Fake Generation
if self.use_features:
if self.opt.use_encoded_image:
# encode the real image to get feature map
feat_map = self.netE.forward(real_image, inst_map)
else:
# sample clusters from precomputed features
feat_map = self.sample_features(inst_map)
input_concat = torch.cat((input_label, feat_map), dim=1)
else:
input_concat = input_label
if torch.__version__.startswith('0.4'):
with torch.no_grad():
fake_image = self.netG.forward(input_concat)
else:
fake_image = self.netG.forward(input_concat)
return fake_image
def sample_features(self, inst):
# read precomputed feature clusters
cluster_path = os.path.join(self.opt.checkpoints_dir, self.opt.name, self.opt.cluster_path)
features_clustered = np.load(cluster_path, encoding='latin1').item()
# randomly sample from the feature clusters
inst_np = inst.cpu().numpy().astype(int)
feat_map = self.Tensor(inst.size()[0], self.opt.feat_num, inst.size()[2], inst.size()[3])
for i in np.unique(inst_np):
label = i if i < 1000 else i//1000
if label in features_clustered:
feat = features_clustered[label]
cluster_idx = np.random.randint(0, feat.shape[0])
idx = (inst == int(i)).nonzero()
for k in range(self.opt.feat_num):
feat_map[idx[:,0], idx[:,1] + k, idx[:,2], idx[:,3]] = feat[cluster_idx, k]
if self.opt.data_type==16:
feat_map = feat_map.half()
return feat_map
def encode_features(self, image, inst):
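        # encode the image once, then record one feature vector per instance (sampled at a single pixel of
        # that instance) plus a relative-size term, grouped by label for later clustering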
image = Variable(image.cuda(), volatile=True)
feat_num = self.opt.feat_num
h, w = inst.size()[2], inst.size()[3]
block_num = 32
feat_map = self.netE.forward(image, inst.cuda())
inst_np = inst.cpu().numpy().astype(int)
feature = {}
for i in range(self.opt.label_nc):
feature[i] = np.zeros((0, feat_num+1))
for i in np.unique(inst_np):
label = i if i < 1000 else i//1000
idx = (inst == int(i)).nonzero()
num = idx.size()[0]
idx = idx[num//2,:]
val = np.zeros((1, feat_num+1))
for k in range(feat_num):
val[0, k] = feat_map[idx[0], idx[1] + k, idx[2], idx[3]].data[0]
val[0, feat_num] = float(num) / (h * w // block_num)
feature[label] = np.append(feature[label], val, axis=0)
return feature
def get_edges(self, t):
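        # boundary map: mark pixels whose instance ID differs from a horizontal or vertical neighbour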
edge = torch.cuda.ByteTensor(t.size()).zero_()
edge = edge.bool()
edge[:,:,:,1:] = edge[:,:,:,1:] | (t[:,:,:,1:] != t[:,:,:,:-1])
edge[:,:,:,:-1] = edge[:,:,:,:-1] | (t[:,:,:,1:] != t[:,:,:,:-1])
edge[:,:,1:,:] = edge[:,:,1:,:] | (t[:,:,1:,:] != t[:,:,:-1,:])
edge[:,:,:-1,:] = edge[:,:,:-1,:] | (t[:,:,1:,:] != t[:,:,:-1,:])
if self.opt.data_type==16:
return edge.half()
else:
return edge.float()
def save(self, which_epoch):
self.save_network(self.netG, 'G', which_epoch, self.gpu_ids)
self.save_network(self.netD, 'D', which_epoch, self.gpu_ids)
if self.gen_features:
self.save_network(self.netE, 'E', which_epoch, self.gpu_ids)
def update_fixed_params(self):
# after fixing the global generator for a number of iterations, also start finetuning it
params = list(self.netG.parameters())
if self.gen_features:
params += list(self.netE.parameters())
self.optimizer_G = torch.optim.Adam(params, lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
if self.opt.verbose:
print('------------ Now also finetuning global generator -----------')
def update_learning_rate(self):
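        # linear decay: reduce the learning rate by a fixed amount (initial lr / niter_decay) on each call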
lrd = self.opt.lr / self.opt.niter_decay
lr = self.old_lr - lrd
for param_group in self.optimizer_D.param_groups:
param_group['lr'] = lr
for param_group in self.optimizer_G.param_groups:
param_group['lr'] = lr
if self.opt.verbose:
print('update learning rate: %f -> %f' % (self.old_lr, lr))
self.old_lr = lr
class InferenceModel(Pix2PixHDModel):
def forward(self, inp):
label, inst = inp
return self.inference(label, inst)
|
py | b408734c2f32c83b58e6a2d702c30594c63139bc | from konlpy.tag import Okt
import json
import nltk
import numpy as np
import tensorflow as tf
class Analyzer:
def __init__(self, file_name, model_name):
with open(file_name, encoding='UTF8') as json_file:
train_docs = json.load(json_file)
tokens = [t for d in train_docs for t in d[0]]
text = nltk.Text(tokens, name='NMSC')
self.selected_words = [f[0] for f in text.vocab().most_common(10000)]
self.model = tf.keras.models.load_model(model_name)
def tokenize(self, doc):
okt = Okt()
return ['/'.join(t) for t in okt.pos(doc, norm=True, stem=True)]
def term_frequency(self, doc):
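        # bag-of-words count vector over the 10,000 most frequent training tokens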
#print(doc)
return [doc.count(word) for word in self.selected_words]
def predict_pos_neg(self, review):
token = self.tokenize(review)
tf = self.term_frequency(token)
data = np.expand_dims(np.asarray(tf).astype('float32'), axis=0)
score = float(self.model.predict(data))
        if score > 0.5:
            return "1"
            # print("{}, 1\n".format(review))
            # print("[{}] is predicted to be a positive review with {:.2f}% probability.^^\n".format(review, score * 100))
        else:
            return "0"
            # print("{}, 0\n".format(review))
            # print("[{}] is predicted to be a negative review with {:.2f}% probability.^^;\n".format(review, (1 - score) * 100))
|
py | b408737bde2460bf9334aace288da64ff8589480 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azure.cli.core.commands import CliCommandType
from azure.cli.core.commands.parameters import get_enum_type
from azure.cli.command_modules.profile._format import transform_account_list
import azure.cli.command_modules.profile._help # pylint: disable=unused-import
from ._validators import validate_tenant
cloud_resource_types = ["oss-rdbms", "arm", "aad-graph", "ms-graph", "batch", "media", "data-lake"]
class ProfileCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
super(ProfileCommandsLoader, self).__init__(cli_ctx=cli_ctx)
def load_command_table(self, args):
profile_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.profile.custom#{}'
)
with self.command_group('', profile_custom) as g:
g.command('login', 'login')
g.command('logout', 'logout')
g.command('self-test', 'check_cli', deprecate_info=g.deprecate(hide=True))
with self.command_group('account', profile_custom) as g:
g.command('list', 'list_subscriptions', table_transformer=transform_account_list)
g.command('set', 'set_active_subscription')
g.show_command('show', 'show_subscription')
g.command('clear', 'account_clear')
g.command('list-locations', 'list_locations')
g.command('get-access-token', 'get_access_token')
return self.command_table
# pylint: disable=line-too-long
def load_arguments(self, command):
from azure.cli.core.api import get_subscription_id_list
with self.argument_context('login') as c:
c.argument('password', options_list=['--password', '-p'], help="Credentials like user password, or for a service principal, provide client secret or a pem file with key and public certificate. Will prompt if not given.")
c.argument('service_principal', action='store_true', help='The credential representing a service principal.')
c.argument('username', options_list=['--username', '-u'], help='user name, service principal, or managed service identity ID')
c.argument('tenant', options_list=['--tenant', '-t'], help='The AAD tenant, must provide when using service principals.', validator=validate_tenant)
c.argument('allow_no_subscriptions', action='store_true', help="Support access tenants without subscriptions. It's uncommon but useful to run tenant level commands, such as 'az ad'")
c.ignore('_subscription') # hide the global subscription parameter
c.argument('identity', options_list=('-i', '--identity'), action='store_true', help="Log in using the Virtual Machine's identity", arg_group='Managed Service Identity')
c.argument('identity_port', type=int, help="the port to retrieve tokens for login. Default: 50342", arg_group='Managed Service Identity')
c.argument('use_device_code', action='store_true',
help="Use CLI's old authentication flow based on device code. CLI will also use this if it can't launch a browser in your behalf, e.g. in remote SSH or Cloud Shell")
c.argument('use_cert_sn_issuer', action='store_true', help='used with a service principal configured with Subject Name and Issuer Authentication in order to support automatic certificate rolls')
c.argument('scopes', options_list=['--scope'], nargs='+', help='Used in the /authorize request. It can cover only one static resource.')
with self.argument_context('logout') as c:
c.argument('username', help='account user, if missing, logout the current active account')
c.ignore('_subscription') # hide the global subscription parameter
with self.argument_context('account') as c:
c.argument('subscription', options_list=['--subscription', '-s'], arg_group='', help='Name or ID of subscription.', completer=get_subscription_id_list)
c.ignore('_subscription')
with self.argument_context('account list') as c:
c.argument('all', help="List all subscriptions, rather than just 'Enabled' ones", action='store_true')
c.argument('refresh', help="retrieve up-to-date subscriptions from server", action='store_true')
c.ignore('_subscription') # hide the global subscription parameter
with self.argument_context('account show') as c:
c.argument('show_auth_for_sdk', options_list=['--sdk-auth'], action='store_true',
deprecate_info=c.deprecate(target='--sdk-auth', expiration='3.0.0'),
help='Output result to a file compatible with Azure SDK auth. Only applicable when authenticating with a Service Principal.')
with self.argument_context('account get-access-token') as c:
c.argument('resource_type', get_enum_type(cloud_resource_types), options_list=['--resource-type'], arg_group='', help='Type of well-known resource.')
c.argument('tenant', options_list=['--tenant', '-t'], help='Tenant ID for which the token is acquired. Only available for user and service principal account, not for MSI or Cloud Shell account')
COMMAND_LOADER_CLS = ProfileCommandsLoader
|
py | b408748ee4836728cfa3231dc1c23dc73cdee76c | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
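    # accepts "[ipv6]:port", bare IPv6, "host:port" or a bare host; falls back to defaultport when no port is given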
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef ZALGOCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define ZALGOCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the zalgocoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9888)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19888)
g.write('#endif // ZALGOCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
py | b408749af40ac642b803bd5de775ad1057bbd853 | import matplotlib
import matplotlib.pyplot as plt
import sys
import numpy as np
def integr(x,p):
out = np.zeros(len(x))
for i in range(1, len(x)):
out[i]=out[i-1]+(x[i]-x[i-1])*p
return out
pfu = 0
cfu = 0
P = 0
vol_b = 0
vol_b_p = 0
raw_dt_ms = 20.0
i = 0
def fit(flow):
global P
temp_deb = 0
fact_erreur = 0
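    # the correction factor appears to be an empirical fit against peak pressure P,
    # with separate coefficients depending on flow direction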
    if flow < 0:
        fact_erreur = 0.0037 * P * P - 0.5124 * P + 16.376
        temp_deb = flow * 0.88
    else:
        fact_erreur = -0.0143 * P + 1.696
        temp_deb = flow * 0.87
flow_corr = temp_deb + flow * raw_dt_ms/1000.0 * fact_erreur
return flow_corr
def main(argv):
name=argv[1]
global P
val = np.loadtxt(name)
x = val[:,0]
paw = val[:,1]
flow = val[:,4]
vol = np.zeros_like(flow)
flow_corr = np.zeros_like(flow)
flow_tsi = np.zeros_like(flow)
vol_corr = np.zeros_like(flow)
vol_tsi = np.zeros_like(flow)
P = max(paw)
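    # apply the pressure-dependent correction to each flow sample and integrate in 20 ms steps to get volume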
for i in range(len(vol)):
flow_corr[i]=fit(flow[i])
vol_corr[i]=vol_corr[i-1]+flow_corr[i]*0.020 if i>0 else 0
#vol_corr[i]=flow_corr[i]/60.0*0.0020 if i>0 else 0
#fig, ax = plt.subplots(1, 1)
fig, axs = plt.subplots(2, 1)
for i in range(len(vol)):
flow_tsi[i] = val[i,2] if flow[i] > 0 else - val[i,2]
for i in range(1, len(vol)):
vol[i] = vol[i-1] + flow[i]*0.02
for i in range(1, len(vol)):
vol_tsi[i] = vol_tsi[i-1] + flow_tsi[i]*0.02
axs[0].plot(x,flow )
axs[0].plot(x,flow_corr )
axs[0].plot(x, flow_tsi )
axs[0].grid(True)
axs[0].legend(['Flow Reco', 'Flow Corr', 'Flow TSI'])
axs[1].plot(x,vol )
axs[1].plot(x,vol_corr )
axs[1].plot(x,vol_tsi )
axs[1].grid(True)
axs[1].legend(['Vol Reco', 'Vol Corr', 'Vol TSI'])
#freco = ax.plot(x,flow )
#freco = ax.plot(x,val[:,2] )
#vcor = ax.plot(x,flow_corr )
#vcor = ax.plot(x,val[:,3] )
#ax.set(xlabel='time (ms)', ylabel='slm', title='Flow Sensors')
#ax.grid()
plt.savefig('fig')
plt.show()
out = np.column_stack((x, flow, flow_corr, flow_tsi, vol, vol_corr, vol_tsi))
np.savetxt('fit_'+name, out)
#out = np.column_stack((x_tsi, pawi_reco, paw_tsi, vol_tsi, dpi_reco, voli_reco))
if __name__ == "__main__":
main(sys.argv)
|
py | b408767e2a2fe4e7c024982ade6fbc3909cd5de9 | # Copyright (c) 2020 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import numpy as np
from collections import namedtuple
from .stats import compute_centered_ranks, batched_weighted_sum
from .logger import CSVLogger
import json
import functools
StepStats = namedtuple('StepStats', [
'po_returns_mean',
'po_returns_median',
'po_returns_std',
'po_returns_max',
'po_theta_max',
'po_returns_min',
'po_len_mean',
'po_len_std',
'noise_std',
'learning_rate',
'theta_norm',
'grad_norm',
'update_ratio',
'episodes_this_step',
'timesteps_this_step',
'time_elapsed_this_step',
])
EvalStats = namedtuple('StepStats', [
'eval_returns_mean',
'eval_returns_median',
'eval_returns_std',
'eval_len_mean',
'eval_len_std',
'eval_n_episodes',
'time_elapsed',
])
POResult = namedtuple('POResult', [
'noise_inds',
'returns',
'lengths',
])
EvalResult = namedtuple('EvalResult', ['returns', 'lengths'])
logger = logging.getLogger(__name__)
def initialize_master_fiber():
global noise
from .noise_module import noise
def initialize_worker_fiber(arg_thetas, arg_niches):
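    # stash the shared noise table, thetas and niches in module-level globals for this worker process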
global noise, thetas, niches
from .noise_module import noise
thetas = arg_thetas
niches = arg_niches
@functools.lru_cache(maxsize=1000)
def fiber_get_theta(iteration, optim_id):
return thetas[optim_id]
@functools.lru_cache(maxsize=1000)
def fiber_get_niche(iteration, optim_id):
return niches[optim_id]
def run_eval_batch_fiber(iteration, optim_id, batch_size, rs_seed):
global noise, niches, thetas
random_state = np.random.RandomState(rs_seed)
niche = fiber_get_niche(iteration, optim_id)
theta = fiber_get_theta(iteration, optim_id)
returns, lengths = niche.rollout_batch((theta for i in range(batch_size)),
batch_size, random_state, eval=True)
return EvalResult(returns=returns, lengths=lengths)
def run_po_batch_fiber(iteration, optim_id, batch_size, rs_seed, noise_std):
global noise, niches, thetas
random_state = np.random.RandomState(rs_seed)
niche = fiber_get_niche(iteration, optim_id)
theta = fiber_get_theta(iteration, optim_id)
noise_inds = np.asarray([noise.sample_index(random_state, len(theta))
for i in range(batch_size)],
dtype='int')
returns = np.zeros((batch_size, 2))
lengths = np.zeros((batch_size, 2), dtype='int')
returns[:, 0], lengths[:, 0] = niche.rollout_batch(
(theta + noise_std * noise.get(noise_idx, len(theta))
for noise_idx in noise_inds), batch_size, random_state)
returns[:, 1], lengths[:, 1] = niche.rollout_batch(
(theta - noise_std * noise.get(noise_idx, len(theta))
for noise_idx in noise_inds), batch_size, random_state)
return POResult(returns=returns, noise_inds=noise_inds, lengths=lengths)
class ESOptimizer:
def __init__(self,
fiber_pool,
fiber_shared,
theta,
make_niche,
learning_rate,
batches_per_chunk,
batch_size,
eval_batch_size,
eval_batches_per_step,
l2_coeff,
noise_std,
lr_decay=1,
lr_limit=0.001,
noise_decay=1,
noise_limit=0.01,
normalize_grads_by_noise_std=False,
returns_normalization='centered_ranks',
optim_id=0,
log_file='unname.log',
created_at=0,
is_candidate=False):
from .optimizers import Adam, SimpleSGD
logger.debug('Creating optimizer {}...'.format(optim_id))
self.fiber_pool = fiber_pool
self.fiber_shared = fiber_shared
self.optim_id = optim_id
assert self.fiber_pool is not None
self.theta = theta
#print(self.theta)
logger.debug('Optimizer {} optimizing {} parameters'.format(
optim_id, len(theta)))
self.optimizer = Adam(self.theta, stepsize=learning_rate)
self.sgd_optimizer = SimpleSGD(stepsize=learning_rate)
self.lr_decay = lr_decay
self.lr_limit = lr_limit
self.noise_decay = noise_decay
self.noise_limit = noise_limit
self.fiber_shared = fiber_shared
niches = fiber_shared["niches"]
niches[optim_id] = make_niche()
self.batches_per_chunk = batches_per_chunk
self.batch_size = batch_size
self.eval_batch_size = eval_batch_size
self.eval_batches_per_step = eval_batches_per_step
self.l2_coeff = l2_coeff
self.noise_std = noise_std
self.init_noise_std = noise_std
self.normalize_grads_by_noise_std = normalize_grads_by_noise_std
self.returns_normalization = returns_normalization
if is_candidate == False:
log_fields = [
'po_returns_mean_{}'.format(optim_id),
'po_returns_median_{}'.format(optim_id),
'po_returns_std_{}'.format(optim_id),
'po_returns_max_{}'.format(optim_id),
'po_returns_min_{}'.format(optim_id),
'po_len_mean_{}'.format(optim_id),
'po_len_std_{}'.format(optim_id),
'noise_std_{}'.format(optim_id),
'learning_rate_{}'.format(optim_id),
'eval_returns_mean_{}'.format(optim_id),
'eval_returns_median_{}'.format(optim_id),
'eval_returns_std_{}'.format(optim_id),
'eval_len_mean_{}'.format(optim_id),
'eval_len_std_{}'.format(optim_id),
'eval_n_episodes_{}'.format(optim_id),
'theta_norm_{}'.format(optim_id),
'grad_norm_{}'.format(optim_id),
'update_ratio_{}'.format(optim_id),
'episodes_this_step_{}'.format(optim_id),
'episodes_so_far_{}'.format(optim_id),
'timesteps_this_step_{}'.format(optim_id),
'timesteps_so_far_{}'.format(optim_id),
'time_elapsed_this_step_{}'.format(optim_id),
'accept_theta_in_{}'.format(optim_id),
'eval_returns_mean_best_in_{}'.format(optim_id),
'eval_returns_mean_best_with_ckpt_in_{}'.format(optim_id),
'eval_returns_mean_theta_from_others_in_{}'.format(optim_id),
'eval_returns_mean_proposal_from_others_in_{}'.format(optim_id),
]
log_path = log_file + '/' + log_file.split('/')[-1] + '.' + optim_id + '.log'
self.data_logger = CSVLogger(log_path, log_fields + [
'time_elapsed_so_far',
'iteration',
])
logger.info('Optimizer {} created!'.format(optim_id))
self.filename_best = log_file + '/' + log_file.split('/')[-1] + '.' + optim_id + '.best.json'
self.log_data = {}
self.t_start = time.time()
self.episodes_so_far = 0
self.timesteps_so_far = 0
self.checkpoint_thetas = None
self.checkpoint_scores = None
self.self_evals = None # Score of current parent theta
self.proposal = None # Score of best transfer
self.proposal_theta = None # Theta of best transfer
self.proposal_source = None # Source of best transfer
self.created_at = created_at
self.start_score = None
self.best_score = None
self.best_theta = None
self.recent_scores = []
self.transfer_target = None
self.pata_ec = None
self.iteration = 0
def __del__(self):
        logger.debug('Optimizer {} cleaning up workers...'.format(
self.optim_id))
def clean_dicts_before_iter(self):
self.log_data.clear()
self.self_evals = None
self.proposal = None
self.proposal_theta = None
self.proposal_source = None
def pick_proposal(self, checkpointing, reset_optimizer):
accept_key = 'accept_theta_in_{}'.format(
self.optim_id)
if checkpointing and self.checkpoint_scores > self.proposal:
self.log_data[accept_key] = 'do_not_consider_CP'
else:
self.log_data[accept_key] = '{}'.format(
self.proposal_source)
if self.optim_id != self.proposal_source:
self.set_theta(
self.proposal_theta,
reset_optimizer=reset_optimizer)
self.self_evals = self.proposal
self.checkpoint_thetas = np.array(self.theta)
self.checkpoint_scores = self.self_evals
if self.best_score < self.self_evals:
self.best_score = self.self_evals
self.best_theta = np.array(self.theta)
def save_to_logger(self, iteration):
self.log_data['time_elapsed_so_far'] = time.time() - self.t_start
self.log_data['iteration'] = iteration
self.data_logger.log(**self.log_data)
logger.debug('iter={} Optimizer {} best score {}'.format(
iteration, self.optim_id, self.best_score))
#if iteration % 100 == 0:
# self.save_policy(self.filename_best+'.arxiv.'+str(iteration))
self.save_policy(self.filename_best)
def save_policy(self, policy_file, reset=False):
if self.best_score is not None and self.best_theta is not None:
with open(policy_file, 'wt') as out:
json.dump([self.best_theta.tolist(), self.best_score], out, sort_keys=True, indent=0, separators=(',', ': '))
if reset:
self.best_score = None
self.best_theta = None
def update_dicts_after_transfer(self, source_optim_id, source_optim_theta, stats, keyword):
eval_key = 'eval_returns_mean_{}_from_others_in_{}'.format(keyword, # noqa
self.optim_id)
if eval_key not in self.log_data.keys():
self.log_data[eval_key] = source_optim_id + '_' + str(stats.eval_returns_mean)
else:
self.log_data[eval_key] += '_' + source_optim_id + '_' + str(stats.eval_returns_mean)
if keyword == 'proposal' and stats.eval_returns_mean > self.transfer_target:
if stats.eval_returns_mean > self.proposal:
self.proposal = stats.eval_returns_mean
self.proposal_source = source_optim_id + ('' if keyword=='theta' else "_proposal")
self.proposal_theta = np.array(source_optim_theta)
return stats.eval_returns_mean > self.transfer_target
def update_dicts_after_es(self, stats, self_eval_stats):
self.self_evals = self_eval_stats.eval_returns_mean
if self.start_score is None:
self.start_score = self.self_evals
self.proposal = self_eval_stats.eval_returns_mean
self.proposal_source = self.optim_id
self.proposal_theta = np.array(self.theta)
if self.checkpoint_scores is None:
self.checkpoint_thetas = np.array(self.theta)
self.checkpoint_scores = self_eval_stats.eval_returns_mean
self.episodes_so_far += stats.episodes_this_step
self.timesteps_so_far += stats.timesteps_this_step
if self.best_score is None or self.best_score < self.self_evals:
self.best_score = self.self_evals
self.best_theta = np.array(self.theta)
assert len(self.recent_scores) <= 5
if len(self.recent_scores) == 5:
self.recent_scores.pop(0)
self.recent_scores.append(self.self_evals)
self.transfer_target = max(self.recent_scores)
self.log_data.update({
'po_returns_mean_{}'.format(self.optim_id):
stats.po_returns_mean,
'po_returns_median_{}'.format(self.optim_id):
stats.po_returns_median,
'po_returns_std_{}'.format(self.optim_id):
stats.po_returns_std,
'po_returns_max_{}'.format(self.optim_id):
stats.po_returns_max,
'po_returns_min_{}'.format(self.optim_id):
stats.po_returns_min,
'po_len_mean_{}'.format(self.optim_id):
stats.po_len_mean,
'po_len_std_{}'.format(self.optim_id):
stats.po_len_std,
'noise_std_{}'.format(self.optim_id):
stats.noise_std,
'learning_rate_{}'.format(self.optim_id):
stats.learning_rate,
'eval_returns_mean_{}'.format(self.optim_id):
self_eval_stats.eval_returns_mean,
'eval_returns_median_{}'.format(self.optim_id):
self_eval_stats.eval_returns_median,
'eval_returns_std_{}'.format(self.optim_id):
self_eval_stats.eval_returns_std,
'eval_len_mean_{}'.format(self.optim_id):
self_eval_stats.eval_len_mean,
'eval_len_std_{}'.format(self.optim_id):
self_eval_stats.eval_len_std,
'eval_n_episodes_{}'.format(self.optim_id):
self_eval_stats.eval_n_episodes,
'theta_norm_{}'.format(self.optim_id):
stats.theta_norm,
'grad_norm_{}'.format(self.optim_id):
stats.grad_norm,
'update_ratio_{}'.format(self.optim_id):
stats.update_ratio,
'episodes_this_step_{}'.format(self.optim_id):
stats.episodes_this_step,
'episodes_so_far_{}'.format(self.optim_id):
self.episodes_so_far,
'timesteps_this_step_{}'.format(self.optim_id):
stats.timesteps_this_step,
'timesteps_so_far_{}'.format(self.optim_id):
self.timesteps_so_far,
'time_elapsed_this_step_{}'.format(self.optim_id):
stats.time_elapsed_this_step + self_eval_stats.time_elapsed,
'accept_theta_in_{}'.format(self.optim_id): 'self'
})
def broadcast_theta(self, theta):
'''On all worker, set thetas[this optimizer] to theta'''
logger.debug('Optimizer {} broadcasting theta...'.format(self.optim_id))
thetas = self.fiber_shared["thetas"]
thetas[self.optim_id] = theta
self.iteration += 1
def add_env(self, env):
'''On all worker, add env_name to niche'''
logger.debug('Optimizer {} add env {}...'.format(self.optim_id, env.name))
        niches = self.fiber_shared["niches"]
        niches[self.optim_id].add_env(env)
def delete_env(self, env_name):
'''On all worker, delete env from niche'''
logger.debug('Optimizer {} delete env {}...'.format(self.optim_id, env_name))
niches = self.fiber_shared["niches"]
niches[self.optim_id].delete_env(env_name)
def start_chunk_fiber(self, runner, batches_per_chunk, batch_size, *args):
logger.debug('Optimizer {} spawning {} batches of size {}'.format(
self.optim_id, batches_per_chunk, batch_size))
rs_seeds = np.random.randint(np.int32(2 ** 31 - 1), size=batches_per_chunk)
chunk_tasks = []
pool = self.fiber_pool
niches = self.fiber_shared["niches"]
thetas = self.fiber_shared["thetas"]
for i in range(batches_per_chunk):
chunk_tasks.append(
pool.apply_async(runner, args=(self.iteration,
self.optim_id, batch_size, rs_seeds[i])+args))
return chunk_tasks
def get_chunk(self, tasks):
return [task.get() for task in tasks]
def collect_po_results(self, po_results):
noise_inds = np.concatenate([r.noise_inds for r in po_results])
returns = np.concatenate([r.returns for r in po_results])
lengths = np.concatenate([r.lengths for r in po_results])
return noise_inds, returns, lengths
def collect_eval_results(self, eval_results):
eval_returns = np.concatenate([r.returns for r in eval_results])
eval_lengths = np.concatenate([r.lengths for r in eval_results])
return eval_returns, eval_lengths
def compute_grads(self, step_results, theta):
noise_inds, returns, _ = self.collect_po_results(step_results)
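        # returns[:, 0] / returns[:, 1] hold rewards for the +noise / -noise (antithetic) rollouts;
        # track the single best-performing perturbed theta for logging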
pos_row, neg_row = returns.argmax(axis=0)
noise_sign = 1.0
po_noise_ind_max = noise_inds[pos_row]
if returns[pos_row, 0] < returns[neg_row, 1]:
noise_sign = -1.0
po_noise_ind_max = noise_inds[neg_row]
po_theta_max = theta + noise_sign * self.noise_std * noise.get(po_noise_ind_max, len(theta))
if self.returns_normalization == 'centered_ranks':
proc_returns = compute_centered_ranks(returns)
elif self.returns_normalization == 'normal':
proc_returns = (returns - returns.mean()) / (returns.std() + 1e-5)
else:
raise NotImplementedError(
'Invalid return normalization `{}`'.format(
self.returns_normalization))
grads, _ = batched_weighted_sum(
proc_returns[:, 0] - proc_returns[:, 1],
(noise.get(idx, len(theta)) for idx in noise_inds),
batch_size=500)
grads /= len(returns)
if self.normalize_grads_by_noise_std:
grads /= self.noise_std
return grads, po_theta_max
def set_theta(self, theta, reset_optimizer=True):
self.theta = np.array(theta)
if reset_optimizer:
self.optimizer.reset()
self.noise_std = self.init_noise_std
def start_theta_eval(self, theta):
'''eval theta in this optimizer's niche'''
step_t_start = time.time()
self.broadcast_theta(theta)
eval_tasks = self.start_chunk_fiber(
run_eval_batch_fiber, self.eval_batches_per_step, self.eval_batch_size)
return eval_tasks, theta, step_t_start
def get_theta_eval(self, res):
eval_tasks, theta, step_t_start = res
eval_results = self.get_chunk(eval_tasks)
eval_returns, eval_lengths = self.collect_eval_results(eval_results)
step_t_end = time.time()
logger.debug(
'get_theta_eval {} finished running {} episodes, {} timesteps'.format(
self.optim_id, len(eval_returns), eval_lengths.sum()))
return EvalStats(
eval_returns_mean=eval_returns.mean(),
eval_returns_median=np.median(eval_returns),
eval_returns_std=eval_returns.std(),
eval_len_mean=eval_lengths.mean(),
eval_len_std=eval_lengths.std(),
eval_n_episodes=len(eval_returns),
time_elapsed=step_t_end - step_t_start,
)
def start_step(self, theta=None):
''' based on theta (if none, this optimizer's theta)
generate the P.O. cloud, and eval them in this optimizer's niche
'''
step_t_start = time.time()
if theta is None:
theta = self.theta
self.broadcast_theta(theta)
step_results = self.start_chunk_fiber(
run_po_batch_fiber,
self.batches_per_chunk,
self.batch_size,
self.noise_std)
return step_results, theta, step_t_start
def get_step(self, res, propose_with_adam=True, decay_noise=True, propose_only=False):
step_tasks, theta, step_t_start = res
step_results = self.get_chunk(step_tasks)
_, po_returns, po_lengths = self.collect_po_results(
step_results)
episodes_this_step = len(po_returns)
timesteps_this_step = po_lengths.sum()
logger.debug(
'Optimizer {} finished running {} episodes, {} timesteps'.format(
self.optim_id, episodes_this_step, timesteps_this_step))
grads, po_theta_max = self.compute_grads(step_results, theta)
if not propose_only:
update_ratio, theta = self.optimizer.update(
theta, -grads + self.l2_coeff * theta)
self.optimizer.stepsize = max(
self.optimizer.stepsize * self.lr_decay, self.lr_limit)
if decay_noise:
self.noise_std = max(
self.noise_std * self.noise_decay, self.noise_limit)
else: #only make proposal
if propose_with_adam:
update_ratio, theta = self.optimizer.propose(
theta, -grads + self.l2_coeff * theta)
else:
update_ratio, theta = self.sgd_optimizer.compute(
theta, -grads + self.l2_coeff * theta) # keeps no state
logger.debug(
'Optimizer {} finished computing gradients'.format(
self.optim_id))
step_t_end = time.time()
return theta, StepStats(
po_returns_mean=po_returns.mean(),
po_returns_median=np.median(po_returns),
po_returns_std=po_returns.std(),
po_returns_max=po_returns.max(),
po_theta_max=po_theta_max,
po_returns_min=po_returns.min(),
po_len_mean=po_lengths.mean(),
po_len_std=po_lengths.std(),
noise_std=self.noise_std,
learning_rate=self.optimizer.stepsize,
theta_norm=np.square(theta).sum(),
grad_norm=float(np.square(grads).sum()),
update_ratio=float(update_ratio),
episodes_this_step=episodes_this_step,
timesteps_this_step=timesteps_this_step,
time_elapsed_this_step=step_t_end - step_t_start,
)
def evaluate_theta(self, theta):
self_eval_task = self.start_theta_eval(theta)
self_eval_stats = self.get_theta_eval(self_eval_task)
return self_eval_stats.eval_returns_mean
def update_pata_ec(self, archived_optimizers, optimizers, lower_bound, upper_bound):
def cap_score(score, lower, upper):
if score < lower:
score = lower
elif score > upper:
score = upper
return score
raw_scores = []
for source_optim in archived_optimizers.values():
raw_scores.append(cap_score(self.evaluate_theta(source_optim.theta), lower_bound, upper_bound))
for source_optim in optimizers.values():
raw_scores.append(cap_score(self.evaluate_theta(source_optim.theta), lower_bound, upper_bound))
self.pata_ec = compute_centered_ranks(np.array(raw_scores))
def evaluate_transfer(self, optimizers, evaluate_proposal=True, propose_with_adam=False):
best_init_score = None
best_init_theta = None
for source_optim in optimizers.values():
score = self.evaluate_theta(source_optim.theta)
if best_init_score == None or score > best_init_score:
best_init_score = score
best_init_theta = np.array(source_optim.theta)
if evaluate_proposal:
task = self.start_step(source_optim.theta)
proposed_theta, _ = self.get_step(
task, propose_with_adam=propose_with_adam, propose_only=True)
score = self.evaluate_theta(proposed_theta)
if score > best_init_score:
best_init_score = score
best_init_theta = np.array(proposed_theta)
return best_init_score, best_init_theta
|
py | b4087691f5fcb579a5d81e0acbd84d1fdc5b5f04 | # Generated by Django 4.0.1 on 2022-01-21 10:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('dashboard', '0002_alter_profile_user'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='user',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
py | b4087880d80932c6c12e9aedecf492db6061b031 | from binance_d import RequestClient
from binance_d.constant.test import *
from binance_d.base.printobject import *
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
result = request_client.get_ticker_price_change_statistics()
PrintMix.print_data(result)
|
py | b4087991a3888d1e6320e4d92bca4d4d488ae65f | import numpy as np
import numpy
import matplotlib.pyplot as plt
from time import time
def autocorr1(x,lags):
'''numpy.corrcoef, partial'''
corr=[1. if l==0 else numpy.corrcoef(x[l:],x[:-l])[0][1] for l in lags]
return numpy.array(corr)
def autocorr2(x,lags):
    '''manually compute, non-partial'''
mean=numpy.mean(x)
var=numpy.var(x)
xp=x-mean
corr=[1. if l==0 else numpy.sum(xp[l:]*xp[:-l])/len(x)/var for l in lags]
return numpy.array(corr)
def autocorr3(x,lags):
'''fft, pad 0s, non partial'''
n=len(x)
# pad 0s to 2n-1
ext_size=2*n-1
# nearest power of 2
fsize=2**numpy.ceil(numpy.log2(ext_size)).astype('int')
xp=x-numpy.mean(x)
var=numpy.var(x)
# do fft and ifft
cf=numpy.fft.fft(xp,fsize)
sf=cf.conjugate()*cf
corr=numpy.fft.ifft(sf).real
corr=corr/var/n
return corr[:len(lags)]
def autocorr4(x,lags):
'''fft, don't pad 0s, non partial'''
mean=x.mean()
var=numpy.var(x)
xp=x-mean
cf=numpy.fft.fft(xp)
sf=cf.conjugate()*cf
corr=numpy.fft.ifft(sf).real/var/len(x)
return corr[:len(lags)]
def autocorr5(x,lags):
'''numpy.correlate, non partial'''
mean=x.mean()
var=numpy.var(x)
xp=x-mean
corr=numpy.correlate(xp,xp,'full')[len(x)-1:]/var/len(x)
return corr[:len(lags)]
def autocorr6(x, K):
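    # Wiener-Khinchin via real FFT; the mean is not subtracted, so this effectively assumes a zero-mean signal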
freqs = np.fft.rfft(x)
return np.fft.irfft(freqs * np.conj(freqs))[:len(K)]/np.var(x) / len(x)
def autocorr7(x, K):
result = np.correlate(x, x, mode='full')
return result[len(x)-1:len(x)-1+len(K)] / np.var(x) / len(x)
if __name__=='__main__':
y = np.random.rand(200) - 0.5
lags=range(50)
fig,ax=plt.subplots()
for funcii, labelii in zip([autocorr1, autocorr2, autocorr3, autocorr4,
autocorr5, autocorr6, autocorr7], ['np.corrcoef, partial', 'manual, non-partial',
'fft, pad 0s, non-partial', 'fft, no padding, non-partial',
'np.correlate, non-partial', 'trying FFT', 'np full corr']):
start = time()
cii=funcii(y,lags)
duration = time() - start
print(labelii + f" {duration}sec")
print(cii)
ax.plot(lags,cii,label=labelii)
ax.set_xlabel('lag')
ax.set_ylabel('correlation coefficient')
ax.legend()
    plt.show()
|
py | b4087a06c15fed811ace9b10f9dd3385141714ad | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @return {TreeNode}
def invertTree(self, root):
if not root:
return root
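        # iterative BFS: visit each node and swap its left/right children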
nodes = [root]
while nodes:
node = nodes.pop(0)
if node.left or node.right:
node.left, node.right = node.right, node.left
if node.left:
nodes.append(node.left)
if node.right:
nodes.append(node.right)
return root
|
py | b4087ad8498ee1fef11d2a03d1e5e433f17e09c4 | import numpy as np
import spacy.cli.download as spacy_download
import typer
from spacy import util
from tqdm import tqdm
import settings
from nanotoms import data as dm
from nanotoms import etl
from nanotoms import features as fm
from nanotoms import generate as gm
from nanotoms import search as sm
from nanotoms import train as tm
from nanotoms import visualize as vm
app = typer.Typer()
@app.command()
def prepare(datadir: str = settings.DATA_DIR.name, scrape: bool = False):
"""
Download, generate, and process data.
:param datadir: Path to the data directory
:param scrape: Download data from the URLs in the data?
"""
with tqdm(total=4, desc="Preparing data...") as progress:
data = dm.get_raw_data(datadir)
progress.update(1)
data = etl.clean(data)
data.to_csv(dm.get_clean_data_path(datadir), index=False)
progress.update(1)
filepath = dm.get_scraped_data_path(datadir)
if scrape or not filepath.is_file():
scraped_data = etl.scrape(data, warn)
dm.dump_data(filepath, scraped_data)
else:
scraped_data = dm.load_data(filepath)
progress.update(1)
extracted = etl.extract(
data,
scraped_data,
settings.SCRAPE_DOMAINS,
settings.SCRAPE_TEXT_CLEAN_PATTERN,
warn,
)
extracted.to_csv(dm.get_extracted_data_path(datadir), index=False)
progress.update(1)
def error(msg: str):
typer.echo()
typer.secho(f"Error: {msg}", fg=typer.colors.RED)
raise typer.Abort()
def warn(msg: str):
typer.echo()
typer.secho(f"Warn: {msg}", fg=typer.colors.YELLOW)
@app.command()
def transform(
datadir: str = settings.DATA_DIR.name,
language_model: str = settings.SPACY_LANGUAGE_MODEL,
stop_words: list[str] = settings.SPACY_EXTRA_STOP_WORDS,
entity_labels: list[str] = settings.SPACY_ENTITY_TYPES,
):
"""
Transform the data. Add new features and apply named entity recognition.
    :param datadir: Path to the data directory
    :param language_model: Name of the spacy language model to use
"""
if language_model not in util.get_installed_models():
warn(f"The spacy language model {language_model} is not installed")
download = typer.confirm("Would you like to download the model?")
if not download:
error("Language model not available")
spacy(language_model)
with tqdm(total=7, desc="Adding features to data...") as progress:
cleaned_df = dm.get_clean_data(datadir)
progress.update(1)
extracted_df = dm.get_extracted_data(datadir)
progress.update(1)
data = fm.add_features(
cleaned_df,
extracted_df,
language_model,
stop_words,
entity_labels,
)
data.to_csv(dm.get_transformed_data_path(datadir), index=False)
progress.update(1)
descriptions = fm.get_descriptions(data)
with open(dm.get_descriptions_data_path(datadir), "w") as f:
f.writelines(descriptions)
text_corpus = fm.text_corpus(data["lemmas"])
dm.dump_data(dm.get_text_corpus_path(datadir), text_corpus)
progress.update(1)
dict_corpus = fm.corpus_to_dict(
text_corpus, no_below=settings.MINIMUM_NUMBER_OF_DOCS_WITH_TERM
)
dict_corpus.save_as_text(dm.get_dict_corpus_path(datadir))
progress.update(1)
bow_corpus = fm.bow_corpus(text_corpus, dict_corpus)
dm.dump_data(dm.get_bow_corpus_path(datadir), bow_corpus)
progress.update(1)
inventory_data_path = dm.get_raw_inventory_data_path(datadir)
if inventory_data_path.is_file():
inventories = fm.get_inventories(inventory_data_path.as_posix())
inventory_data_path = dm.get_inventory_data_path(datadir)
with open(inventory_data_path, "w") as f:
f.writelines(inventories)
progress.update(1)
@app.command()
def spacy(name: str = settings.SPACY_LANGUAGE_MODEL):
"""
Download spacy language model.
:name: Language model name
"""
spacy_download(name)
@app.command()
def train(
datadir: str = settings.DATA_DIR.name,
number_of_topics: int = settings.NUMBER_OF_TOPICS,
passes: int = settings.NUMBER_OF_PASSES,
minimum_probability: float = settings.TOPICS_MINIMUM_PROBABILITY,
multicore: bool = True,
show: bool = typer.Option(
...,
prompt="Print topics",
confirmation_prompt=True,
),
):
"""
Train topic model, and assign topics to the data.
:param datadir: Path to the data directory
:param number_of_topics: Number of topics to be extracted from the data
:param show: Print the extracted topics?
"""
typer.echo()
with tqdm(total=6, desc="Training...") as progress:
data = dm.get_transformed_data(datadir)
progress.update(1)
bow_corpus = dm.get_bow_corpus(datadir)
progress.update(1)
dict_corpus = dm.get_dict_corpus(datadir)
progress.update(1)
text_corpus = dm.get_text_corpus(datadir)
progress.update(1)
model = tm.model(
bow_corpus,
dict_corpus,
passes=passes,
num_topics=number_of_topics,
minimum_probability=minimum_probability,
multicore=multicore,
)
model.save(dm.get_model_path(datadir, f"{number_of_topics}").as_posix())
progress.update(1)
data = tm.add_topics_to_documents(model, bow_corpus, data, number_of_topics)
data.to_csv(
dm.get_final_data_path(datadir, f"{number_of_topics}"),
index=False,
)
progress.update(1)
score = tm.coherence_score(
model,
text_corpus,
dict_corpus,
settings.COHERENCE_MEASURE,
)
progress.update(1)
typer.echo()
typer.echo(f"Trained model {settings.COHERENCE_MEASURE} score: {score}")
if show:
vm.print_topics(model, n=number_of_topics, writer=typer.echo)
@app.command()
def tune(
datadir: str = settings.DATA_DIR.name,
min_number_of_topics: int = settings.MIN_NUMBER_OF_TOPICS,
max_number_of_topics: int = settings.MAX_NUMBER_OF_TOPICS,
passes: int = settings.NUMBER_OF_PASSES,
minimum_probability: float = settings.TOPICS_MINIMUM_PROBABILITY,
multicore: bool = True,
show: bool = typer.Option(
...,
prompt="Print topics",
confirmation_prompt=True,
),
):
"""
Train topic model, and assign topics to the data. Iterate over different settings
to try to find a model with the best score. This can take a long time to run
depending on the minimum and maximum number of topics chosen.
:param datadir: Path to the data directory
    :param min_number_of_topics: Lowest number of topics to try
    :param max_number_of_topics: Highest number of topics to try (exclusive)
:param show: Print the extracted topics?
"""
data = dm.get_transformed_data(datadir)
bow_corpus = dm.get_bow_corpus(datadir)
dict_corpus = dm.get_dict_corpus(datadir)
text_corpus = dm.get_text_corpus(datadir)
num_topics_range = range(min_number_of_topics, max_number_of_topics, 1)
alphas = list(np.arange(0.01, 1, 0.3))
alphas.append("symmetric")
alphas.append("asymmetric")
etas = list(np.arange(0.01, 1, 0.3))
etas.append("symmetric")
max_score = 0
model = None
parameters = {}
    for num_topics in tqdm(num_topics_range, desc="Tuning model..."):
for alpha in tqdm(alphas, desc=f"n:{num_topics}, alpha"):
for eta in tqdm(etas, desc=f"n:{num_topics}, alpha:{alpha}, eta"):
trained_model = tm.model(
bow_corpus,
dict_corpus,
passes=passes,
num_topics=num_topics,
alpha=alpha,
eta=eta,
minimum_probability=minimum_probability,
multicore=multicore,
)
score = tm.coherence_score(
trained_model,
text_corpus,
dict_corpus,
settings.COHERENCE_MEASURE,
)
if score > max_score:
max_score = score
model = trained_model
parameters["topics"] = num_topics
parameters["alpha"] = alpha
parameters["eta"] = eta
typer.echo()
typer.echo(f"max score: {max_score}")
for k, v in parameters.items():
typer.echo(f"{k}: {v}")
if model:
model.save(
dm.get_model_path(datadir, f"tunned_{parameters['topics']}").as_posix()
)
data = tm.add_topics_to_documents(model, bow_corpus, data, parameters["topics"])
data.to_csv(
dm.get_final_data_path(datadir, f"tunned_{parameters['topics']}"),
index=False,
)
if show:
vm.print_topics(model, n=parameters["topics"], writer=typer.echo)
@app.command()
def generate(
prompt: str,
do_sample: bool = True,
early_stopping: bool = False,
no_repeat_ngram_size: int = 2,
max_length: int = 100,
temperature: float = 0.7,
top_k: int = 50,
):
"""
Generate text based on the given prompt.
:param prompt: Prompt to generate text for
:param do_sample: Choose words based on their conditional probability?
:param early_stopping: Stop at last full sentence (if possible)?
:param no_repeat_ngram_size: N-gram size that can't occur more than once
:param max_length: Maximum length of the generated text
    :param temperature: How sensitive the algorithm is to selecting less common
        options for the generated text
:param top_k: How many potential outcomes are considered before generating the text
"""
try:
model = gm.get_model(settings.TEXT_GENERATOR_MODEL_PATH)
tokenizer = gm.get_tokenizer(settings.TEXT_GENERATOR_MODEL_PATH)
generate = True
text = prompt
while generate:
text = gm.generate(
model,
tokenizer,
text,
dict(
do_sample=do_sample,
early_stopping=early_stopping,
no_repeat_ngram_size=no_repeat_ngram_size,
max_length=max_length,
temperature=temperature,
top_k=top_k,
),
)
typer.echo(text)
generate = typer.confirm("Continue generating text?")
except Exception as e:
error(f"Error loading text generator model: {e}")
@app.command()
def index(
datadir: str = settings.DATA_DIR.name,
):
"""
Index the data for semantic search.
:param datadir: Path to the data directory
"""
with tqdm(total=3, desc="Indexing data...") as progress:
data = dm.get_transformed_data(datadir)
progress.update(1)
embeddings = sm.index(data)
progress.update(1)
embeddings.save(dm.get_embeddings_path(datadir).as_posix())
progress.update(1)
@app.command()
def search(
datadir: str = settings.DATA_DIR.name,
query: str = "",
limit: int = 5,
):
"""
Find objects in the data using a semantic search, finds by meaning as well as by
keyword.
:param datadir: Path to the data directory
:param query: The query to search for
:param limit: Maximum number of results to return
"""
with tqdm(total=3, desc=f"Searching for {query}...") as progress:
data = dm.get_transformed_data(datadir)
progress.update(1)
embeddings = sm.get_embeddings(dm.get_embeddings_path(datadir).as_posix())
progress.update(1)
found = sm.search(data, embeddings, query, limit)
progress.update(1)
typer.echo(found)
if __name__ == "__main__":
app()
|
py | b4087b113aecd0d4dc85a486d5da4404eecce730 |
import numpy as np
import multiprocessing as mp
from rlpyt.utils.buffer import np_mp_array
class SumTree:
"""
Sum tree for matrix of values stored as [T,B], updated in chunks along T
dimension, applying to the full B dimension at each update. Priorities
represented as first T*B leaves of binary tree. Turns on/off entries in
vicinity of cursor position according to "off_backward" (e.g.
n_step_return) and "off_forward" (e.g. 1 for
prev_action or max(1, frames-1) for frame-wise buffer).
Provides efficient sampling from non-uniform probability masses.
NOTE:
Tried single precision (float32) tree, and it sometimes returned
samples with priority 0.0, because subtraction during tree cascade
left random value larger than the remaining sum; suggest keeping
float64.
"""
async_ = False
def __init__(self, T, B, off_backward, off_forward,
default_value=1,
enable_input_priorities=False,
input_priority_shift=0, # Does not apply to update_batch_pri.
):
self.T = T
self.B = B
self.size = T * B
self.off_backward = off_backward
self.off_forward = off_forward
self.default_value = default_value
self.input_priority_shift = input_priority_shift # (See self.sample()).
self.tree_levels = int(np.ceil(np.log2(self.size + 1)) + 1)
self._allocate_tree()
self.low_idx = 2 ** (self.tree_levels - 1) - 1 # pri_idx + low_idx -> tree_idx
self.high_idx = self.size + self.low_idx
self.priorities = self.tree[self.low_idx:self.high_idx].reshape(T, B)
if enable_input_priorities:
self.input_priorities = default_value * np.ones((T, B))
else:
self.input_priorities = None # Save memory.
self.reset()
def _allocate_tree(self):
self.tree = np.zeros(2 ** self.tree_levels - 1) # Double precision.
def reset(self):
self.tree.fill(0)
self.t = 0
self._initial_wrap_guard = True
if self.input_priorities is not None:
self.input_priorities[:] = self.default_value
def advance(self, T, priorities=None):
"""Cursor advances by T: set priorities to zero in vicinity of new
cursor position and turn priorities on for new samples since previous
cursor position.
Optional param ``priorities`` can be None for default, or of
dimensions [T, B], or [B] or scalar will broadcast. (Must have enabled
``input_priorities=True`` when instantiating the tree.) These will be
stored at the current cursor position, meaning these priorities
correspond to the current values being added to the buffer, even
though their priority might temporarily be set to zero until future
advances.
"""
if T == 0:
return
t, b, f = self.t, self.off_backward, self.off_forward
low_on_t = (t - b) % self.T # inclusive range: [0, self.T-1]
high_on_t = ((t + T - b - 1) % self.T) + 1 # inclusive: [1, self.T]
low_off_t = (t + T - b) % self.T
high_off_t = ((t + T + f - 1) % self.T) + 1
if self._initial_wrap_guard:
low_on_t = max(f, t - b) # Don't wrap back to end, and off_forward.
high_on_t = low_off_t = max(low_on_t, t + T - b)
if t + T - b >= f: # Next low_on_t >= f.
self._initial_wrap_guard = False
if priorities is not None:
assert self.input_priorities is not None, "Must enable input priorities."
# e.g. Use input_priority_shift = warmup_T // rnn_state_interval
# to make the fresh priority at t be the one input with the later
# samples at t + shift, which would be the start of training
# (priorities are aligned with start of warmup sequence).
input_t = t - self.input_priority_shift
if input_t < 0 or input_t + T > self.T: # Wrap (even at very first).
idxs = np.arange(input_t, input_t + T) % self.T
else:
idxs = slice(input_t, input_t + T)
self.input_priorities[idxs] = priorities
if self._initial_wrap_guard and input_t < 0:
self.input_priorities[input_t:] = self.default_value # Restore.
self.reconstruct_advance(low_on_t, high_on_t, low_off_t, high_off_t)
self.t = (t + T) % self.T
def sample(self, n, unique=False):
"""Get `n` samples, with replacement (default) or without. Use
``np.random.rand()`` to generate random values with which to descend
the tree to each sampled leaf node. Returns `T_idxs` and `B_idxs`, and sample
priorities."""
self._sampled_unique = unique
random_values = np.random.rand(int(n * 1 if unique else n))
tree_idxs, scaled_random_values = self.find(random_values)
if unique:
i = 0
while i < 100:
tree_idxs, unique_idx = np.unique(tree_idxs, return_index=True)
scaled_random_values = scaled_random_values[unique_idx]
if len(tree_idxs) < n:
new_idxs, new_values = self.find(np.random.rand(2 * (n - len(tree_idxs))))
tree_idxs = np.concatenate([tree_idxs, new_idxs])
scaled_random_values = np.concatenate([scaled_random_values, new_values])
else:
break
i += 1
if len(tree_idxs) < n:
raise RuntimeError("After 100 tries, unable to get unique indexes.")
tree_idxs = tree_idxs[:n]
priorities = self.tree[tree_idxs]
self.prev_tree_idxs = tree_idxs
T_idxs, B_idxs = np.divmod(tree_idxs - self.low_idx, self.B)
return (T_idxs, B_idxs), priorities
def update_batch_priorities(self, priorities):
"""Apply new priorities to tree at the leaf positions where the last
batch was returned from the ``sample()`` method.
"""
if not self._sampled_unique: # Must remove duplicates
self.prev_tree_idxs, unique_idxs = np.unique(self.prev_tree_idxs,
return_index=True)
priorities = priorities[unique_idxs]
self.reconstruct(self.prev_tree_idxs, priorities)
def print_tree(self, level=None):
"""Print values for whole tree or at specified level."""
levels = range(self.tree_levels) if level is None else [level]
for k in levels:
for j in range(2 ** k - 1, 2 ** (k + 1) - 1):
print(self.tree[j], end=' ')
print()
# Helpers.
def reconstruct(self, tree_idxs, values):
diffs = values - self.tree[tree_idxs] # Numpy upcasts to float64.
self.tree[tree_idxs] = values
self.propagate_diffs(tree_idxs, diffs, min_level=1)
def reconstruct_advance(self, low_on_t, high_on_t, low_off_t, high_off_t):
"""Efficiently write new values / zeros into tree."""
low_on_idx = low_on_t * self.B + self.low_idx
high_on_idx = high_on_t * self.B + self.low_idx
low_off_idx = low_off_t * self.B + self.low_idx
high_off_idx = high_off_t * self.B + self.low_idx
idxs, diffs = list(), list()
if high_on_t > low_on_t:
if self.input_priorities is None:
input_priorities = self.default_value
else:
input_priorities = self.input_priorities[low_on_t:high_on_t]
diffs.append(input_priorities - self.priorities[low_on_t:high_on_t])
self.priorities[low_on_t:high_on_t] = input_priorities
idxs.append(np.arange(low_on_idx, high_on_idx))
elif high_on_t < low_on_t: # Wrap
if self.input_priorities is None:
diffs.append(self.default_value - np.concatenate([
self.priorities[low_on_t:], self.priorities[:high_on_t]],
axis=0))
self.priorities[low_on_t:] = self.default_value
self.priorities[:high_on_t] = self.default_value
else:
diffs.append(
np.concatenate(
[self.input_priorities[low_on_t:],
self.input_priorities[:high_on_t]], axis=0) -
np.concatenate(
[self.priorities[low_on_t:],
self.priorities[:high_on_t]], axis=0)
)
self.priorities[low_on_t:] = self.input_priorities[low_on_t:]
self.priorities[:high_on_t] = self.input_priorities[:high_on_t]
idxs.extend([np.arange(low_on_idx, self.high_idx),
np.arange(self.low_idx, high_on_idx)])
if high_off_t > low_off_t:
diffs.append(-self.priorities[low_off_t:high_off_t])
self.priorities[low_off_t:high_off_t] = 0
idxs.append(np.arange(low_off_idx, high_off_idx))
else: # Wrap.
diffs.extend([-self.priorities[low_off_t:],
-self.priorities[:high_off_t]])
self.priorities[low_off_t:] = 0
self.priorities[:high_off_t] = 0
idxs.extend([np.arange(low_off_idx, self.high_idx),
np.arange(self.low_idx, high_off_idx)])
if diffs:
diffs = np.concatenate(diffs).reshape(-1)
idxs = np.concatenate(idxs)
self.propagate_diffs(idxs, diffs, min_level=1)
def propagate_diffs(self, tree_idxs, diffs, min_level=1):
for _ in range(min_level, self.tree_levels):
tree_idxs = (tree_idxs - 1) // 2 # Rise a level
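            # np.add.at accumulates correctly when tree_idxs contains duplicates
            # (several leaves sharing one parent), unlike `self.tree[idxs] += diffs`.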
np.add.at(self.tree, tree_idxs, diffs)
def find(self, random_values):
"""Param random_values: numpy array of floats in range [0, 1] """
random_values = self.tree[0] * random_values # Double precision.
scaled_random_values = random_values.copy()
tree_idxs = np.zeros(len(random_values), dtype=np.int64)
for _ in range(self.tree_levels - 1):
tree_idxs = 2 * tree_idxs + 1
left_values = self.tree[tree_idxs]
where_right = np.where(random_values > left_values)[0]
tree_idxs[where_right] += 1
random_values[where_right] -= left_values[where_right]
return tree_idxs, scaled_random_values
class AsyncSumTree(SumTree):
"""Allocates the tree into shared memory, and manages asynchronous cursor
position, for different read and write processes. Assumes that writing to
tree values is lock protected elsewhere, i.e. by the replay buffer.
"""
async_ = True
def __init__(self, *args, **kwargs):
self.async_t = mp.RawValue("l", 0)
super().__init__(*args, **kwargs)
# Wrap guard behavior should be fine without async--each will catch it.
def _allocate_tree(self):
self.tree = np_mp_array(2 ** self.tree_levels - 1, np.float64) # Shared memory.
self.tree.fill(0) # Just in case.
def reset(self):
super().reset()
self.async_t.value = 0
def advance(self, *args, **kwargs):
self.t = self.async_t.value
super().advance(*args, **kwargs)
self.async_t.value = self.t
|
py | b4087b5c510a49fec68c5eef14cd5da072135bfd | """
This file implements the Tensorflow framework of the
simulator. Its main use is in conjunction with the :py:mod:`optimizer`
module; example programs are listed in the :py:mod:`simulator` module.
"""
import numpy as np
import tensorflow as tf
import qtree.operators as ops
import qtree.optimizer as opt
import qtree.utils as utils
import qtree.system_defs as defs
def get_sliced_tf_buckets(buckets, slice_dict):
"""
Takes buckets and returns their Tensorflow counterparts, where
all data attributes of tensors are filled with Tensorflow
placeholders.
Parameters
----------
buckets : list of list
        buckets as returned by :py:meth:`read_buckets`
and :py:meth:`reorder_buckets`.
slice_dict : dict
dictionary of {variable : slice} pairs
Returns
-------
tf_buckets : list of lists
Buckets having Tensorflow tensors in place of Tensor.data
attribute
placeholder_dict: dict
dictionary of the form {placeholder: data_key}.
"""
# import pdb
# pdb.set_trace()
placeholder_dict = {}
# Create tf buckets from buckets
tf_buckets = []
for bucket in buckets:
tf_bucket = []
for tensor in bucket:
# Save the reference to placeholder in the dictionary
placeholder = tf.stop_gradient(
tf.placeholder(defs.TF_ARRAY_TYPE,
tensor.shape, name=tensor.name)
)
placeholder_dict[placeholder] = tensor.data_key
# sort tensor dimensions
transpose_order = np.argsort(list(map(int, tensor.indices)))
data = tf.transpose(placeholder, transpose_order)
# transpose indices
indices_sorted = [tensor.indices[pp] for pp
in transpose_order]
# slice tensor
slice_bounds = []
indices_sliced = []
for idx in indices_sorted:
if idx in slice_dict:
# insert slice variables into the placeholder dict
slice_start = tf.stop_gradient(
tf.placeholder(
tf.int32,
name=idx.name + '_start')
)
slice_stop = tf.stop_gradient(
tf.placeholder(
tf.int32,
name=idx.name + '_stop')
)
placeholder_dict[slice_start] = (idx, 'start')
placeholder_dict[slice_stop] = (idx, 'stop')
slice_bounds.append(slice(slice_start, slice_stop))
# update the size of tensor variables
indices_sliced.append(idx.copy(
size=slice_dict[idx].stop-slice_dict[idx].start))
else:
slice_bounds.append(slice(None))
indices_sliced.append(idx)
data = data[tuple(slice_bounds)]
# Create new tensor with a placeholder for data
new_tensor = tensor.copy(
indices=indices_sliced,
data=data)
tf_bucket.append(new_tensor)
tf_buckets.append(tf_bucket)
return tf_buckets, placeholder_dict
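# Illustrative usage sketch (not part of the original file; how `buckets`,
# `slice_dict` and `data_dict` are built from the circuit is assumed):
#   tf_buckets, placeholder_dict = get_sliced_tf_buckets(buckets, slice_dict)
#   feed_dict = assign_tensor_placeholders(placeholder_dict, data_dict)
#   result = run_tf_session(some_tf_tensor, feed_dict)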
def assign_tensor_placeholders(placeholder_dict, data_dict):
"""
Builds feed dictionary for Tensorflow from the placeholder
dictionary, which holds placeholders of all gates in the circuit,
and a global data dictionary.
Parameters
----------
placeholder_dict : dict
Dictionary of {tensorflow.placeholder : data_key} pairs
data_dict : dict
Dictionary of {data_key : np.array} pairs
Returns
-------
feed_dict : dict
Dictionary to feed in Tensorflow session
"""
feed_dict = {}
# Try to fill all fixed gates placeholders
for placeholder, data_key in placeholder_dict.items():
try:
feed_dict[placeholder] = data_dict[data_key]
except KeyError:
pass
return feed_dict
def assign_variable_placeholders(placeholder_dict, slice_dict):
"""
Builds feed dictionary for Tensorflow from the placeholder
dictionary which holds information about variables
and variable slice information
Parameters
----------
placeholder_dict : dict
Dictionary of {tensorflow.placeholder : data_key} pairs
slice_dict : dict
Dictionary of {variable : slice} pairs
Returns
-------
feed_dict : dict
Dictionary to feed in Tensorflow session
"""
feed_dict = {}
# Try to fill all variables with placeholders
for placeholder, data_key in placeholder_dict.items():
var, slice_end = data_key
try:
feed_dict[placeholder] = getattr(slice_dict[var], slice_end)
except KeyError:
pass
return feed_dict
def slice_tf_buckets(tf_buckets, old_pdict, idx_parallel):
"""
Takes (symbolic) slices of the Tensorflow buckets
over the variables in idx_parallel. Updates the placeholder
dictionary.
Parameters
----------
tf_buckets : list of lists
Buckets containing Tensorflow tensors and variables
old_pdict : dict
Placeholder dictionary
idx_parallel : list
Indices to parallelize over
Returns
-------
sliced_buckets : list of lists
buckets with (symbolically) sliced gates
pdict : dict
updated placeholder dictionary
"""
# import pdb
# pdb.set_trace()
pdict = {key: val for key, val in old_pdict.items()}
# Define slice variables
slice_var_dict = {'q_{}'.format(var):
tf.stop_gradient(
tf.placeholder(dtype=tf.int32,
shape=[],
name='q_{}'.format(var))
)
for var in idx_parallel}
pdict.update(slice_var_dict)
# Create tf buckets from unordered buckets
sliced_buckets = []
for bucket in tf_buckets:
sliced_bucket = []
for tensor, variables in bucket:
slice_bounds = []
new_shape = []
for var in variables:
if var in idx_parallel:
slice_bounds.append(slice_var_dict[f'q_{var}'])
new_shape.append(1)
else:
slice_bounds.append(slice(None))
new_shape.append(2)
sliced_bucket.append(
(tf.reshape(tensor[tuple(slice_bounds)], new_shape),
variables)
)
sliced_buckets.append(sliced_bucket)
return sliced_buckets, pdict
def run_tf_session(tf_variable, feed_dict):
"""
Run Tensorflow session and get variable value
Parameters
----------
tf_variable : tensorflow.Tensor
variable to evaluate
feed_dict : dict
dictionary with placeholder values
Returns
-------
res : numpy.array
result of the calculation
"""
# Configure tensorflow for single threaded execution
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1
)
with tf.Session(config=session_conf) as sess:
res = sess.run(tf_variable, feed_dict=feed_dict)
return res
def process_bucket_tf(bucket):
"""
Process bucket in the bucket elimination algorithm.
We multiply all tensors in the bucket and sum over the
variable which the bucket corresponds to. This way the
variable of the bucket is removed from the expression.
Parameters
----------
bucket : list
List containing tuples of tensors (gates) with their indices.
Returns
-------
tensor : optimizer.Tensor
wrapper tensor object holding the resulting computational graph
"""
result_data = bucket[0].data
result_indices = bucket[0].indices
for tensor in bucket[1:]:
expr = utils.get_einsum_expr(list(map(int, result_indices)),
list(map(int, tensor.indices)))
result_data = tf.einsum(expr, result_data, tensor.data)
# Merge and sort indices and shapes
result_indices = tuple(sorted(
set(result_indices + tensor.indices),
key=int))
if len(result_indices) > 0:
first_index, *result_indices = result_indices
tag = first_index.identity
else:
tag = 'f'
result_indices = []
# reduce
result = opt.Tensor(f'E{tag}', result_indices,
data=tf.reduce_sum(result_data, axis=0))
return result
def eval_tf_buckets(buckets, feed_dict):
"""
This is a test function which substitutes actual numpy tensors
in place of buckets
Parameters
----------
buckets : list of lists
holds Tensors with tensorflow placeholders in place of data
feed_dict : dict
dictionary of {placeholder : numpy.array} pairs
Returns
-------
np_buckets : list of lists
buckets with Tensors where data are numpy arrays
"""
np_buckets = []
for bucket in buckets:
np_bucket = []
for tensor in bucket:
data = run_tf_session(tensor.data, feed_dict)
np_bucket.append(tensor.copy(data=data))
np_buckets.append(np_bucket)
return np_buckets
|
py | b4087c13b5393223a3c9f4029256add11a741127 | import contextlib
from itertools import chain
from beartype.typing import Dict, List, Union
class ErrorInServerResponseException(Exception):
pass
class ServerResponseParser:
def __init__(self, response: dict):
self.response = response
def more_results_expected(self) -> bool:
return self.total and self.total > 50 and self.total != len(self.result)
@property
def result(self):
return self.response.get("result")
@property
def total(self):
return self.response.get("total")
@property
def error_description(self):
return self.response.get("error_description")
@property
def result_error(self):
return self.response.get("result_error")
def extract_results(self) -> Union[Dict, List[Dict]]:
"""ะะตัะฝััั ัะตะทัะปััะฐัั ะทะฐะฟัะพัะฐ.
ะัะปะธ ะพะฟัะตะดะตะปะตะฝะพ, ััะพ ะทะฐะฟัะพั ะฑัะป ะฑะฐััะตะฒัะผ, ัะพ ัะฐะทะพะฑัะฐัั ัะตะทัะปััะฐัั ะฑะฐััะตะน
ะธ ัะพะฑัะฐัั ะธั
ะฒ ะฟะปะพัะบะธะน ัะฟะธัะพะบ.
Returns:
Any: ะ ะตะทัะปััะฐัั ะทะฐะฟัะพัะฐ, ะฟะพ ะฒะพะทะผะพะถะฝะพััะธ ะฟัะตะฒัะฐัะตะฝะฝัะต ะฒ ะฟะปะพัะบะธะน ัะฟะธัะพะบ.
"""
self.raise_for_errors()
if self.is_batch():
return self.extract_from_batch_response(self.result["result"])
else:
return self.extract_from_single_response(self.result)
def raise_for_errors(self):
errors = self.extract_errors()
if errors:
raise ErrorInServerResponseException(errors)
def extract_errors(self):
if self.is_batch():
if self.result.get("result_error"):
return self.result["result_error"]
elif self.result_error:
return self.result_error
return None
def is_batch(self) -> bool:
return isinstance(self.result, dict) and "result" in self.result
@staticmethod
def extract_from_single_response(result: dict):
        # If the call result contains only a dict of the form {'tasks': list},
        # return that list.
        # See https://github.com/leshchenko1979/fast_bitrix24/issues/132
        # The `crm.stagehistory.list` method returns dict["items", list] --
        # unwrap it into a list.
if isinstance(result, dict) and result.keys() & {"tasks", "items"}:
contents = result[list(result.keys())[0]]
if isinstance(contents, list):
return contents
return result
def extract_from_batch_response(self, result) -> list:
if not result:
return []
        # If the call results contain only a dict with a "tasks" or "items"
        # key and a list inside it, return that list.
        # See https://github.com/leshchenko1979/fast_bitrix24/issues/132
first_item = next(iter(result.values()))
nested_keys = {"tasks", "items"}
nested_results = (
isinstance(first_item, dict) and first_item.keys() & nested_keys
)
        # If the values are lists, return them as one flat list.
if isinstance(first_item, list) or nested_results:
result_list = [
self.extract_from_single_response(element)
for element in result.values()
]
result_list = list(chain(*result_list))
return result_list
        # Otherwise (if the values are dicts), return the dict itself.
return result
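# Illustrative sketch (not part of the original file; the payloads below are
# simplified examples of single and batch Bitrix24 responses):
#   single = ServerResponseParser({"result": {"tasks": [{"id": 1}]}, "total": 1})
#   single.extract_results()   # -> [{"id": 1}]
#   batch = ServerResponseParser(
#       {"result": {"result": {"cmd0": [{"ID": 1}], "cmd1": [{"ID": 2}]}}})
#   batch.extract_results()    # -> [{"ID": 1}, {"ID": 2}]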
|
py | b4087c9695900422b107227c3bc60b0fae23c1f4 | #/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * [email protected]
# */
#---------------------------------------------------------------------
# Python for CI of OAI-eNB + COTS-UE
#
# Required Python Version
# Python 3.x
#
# Required Python Package
# pexpect
#---------------------------------------------------------------------
#-----------------------------------------------------------
# Import
#-----------------------------------------------------------
import pexpect # pexpect
import logging
import time # sleep
import re
import subprocess
import sys
#-----------------------------------------------------------
# Class Declaration
#-----------------------------------------------------------
class SSHConnection():
def __init__(self):
self.ssh = ''
self.picocom_closure = False
self.ipaddress = ''
self.username = ''
self.cmd2Results = ''
def disablePicocomClosure(self):
self.picocom_closure = False
def enablePicocomClosure(self):
self.picocom_closure = True
def open(self, ipaddress, username, password):
count = 0
connect_status = False
while count < 4:
self.ssh = pexpect.spawn('ssh -o PubkeyAuthentication=no {}@{}'.format(username,ipaddress))
self.ssh.timeout = 5
self.sshresponse = self.ssh.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', 'Last login', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
self.ssh.sendline('yes')
self.sshresponse = self.ssh.expect(['password:', username + '@'])
if self.sshresponse == 0:
self.ssh.sendline(password)
self.sshresponse = self.ssh.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
count = 10
connect_status = True
else:
logging.debug('self.sshresponse = ' + str(self.sshresponse))
elif self.sshresponse == 1:
self.ssh.sendline(password)
self.sshresponse = self.ssh.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
count = 10
connect_status = True
else:
logging.debug('self.sshresponse = ' + str(self.sshresponse))
elif self.sshresponse == 2:
# Checking if we are really on the remote client defined by its IP address
self.command('stdbuf -o0 ifconfig | egrep --color=never "inet addr:|inet "', '\$', 5)
result = re.search(str(ipaddress), str(self.ssh.before))
if result is None:
self.close()
else:
count = 10
connect_status = True
else:
# debug output
logging.debug(str(self.ssh.before))
logging.debug('self.sshresponse = ' + str(self.sshresponse))
# adding a tempo when failure
if not connect_status:
time.sleep(1)
count += 1
if connect_status:
pass
else:
sys.exit('SSH Connection Failed')
self.ipaddress = ipaddress
self.username = username
def cde_check_value(self, commandline, expected, timeout):
logging.debug(commandline)
self.ssh.timeout = timeout
self.ssh.sendline(commandline)
expected.append(pexpect.EOF)
expected.append(pexpect.TIMEOUT)
self.sshresponse = self.ssh.expect(expected)
return self.sshresponse
def command(self, commandline, expectedline, timeout, silent=False, resync=False):
if not silent:
logging.debug(commandline)
self.ssh.timeout = timeout
        # Nasty patch for when pexpect output is out of sync.
        # Most pronounced when running back-to-back oc commands.
if resync:
self.ssh.send(commandline)
self.ssh.expect([commandline, pexpect.TIMEOUT])
self.ssh.send('\r\n')
self.sshresponse = self.ssh.expect([expectedline, pexpect.EOF, pexpect.TIMEOUT])
else:
self.ssh.sendline(commandline)
self.sshresponse = self.ssh.expect([expectedline, pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
return 0
elif self.sshresponse == 1:
logging.debug('\u001B[1;37;41m Unexpected EOF \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
logging.debug(str(self.ssh.before))
sys.exit(self.sshresponse)
elif self.sshresponse == 2:
logging.debug('\u001B[1;37;41m Unexpected TIMEOUT \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
result = re.search('ping |iperf |picocom', str(commandline))
if result is None:
logging.debug(str(self.ssh.before))
sys.exit(self.sshresponse)
else:
return -1
else:
logging.debug('\u001B[1;37;41m Unexpected Others \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
sys.exit(self.sshresponse)
def command2(self, commandline, timeout, silent=False):
if not silent:
logging.debug(commandline)
self.cmd2Results = ''
myHost = self.username + '@' + self.ipaddress
# CAUTION: THIS METHOD IMPLIES THAT THERE ARE VALID SSH KEYS
# BETWEEN THE PYTHON EXECUTOR NODE AND THE REMOTE HOST
# OTHERWISE IT WON'T WORK
lSsh = subprocess.Popen(["ssh", "%s" % myHost, commandline],shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
self.cmd2Results = str(lSsh.stdout.readlines())
def close(self):
self.ssh.timeout = 5
self.ssh.sendline('exit')
self.sshresponse = self.ssh.expect([pexpect.EOF, pexpect.TIMEOUT])
self.ipaddress = ''
self.username = ''
if self.sshresponse == 0:
pass
elif self.sshresponse == 1:
if not self.picocom_closure:
logging.debug('\u001B[1;37;41m Unexpected TIMEOUT during closing\u001B[0m')
else:
logging.debug('\u001B[1;37;41m Unexpected Others during closing\u001B[0m')
def copyin(self, ipaddress, username, password, source, destination):
count = 0
copy_status = False
logging.debug('scp '+ username + '@' + ipaddress + ':' + source + ' ' + destination)
while count < 10:
scp_spawn = pexpect.spawn('scp '+ username + '@' + ipaddress + ':' + source + ' ' + destination, timeout = 100)
scp_response = scp_spawn.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
scp_spawn.sendline('yes')
scp_spawn.expect('password:')
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
count = 10
copy_status = True
else:
logging.debug('1 - scp_response = ' + str(scp_response))
elif scp_response == 1:
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0 or scp_response == 3:
count = 10
copy_status = True
else:
logging.debug('2 - scp_response = ' + str(scp_response))
elif scp_response == 2:
count = 10
copy_status = True
else:
logging.debug('3 - scp_response = ' + str(scp_response))
# adding a tempo when failure
if not copy_status:
time.sleep(1)
count += 1
if copy_status:
return 0
else:
return -1
def copyout(self, ipaddress, username, password, source, destination):
count = 0
copy_status = False
logging.debug('scp ' + source + ' ' + username + '@' + ipaddress + ':' + destination)
while count < 4:
scp_spawn = pexpect.spawn('scp ' + source + ' ' + username + '@' + ipaddress + ':' + destination, timeout = 100)
scp_response = scp_spawn.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
scp_spawn.sendline('yes')
scp_spawn.expect('password:')
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
count = 10
copy_status = True
else:
logging.debug('1 - scp_response = ' + str(scp_response))
elif scp_response == 1:
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0 or scp_response == 3:
count = 10
copy_status = True
else:
logging.debug('2 - scp_response = ' + str(scp_response))
elif scp_response == 2:
count = 10
copy_status = True
else:
logging.debug('3 - scp_response = ' + str(scp_response))
# adding a tempo when failure
if not copy_status:
time.sleep(1)
count += 1
if copy_status:
pass
else:
sys.exit('SCP failed')
def getBefore(self):
return str(self.ssh.before)
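# Illustrative usage sketch (not part of the original file; host, credentials
# and the command are placeholders):
#   ssh = SSHConnection()
#   ssh.open('192.168.1.10', 'oaici', 'secret')
#   ssh.command('uname -a', '\$', 5)
#   print(ssh.getBefore())
#   ssh.close()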
|
py | b4087d87b49b7f7dc06899f0841afc995fb9861a | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup module for the GRPC Python package's optional reflection."""
import os
import sys
import setuptools
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Break import-style to ensure we can actually find our local modules.
import grpc_version
class _NoOpCommand(setuptools.Command):
"""No-op command."""
description = ''
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License',
]
PACKAGE_DIRECTORIES = {
'': '.',
}
INSTALL_REQUIRES = (
'protobuf>=3.6.0',
'grpcio>={version}'.format(version=grpc_version.VERSION),
)
try:
import reflection_commands as _reflection_commands
# we are in the build environment, otherwise the above import fails
SETUP_REQUIRES = (
'grpcio-tools=={version}'.format(version=grpc_version.VERSION),)
COMMAND_CLASS = {
# Run preprocess from the repository *before* doing any packaging!
'preprocess': _reflection_commands.CopyProtoModules,
'build_package_protos': _reflection_commands.BuildPackageProtos,
}
except ImportError:
SETUP_REQUIRES = ()
COMMAND_CLASS = {
# wire up commands to no-op not to break the external dependencies
'preprocess': _NoOpCommand,
'build_package_protos': _NoOpCommand,
}
setuptools.setup(
name='grpcio-reflection',
version=grpc_version.VERSION,
license='Apache License 2.0',
description='Standard Protobuf Reflection Service for gRPC',
author='The gRPC Authors',
author_email='[email protected]',
classifiers=CLASSIFIERS,
url='https://grpc.io',
package_dir=PACKAGE_DIRECTORIES,
packages=setuptools.find_packages('.'),
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
cmdclass=COMMAND_CLASS)
|
py | b4087dea0e63f877b24110666b4eeaf5bdc21ef3 | #
# Author: H.Muhammad Kamran
# email: [email protected]
# contact: +92 (313 / 333) 9112 845
#
from django.db import models
import uuid
class Roles(models.Model):
RoleId = models.UUIDField(null=False, primary_key=True, default=uuid.uuid4)
ParentRoleId = models.UUIDField(null=True)
FullName = models.TextField(null=False, unique=True)
Status = models.BooleanField(default=False)
class Meta:
db_table = '"Role"."Roles"'
|
py | b4087ebd865fd03e368b9a29b12a2920fe643476 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
import os.path
import sys
from spack import *  # Spack package API (AutotoolsPackage, variant, depends_on, ...)
class Mvapich2(AutotoolsPackage):
"""Mvapich2 is a High-Performance MPI Library for clusters with diverse
networks (InfiniBand, Omni-Path, Ethernet/iWARP, and RoCE) and computing
platforms (x86 (Intel and AMD), ARM and OpenPOWER)"""
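    # Illustrative spec (not taken from this file; the variant choices are
    # placeholders): spack install [email protected] fabrics=mrail process_managers=slurm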
homepage = "http://mvapich.cse.ohio-state.edu/userguide/userguide_spack/"
url = "http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-2.3.6.tar.gz"
list_url = "http://mvapich.cse.ohio-state.edu/downloads/"
maintainers = ['natshineman', 'harisubramoni']
executables = ['^mpiname$']
# Prefer the latest stable release
version('2.3.6', sha256='b3a62f2a05407191b856485f99da05f5e769d6381cd63e2fcb83ee98fc46a249')
version('2.3.5', sha256='f9f467fec5fc981a89a7beee0374347b10c683023c76880f92a1a0ad4b961a8c')
version('2.3.4', sha256='7226a45c7c98333c8e5d2888119cce186199b430c13b7b1dca1769909e68ea7a')
version('2.3.3', sha256='41d3261be57e5bc8aabf4e32981543c015c5443ff032a26f18205985e18c2b73')
version('2.3.2', sha256='30cc0d7bcaa075d204692f76bca4d65a539e0f661c7460ffa9f835d6249e1ebf')
version('2.3.1', sha256='314e12829f75f3ed83cd4779a972572d1787aac6543a3d024ea7c6080e0ee3bf')
version('2.3', sha256='01d5fb592454ddd9ecc17e91c8983b6aea0e7559aa38f410b111c8ef385b50dd')
version('2.3rc2', sha256='dc3801f879a54358d17002a56afd45186e2e83edc5b8367b5c317e282eb6d6bf')
version('2.3rc1', sha256='607d309c864a6d57f5fa78fe6dd02368919736b8be0f4ddb938aba303ef9c45c')
version('2.3a', sha256='7f0bc94265de9f66af567a263b1be6ef01755f7f6aedd25303d640cc4d8b1cff')
version('2.2', sha256='791a6fc2b23de63b430b3e598bf05b1b25b82ba8bf7e0622fc81ba593b3bb131')
version('2.1', sha256='49f3225ad17d2f3b6b127236a0abdc979ca8a3efb8d47ab4b6cd4f5252d05d29')
provides('mpi')
provides('mpi@:3.1', when='@2.3:')
provides('mpi@:3.0', when='@2.1:')
variant('wrapperrpath', default=True, description='Enable wrapper rpath')
variant('debug', default=False,
description='Enable debug info and error messages at run-time')
variant('cuda', default=False,
description='Enable CUDA extension')
variant('regcache', default=True,
description='Enable memory registration cache')
# Accepted values are:
# single - No threads (MPI_THREAD_SINGLE)
# funneled - Only the main thread calls MPI (MPI_THREAD_FUNNELED)
# serialized - User serializes calls to MPI (MPI_THREAD_SERIALIZED)
# multiple - Fully multi-threaded (MPI_THREAD_MULTIPLE)
# runtime - Alias to "multiple"
variant(
'threads',
default='multiple',
values=('single', 'funneled', 'serialized', 'multiple'),
multi=False,
description='Control the level of thread support'
)
# 32 is needed when job size exceeds 32768 cores
variant(
'ch3_rank_bits',
default='32',
values=('16', '32'),
multi=False,
description='Number of bits allocated to the rank field (16 or 32)'
)
variant(
'process_managers',
description='List of the process managers to activate',
values=disjoint_sets(
('auto',), ('slurm',), ('hydra', 'gforker', 'remshell')
).prohibit_empty_set().with_error(
"'slurm' or 'auto' cannot be activated along with "
"other process managers"
).with_default('auto').with_non_feature_values('auto'),
)
variant(
'fabrics',
description='Select the fabric to be enabled for this build.'
'If you have verbs (either from OFED or MOFED), PSM or PSM2 '
'installed on the system already, you may need to setup external '
'packages in the package.yaml file for rdma-core, psm or opa-psm2. '
'This is recommended to avoid unexpected runtime failures. For '
'more info, visit the homepage url.',
default='mrail',
values=(
'psm', 'psm2', 'sock', 'nemesisib', 'nemesis', 'mrail',
'nemesisibtcp', 'nemesistcpib', 'nemesisofi'
)
)
variant(
'alloca',
default=False,
description='Use alloca to allocate temporary memory if available'
)
variant(
'file_systems',
description='List of the ROMIO file systems to activate',
values=auto_or_any_combination_of(
'ime', 'lustre', 'gpfs', 'nfs', 'ufs'
),
)
depends_on('findutils', type='build')
depends_on('bison', type='build')
depends_on('pkgconfig', type='build')
depends_on('zlib')
depends_on('libpciaccess', when=(sys.platform != 'darwin'))
depends_on('libxml2')
depends_on('cuda', when='+cuda')
depends_on('psm', when='fabrics=psm')
depends_on('opa-psm2', when='fabrics=psm2')
depends_on('rdma-core', when='fabrics=mrail')
depends_on('rdma-core', when='fabrics=nemesisib')
depends_on('rdma-core', when='fabrics=nemesistcpib')
depends_on('rdma-core', when='fabrics=nemesisibtcp')
depends_on('libfabric', when='fabrics=nemesisofi')
depends_on('slurm', when='process_managers=slurm')
conflicts('fabrics=psm2', when='@:2.1') # psm2 support was added at version 2.2
filter_compiler_wrappers(
'mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort', relative_root='bin'
)
@classmethod
def determine_version(cls, exe):
output = Executable(exe)('-a', output=str, error=str)
match = re.search(r'^MVAPICH2 (\S+)', output)
return match.group(1) if match else None
@classmethod
def determine_variants(cls, exes, version):
def get_spack_compiler_spec(path):
spack_compilers = spack.compilers.find_compilers([path])
for spack_compiler in spack_compilers:
if os.path.dirname(spack_compiler.cc) == path:
return spack_compiler.spec
return None
results = []
for exe in exes:
variants = ''
output = Executable(exe)('-a', output=str, error=str)
if re.search(r'--enable-wrapper-rpath=yes', output):
variants += '+wrapperrpath'
else:
variants += '~wrapperrpath'
if (re.search(r'--disable-fast', output)
and re.search(r'--enable-error-checking=runtime', output)
and re.search(r'--enable-error-messages', output)
and re.search(r'--enable-g', output)
and re.search(r'--enable-debuginfo', output)):
variants += '+debug'
else:
variants += '~debug'
if re.search('--enable-cuda', output):
variants += '+cuda'
else:
variants += '~cuda'
if re.search('--enable-registration-cache', output):
variants += '+regcache'
else:
variants += '~regcache'
match = re.search(r'--enable-threads=(\S+)', output)
if match:
variants += " threads=" + match.group(1)
match = re.search(r'--with-ch3-rank-bits=(\S+)', output)
if match:
variants += " ch3_rank_bits=" + match.group(1)
pms = []
if re.search(r'--with-pm=slurm', output):
pms.append('slurm')
if re.search(r'--with-pm=[A-Za-z0-9:]*hydra', output):
pms.append('hydra')
if re.search(r'--with-pm=[A-Za-z0-9:]*gforker', output):
pms.append('gforker')
if re.search(r'--with-pm=[A-Za-z0-9:]*remshell', output):
pms.append('remshell')
if pms:
variants += " process_managers=" + ",".join(pms)
fabrics = {
'sock': 'ch3:sock',
'nemesistcpib': 'ch3:nemesis:tcp,ib',
'nemesisibtcp': 'ch3:nemesis:ib,tcp',
'nemesisib': 'ch3:nemesis:ib',
'nemesis': 'ch3:nemesis',
'mrail': 'ch3:mrail',
'nemesisofi': 'ch3:nemesis:ofi',
}
for fabric_name, conf_flag in fabrics.items():
if re.search(r'--with-device=' + conf_flag, output):
variants += ' fabrics=' + fabric_name
break
else:
if re.search(r'--with-device=psm', output):
if re.search(r'--with-psm=', output):
variants += ' fabrics=psm'
elif re.search(r'--with-psm2=', output):
variants += ' fabrics=psm2'
used_fs = []
for fs in ('lustre', 'gpfs', 'nfs', 'ufs'):
if re.search(
'--with-file-system=[a-zA-Z0-9+]*' + fs,
output):
used_fs.append(fs)
if used_fs:
variants += ' file_systems=' + ",".join(used_fs)
match = re.search(r'CC: (\S+)', output)
if match:
comp_spec = get_spack_compiler_spec(
os.path.dirname(match.group(1)))
if comp_spec:
variants += " %" + str(comp_spec)
results.append(variants)
return results
@property
def libs(self):
query_parameters = self.spec.last_query.extra_parameters
libraries = ['libmpi']
if 'cxx' in query_parameters:
libraries = ['libmpicxx'] + libraries
return find_libraries(
libraries, root=self.prefix, shared=True, recursive=True
)
@property
def process_manager_options(self):
spec = self.spec
other_pms = []
for x in ('hydra', 'gforker', 'remshell'):
if 'process_managers={0}'.format(x) in spec:
other_pms.append(x)
opts = []
if len(other_pms) > 0:
opts = ['--with-pm=%s' % ':'.join(other_pms)]
# See: http://slurm.schedmd.com/mpi_guide.html#mvapich2
if 'process_managers=slurm' in spec:
opts = [
'--with-pmi=pmi2',
'--with-pm=slurm',
'--with-slurm={0}'.format(spec['slurm'].prefix),
]
return opts
@property
def network_options(self):
opts = []
# From here on I can suppose that only one variant has been selected
if 'fabrics=psm' in self.spec:
opts = [
"--with-device=ch3:psm",
"--with-psm={0}".format(self.spec['psm'].prefix)
]
elif 'fabrics=psm2' in self.spec:
opts = [
"--with-device=ch3:psm",
"--with-psm2={0}".format(self.spec['opa-psm2'].prefix)
]
elif 'fabrics=sock' in self.spec:
opts = ["--with-device=ch3:sock"]
elif 'fabrics=nemesistcpib' in self.spec:
opts = ["--with-device=ch3:nemesis:tcp,ib"]
elif 'fabrics=nemesisibtcp' in self.spec:
opts = ["--with-device=ch3:nemesis:ib,tcp"]
elif 'fabrics=nemesisib' in self.spec:
opts = ["--with-device=ch3:nemesis:ib"]
elif 'fabrics=nemesis' in self.spec:
opts = ["--with-device=ch3:nemesis"]
elif 'fabrics=mrail' in self.spec:
opts = ["--with-device=ch3:mrail", "--with-rdma=gen2",
"--disable-mcast"]
elif 'fabrics=nemesisofi' in self.spec:
opts = ["--with-device=ch3:nemesis:ofi",
"--with-ofi={0}".format(self.spec['libfabric'].prefix)]
return opts
@property
def file_system_options(self):
spec = self.spec
opts = []
fs = []
for x in ('ime', 'lustre', 'gpfs', 'nfs', 'ufs'):
if 'file_systems={0}'.format(x) in spec:
fs.append(x)
                # TODO: when the IME package is added, replace these paths
if x == 'ime':
opts = ["CFLAGS=-I/opt/ddn/ime/include",
"LDFLAGS=-L/opt/ddn/ime/lib",
"LIBS=-lim_client"]
if len(fs) > 0:
opts.append('--with-file-system=%s' % '+'.join(fs))
return opts
def flag_handler(self, name, flags):
if name == 'fflags':
# https://bugzilla.redhat.com/show_bug.cgi?id=1795817
if self.spec.satisfies('%gcc@10:'):
if flags is None:
flags = []
flags.append('-fallow-argument-mismatch')
return (flags, None, None)
def setup_build_environment(self, env):
# mvapich2 configure fails when F90 and F90FLAGS are set
env.unset('F90')
env.unset('F90FLAGS')
def setup_run_environment(self, env):
if 'process_managers=slurm' in self.spec:
env.set('SLURM_MPI_TYPE', 'pmi2')
# Because MPI functions as a compiler, we need to treat it as one and
# add its compiler paths to the run environment.
self.setup_compiler_environment(env)
def setup_dependent_build_environment(self, env, dependent_spec):
self.setup_compiler_environment(env)
# use the Spack compiler wrappers under MPI
env.set('MPICH_CC', spack_cc)
env.set('MPICH_CXX', spack_cxx)
env.set('MPICH_F77', spack_f77)
env.set('MPICH_F90', spack_fc)
env.set('MPICH_FC', spack_fc)
def setup_compiler_environment(self, env):
# For Cray MPIs, the regular compiler wrappers *are* the MPI wrappers.
# Cray MPIs always have cray in the module name, e.g. "cray-mvapich"
external_modules = self.spec.external_modules
if external_modules and 'cray' in external_modules[0]:
env.set('MPICC', spack_cc)
env.set('MPICXX', spack_cxx)
env.set('MPIF77', spack_fc)
env.set('MPIF90', spack_fc)
else:
env.set('MPICC', join_path(self.prefix.bin, 'mpicc'))
env.set('MPICXX', join_path(self.prefix.bin, 'mpicxx'))
env.set('MPIF77', join_path(self.prefix.bin, 'mpif77'))
env.set('MPIF90', join_path(self.prefix.bin, 'mpif90'))
def setup_dependent_package(self, module, dependent_spec):
# For Cray MPIs, the regular compiler wrappers *are* the MPI wrappers.
# Cray MPIs always have cray in the module name, e.g. "cray-mvapich"
external_modules = self.spec.external_modules
if external_modules and 'cray' in external_modules[0]:
self.spec.mpicc = spack_cc
self.spec.mpicxx = spack_cxx
self.spec.mpifc = spack_fc
self.spec.mpif77 = spack_f77
else:
self.spec.mpicc = join_path(self.prefix.bin, 'mpicc')
self.spec.mpicxx = join_path(self.prefix.bin, 'mpicxx')
self.spec.mpifc = join_path(self.prefix.bin, 'mpif90')
self.spec.mpif77 = join_path(self.prefix.bin, 'mpif77')
self.spec.mpicxx_shared_libs = [
os.path.join(self.prefix.lib, 'libmpicxx.{0}'.format(dso_suffix)),
os.path.join(self.prefix.lib, 'libmpi.{0}'.format(dso_suffix))
]
@run_before('configure')
def die_without_fortran(self):
# Until we can pass variants such as +fortran through virtual
# dependencies depends_on('mpi'), require Fortran compiler to
# avoid delayed build errors in dependents.
if (self.compiler.f77 is None) or (self.compiler.fc is None):
raise InstallError(
'Mvapich2 requires both C and Fortran compilers!'
)
def configure_args(self):
spec = self.spec
args = [
'--enable-shared',
'--enable-romio',
'--disable-silent-rules',
'--disable-new-dtags',
'--enable-fortran=all',
"--enable-threads={0}".format(spec.variants['threads'].value),
"--with-ch3-rank-bits={0}".format(
spec.variants['ch3_rank_bits'].value),
'--enable-wrapper-rpath={0}'.format('no' if '~wrapperrpath' in
spec else 'yes')
]
args.extend(self.enable_or_disable('alloca'))
if '+debug' in self.spec:
args.extend([
'--disable-fast',
'--enable-error-checking=runtime',
'--enable-error-messages=all',
# Permits debugging with TotalView
'--enable-g=dbg',
'--enable-debuginfo'
])
else:
args.append('--enable-fast=all')
if '+cuda' in self.spec:
args.extend([
'--enable-cuda',
'--with-cuda={0}'.format(spec['cuda'].prefix)
])
else:
args.append('--disable-cuda')
if '+regcache' in self.spec:
args.append('--enable-registration-cache')
else:
args.append('--disable-registration-cache')
args.extend(self.process_manager_options)
args.extend(self.network_options)
args.extend(self.file_system_options)
return args
|
py | b4087fe08026c53ee543ab914a41a7da86da9999 | import os
import pytest
from thefrick import shells
from thefrick import conf, const
from thefrick.system import Path
shells.shell = shells.Generic()
def pytest_addoption(parser):
"""Adds `--enable-functional` argument."""
group = parser.getgroup("thefrick")
group.addoption('--enable-functional', action="store_true", default=False,
help="Enable functional tests")
@pytest.fixture
def no_memoize(monkeypatch):
monkeypatch.setattr('thefrick.utils.memoize.disabled', True)
@pytest.fixture(autouse=True)
def settings(request):
def _reset_settings():
conf.settings.clear()
conf.settings.update(const.DEFAULT_SETTINGS)
request.addfinalizer(_reset_settings)
conf.settings.user_dir = Path('~/.thefrick')
return conf.settings
@pytest.fixture
def no_colors(settings):
settings.no_colors = True
@pytest.fixture(autouse=True)
def no_cache(monkeypatch):
monkeypatch.setattr('thefrick.utils.cache.disabled', True)
@pytest.fixture(autouse=True)
def functional(request):
if request.node.get_closest_marker('functional') \
and not request.config.getoption('enable_functional'):
pytest.skip('functional tests are disabled')
@pytest.fixture
def source_root():
return Path(__file__).parent.parent.resolve()
@pytest.fixture
def set_shell(monkeypatch):
def _set(cls):
shell = cls()
monkeypatch.setattr('thefrick.shells.shell', shell)
return shell
return _set
@pytest.fixture(autouse=True)
def os_environ(monkeypatch):
env = {'PATH': os.environ['PATH']}
monkeypatch.setattr('os.environ', env)
return env
|
py | b40880891b895095b5fed52988cc95182b9e736c | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('status', '0006_auto_20141225_2137'),
]
operations = [
migrations.RenameField(
model_name='incident',
old_name='modified',
new_name='updated',
),
migrations.RenameField(
model_name='status',
old_name='modified',
new_name='updated',
),
]
|
py | b40880ac35b710558aaab71e9083b93266915cb6 | # -*- coding: utf-8 -*-
import json
import os
import shutil
from django import forms as django_forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.files.uploadedfile import SimpleUploadedFile
import mock
from nose.tools import eq_, ok_
from test_utils import RequestFactory
import amo
import amo.tests
import mkt
from amo.tests import app_factory, version_factory
from amo.tests.test_helpers import get_image_path
from mkt.developers import forms
from mkt.developers.tests.test_views_edit import TestAdmin
from mkt.files.helpers import copyfileobj
from mkt.site.fixtures import fixture
from mkt.tags.models import Tag
from mkt.translations.models import Translation
from mkt.users.models import UserProfile
from mkt.webapps.models import Geodata, IARCInfo, Webapp
class TestPreviewForm(amo.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.addon = Webapp.objects.get(pk=337141)
self.dest = os.path.join(settings.TMP_PATH, 'preview')
if not os.path.exists(self.dest):
os.makedirs(self.dest)
@mock.patch('mkt.site.models.ModelBase.update')
def test_preview_modified(self, update_mock):
name = 'transparent.png'
form = forms.PreviewForm({'upload_hash': name,
'position': 1})
shutil.copyfile(get_image_path(name), os.path.join(self.dest, name))
assert form.is_valid(), form.errors
form.save(self.addon)
assert update_mock.called
def test_preview_size(self):
name = 'non-animated.gif'
form = forms.PreviewForm({'upload_hash': name,
'position': 1})
with storage.open(os.path.join(self.dest, name), 'wb') as f:
copyfileobj(open(get_image_path(name)), f)
assert form.is_valid(), form.errors
form.save(self.addon)
eq_(self.addon.previews.all()[0].sizes,
{u'image': [250, 297], u'thumbnail': [100, 119]})
def check_file_type(self, type_):
form = forms.PreviewForm({'upload_hash': type_,
'position': 1})
assert form.is_valid(), form.errors
form.save(self.addon)
return self.addon.previews.all()[0].filetype
@mock.patch('lib.video.tasks.resize_video')
def test_preview_good_file_type(self, resize_video):
eq_(self.check_file_type('x.video-webm'), 'video/webm')
def test_preview_other_file_type(self):
eq_(self.check_file_type('x'), 'image/png')
def test_preview_bad_file_type(self):
eq_(self.check_file_type('x.foo'), 'image/png')
class TestCategoryForm(amo.tests.WebappTestCase):
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
super(TestCategoryForm, self).setUp()
self.user = UserProfile.objects.get(username='regularuser')
self.app = Webapp.objects.get(pk=337141)
self.request = RequestFactory()
self.request.user = self.user
self.request.groups = ()
self.cat = 'social'
def _make_form(self, data=None):
self.form = forms.CategoryForm(
data, product=self.app, request=self.request)
def test_has_no_cats(self):
self._make_form()
eq_(self.form.initial['categories'], [])
eq_(self.form.max_categories(), 2)
def test_save_cats(self):
self._make_form({'categories': ['books', 'social']})
assert self.form.is_valid(), self.form.errors
self.form.save()
eq_(self.app.reload().categories, ['books', 'social'])
eq_(self.form.max_categories(), 2)
def test_save_too_many_cats(self):
self._make_form({'categories': ['books', 'social', 'games']})
ok_(self.form.errors)
def test_save_non_existent_cat(self):
self._make_form({'categories': ['nonexistent']})
ok_(self.form.errors)
class TestRegionForm(amo.tests.WebappTestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
super(TestRegionForm, self).setUp()
self.request = RequestFactory()
self.kwargs = {'product': self.app}
def test_initial_checked(self):
form = forms.RegionForm(data=None, **self.kwargs)
# Even special regions (i.e., China) should be checked.
eq_(form.initial['restricted'], False)
eq_(form.initial['enable_new_regions'], True)
self.assertSetEqual(form.initial['regions'],
set(mkt.regions.ALL_REGION_IDS))
def test_initial_excluded_in_region(self):
self.app.geodata.update(restricted=True)
self.app.update(enable_new_regions=False)
self.app.addonexcludedregion.create(region=mkt.regions.BR.id)
# Everything except Brazil.
regions = set(mkt.regions.ALL_REGION_IDS)
regions.remove(mkt.regions.BR.id)
self.assertSetEqual(self.get_app().get_region_ids(restofworld=True),
regions)
form = forms.RegionForm(data=None, **self.kwargs)
# Everything (even China) except Brazil.
self.assertSetEqual(form.initial['regions'], regions)
eq_(form.initial['enable_new_regions'], False)
def test_initial_excluded_in_regions_and_future_regions(self):
self.app.geodata.update(restricted=True)
self.app.update(enable_new_regions=False)
regions = [mkt.regions.BR, mkt.regions.UK, mkt.regions.RESTOFWORLD]
for region in regions:
self.app.addonexcludedregion.create(region=region.id)
regions = set(mkt.regions.ALL_REGION_IDS)
regions.remove(mkt.regions.BR.id)
regions.remove(mkt.regions.UK.id)
regions.remove(mkt.regions.RESTOFWORLD.id)
self.assertSetEqual(self.get_app().get_region_ids(),
regions)
form = forms.RegionForm(data=None, **self.kwargs)
self.assertSetEqual(form.initial['regions'], regions)
eq_(form.initial['enable_new_regions'], False)
def test_restricted_ignores_enable_new_regions(self):
self.app.geodata.update(restricted=True)
self.app.update(enable_new_regions=False)
form = forms.RegionForm({'restricted': '0',
'regions': [mkt.regions.RESTOFWORLD.id],
'enable_new_regions': False}, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(self.app.enable_new_regions, True)
eq_(self.app.geodata.restricted, False)
def test_restofworld_only(self):
form = forms.RegionForm({'regions': [mkt.regions.RESTOFWORLD.id]},
**self.kwargs)
assert form.is_valid(), form.errors
def test_no_regions(self):
form = forms.RegionForm({'restricted': '1',
'enable_new_regions': True}, **self.kwargs)
assert not form.is_valid(), 'Form should be invalid'
eq_(form.errors,
{'regions': ['You must select at least one region.']})
def test_exclude_each_region(self):
"""Test that it's possible to exclude each region."""
for region_id in mkt.regions.ALL_REGION_IDS:
to_exclude = list(mkt.regions.ALL_REGION_IDS)
to_exclude.remove(region_id)
form = forms.RegionForm({'regions': to_exclude,
'restricted': '1',
'enable_new_regions': True},
**self.kwargs)
assert form.is_valid(), form.errors
form.save()
r_id = mkt.regions.REGIONS_CHOICES_ID_DICT[region_id]
eq_(self.app.reload().get_region_ids(True), to_exclude,
'Failed for %s' % r_id)
def test_exclude_restofworld(self):
form = forms.RegionForm({'regions': mkt.regions.REGION_IDS,
'restricted': '1',
'enable_new_regions': False}, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(self.app.get_region_ids(True), mkt.regions.REGION_IDS)
def test_reinclude_region(self):
self.app.addonexcludedregion.create(region=mkt.regions.BR.id)
form = forms.RegionForm({'regions': mkt.regions.ALL_REGION_IDS,
'enable_new_regions': True}, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(self.app.get_region_ids(True), mkt.regions.ALL_REGION_IDS)
def test_reinclude_restofworld(self):
self.app.addonexcludedregion.create(region=mkt.regions.RESTOFWORLD.id)
form = forms.RegionForm({'restricted': '1',
'regions': mkt.regions.ALL_REGION_IDS},
**self.kwargs)
assert form.is_valid(), form.errors
form.save()
eq_(self.app.get_region_ids(True), mkt.regions.ALL_REGION_IDS)
def test_restofworld_valid_choice_paid(self):
self.app.update(premium_type=amo.ADDON_PREMIUM)
form = forms.RegionForm(
{'restricted': '1',
'regions': [mkt.regions.RESTOFWORLD.id]}, **self.kwargs)
assert form.is_valid(), form.errors
def test_paid_app_options_initial(self):
"""Check initial regions of a paid app post-save.
Check that if we save the region form for a paid app
with a specific region that should *not* be excluded it is still
shown as a initial region when the new form instance is created.
"""
self.app.update(premium_type=amo.ADDON_PREMIUM)
form = forms.RegionForm(
{'restricted': '1',
'regions': [mkt.regions.RESTOFWORLD.id]}, **self.kwargs)
assert form.is_valid(), form.errors
form.save()
new_form = forms.RegionForm(**self.kwargs)
self.assertIn(mkt.regions.RESTOFWORLD.id,
new_form.initial.get('regions', []))
def test_restofworld_valid_choice_free(self):
form = forms.RegionForm(
{'restricted': '1',
'regions': [mkt.regions.RESTOFWORLD.id]}, **self.kwargs)
assert form.is_valid(), form.errors
def test_china_initially_included(self):
self.create_flag('special-regions')
form = forms.RegionForm(None, **self.kwargs)
cn = mkt.regions.CN.id
assert cn in form.initial['regions']
assert cn in dict(form.fields['regions'].choices).keys()
def _test_china_excluded_if_pending_or_rejected(self):
self.create_flag('special-regions')
# Mark app as pending/rejected in China.
for status in (amo.STATUS_PENDING, amo.STATUS_REJECTED):
self.app.geodata.set_status(mkt.regions.CN, status, save=True)
eq_(self.app.geodata.get_status(mkt.regions.CN), status)
# Post the form.
form = forms.RegionForm({'regions': mkt.regions.ALL_REGION_IDS,
'special_regions': [mkt.regions.CN.id]},
**self.kwargs)
# China should be checked if it's pending and
# unchecked if rejected.
cn = mkt.regions.CN.id
if status == amo.STATUS_PENDING:
assert cn in form.initial['regions'], (
status, form.initial['regions'])
else:
assert cn not in form.initial['regions'], (
status, form.initial['regions'])
choices = dict(form.fields['regions'].choices).keys()
assert cn in choices, (status, choices)
assert form.is_valid(), form.errors
form.save()
# App should be unlisted in China and always pending after
# requesting China.
self.app = self.app.reload()
eq_(self.app.listed_in(mkt.regions.CN), False)
eq_(self.app.geodata.get_status(mkt.regions.CN),
amo.STATUS_PENDING)
def test_china_excluded_if_pending_or_rejected(self):
self._test_china_excluded_if_pending_or_rejected()
def test_china_already_excluded_and_pending_or_rejected(self):
cn = mkt.regions.CN.id
self.app.addonexcludedregion.create(region=cn)
# If the app was already excluded in China, the checkbox should still
# be checked if the app's been requested for approval in China now.
self._test_china_excluded_if_pending_or_rejected()
def test_china_excluded_if_pending_cancelled(self):
"""
If the developer already requested to be in China,
and a reviewer hasn't reviewed it for China yet,
keep the region exclusion and the status as pending.
"""
self.create_flag('special-regions')
# Mark app as pending in China.
status = amo.STATUS_PENDING
self.app.geodata.set_status(mkt.regions.CN, status, save=True)
eq_(self.app.geodata.get_status(mkt.regions.CN), status)
# Post the form.
form = forms.RegionForm({'regions': mkt.regions.ALL_REGION_IDS},
**self.kwargs)
# China should be checked if it's pending.
cn = mkt.regions.CN.id
assert cn in form.initial['regions']
assert cn in dict(form.fields['regions'].choices).keys()
assert form.is_valid(), form.errors
form.save()
# App should be unlisted in China and now null.
self.app = self.app.reload()
eq_(self.app.listed_in(mkt.regions.CN), False)
eq_(self.app.geodata.get_status(mkt.regions.CN), amo.STATUS_NULL)
def test_china_included_if_approved_but_unchecked(self):
self.create_flag('special-regions')
# Mark app as public in China.
status = amo.STATUS_PUBLIC
self.app.geodata.set_status(mkt.regions.CN, status, save=True)
eq_(self.app.geodata.get_status(mkt.regions.CN), status)
# Post the form.
form = forms.RegionForm({'regions': mkt.regions.ALL_REGION_IDS},
**self.kwargs)
# China should be checked if it's public.
cn = mkt.regions.CN.id
assert cn in form.initial['regions']
assert cn in dict(form.fields['regions'].choices).keys()
assert form.is_valid(), form.errors
form.save()
# App should be unlisted in China and now null.
self.app = self.app.reload()
eq_(self.app.listed_in(mkt.regions.CN), False)
eq_(self.app.geodata.get_status(mkt.regions.CN), amo.STATUS_NULL)
def test_china_included_if_approved_and_checked(self):
self.create_flag('special-regions')
# Mark app as public in China.
status = amo.STATUS_PUBLIC
self.app.geodata.set_status(mkt.regions.CN, status, save=True)
eq_(self.app.geodata.get_status(mkt.regions.CN), status)
# Post the form.
form = forms.RegionForm({'regions': mkt.regions.ALL_REGION_IDS,
'special_regions': [mkt.regions.CN.id]},
**self.kwargs)
assert form.is_valid(), form.errors
form.save()
# App should still be listed in China and still public.
self.app = self.app.reload()
eq_(self.app.listed_in(mkt.regions.CN), True)
eq_(self.app.geodata.get_status(mkt.regions.CN), status)
def test_low_memory_regions_true(self):
regions = {10: mock.MagicMock(low_memory=True),
20: mock.MagicMock(low_memory=False),
30: mock.MagicMock(low_memory=False)}
form = forms.RegionForm(**self.kwargs)
with mock.patch.object(forms.RegionForm, 'regions_by_id', regions):
assert form.low_memory_regions, 'expected low memory regions'
def test_low_memory_regions_false(self):
regions = {10: mock.MagicMock(low_memory=False),
20: mock.MagicMock(low_memory=False),
30: mock.MagicMock(low_memory=False)}
form = forms.RegionForm(**self.kwargs)
with mock.patch.object(forms.RegionForm, 'regions_by_id', regions):
assert not form.low_memory_regions, 'expected no low memory region'
class TestNewManifestForm(amo.tests.TestCase):
@mock.patch('mkt.developers.forms.verify_app_domain')
def test_normal_validator(self, _verify_app_domain):
form = forms.NewManifestForm({'manifest': 'http://omg.org/yes.webapp'},
is_standalone=False)
assert form.is_valid()
assert _verify_app_domain.called
@mock.patch('mkt.developers.forms.verify_app_domain')
def test_standalone_validator(self, _verify_app_domain):
form = forms.NewManifestForm({'manifest': 'http://omg.org/yes.webapp'},
is_standalone=True)
assert form.is_valid()
assert not _verify_app_domain.called
class TestPackagedAppForm(amo.tests.AMOPaths, amo.tests.WebappTestCase):
def setUp(self):
super(TestPackagedAppForm, self).setUp()
path = self.packaged_app_path('mozball.zip')
        self.files = {'upload': SimpleUploadedFile('mozball.zip',
                                                   open(path, 'rb').read())}
def test_not_there(self):
form = forms.NewPackagedAppForm({}, {})
assert not form.is_valid()
eq_(form.errors['upload'], [u'This field is required.'])
eq_(form.file_upload, None)
def test_right_size(self):
form = forms.NewPackagedAppForm({}, self.files)
assert form.is_valid(), form.errors
assert form.file_upload
def test_too_big(self):
form = forms.NewPackagedAppForm({}, self.files, max_size=5)
assert not form.is_valid()
validation = json.loads(form.file_upload.validation)
assert 'messages' in validation, 'No messages in validation.'
eq_(validation['messages'][0]['message'],
u'Packaged app too large for submission. Packages must be smaller '
u'than 5 bytes.')
def test_origin_exists(self):
self.app.update(app_domain='app://hy.fr')
form = forms.NewPackagedAppForm({}, self.files)
assert not form.is_valid()
validation = json.loads(form.file_upload.validation)
eq_(validation['messages'][0]['message'],
'An app already exists on this domain; only one app per domain is '
'allowed.')
class TestTransactionFilterForm(amo.tests.TestCase):
def setUp(self):
(app_factory(), app_factory())
# Need queryset to initialize form.
self.apps = Webapp.objects.all()
self.data = {
'app': self.apps[0].id,
'transaction_type': 1,
'transaction_id': 1,
'date_from_day': '1',
'date_from_month': '1',
'date_from_year': '2012',
'date_to_day': '1',
'date_to_month': '1',
'date_to_year': '2013',
}
def test_basic(self):
"""Test the form doesn't crap out."""
form = forms.TransactionFilterForm(self.data, apps=self.apps)
assert form.is_valid(), form.errors
def test_app_choices(self):
"""Test app choices."""
form = forms.TransactionFilterForm(self.data, apps=self.apps)
for app in self.apps:
assertion = (app.id, app.name) in form.fields['app'].choices
assert assertion, '(%s, %s) not in choices' % (app.id, app.name)
class TestAppFormBasic(amo.tests.TestCase):
def setUp(self):
self.data = {
'slug': 'yolo',
'manifest_url': 'https://omg.org/yes.webapp',
'description': 'You Only Live Once'
}
self.request = mock.Mock()
self.request.groups = ()
def post(self):
self.form = forms.AppFormBasic(
self.data, instance=Webapp.objects.create(app_slug='yolo'),
request=self.request)
def test_success(self):
self.post()
eq_(self.form.is_valid(), True, self.form.errors)
eq_(self.form.errors, {})
def test_slug_invalid(self):
Webapp.objects.create(app_slug='yolo')
self.post()
eq_(self.form.is_valid(), False)
eq_(self.form.errors,
{'slug': ['This slug is already in use. Please choose another.']})
class TestAppVersionForm(amo.tests.TestCase):
def setUp(self):
self.request = mock.Mock()
self.app = app_factory(publish_type=amo.PUBLISH_IMMEDIATE,
version_kw={'version': '1.0',
'created': self.days_ago(5)})
version_factory(addon=self.app, version='2.0',
file_kw=dict(status=amo.STATUS_PENDING))
self.app.reload()
def _get_form(self, version, data=None):
return forms.AppVersionForm(data, instance=version)
def test_get_publish(self):
form = self._get_form(self.app.latest_version)
eq_(form.fields['publish_immediately'].initial, True)
self.app.update(publish_type=amo.PUBLISH_PRIVATE)
self.app.reload()
form = self._get_form(self.app.latest_version)
eq_(form.fields['publish_immediately'].initial, False)
def test_post_publish(self):
# Using the latest_version, which is pending.
form = self._get_form(self.app.latest_version,
data={'publish_immediately': True})
eq_(form.is_valid(), True)
form.save()
self.app.reload()
eq_(self.app.publish_type, amo.PUBLISH_IMMEDIATE)
form = self._get_form(self.app.latest_version,
data={'publish_immediately': False})
eq_(form.is_valid(), True)
form.save()
self.app.reload()
eq_(self.app.publish_type, amo.PUBLISH_PRIVATE)
def test_post_publish_not_pending(self):
# Using the current_version, which is public.
form = self._get_form(self.app.current_version,
data={'publish_immediately': False})
eq_(form.is_valid(), True)
form.save()
self.app.reload()
eq_(self.app.publish_type, amo.PUBLISH_IMMEDIATE)
class TestPublishForm(amo.tests.TestCase):
def setUp(self):
self.app = app_factory(status=amo.STATUS_PUBLIC)
self.form = forms.PublishForm
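    # The tests below exercise the visibility mapping implied by the form:
    # PUBLISH_IMMEDIATE -> STATUS_PUBLIC, PUBLISH_HIDDEN with limited=False ->
    # STATUS_UNLISTED, and PUBLISH_HIDDEN with limited=True -> STATUS_APPROVED
    # (i.e. private).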
def test_initial(self):
app = Webapp(status=amo.STATUS_PUBLIC)
eq_(self.form(None, addon=app).fields['publish_type'].initial,
amo.PUBLISH_IMMEDIATE)
eq_(self.form(None, addon=app).fields['limited'].initial, False)
app.status = amo.STATUS_UNLISTED
eq_(self.form(None, addon=app).fields['publish_type'].initial,
amo.PUBLISH_HIDDEN)
eq_(self.form(None, addon=app).fields['limited'].initial, False)
app.status = amo.STATUS_APPROVED
eq_(self.form(None, addon=app).fields['publish_type'].initial,
amo.PUBLISH_HIDDEN)
eq_(self.form(None, addon=app).fields['limited'].initial, True)
def test_go_public(self):
self.app.update(status=amo.STATUS_APPROVED)
form = self.form({'publish_type': amo.PUBLISH_IMMEDIATE,
'limited': False}, addon=self.app)
assert form.is_valid()
form.save()
self.app.reload()
eq_(self.app.status, amo.STATUS_PUBLIC)
def test_go_unlisted(self):
self.app.update(status=amo.STATUS_PUBLIC)
form = self.form({'publish_type': amo.PUBLISH_HIDDEN,
'limited': False}, addon=self.app)
assert form.is_valid()
form.save()
self.app.reload()
eq_(self.app.status, amo.STATUS_UNLISTED)
def test_go_private(self):
self.app.update(status=amo.STATUS_PUBLIC)
form = self.form({'publish_type': amo.PUBLISH_HIDDEN,
'limited': True}, addon=self.app)
assert form.is_valid()
form.save()
self.app.reload()
eq_(self.app.status, amo.STATUS_APPROVED)
def test_invalid(self):
form = self.form({'publish_type': 999}, addon=self.app)
assert not form.is_valid()
class TestPublishFormPackaged(amo.tests.TestCase):
"""
Test that changing the app visibility doesn't affect the version statuses
in weird ways.
"""
def setUp(self):
self.app = app_factory(status=amo.STATUS_PUBLIC, is_packaged=True)
self.ver1 = self.app.current_version
self.ver1.update(created=self.days_ago(1))
self.ver2 = version_factory(addon=self.app, version='2.0',
file_kw=dict(status=amo.STATUS_APPROVED))
self.app.update(_latest_version=self.ver2)
self.form = forms.PublishForm
def test_initial(self):
app = Webapp(status=amo.STATUS_PUBLIC)
eq_(self.form(None, addon=app).fields['publish_type'].initial,
amo.PUBLISH_IMMEDIATE)
eq_(self.form(None, addon=app).fields['limited'].initial, False)
app.status = amo.STATUS_UNLISTED
eq_(self.form(None, addon=app).fields['publish_type'].initial,
amo.PUBLISH_HIDDEN)
eq_(self.form(None, addon=app).fields['limited'].initial, False)
app.status = amo.STATUS_APPROVED
eq_(self.form(None, addon=app).fields['publish_type'].initial,
amo.PUBLISH_HIDDEN)
eq_(self.form(None, addon=app).fields['limited'].initial, True)
def test_go_public(self):
self.app.update(status=amo.STATUS_APPROVED)
form = self.form({'publish_type': amo.PUBLISH_IMMEDIATE,
'limited': False}, addon=self.app)
assert form.is_valid()
form.save()
self.app.reload()
eq_(self.app.status, amo.STATUS_PUBLIC)
eq_(self.app.current_version, self.ver1)
eq_(self.app.latest_version, self.ver2)
def test_go_private(self):
self.app.update(status=amo.STATUS_PUBLIC)
form = self.form({'publish_type': amo.PUBLISH_HIDDEN,
'limited': True}, addon=self.app)
assert form.is_valid()
form.save()
self.app.reload()
eq_(self.app.status, amo.STATUS_APPROVED)
eq_(self.app.current_version, self.ver1)
eq_(self.app.latest_version, self.ver2)
def test_go_unlisted(self):
self.app.update(status=amo.STATUS_PUBLIC)
form = self.form({'publish_type': amo.PUBLISH_HIDDEN,
'limited': False}, addon=self.app)
assert form.is_valid()
form.save()
self.app.reload()
eq_(self.app.status, amo.STATUS_UNLISTED)
eq_(self.app.current_version, self.ver1)
eq_(self.app.latest_version, self.ver2)
def test_invalid(self):
form = self.form({'publish_type': 999}, addon=self.app)
assert not form.is_valid()
class TestAdminSettingsForm(TestAdmin):
def setUp(self):
super(TestAdminSettingsForm, self).setUp()
self.data = {'position': 1}
self.user = UserProfile.objects.get(username='admin')
self.request = RequestFactory()
self.request.user = self.user
self.request.groups = ()
self.kwargs = {'instance': self.webapp, 'request': self.request}
@mock.patch('mkt.developers.forms.index_webapps.delay')
def test_reindexed(self, index_webapps_mock):
form = forms.AdminSettingsForm(self.data, **self.kwargs)
assert form.is_valid(), form.errors
form.save(self.webapp)
index_webapps_mock.assert_called_with([self.webapp.id])
def test_adding_tags(self):
self.data.update({'tags': 'tag one, tag two'})
form = forms.AdminSettingsForm(self.data, **self.kwargs)
assert form.is_valid(), form.errors
form.save(self.webapp)
eq_(self.webapp.tags.count(), 2)
self.assertSetEqual(
self.webapp.tags.values_list('tag_text', flat=True),
['tag one', 'tag two'])
def test_removing_tags(self):
Tag(tag_text='tag one').save_tag(self.webapp)
eq_(self.webapp.tags.count(), 1)
self.data.update({'tags': 'tag two, tag three'})
form = forms.AdminSettingsForm(self.data, **self.kwargs)
assert form.is_valid(), form.errors
form.save(self.webapp)
eq_(self.webapp.tags.count(), 2)
self.assertSetEqual(
self.webapp.tags.values_list('tag_text', flat=True),
['tag two', 'tag three'])
def test_removing_all_tags(self):
Tag(tag_text='tag one').save_tag(self.webapp)
eq_(self.webapp.tags.count(), 1)
self.data.update({'tags': ''})
form = forms.AdminSettingsForm(self.data, **self.kwargs)
assert form.is_valid(), form.errors
form.save(self.webapp)
eq_(self.webapp.tags.count(), 0)
self.assertSetEqual(
self.webapp.tags.values_list('tag_text', flat=True), [])
def test_banner_message(self):
self.data.update({
'banner_message_en-us': u'Oh Hai.',
            'banner_message_es': u'¿Dónde está la biblioteca?',
})
form = forms.AdminSettingsForm(self.data, **self.kwargs)
assert form.is_valid(), form.errors
form.save(self.webapp)
geodata = self.webapp.geodata.reload()
trans_id = geodata.banner_message_id
eq_(geodata.banner_message, self.data['banner_message_en-us'])
eq_(unicode(Translation.objects.get(id=trans_id, locale='es')),
self.data['banner_message_es'])
eq_(unicode(Translation.objects.get(id=trans_id, locale='en-us')),
self.data['banner_message_en-us'])
def test_banner_regions_garbage(self):
self.data.update({
'banner_regions': ['LOL']
})
form = forms.AdminSettingsForm(self.data, **self.kwargs)
assert not form.is_valid(), form.errors
def test_banner_regions_valid(self): # Use strings
self.data.update({
'banner_regions': [unicode(mkt.regions.BR.id),
mkt.regions.SPAIN.id]
})
self.webapp.geodata.update(banner_regions=[mkt.regions.RS.id])
form = forms.AdminSettingsForm(self.data, **self.kwargs)
eq_(form.initial['banner_regions'], [mkt.regions.RS.id])
assert form.is_valid(), form.errors
eq_(form.cleaned_data['banner_regions'], [mkt.regions.BR.id,
mkt.regions.SPAIN.id])
form.save(self.webapp)
geodata = self.webapp.geodata.reload()
eq_(geodata.banner_regions, [mkt.regions.BR.id, mkt.regions.SPAIN.id])
def test_banner_regions_initial(self):
form = forms.AdminSettingsForm(self.data, **self.kwargs)
eq_(self.webapp.geodata.banner_regions, None)
eq_(form.initial['banner_regions'], [])
self.webapp.geodata.update(banner_regions=[])
form = forms.AdminSettingsForm(self.data, **self.kwargs)
eq_(form.initial['banner_regions'], [])
class TestIARCGetAppInfoForm(amo.tests.WebappTestCase):
def _get_form(self, app=None, **kwargs):
data = {
'submission_id': 1,
'security_code': 'a'
}
data.update(kwargs)
return forms.IARCGetAppInfoForm(data=data, app=app or self.app)
def test_good(self):
with self.assertRaises(IARCInfo.DoesNotExist):
self.app.iarc_info
form = self._get_form()
assert form.is_valid(), form.errors
form.save()
iarc_info = IARCInfo.objects.get(addon=self.app)
eq_(iarc_info.submission_id, 1)
eq_(iarc_info.security_code, 'a')
@mock.patch.object(settings, 'IARC_ALLOW_CERT_REUSE', False)
def test_iarc_cert_reuse_on_self(self):
# Test okay to use on self.
self.app.set_iarc_info(1, 'a')
form = self._get_form()
ok_(form.is_valid())
form.save()
eq_(IARCInfo.objects.count(), 1)
@mock.patch.object(settings, 'IARC_ALLOW_CERT_REUSE', False)
def test_iarc_cert_already_used(self):
# Test okay to use on self.
self.app.set_iarc_info(1, 'a')
eq_(IARCInfo.objects.count(), 1)
some_app = amo.tests.app_factory()
form = self._get_form(app=some_app)
ok_(not form.is_valid())
form = self._get_form(app=some_app, submission_id=2)
ok_(form.is_valid())
@mock.patch.object(settings, 'IARC_ALLOW_CERT_REUSE', True)
def test_iarc_already_used_dev(self):
self.app.set_iarc_info(1, 'a')
form = self._get_form()
ok_(form.is_valid())
def test_changing_cert(self):
self.app.set_iarc_info(1, 'a')
form = self._get_form(submission_id=2, security_code='b')
ok_(form.is_valid(), form.errors)
form.save()
iarc_info = self.app.iarc_info.reload()
eq_(iarc_info.submission_id, 2)
eq_(iarc_info.security_code, 'b')
def test_iarc_unexclude(self):
geodata, created = Geodata.objects.get_or_create(addon=self.app)
geodata.update(region_br_iarc_exclude=True,
region_de_iarc_exclude=True)
form = self._get_form()
ok_(form.is_valid())
form.save()
geodata = Geodata.objects.get(addon=self.app)
assert not geodata.region_br_iarc_exclude
assert not geodata.region_de_iarc_exclude
def test_allow_subm(self):
form = self._get_form(submission_id='subm-1231')
assert form.is_valid(), form.errors
form.save()
iarc_info = self.app.iarc_info
eq_(iarc_info.submission_id, 1231)
eq_(iarc_info.security_code, 'a')
def test_bad_submission_id(self):
form = self._get_form(submission_id='subwayeatfresh-133')
assert not form.is_valid()
def test_incomplete(self):
form = self._get_form(submission_id=None)
assert not form.is_valid(), 'Form was expected to be invalid.'
@mock.patch('lib.iarc.utils.IARC_XML_Parser.parse_string')
def test_rating_not_found(self, _mock):
_mock.return_value = {'rows': [
{'ActionStatus': 'No records found. Please try another criteria.'}
]}
form = self._get_form()
assert form.is_valid(), form.errors
with self.assertRaises(django_forms.ValidationError):
form.save()
class TestAPIForm(amo.tests.WebappTestCase):
def setUp(self):
super(TestAPIForm, self).setUp()
self.form = forms.APIConsumerForm
def test_non_url(self):
form = self.form({
'app_name': 'test',
'redirect_uri': 'mailto:[email protected]',
'oauth_leg': 'website'
})
assert not form.is_valid()
eq_(form.errors['redirect_uri'], ['Enter a valid URL.'])
def test_non_app_name(self):
form = self.form({
'redirect_uri': 'mailto:[email protected]',
'oauth_leg': 'website'
})
assert not form.is_valid()
eq_(form.errors['app_name'], ['This field is required.'])
def test_command(self):
form = self.form({'oauth_leg': 'command'})
assert form.is_valid()
def test_website(self):
form = self.form({
'app_name': 'test',
'redirect_uri': 'https://f.com',
'oauth_leg': 'website'
})
assert form.is_valid()
|
py | b40881164a537c66521e224d4424460155442536 | from typing import Union
from files.file import File
class MediaFile(File):
"""
A media file.
"""
USUAL_FILE_NAME_EXTENSIONS = [
'swf',
'xap',
]
USE_FOR_ANALYSIS = True
USE_FOR_INDEX = True
@property
def matches_file_type(self) -> bool:
"""Whether the current instance is a static file of this type."""
return self.has_usual_file_name_extension
@property
def normalized_content(self) -> Union[bytes, None]:
"""
The content of this static file normalized for this file type.
"""
# TODO: do actual normalization
return self.raw_content
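    # A minimal usage sketch (hypothetical -- the real constructor signature
    # lives in files.file and is not shown here), assuming File takes a file
    # name and raw bytes:
    #
    #   flash = MediaFile('game.swf', b'FWS...')
    #   flash.matches_file_type    # True: 'swf' is a usual extension
    #   flash.normalized_content   # currently just the raw bytes (see TODO)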
|
py | b40881549d89716ad575b2e62cc186e251d0c9a9 | from django.apps import AppConfig
class FindworkConfig(AppConfig):
name = 'findwork'
|
py | b40881e5bb0ef2a062aa7f494e4e8256bda339e8 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from datetime import datetime
from datetime import timedelta
import mock
from go.chromium.org.luci.buildbucket.proto import common_pb2
from go.chromium.org.luci.buildbucket.proto.build_pb2 import Build
from go.chromium.org.luci.buildbucket.proto.builder_pb2 import BuilderID
from go.chromium.org.luci.buildbucket.proto.builds_service_pb2 import (
SearchBuildsResponse)
from go.chromium.org.luci.buildbucket.proto.step_pb2 import Step
from google.appengine.ext import ndb
from findit_v2.model.gitiles_commit import Culprit as CulpritNdb
from common.waterfall import buildbucket_client
from findit_v2.model import luci_build
from findit_v2.model.gitiles_commit import GitilesCommit
from findit_v2.model.luci_build import LuciFailedBuild
from findit_v2.model.test_failure import TestFailure
from findit_v2.model.test_failure import TestFailureAnalysis
from findit_v2.model.test_failure import TestFailureInRerunBuild
from findit_v2.model.test_failure import TestFailureGroup
from findit_v2.model.test_failure import TestRerunBuild
from findit_v2.services.analysis.test_failure.test_analysis_api import (
TestAnalysisAPI)
from findit_v2.services.chromium_api import ChromiumProjectAPI
from findit_v2.services.context import Context
from findit_v2.services.failure_type import StepTypeEnum
from libs import analysis_status
from services import deps
from services import git
from waterfall.test import wf_testcase
class TestAnalysisAPITest(wf_testcase.TestCase):
def _GetBuildIdByNumber(self, build_number):
"""Mocks build_id by build_number to show monotonically decreasing."""
return 8000000000200 - build_number
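  # For example (a hedged illustration of the formula above): build 121 maps
  # to 8000000000079, build 122 to 8000000000078 and build 123 to
  # 8000000000077, so newer build numbers get strictly smaller ids, mirroring
  # how real buildbucket ids decrease over time.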
def _MockBuild(self,
build_number,
build_id=None,
gitiles_commit_id=None,
builder_name='Linux Tests',
build_status=common_pb2.FAILURE):
builder = BuilderID(project='chromium', bucket='ci', builder=builder_name)
build_id = build_id or self._GetBuildIdByNumber(build_number)
gitiles_commit_id = gitiles_commit_id or 'git_sha_%d' % build_number
build = Build(
id=build_id, builder=builder, number=build_number, status=build_status)
build.input.gitiles_commit.host = 'gitiles.host.com'
build.input.gitiles_commit.project = 'project/name'
build.input.gitiles_commit.ref = 'ref/heads/master'
build.input.gitiles_commit.id = gitiles_commit_id
build.create_time.FromDatetime(self.create_time)
build.start_time.FromDatetime(self.create_time + timedelta(minutes=1))
build.end_time.FromDatetime(self.create_time + timedelta(minutes=30))
return build
def _GetBuildInfo(self, build_number):
return {
'id': self._GetBuildIdByNumber(build_number),
'number': build_number,
'commit_id': 'git_sha_%d' % build_number
}
def setUp(self):
super(TestAnalysisAPITest, self).setUp()
self.luci_project = 'chromium'
self.gitiles_host = 'gitiles.host.com'
self.gitiles_project = 'project/name'
self.gitiles_ref = 'ref/heads/master'
self.gitiles_id = 'git_sha_123'
self.build_number = 123
self.build_id = self._GetBuildIdByNumber(self.build_number)
self.create_time = datetime(2019, 4, 9)
self.context = Context(
luci_project_name=self.luci_project,
gitiles_host=self.gitiles_host,
gitiles_project=self.gitiles_project,
gitiles_ref=self.gitiles_ref,
gitiles_id=self.gitiles_id)
self.builder = BuilderID(
project=self.luci_project, bucket='ci', builder='Linux Tests')
self.build_info = self._GetBuildInfo(self.build_number)
self.build = self._MockBuild(self.build_number)
self.build_entity = LuciFailedBuild.Create(
luci_project=self.luci_project,
luci_bucket='ci',
luci_builder='Linux Builder',
build_id=9876543210,
legacy_build_number=self.build_number,
gitiles_host='chromium.googlesource.com',
gitiles_project='chromium/src',
gitiles_ref='refs/heads/master',
gitiles_id=self.gitiles_id,
commit_position=65450,
status=20,
create_time=datetime(2019, 3, 28),
start_time=datetime(2019, 3, 28, 0, 1),
end_time=datetime(2019, 3, 28, 1),
build_failure_type=StepTypeEnum.TEST)
self.build_entity.put()
self.test_failure_1 = TestFailure.Create(
failed_build_key=self.build_entity.key,
step_ui_name='step_ui_name',
test='test1',
first_failed_build_id=self.build_id,
failure_group_build_id=None)
self.test_failure_1.put()
self.test_failure_2 = TestFailure.Create(
failed_build_key=self.build_entity.key,
step_ui_name='step_ui_name',
test='test2',
first_failed_build_id=self.build_id,
failure_group_build_id=None)
self.test_failure_2.put()
self.commits = []
for i in xrange(0, 11):
self.commits.append(self._CreateGitilesCommit('r%d' % i, 6000000 + i))
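    # self.commits now holds 11 commits r0..r10 whose commit positions run
    # from 6000000 to 6000010; the regression-range and bisection tests below
    # index into this list.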
self.analysis = TestFailureAnalysis.Create(
luci_project=self.context.luci_project_name,
luci_bucket=self.build.builder.bucket,
luci_builder=self.build.builder.builder,
build_id=self.build_id,
gitiles_host=self.context.gitiles_host,
gitiles_project=self.context.gitiles_project,
gitiles_ref=self.context.gitiles_ref,
last_passed_gitiles_id='left_sha',
last_passed_commit_position=6000000,
first_failed_gitiles_id=self.context.gitiles_id,
first_failed_commit_position=6000005,
rerun_builder_id='chromium/findit/findit-variables',
test_failure_keys=[self.test_failure_1.key, self.test_failure_2.key])
self.analysis.Save()
self.analysis_api = TestAnalysisAPI()
def _CreateGitilesCommit(self, gitiles_id, commit_position):
return GitilesCommit(
gitiles_host=self.context.gitiles_host,
gitiles_project=self.context.gitiles_project,
gitiles_ref=self.context.gitiles_ref,
gitiles_id=gitiles_id,
commit_position=commit_position)
@mock.patch.object(ChromiumProjectAPI, 'GetTestFailures')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(buildbucket_client, 'SearchV2BuildsOnBuilder')
def testUpdateFailuresWithFirstFailureInfo(
self, mock_prev_builds, mock_get_build, mock_prev_failures):
"""Test for the most common case: found both first_failed_build_id and
last_passed_build_id."""
mock_step = Step()
mock_step.name = 'step_ui_name'
mock_step.status = common_pb2.FAILURE
build_122 = self._MockBuild(122)
build_122.steps.extend([mock_step])
build_122_info = self._GetBuildInfo(122)
build_121 = self._MockBuild(121, build_status=common_pb2.SUCCESS)
build_121_info = self._GetBuildInfo(121)
mock_prev_builds.return_value = SearchBuildsResponse(
builds=[build_122, build_121])
mock_get_build.return_value = build_122
failures = {
frozenset(['test3']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
}
mock_prev_failures.return_value = {
'step_ui_name': {
'failures': failures,
'first_failed_build': build_122_info,
'last_passed_build': None,
},
}
detailed_test_failures = {
'step_ui_name': {
'failures': failures,
'first_failed_build': self.build_info,
'last_passed_build': None,
},
}
self.analysis_api.UpdateFailuresWithFirstFailureInfo(
self.context, self.build, detailed_test_failures)
expected_failures = {
'step_ui_name': {
'failures': failures,
'first_failed_build': build_122_info,
'last_passed_build': build_121_info,
},
}
self.assertEqual(expected_failures, detailed_test_failures)
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(buildbucket_client, 'SearchV2BuildsOnBuilder')
def testUpdateFailuresWithFirstFailureInfoPrevBuildDifferentStep(
self, mock_prev_builds, mock_get_build):
"""Test for previous build failed with different steps."""
mock_step = Step()
mock_step.name = 'test'
mock_step.status = common_pb2.FAILURE
mock_step1 = Step()
mock_step1.name = 'step_ui_name'
mock_step1.status = common_pb2.SUCCESS
build_122 = self._MockBuild(122)
build_122.steps.extend([mock_step, mock_step1])
build_122_info = self._GetBuildInfo(122)
build_121 = self._MockBuild(121, build_status=common_pb2.SUCCESS)
mock_prev_builds.return_value = SearchBuildsResponse(
builds=[build_122, build_121])
mock_get_build.return_value = build_122
failures = {
frozenset(['test3']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
}
detailed_test_failures = {
'step_ui_name': {
'failures': failures,
'first_failed_build': self.build_info,
'last_passed_build': None,
},
}
self.analysis_api.UpdateFailuresWithFirstFailureInfo(
self.context, self.build, detailed_test_failures)
expected_failures = {
'step_ui_name': {
'failures': failures,
'first_failed_build': self.build_info,
'last_passed_build': build_122_info,
},
}
self.assertEqual(expected_failures, detailed_test_failures)
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(buildbucket_client, 'SearchV2BuildsOnBuilder')
def testUpdateFailuresWithFirstFailureInfoPrevBuildNoSameStep(
self, mock_prev_builds, mock_get_build):
"""Test for previous build didn't run the same step."""
mock_step = Step()
mock_step.name = 'test'
mock_step.status = common_pb2.FAILURE
build_122 = self._MockBuild(122)
build_122.steps.extend([mock_step])
build_121 = self._MockBuild(121, build_status=common_pb2.SUCCESS)
build_121_info = self._GetBuildInfo(121)
mock_prev_builds.return_value = SearchBuildsResponse(
builds=[build_122, build_121])
mock_get_build.return_value = build_122
failure = {
frozenset(['test3']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
}
detailed_test_failures = {
'step_ui_name': {
'failures': failure,
'first_failed_build': self.build_info,
'last_passed_build': None,
},
}
self.analysis_api.UpdateFailuresWithFirstFailureInfo(
self.context, self.build, detailed_test_failures)
expected_failures = {
'step_ui_name': {
'failures': failure,
'first_failed_build': self.build_info,
'last_passed_build': build_121_info,
},
}
self.assertEqual(expected_failures, detailed_test_failures)
@mock.patch.object(ChromiumProjectAPI, 'GetTestFailures')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(buildbucket_client, 'SearchV2BuildsOnBuilder')
def testUpdateFailuresWithFirstFailureInfoDifferentFirstFailure(
self, mock_prev_builds, mock_get_build, mock_prev_failures):
"""Test for same tests in current build failed from different builds."""
mock_step = Step()
mock_step.name = 'step_ui_name'
mock_step.status = common_pb2.FAILURE
build_122 = self._MockBuild(122)
build_122.steps.extend([mock_step])
build_122_info = self._GetBuildInfo(122)
mock_step1 = Step()
mock_step1.name = 'step_ui_name'
mock_step1.status = common_pb2.FAILURE
build_121 = self._MockBuild(121)
build_121.steps.extend([mock_step1])
build_121_info = self._GetBuildInfo(121)
mock_step2 = Step()
mock_step2.name = 'step_ui_name'
mock_step2.status = common_pb2.FAILURE
build_120 = self._MockBuild(120)
build_120.steps.extend([mock_step2])
build_120_info = self._GetBuildInfo(120)
mock_prev_builds.return_value = SearchBuildsResponse(
builds=[build_122, build_121, build_120])
mock_get_build.side_effect = [build_122, build_121, build_120]
# Test4 failed but test3 passed.
failures_122 = {
'step_ui_name': {
'failures': {
frozenset(['test4']): {
'properties': {},
'first_failed_build': build_122_info,
'last_passed_build': None,
},
},
'first_failed_build': build_122_info,
'last_passed_build': None,
},
}
# Has the same failed tests as current build.
failures_121 = {
'step_ui_name': {
'failures': {
frozenset(['test4']): {
'properties': {},
'first_failed_build': build_121_info,
'last_passed_build': None,
},
frozenset(['test3']): {
'properties': {},
'first_failed_build': build_121_info,
'last_passed_build': None,
},
},
'first_failed_build': build_121_info,
'last_passed_build': None,
},
}
# The same step failed, but with a different test.
failures_120 = {
'step_ui_name': {
'failures': {
frozenset(['test5']): {
'properties': {},
'first_failed_build': build_120_info,
'last_passed_build': None,
},
},
'first_failed_build': build_120_info,
'last_passed_build': None,
},
}
mock_prev_failures.side_effect = [failures_122, failures_121, failures_120]
detailed_test_failures = {
'step_ui_name': {
'failures': {
frozenset(['test3']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
frozenset(['test4']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
}
self.analysis_api.UpdateFailuresWithFirstFailureInfo(
self.context, self.build, detailed_test_failures)
expected_failures = {
'step_ui_name': {
'failures': {
frozenset(['test3']): {
'properties': {},
'first_failed_build': build_121_info,
'last_passed_build': None,
},
frozenset(['test4']): {
'properties': {},
'first_failed_build': build_121_info,
'last_passed_build': None,
},
},
'first_failed_build': build_121_info,
'last_passed_build': None,
},
}
self.assertEqual(expected_failures, detailed_test_failures)
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(buildbucket_client, 'SearchV2BuildsOnBuilder')
def testUpdateFailuresWithFirstFailureInfoPrevBuildInfraFailure(
self, mock_prev_builds, mock_get_build):
"""Test for previous build failed with different steps."""
mock_step1 = Step()
mock_step1.name = 'step_ui_name'
mock_step1.status = common_pb2.INFRA_FAILURE
build_122 = self._MockBuild(122)
build_122.steps.extend([mock_step1])
build_121 = self._MockBuild(121, build_status=common_pb2.SUCCESS)
build_121_info = self._GetBuildInfo(121)
mock_prev_builds.return_value = SearchBuildsResponse(
builds=[build_122, build_121])
mock_get_build.return_value = build_122
detailed_test_failures = {
'step_ui_name': {
'failures': {
frozenset(['test3']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
}
self.analysis_api.UpdateFailuresWithFirstFailureInfo(
self.context, self.build, detailed_test_failures)
expected_failures = {
'step_ui_name': {
'failures': {
frozenset(['test3']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': build_121_info,
},
},
'first_failed_build': self.build_info,
'last_passed_build': build_121_info,
},
}
self.assertEqual(expected_failures, detailed_test_failures)
def testGetFirstFailuresInCurrentBuild(self):
build_122_info = self._GetBuildInfo(122)
failures = {
'step_ui_name': {
'failures': {
frozenset(['test3']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': build_122_info,
},
},
'first_failed_build': self.build_info,
'last_passed_build': build_122_info,
},
}
expected_res = {
'failures': {
'step_ui_name': {
'atomic_failures': [frozenset(['test3'])],
'last_passed_build': build_122_info,
},
},
'last_passed_build': build_122_info
}
self.assertEqual(
expected_res,
self.analysis_api.GetFirstFailuresInCurrentBuild(self.build, failures))
def testGetFirstFailuresInCurrentBuildNoFirstFailures(self):
build_122_info = self._GetBuildInfo(122)
build_121_info = self._GetBuildInfo(121)
failures = {
'step_ui_name': {
'failures': {
frozenset(['test3']): {
'properties': {},
'first_failed_build': build_122_info,
'last_passed_build': build_121_info,
},
},
'first_failed_build': build_122_info,
'last_passed_build': build_121_info,
},
}
expected_res = {'failures': {}, 'last_passed_build': None}
self.assertEqual(
expected_res,
self.analysis_api.GetFirstFailuresInCurrentBuild(self.build, failures))
def testGetFirstFailuresInCurrentBuildNoLastPass(self):
failures = {
'step_ui_name': {
'failures': {
frozenset(['test3']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
}
expected_res = {'failures': {}, 'last_passed_build': None}
self.assertEqual(
expected_res,
self.analysis_api.GetFirstFailuresInCurrentBuild(self.build, failures))
def testGetFirstFailuresInCurrentBuildOnlyStep(self):
build_122_info = self._GetBuildInfo(122)
failures = {
'step_ui_name': {
'failures': {},
'first_failed_build': self.build_info,
'last_passed_build': build_122_info,
},
}
expected_res = {
'failures': {
'step_ui_name': {
'atomic_failures': [],
'last_passed_build': build_122_info,
},
},
'last_passed_build': build_122_info
}
self.assertEqual(
expected_res,
self.analysis_api.GetFirstFailuresInCurrentBuild(self.build, failures))
def testGetFirstFailuresInCurrentBuildOnlyStepFailedBefore(self):
build_122_info = self._GetBuildInfo(122)
build_121_info = self._GetBuildInfo(121)
failures = {
'step_ui_name': {
'failures': {},
'first_failed_build': build_122_info,
'last_passed_build': build_121_info,
},
}
expected_res = {'failures': {}, 'last_passed_build': None}
self.assertEqual(
expected_res,
self.analysis_api.GetFirstFailuresInCurrentBuild(self.build, failures))
def testGetFirstFailuresInCurrentBuildFailureStartedInDifferentBuild(self):
build_122_info = self._GetBuildInfo(122)
build_121_info = self._GetBuildInfo(121)
failures = {
'step_ui_name': {
'failures': {
frozenset(['test3']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': build_122_info,
},
frozenset(['test4']): {
'properties': {},
'first_failed_build': build_122_info,
'last_passed_build': None,
},
frozenset(['test5']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': build_121_info,
},
},
'first_failed_build': build_122_info,
'last_passed_build': None,
},
}
expected_res = {
'failures': {
'step_ui_name': {
'atomic_failures': [frozenset(['test5']),
frozenset(['test3'])],
'last_passed_build':
build_121_info,
},
},
'last_passed_build': build_121_info
}
self.assertEqual(
expected_res,
self.analysis_api.GetFirstFailuresInCurrentBuild(self.build, failures))
@mock.patch.object(git, 'GetCommitPositionFromRevision', return_value=67890)
def testSaveFailures(self, _):
build_121_info = self._GetBuildInfo(121)
build_120_info = self._GetBuildInfo(120)
detailed_test_failures = {
'step_ui_name': {
'failures': {
frozenset(['test3']): {
'properties': {},
'first_failed_build': build_121_info,
'last_passed_build': build_120_info,
},
},
'first_failed_build': build_121_info,
'last_passed_build': build_120_info,
},
}
# Prepares data for existing failure group.
group_build = self._MockBuild(
12134, 8000003412134, 'git_sha_121', builder_name='Mac')
group_build_entity = luci_build.SaveFailedBuild(self.context, group_build,
StepTypeEnum.TEST)
group_failure = TestFailure.Create(
group_build_entity.key,
'step_ui_name',
'test3',
first_failed_build_id=8000003412134,
failure_group_build_id=8000003412134)
group_failure.put()
# Prepares data for first failed build.
first_failed_build = self._MockBuild(121)
first_failed_build_entity = luci_build.SaveFailedBuild(
self.context, first_failed_build, StepTypeEnum.TEST)
first_failure = TestFailure.Create(
first_failed_build_entity.key,
'step_ui_name',
'test3',
first_failed_build_id=first_failed_build.id,
failure_group_build_id=800000341213)
first_failure.merged_failure_key = group_failure.key
first_failure.put()
self.analysis_api.SaveFailures(self.context, self.build,
detailed_test_failures)
build = LuciFailedBuild.get_by_id(self.build_id)
self.assertIsNotNone(build)
test_failures = TestFailure.query(ancestor=build.key).fetch()
self.assertEqual(1, len(test_failures))
self.assertEqual(
self._GetBuildIdByNumber(121), test_failures[0].first_failed_build_id)
self.assertEqual(group_failure.key, test_failures[0].merged_failure_key)
@mock.patch.object(git, 'GetCommitPositionFromRevision', return_value=67890)
def testSaveFailuresOnlyStepLevelFailures(self, _):
detailed_test_failures = {
'step_ui_name': {
'failures': {},
'first_failed_build': self._GetBuildInfo(121),
'last_passed_build': self._GetBuildInfo(120),
},
}
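    # Note: there are only step-level failures here (no individual tests), so
    # the saved TestFailure is presumably keyed with test=None and an empty
    # frozenset identifier -- the assertions at the end check exactly that.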
# Prepares data for first failed build.
first_failed_build = self._MockBuild(121)
first_failed_build_entity = luci_build.SaveFailedBuild(
self.context, first_failed_build, StepTypeEnum.TEST)
first_failure = TestFailure.Create(
first_failed_build_entity.key,
'step_ui_name',
None,
first_failed_build_id=first_failed_build.id,
failure_group_build_id=first_failed_build.id)
first_failure.put()
self.analysis_api.SaveFailures(self.context, self.build,
detailed_test_failures)
build_entity = LuciFailedBuild.get_by_id(self.build_id)
self.assertIsNotNone(build_entity)
test_failures = TestFailure.query(ancestor=build_entity.key).fetch()
self.assertEqual(1, len(test_failures))
self.assertEqual(
self._GetBuildIdByNumber(121), test_failures[0].first_failed_build_id)
self.assertEqual(frozenset([]), test_failures[0].GetFailureIdentifier())
self.assertEqual(first_failure.key, test_failures[0].merged_failure_key)
@mock.patch.object(
ChromiumProjectAPI,
'GetRerunBuilderId',
return_value='chromium/findit/findit_variables')
@mock.patch.object(
git, 'GetCommitPositionFromRevision', side_effect=[66680, 66666, 66680])
def testSaveFailureAnalysis(self, *_):
build_120_info = self._GetBuildInfo(120)
detailed_test_failures = {
'step_ui_name': {
'failures': {
frozenset(['test3']): {
'properties': {
'properties': {},
},
'first_failed_build': self.build_info,
'last_passed_build': build_120_info,
},
frozenset(['test4']): {
'properties': {
'properties': {},
},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
},
'first_failed_build': self.build_info,
'last_passed_build': build_120_info,
},
}
self.analysis_api.SaveFailures(self.context, self.build,
detailed_test_failures)
first_failures_in_current_build = {
'failures': {
'step_ui_name': {
'atomic_failures': [frozenset(['test3'])],
'last_passed_build': build_120_info,
},
},
'last_passed_build': build_120_info
}
self.analysis_api.SaveFailureAnalysis(
ChromiumProjectAPI(), self.context, self.build,
first_failures_in_current_build, False)
analysis = TestFailureAnalysis.GetVersion(self.build_id)
self.assertIsNotNone(analysis)
self.assertEqual('git_sha_120', analysis.last_passed_commit.gitiles_id)
self.assertEqual(66666, analysis.last_passed_commit.commit_position)
self.assertEqual('chromium/findit/findit_variables',
analysis.rerun_builder_id)
self.assertEqual(1, len(analysis.test_failure_keys))
self.assertItemsEqual('test3', analysis.test_failure_keys[0].get().test)
@mock.patch.object(
ChromiumProjectAPI,
'GetRerunBuilderId',
return_value='chromium/findit/findit_variables')
@mock.patch.object(
git, 'GetCommitPositionFromRevision', side_effect=[66680, 66666, 66680])
def testSaveFailureAnalysisWithGroup(self, *_):
build_120_info = self._GetBuildInfo(120)
detailed_test_failures = {
'step_ui_name': {
'failures': {
frozenset(['test3']): {
'properties': {
'properties': {},
},
'first_failed_build': self.build_info,
'last_passed_build': build_120_info,
},
frozenset(['test4']): {
'properties': {
'properties': {},
},
'first_failed_build': self.build_info,
'last_passed_build': None,
},
},
'first_failed_build': self.build_info,
'last_passed_build': build_120_info,
},
}
self.analysis_api.SaveFailures(self.context, self.build,
detailed_test_failures)
first_failures_in_current_build = {
'failures': {
'step_ui_name': {
'atomic_failures': [frozenset(['test3'])],
'last_passed_build': build_120_info,
},
},
'last_passed_build': build_120_info
}
self.analysis_api.SaveFailureAnalysis(ChromiumProjectAPI(), self.context,
self.build,
first_failures_in_current_build, True)
analysis = TestFailureAnalysis.GetVersion(self.build_id)
self.assertIsNotNone(analysis)
self.assertEqual('git_sha_120', analysis.last_passed_commit.gitiles_id)
self.assertEqual(66666, analysis.last_passed_commit.commit_position)
self.assertEqual('chromium/findit/findit_variables',
analysis.rerun_builder_id)
self.assertEqual(1, len(analysis.test_failure_keys))
self.assertItemsEqual('test3', analysis.test_failure_keys[0].get().test)
group = TestFailureGroup.get_by_id(self.build_id)
self.assertIsNotNone(group)
self.assertEqual('git_sha_120', analysis.last_passed_commit.gitiles_id)
self.assertEqual(self.build_info['commit_id'],
analysis.first_failed_commit.gitiles_id)
@mock.patch.object(
ChromiumProjectAPI,
'GetFailuresWithMatchingTestFailureGroups',
return_value={})
def testGetFirstFailuresInCurrentBuildWithoutGroupNoExistingGroup(self, _):
self.assertEqual(
{},
self.analysis_api.GetFirstFailuresInCurrentBuildWithoutGroup(
ChromiumProjectAPI(), self.context, self.build, {}))
@mock.patch.object(
git, 'GetCommitPositionFromRevision', side_effect=[66680, 66666, 66680])
@mock.patch.object(
ChromiumProjectAPI,
'GetFailuresWithMatchingTestFailureGroups',
return_value={
'step_ui_name': {
frozenset(['test1']): 8000000000134,
frozenset(['test2']): 8000000000134
}
})
def testGetFirstFailuresInCurrentBuildWithoutGroup(self, *_):
build_121_info = self._GetBuildInfo(121)
first_failures_in_current_build = {
'failures': {
'step_ui_name': {
'atomic_failures': [frozenset(['test1']),
frozenset(['test2'])],
'last_passed_build':
build_121_info,
},
},
'last_passed_build': build_121_info
}
# Creates and saves entities of the existing group.
detailed_test_failures = {
'step_ui_name': {
'failures': {
frozenset(['test1']): {
'properties': {
'properties': {},
},
'first_failed_build': self.build_info,
'last_passed_build': build_121_info,
},
frozenset(['test2']): {
'properties': {
'properties': {},
},
'first_failed_build': self.build_info,
'last_passed_build': build_121_info,
},
},
'first_failed_build': self.build_info,
'last_passed_build': build_121_info,
},
}
self.analysis_api.SaveFailures(self.context, self.build,
detailed_test_failures)
# Prepares data for existing failure group.
group_build_id = 8000000000134
group_build = self._MockBuild(
12134,
build_id=group_build_id,
gitiles_commit_id='git_sha_134',
builder_name='Mac')
group_build_entity = luci_build.SaveFailedBuild(self.context, group_build,
StepTypeEnum.TEST)
group_failure1 = TestFailure.Create(
group_build_entity.key,
'step_ui_name',
'test1',
first_failed_build_id=group_build_id,
failure_group_build_id=group_build_id)
group_failure1.put()
group_failure2 = TestFailure.Create(
group_build_entity.key,
'step_ui_name',
'test2',
first_failed_build_id=group_build_id,
failure_group_build_id=group_build_id)
group_failure2.put()
self.assertEqual(
{
'failures': {},
'last_passed_build': None
},
self.analysis_api.GetFirstFailuresInCurrentBuildWithoutGroup(
ChromiumProjectAPI(), self.context, self.build,
first_failures_in_current_build))
build = LuciFailedBuild.get_by_id(self.build_id)
test_failures = TestFailure.query(ancestor=build.key).fetch()
self.assertEqual(2, len(test_failures))
for failure in test_failures:
if failure.test == 'test1':
self.assertEqual(group_failure1.key, failure.merged_failure_key)
else:
self.assertEqual(group_failure2.key, failure.merged_failure_key)
@mock.patch.object(
git, 'GetCommitPositionFromRevision', side_effect=[66680, 66666, 66680])
@mock.patch.object(ChromiumProjectAPI,
'GetFailuresWithMatchingTestFailureGroups')
def testGetFirstFailuresInCurrentBuildWithoutGroupExistingGroupForSameBuild(
self, mock_get_failures_w_group, _):
build_121_info = self._GetBuildInfo(121)
first_failures_in_current_build = {
'failures': {
'step_ui_name': {
'atomic_failures': [frozenset(['test1']),
frozenset(['test2'])],
'last_passed_build':
build_121_info,
},
},
'last_passed_build': build_121_info
}
# Creates and saves entities of the existing group.
detailed_test_failures = {
'step_ui_name': {
'failures': {
frozenset(['test1']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': build_121_info,
},
frozenset(['test2']): {
'properties': {},
'first_failed_build': self.build_info,
'last_passed_build': build_121_info,
},
},
'first_failed_build': self.build_info,
'last_passed_build': build_121_info,
},
}
mock_get_failures_w_group.return_value = {
'step_ui_name': {
frozenset(['test1']): self._GetBuildIdByNumber(123)
}
}
self.analysis_api.SaveFailures(self.context, self.build,
detailed_test_failures)
expected_result = {
'failures': {
'step_ui_name': {
'atomic_failures': [frozenset(['test1']),
frozenset(['test2'])],
'last_passed_build':
build_121_info,
},
},
'last_passed_build': build_121_info
}
self.assertEqual(
expected_result,
self.analysis_api.GetFirstFailuresInCurrentBuildWithoutGroup(
ChromiumProjectAPI(), self.context, self.build,
first_failures_in_current_build))
def testBisectGitilesCommitGetCulpritCommit(self):
culprit_commit = self.analysis_api._GetCulpritCommit(
self.commits[9], self.commits[10])
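    # commits[9] (position 6000009) is the last passed commit and commits[10]
    # (position 6000010) the first failed one; since they are adjacent there
    # is nothing left to bisect, so the first failed commit is presumably
    # returned as the culprit.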
self.assertEqual(6000010, culprit_commit.commit_position)
def testBisectGitilesCommitFailedToGetGitilesId(self):
context = Context(
luci_project_name='chromium',
gitiles_project='project/name',
gitiles_host='gitiles.host.com',
gitiles_ref='ref/heads/master',
gitiles_id=self.commits[10].gitiles_id)
bisect_commit = self.analysis_api._BisectGitilesCommit(
context, self.commits[0], self.commits[10], {})
self.assertIsNone(bisect_commit)
def testUpdateFailureRegressionRanges(self):
rerun_builds_info = [(self.commits[5], {}),
(self.commits[7], {
'step_ui_name': ['test1']
}), (self.commits[6], {
'step_ui_name': ['test1']
}), (self.commits[8], {
'step_ui_name': ['test2']
})]
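    # Reading the mocked rerun results: the build at commits[5] passed
    # everything, the builds at commits[6] and commits[7] reproduced test1,
    # and the build at commits[8] reproduced test2. The ranges should
    # therefore narrow to (commits[5], commits[6]] for test1 and
    # (commits[7], commits[8]] for test2, matching expected_results below.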
failures_with_range = [{
'failure': self.test_failure_1,
'last_passed_commit': self.commits[0],
'first_failed_commit': self.commits[10],
},
{
'failure': self.test_failure_2,
'last_passed_commit': self.commits[0],
'first_failed_commit': self.commits[10],
}]
expected_results = [{
'failure': self.test_failure_1,
'last_passed_commit': self.commits[5],
'first_failed_commit': self.commits[6],
},
{
'failure': self.test_failure_2,
'last_passed_commit': self.commits[7],
'first_failed_commit': self.commits[8],
}]
self.analysis_api._UpdateFailureRegressionRanges(rerun_builds_info,
failures_with_range)
for real_failure in failures_with_range:
for expected_result in expected_results:
if real_failure['failure'].GetFailureIdentifier(
) == expected_result['failure'].GetFailureIdentifier():
self.assertEqual(expected_result['last_passed_commit'].gitiles_id,
real_failure['last_passed_commit'].gitiles_id)
self.assertEqual(expected_result['first_failed_commit'].gitiles_id,
real_failure['first_failed_commit'].gitiles_id)
def testGroupFailuresByRegressionRange(self):
test_failure_3 = TestFailure.Create(self.build_entity.key, 'step_ui_name',
'test6')
test_failure_3.put()
failures_with_range = [{
'failure': self.test_failure_1,
'last_passed_commit': self.commits[5],
'first_failed_commit': self.commits[6],
},
{
'failure': self.test_failure_2,
'last_passed_commit': self.commits[7],
'first_failed_commit': self.commits[8],
},
{
'failure': test_failure_3,
'last_passed_commit': self.commits[5],
'first_failed_commit': self.commits[6],
}]
expected_result = [
{
'failures': [self.test_failure_1, test_failure_3],
'last_passed_commit': self.commits[5],
'first_failed_commit': self.commits[6],
},
{
'failures': [self.test_failure_2],
'last_passed_commit': self.commits[7],
'first_failed_commit': self.commits[8],
},
]
result = self.analysis_api._GroupFailuresByRegressionRange(
failures_with_range)
self.assertItemsEqual(expected_result, result)
def testGetCulpritsForFailures(self):
culprit = CulpritNdb.Create(self.gitiles_host, self.gitiles_project,
self.gitiles_ref, 'git_hash_123', 123)
culprit.put()
failure1 = TestFailure.Create(self.build_entity.key, 'step_ui_name',
'test1')
failure1.culprit_commit_key = culprit.key
failure1.put()
failure2 = TestFailure.Create(self.build_entity.key, 'step_ui_name',
'test2')
failure2.culprit_commit_key = culprit.key
failure2.put()
culprits = self.analysis_api.GetCulpritsForFailures([failure1, failure2])
self.assertEqual(1, len(culprits))
self.assertEqual('git_hash_123', culprits[0].commit.id)
def _CreateTestRerunBuild(self, commit_index=2):
rerun_commit = self.commits[commit_index]
rerun_builder = BuilderID(
project='chromium', bucket='findit', builder='findit-variables')
rerun_build = TestRerunBuild.Create(
luci_project=rerun_builder.project,
luci_bucket=rerun_builder.bucket,
luci_builder=rerun_builder.builder,
build_id=8000000000789,
legacy_build_number=60789,
gitiles_host=rerun_commit.gitiles_host,
gitiles_project=rerun_commit.gitiles_project,
gitiles_ref=rerun_commit.gitiles_ref,
gitiles_id=rerun_commit.gitiles_id,
commit_position=rerun_commit.commit_position,
status=1,
create_time=datetime(2019, 3, 28),
parent_key=self.analysis.key)
rerun_build.put()
return rerun_build
@mock.patch.object(TestAnalysisAPI, '_GetRerunDimensions', return_value=None)
@mock.patch.object(
ChromiumProjectAPI,
'GetTestRerunBuildInputProperties',
return_value={'recipe': 'step_ui_name'})
@mock.patch.object(buildbucket_client, 'TriggerV2Build')
def testTriggerRerunBuild(self, mock_trigger_build, *_):
new_build_id = 800000024324
new_build = Build(id=new_build_id, number=300)
new_build.status = common_pb2.SCHEDULED
new_build.create_time.FromDatetime(datetime(2019, 4, 20))
rerun_builder = BuilderID(
project='chromium', bucket='findit', builder='findit-variables')
rerun_commit = self.commits[2]
rerun_tests = {'step_ui_name': ['test1']}
mock_trigger_build.return_value = new_build
self.analysis_api.TriggerRerunBuild(self.context, self.build_id,
self.analysis.key, rerun_builder,
rerun_commit, rerun_tests)
rerun_build = TestRerunBuild.get_by_id(
new_build_id, parent=self.analysis.key)
self.assertIsNotNone(rerun_build)
mock_trigger_build.assert_called_once_with(
rerun_builder,
common_pb2.GitilesCommit(
project=rerun_commit.gitiles_project,
host=rerun_commit.gitiles_host,
ref=rerun_commit.gitiles_ref,
id=rerun_commit.gitiles_id), {'recipe': 'step_ui_name'},
dimensions=None,
tags=[{
'value': 'test-failure-culprit-finding',
'key': 'purpose'
}, {
'value': str(self.build.id),
'key': 'analyzed_build_id'
}])
@mock.patch.object(TestAnalysisAPI, '_GetRerunDimensions', return_value=None)
@mock.patch.object(
ChromiumProjectAPI,
'GetTestRerunBuildInputProperties',
return_value={'recipe': 'step_ui_name'})
@mock.patch.object(buildbucket_client, 'TriggerV2Build')
def testTriggerRerunBuildFoundRunningBuild(self, mock_trigger_build, *_):
"""This test is for the case where there's already an existing rerun build,
so no new rerun-build should be scheduled."""
rerun_builder = BuilderID(
project='chromium', bucket='findit', builder='findit-variables')
rerun_tests = {'step_ui_name': ['test1']}
self._CreateTestRerunBuild(commit_index=2)
self.analysis_api.TriggerRerunBuild(self.context, self.build_id,
self.analysis.key, rerun_builder,
self.commits[2], rerun_tests)
self.assertFalse(mock_trigger_build.called)
@mock.patch.object(
ChromiumProjectAPI, 'GetTestRerunBuildInputProperties', return_value=None)
@mock.patch.object(buildbucket_client, 'TriggerV2Build')
  def testTriggerRerunBuildFailedToGetProperty(self, mock_trigger_build, _):
    """Tests that no rerun build is scheduled when the rerun build input
    properties cannot be obtained."""
rerun_commit = self.commits[2]
rerun_builder = BuilderID(
project='chromium', bucket='findit', builder='findit-variables')
rerun_tests = {'step_ui_name': ['test1']}
self.analysis_api.TriggerRerunBuild(self.context, self.build_id,
self.analysis.key, rerun_builder,
rerun_commit, rerun_tests)
self.assertFalse(mock_trigger_build.called)
@mock.patch.object(TestAnalysisAPI, '_GetRerunDimensions', return_value=None)
@mock.patch.object(
ChromiumProjectAPI,
'GetTestRerunBuildInputProperties',
return_value={'recipe': 'step_ui_name'})
@mock.patch.object(buildbucket_client, 'TriggerV2Build', return_value=None)
  def testTriggerRerunBuildFailedToTriggerBuild(self, mock_trigger_build, *_):
    """Tests that no rerun build entity is saved when triggering the rerun
    build via buildbucket fails."""
rerun_commit = self.commits[2]
rerun_builder = BuilderID(
project='chromium', bucket='findit', builder='findit-variables')
rerun_tests = {'step_ui_name': ['test1']}
self.analysis_api.TriggerRerunBuild(self.context, self.build_id,
self.analysis.key, rerun_builder,
rerun_commit, rerun_tests)
self.assertTrue(mock_trigger_build.called)
rerun_builds = TestRerunBuild.query(ancestor=self.analysis.key).fetch()
self.assertEqual([], rerun_builds)
def testGetRegressionRangesForFailuresNoRerunBuilds(self):
result = self.analysis_api._GetRegressionRangesForFailures(self.analysis)
expected_result = [{
'failures': [self.test_failure_1, self.test_failure_2],
'last_passed_commit': self.analysis.last_passed_commit,
'first_failed_commit': self.analysis.first_failed_commit
}]
self.assertEqual(expected_result, result)
def testGetRegressionRangesForFailures(self):
rerun_build = self._CreateTestRerunBuild(commit_index=2)
rerun_build.status = 20
failure_entity = TestFailureInRerunBuild(
step_ui_name='step_ui_name', test='test1')
rerun_build.failures = [failure_entity]
rerun_build.put()
results = self.analysis_api._GetRegressionRangesForFailures(self.analysis)
expected_results = [{
'failures': [self.test_failure_2],
'first_failed_commit': self.analysis.first_failed_commit,
'last_passed_commit': self.commits[2]
},
{
'failures': [self.test_failure_1],
'first_failed_commit':
self.commits[2],
'last_passed_commit':
self.analysis.last_passed_commit
}]
self.assertEqual(expected_results, results)
@mock.patch.object(TestAnalysisAPI, '_GetRerunDimensions', return_value=None)
@mock.patch.object(
TestAnalysisAPI,
'_GetRerunBuildInputProperties',
return_value={'recipe': 'step_ui_name'})
@mock.patch.object(buildbucket_client, 'TriggerV2Build')
@mock.patch.object(git, 'MapCommitPositionsToGitHashes')
def testRerunBasedAnalysisContinueWithNextRerunBuild(self, mock_revisions,
mock_trigger_build, *_):
mock_revisions.return_value = {n: str(n) for n in xrange(6000000, 6000005)}
mock_rerun_build = Build(id=8000055000123, number=78990)
mock_rerun_build.create_time.FromDatetime(datetime(2019, 4, 30))
mock_trigger_build.return_value = mock_rerun_build
self.analysis_api.RerunBasedAnalysis(self.context, self.build_id)
self.assertTrue(mock_trigger_build.called)
analysis = TestFailureAnalysis.GetVersion(self.build_id)
self.assertEqual(analysis_status.RUNNING, analysis.status)
rerun_builds = TestRerunBuild.query(ancestor=self.analysis.key).fetch()
self.assertEqual(1, len(rerun_builds))
self.assertEqual(6000002, rerun_builds[0].gitiles_commit.commit_position)
@mock.patch.object(TestAnalysisAPI, 'TriggerRerunBuild')
@mock.patch.object(git, 'MapCommitPositionsToGitHashes')
def testRerunBasedAnalysisEndWithCulprit(self, mock_revisions,
mock_trigger_build):
rerun_build = self._CreateTestRerunBuild(commit_index=1)
rerun_build.status = 20
failure_entity_a = TestFailureInRerunBuild(
step_ui_name='step_ui_name', test='test1')
failure_entity_b = TestFailureInRerunBuild(
step_ui_name='step_ui_name', test='test2')
rerun_build.failures = [failure_entity_a, failure_entity_b]
rerun_build.put()
mock_revisions.return_value = {n: str(n) for n in xrange(6000000, 6000005)}
self.analysis_api.RerunBasedAnalysis(self.context, self.build_id)
self.assertFalse(mock_trigger_build.called)
analysis = TestFailureAnalysis.GetVersion(self.build_id)
self.assertEqual(analysis_status.COMPLETED, analysis.status)
test_failures = ndb.get_multi(analysis.test_failure_keys)
culprit_key = test_failures[0].culprit_commit_key
self.assertIsNotNone(culprit_key)
culprit = culprit_key.get()
self.assertEqual(6000001, culprit.commit_position)
@mock.patch.object(
ChromiumProjectAPI, 'FailureShouldBeAnalyzed', return_value=False)
def testGetSkippedFailures(self, _):
step_ui_name = 's'
test_name = 't'
first_build_id = 800000000013954
first_failure = TestFailure.Create(
ndb.Key(LuciFailedBuild, first_build_id),
step_ui_name,
test_name,
first_failed_build_id=first_build_id,
failure_group_build_id=first_build_id)
first_failure.put()
current_failure = TestFailure.Create(
ndb.Key(LuciFailedBuild, 800000000013950),
step_ui_name,
test_name,
first_failed_build_id=first_build_id,
failure_group_build_id=first_build_id,
merged_failure_key=first_failure.key)
current_failure.put()
self.assertEqual({
first_build_id: [first_failure],
},
self.analysis_api.GetSkippedFailures(
ChromiumProjectAPI(), [current_failure]))
@mock.patch.object(
ChromiumProjectAPI, 'FailureShouldBeAnalyzed', return_value=False)
def testGetSkippedFailuresMissingMergedFailure(self, _):
step_ui_name = 's'
test_name = 't'
first_build_id = 800000000013954
current_failure = TestFailure.Create(
ndb.Key(LuciFailedBuild, 800000000013950),
step_ui_name,
test_name,
first_failed_build_id=first_build_id,
failure_group_build_id=first_build_id)
current_failure.put()
self.assertEqual({},
self.analysis_api.GetSkippedFailures(
ChromiumProjectAPI(), [current_failure]))
def testGetSkippedFailuresAllFailuresHaveBeenAnalyzed(self):
step_ui_name = 's'
test_name = 't'
first_build_id = 800000000013954
first_failure = TestFailure.Create(
ndb.Key(LuciFailedBuild, first_build_id),
step_ui_name,
test_name,
first_failed_build_id=first_build_id,
failure_group_build_id=first_build_id)
first_failure.put()
current_failure = TestFailure.Create(
ndb.Key(LuciFailedBuild, 800000000013950),
step_ui_name,
test_name,
first_failed_build_id=first_build_id,
failure_group_build_id=first_build_id)
current_failure.put()
self.assertEqual({},
self.analysis_api.GetSkippedFailures(
ChromiumProjectAPI(), [current_failure]))
@mock.patch.object(
ChromiumProjectAPI,
'GetRerunBuilderId',
return_value='chromium/ci/builder')
@mock.patch.object(TestAnalysisAPI, 'RerunBasedAnalysis')
def testAnalyzeSkippedFailures(self, mock_rerun, _):
context = Context(
luci_project_name='chromium',
gitiles_project='project/name',
gitiles_host='gitiles.host.com',
gitiles_ref='ref/heads/master',
gitiles_id='gitiles_id_125')
build_id = 800000000003954
build = Build(
id=build_id,
builder=BuilderID(project='chromium', bucket='ci', builder='builder'))
failure = TestFailure.Create(
ndb.Key(LuciFailedBuild, build_id),
'step',
'test3231',
first_failed_build_id=build_id,
failure_group_build_id=build_id)
failure.put()
group = TestFailureGroup.Create(
luci_project=context.luci_project_name,
luci_bucket=build.builder.bucket,
build_id=build.id,
gitiles_host=context.gitiles_host,
gitiles_project=context.gitiles_project,
gitiles_ref=context.gitiles_ref,
last_passed_gitiles_id='gitiles_id_123',
last_passed_commit_position=123,
first_failed_gitiles_id=context.gitiles_id,
first_failed_commit_position=125,
test_failure_keys=[failure.key])
group.put()
self.analysis_api.AnalyzeSkippedFailures(ChromiumProjectAPI(), context,
build, [failure])
mock_rerun.assert_called_once_with(context, build_id)
analysis = self.analysis_api._GetFailureAnalysis(build_id)
self.assertIsNotNone(analysis)
@mock.patch.object(git, 'PullChangeLogs')
@mock.patch.object(deps, 'ExtractDepsInfo')
@mock.patch.object(ChromiumProjectAPI, 'GetTestFailureInfo')
@mock.patch.object(ChromiumProjectAPI, 'ExtractSignalsForTestFailure')
@mock.patch.object(ChromiumProjectAPI, 'HeuristicAnalysisForTest')
def testGetSuspectedCulprits(self, mock_heuristic, *_):
context = Context(
luci_project_name='chromium',
gitiles_project='project/name',
gitiles_host='gitiles.host.com',
gitiles_ref='ref/heads/master',
gitiles_id='gitiles_id_125')
build_id = 800000000003954
build = Build(
id=build_id,
builder=BuilderID(project='chromium', bucket='ci', builder='builder'))
mock_step = Step()
mock_step.name = 'step_ui_name'
mock_step.status = common_pb2.FAILURE
build_120 = self._MockBuild(120)
build_120.steps.extend([mock_step])
build_120_info = self._GetBuildInfo(120)
first_failures_in_current_build = {
'failures': {
'step_ui_name': {
'atomic_failures': [frozenset(['test3'])],
'last_passed_build': build_120_info,
},
},
'last_passed_build': build_120_info
}
self.analysis_api.GetSuspectedCulprits(ChromiumProjectAPI(), context, build,
first_failures_in_current_build)
# There is not much testable logic here, this method functions as glue
# routing together inputs for the heuristic analysis only. All we test is
# that the correct implementation is called.
self.assertEqual(1, mock_heuristic.call_count)
|
py | b4088224d2de89ae38ab35def9d2458eed34a3a9 | def load_file(filename): # define a function to load your data, one input parameter is the name of the file to be loaded
import pandas as pd
data = pd.read_csv(filename) # load file with given name inside the function
return data # tell function to return the loaded data
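# Minimal usage sketch (the file name below is hypothetical): the function is a
# thin wrapper around pandas.read_csv, so any path to a CSV file on disk works.
#   df = load_file("measurements.csv")
#   print(df.head())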
|
py | b408828b7d209ebcaf58e5206da5b200f50c92cd | #!/usr/bin/env python
import csv
import importlib
import os
import shutil
import sys
from setuptools import setup, find_packages
# Constants
DISTNAME = 'kymatio'
DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'
URL = 'https://www.kymat.io'
LICENSE = 'BSD-3-Clause'
# Parse description
with open('README.md') as f:
README = f.read().split('\n')
LONG_DESCRIPTION = '\n'.join([x for x in README if not x[:3]=='[!['])
# Parse version.py
kymatio_version_spec = importlib.util.spec_from_file_location(
'kymatio_version', 'kymatio/version.py')
kymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)
kymatio_version_spec.loader.exec_module(kymatio_version_module)
VERSION = kymatio_version_module.version
# Parse requirements.txt
with open('requirements.txt', 'r') as f:
REQUIREMENTS = f.read().split('\n')
setup_info = dict(
# Metadata
name=DISTNAME,
version=VERSION,
author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '
'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '
            'Louis Thiry, Vincent Lostanlen, Joakim Andén, '
            'Tomás Angles, Gabriel Huang, Roberto Leonarduzzi'),
author_email=('[email protected], [email protected], '
'[email protected], [email protected], '
'[email protected], [email protected], '
'[email protected], [email protected], [email protected], '
'[email protected], [email protected], [email protected]'),
url=URL,
download_url='https://github.com/kymatio/kymatio/releases',
project_urls={
'Documentation': 'https://www.kymat.io/codereference.html',
'Source': 'https://github.com/kymatio/kymatio/',
'Tracker': 'https://github.com/kymatio/kymatio/issues',
'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'
},
classifiers=['Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Multimedia :: Graphics :: 3D Modeling',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Software Development :: Libraries :: Python Modules',
],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
python_requires='>=3.6',
license=LICENSE,
packages=find_packages(exclude=('test',)),
install_requires=REQUIREMENTS,
zip_safe=True,
)
setup(**setup_info)
|
py | b408829e418634418a97df7428010366a6ab7ef1 | from flask import Blueprint, render_template, redirect, url_for, flash
from .database import db
import plotly.graph_objects as go
views = Blueprint('views', __name__)
@views.route('/', methods=['GET', 'POST'])
def index():
fig = go.Figure(data=go.Bar(y=[2, 3, 1]))
fig.show()
return render_template('index.html')
@views.route('/about/')
def about():
return render_template('about.html')
@views.route('/index2/')
def index2():
return render_template('index2.html')
@views.route('/index3/')
def index3():
return render_template('index3.html')
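# Registration sketch (assumes an app factory or app module elsewhere in this
# package; the surrounding wiring is illustrative, not part of this file):
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(views)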
|
py | b40882c35e4d137b0253cae7dd707813b12e1091 | # Copyright 2014 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import testtools
import zuul.connection.gerrit
import zuul.connection.github
from tests.base import ZuulTestCase
class TestGerritConnection(testtools.TestCase):
log = logging.getLogger("zuul.test_connection")
def test_driver_name(self):
self.assertEqual('gerrit',
zuul.connection.gerrit.GerritConnection.driver_name)
class TestGitHubConnection(testtools.TestCase):
log = logging.getLogger("zuul.test_connection")
def test_driver_name(self):
self.assertEqual('github',
zuul.connection.github.GithubConnection.driver_name)
class TestConnections(ZuulTestCase):
def setup_config(self, config_file='zuul-connections-same-gerrit.conf'):
super(TestConnections, self).setup_config(config_file)
def test_multiple_connections(self):
"Test multiple connections to the one gerrit"
A = self.fake_review_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_review_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(A.patchsets[-1]['approvals']), 1)
self.assertEqual(A.patchsets[-1]['approvals'][0]['type'], 'VRFY')
self.assertEqual(A.patchsets[-1]['approvals'][0]['value'], '1')
self.assertEqual(A.patchsets[-1]['approvals'][0]['by']['username'],
'jenkins')
B = self.fake_review_gerrit.addFakeChange('org/project', 'master', 'B')
self.worker.addFailTest('project-test2', B)
self.fake_review_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(len(B.patchsets[-1]['approvals']), 1)
self.assertEqual(B.patchsets[-1]['approvals'][0]['type'], 'VRFY')
self.assertEqual(B.patchsets[-1]['approvals'][0]['value'], '-1')
self.assertEqual(B.patchsets[-1]['approvals'][0]['by']['username'],
'civoter')
class TestMultipleGerrits(ZuulTestCase):
def setup_config(self,
config_file='zuul-connections-multiple-gerrits.conf'):
super(TestMultipleGerrits, self).setup_config(config_file)
self.config.set(
'zuul', 'layout_config',
'layout-connections-multiple-gerrits.yaml')
def test_multiple_project_separate_gerrits(self):
self.worker.hold_jobs_in_build = True
A = self.fake_another_gerrit.addFakeChange(
'org/project', 'master', 'A')
self.fake_another_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
self.assertEqual(1, len(self.builds))
self.assertEqual('project-another-gerrit', self.builds[0].name)
self.assertTrue(self.job_has_changes(self.builds[0], A))
self.worker.hold_jobs_in_build = False
self.worker.release()
self.waitUntilSettled()
class TestGerritAndGithub(ZuulTestCase):
def setup_config(self,
config_file='zuul-connections-gerrit-and-github.conf'):
super(TestGerritAndGithub, self).setup_config(config_file)
self.config.set(
'zuul', 'layout_config',
'layout-connections-gerrit-and-github.yaml')
def test_multiple_project_gerrit_and_github(self):
self.worker.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
B = self.fake_github.openFakePullRequest('org/project1', 'master', 'B')
self.fake_github.emitEvent(B.getPullRequestOpenedEvent())
self.waitUntilSettled()
self.assertEqual(2, len(self.builds))
self.assertEqual('project-gerrit', self.builds[0].name)
self.assertEqual('project1-github', self.builds[1].name)
self.assertTrue(self.job_has_changes(self.builds[0], A))
self.assertTrue(self.job_has_changes(self.builds[1], B))
self.worker.hold_jobs_in_build = False
self.worker.release()
self.waitUntilSettled()
|
py | b408838e861863e81acba11a8fbe697003170181 | import os
import numpy as np
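# Summary of this script: for every "<name>.o38675.<n>" output file with n < 1000
# and between 3 and 6 lines, it collects [line 0 field 1, line 1 field 1, line 3,
# line 4] as floats, then prints the mean and the standard error of the second
# value grouped by the first value.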
data = []
for f in os.listdir("."):
if len(f.split("."))==3 and f.split(".")[1]=='o38675' and int(f.split(".")[2])<1000:
with open(f) as file:
lines = file.readlines()
if len(lines) > 2 and len(lines) < 7:
data.append([float(p) for p in [lines[0].split(" ")[1], lines[1].split(" ")[1], lines[3], lines[4]]])
#print data
#for d in data:
# print str(d[0]) + "\t" + str(d[1]) + "\t" + str(d[2]) + "\t" + str(d[3])
dictcheck = []
mydict = {}
for d in data:
if d[0] not in dictcheck:
dictcheck.append(d[0])
mydict[d[0]] = []
for d in data:
mydict[d[0]].append(d[1])
for d in dictcheck:
    print(str(d) + "\t" + str(np.mean(mydict[d])) + "\t" + str(np.std(mydict[d])/np.sqrt(len(mydict[d]))))
|
py | b40883976cfbb6e29ae961d12bc4a97428c14ec8 | # Copyright 2013 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test cases for Neutron PLUMgrid Plug-in
"""
import mock
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron import manager
from neutron.openstack.common import importutils
from neutron.plugins.plumgrid.plumgrid_plugin import plumgrid_plugin
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit import test_db_plugin as test_plugin
PLUM_DRIVER = ('neutron.plugins.plumgrid.drivers.fake_plumlib.Plumlib')
FAKE_DIRECTOR = '1.1.1.1'
FAKE_PORT = '1234'
FAKE_USERNAME = 'fake_admin'
FAKE_PASSWORD = 'fake_password'
FAKE_TIMEOUT = '0'
class PLUMgridPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = ('neutron.plugins.plumgrid.plumgrid_plugin.'
'plumgrid_plugin.NeutronPluginPLUMgridV2')
def setUp(self):
def mocked_plumlib_init(self):
director_plumgrid = FAKE_DIRECTOR
director_port = FAKE_PORT
director_username = FAKE_USERNAME
director_password = FAKE_PASSWORD
timeout = FAKE_TIMEOUT
self._plumlib = importutils.import_object(PLUM_DRIVER)
self._plumlib.director_conn(director_plumgrid,
director_port, timeout,
director_username,
director_password)
with mock.patch.object(plumgrid_plugin.NeutronPluginPLUMgridV2,
'plumgrid_init', new=mocked_plumlib_init):
super(PLUMgridPluginV2TestCase, self).setUp(self._plugin_name)
def tearDown(self):
super(PLUMgridPluginV2TestCase, self).tearDown()
class TestPlumgridPluginNetworksV2(test_plugin.TestNetworksV2,
PLUMgridPluginV2TestCase):
pass
class TestPlumgridV2HTTPResponse(test_plugin.TestV2HTTPResponse,
PLUMgridPluginV2TestCase):
pass
class TestPlumgridPluginPortsV2(test_plugin.TestPortsV2,
PLUMgridPluginV2TestCase):
def test_range_allocation(self):
self.skipTest("Plugin does not support Neutron allocation process")
class TestPlumgridPluginSubnetsV2(test_plugin.TestSubnetsV2,
PLUMgridPluginV2TestCase):
_unsupported = (
'test_create_subnet_default_gw_conflict_allocation_pool_returns_409',
'test_create_subnet_defaults', 'test_create_subnet_gw_values',
'test_create_subnet_ipv6_gw_values',
'test_update_subnet_gateway_in_allocation_pool_returns_409',
'test_update_subnet_allocation_pools',
'test_update_subnet_allocation_pools_invalid_pool_for_cidr')
def setUp(self):
if self._testMethodName in self._unsupported:
self.skipTest("Plugin does not support Neutron allocation process")
super(TestPlumgridPluginSubnetsV2, self).setUp()
class TestPlumgridPluginPortBinding(PLUMgridPluginV2TestCase,
test_bindings.PortBindingsTestCase):
VIF_TYPE = portbindings.VIF_TYPE_IOVISOR
HAS_PORT_FILTER = True
def setUp(self):
super(TestPlumgridPluginPortBinding, self).setUp()
class TestPlumgridNetworkAdminState(PLUMgridPluginV2TestCase):
def test_network_admin_state(self):
name = 'network_test'
admin_status_up = False
tenant_id = 'tenant_test'
network = {'network': {'name': name,
'admin_state_up': admin_status_up,
'tenant_id': tenant_id}}
plugin = manager.NeutronManager.get_plugin()
self.assertEqual(plugin._network_admin_state(network), network)
class TestPlumgridAllocationPool(PLUMgridPluginV2TestCase):
def test_allocate_pools_for_subnet(self):
cidr = '10.0.0.0/24'
gateway_ip = '10.0.0.254'
subnet = {'gateway_ip': gateway_ip,
'cidr': cidr,
'ip_version': 4}
allocation_pool = [{"start": '10.0.0.2',
"end": '10.0.0.253'}]
context = None
plugin = manager.NeutronManager.get_plugin()
pool = plugin._allocate_pools_for_subnet(context, subnet)
self.assertEqual(allocation_pool, pool)
def test_conflict_dhcp_gw_ip(self):
cidr = '10.0.0.0/24'
gateway_ip = '10.0.0.1'
subnet = {'gateway_ip': gateway_ip,
'cidr': cidr,
'ip_version': 4}
allocation_pool = [{"start": '10.0.0.3',
"end": '10.0.0.254'}]
context = None
plugin = manager.NeutronManager.get_plugin()
pool = plugin._allocate_pools_for_subnet(context, subnet)
self.assertEqual(allocation_pool, pool)
class TestPlumgridProvidernet(PLUMgridPluginV2TestCase):
def test_create_provider_network(self):
tenant_id = 'admin'
data = {'network': {'name': 'net1',
'admin_state_up': True,
'tenant_id': tenant_id,
provider.NETWORK_TYPE: 'vlan',
provider.SEGMENTATION_ID: 3333,
provider.PHYSICAL_NETWORK: 'phy3333'}}
network_req = self.new_create_request('networks', data, self.fmt)
net = self.deserialize(self.fmt, network_req.get_response(self.api))
plumlib = importutils.import_object(PLUM_DRIVER)
plumlib.create_network(tenant_id, net, data)
self.assertEqual(net['network'][provider.NETWORK_TYPE], 'vlan')
self.assertEqual(net['network'][provider.SEGMENTATION_ID], 3333)
self.assertEqual(net['network'][provider.PHYSICAL_NETWORK], 'phy3333')
class TestDisassociateFloatingIP(PLUMgridPluginV2TestCase):
def test_disassociate_floating_ip(self):
port_id = "abcdefgh"
tenant_id = "94eb42de4e331"
fip_net_id = "b843d18245678"
fip_addr = "10.0.3.44"
fip_id = "e623679734051"
fip = {"router_id": "94eb42de4e331",
"tenant_id": tenant_id,
"floating_network_id": fip_net_id,
"fixed_ip_address": "192.168.8.2",
"floating_ip_address": fip_addr,
"port_id": port_id,
"id": fip_id}
plumlib = importutils.import_object(PLUM_DRIVER)
fip_res = plumlib.disassociate_floatingips(fip, port_id)
self.assertEqual(fip_res["id"], fip_id)
self.assertEqual(fip_res["floating_ip_address"], fip_addr)
self.assertEqual(fip_res["floating_network_id"], fip_net_id)
|
py | b40884e3890d1d8acd7a4824b0f31f9b81e6adf0 | import numpy as np
from typing import Optional, List
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from src.models.sparse_volume import VolumeList
from src.models.model_utils import set_optimizer_and_lr
from src.models.models import register
from src.models.fusion.modules import ReplicateNeRFModel
from src.utils.render_utils import get_camera_params
from src.utils.render_utils import hierarchical_sampling
import src.utils.voxel_utils as voxel_utils
import src.utils.hydra_utils as hydra_utils
from src.utils.common import override_weights
log = hydra_utils.get_logger(__name__)
@register("lit_fusion_refiner")
class LitFusionRefiner(pl.LightningModule):
def __init__(self, cfg, **kwargs):
super().__init__()
self.cfg = cfg
self.voxel_size = cfg.model.voxel_size
self.ray_max_dist = cfg.model.ray_tracer.ray_max_dist
self.truncated_units = cfg.model.ray_tracer.truncated_units
self.truncated_dist = min(self.truncated_units * self.voxel_size * 0.5, 0.1)
self.train_ray_splits = cfg.model.train_ray_splits
self.sdf_delta_weight = cfg.model.sdf_delta_weight
self.loss_weight = cfg.model.loss
self.nerf = ReplicateNeRFModel(
cfg.model.feature_vector_size, **cfg.model.nerf)
if hasattr(cfg.dataset, "out_root"):
self.plots_dir = os.path.join(cfg.dataset.out_root, cfg.dataset.scan_id)
else:
self.plots_dir = os.path.join(os.getcwd(), "plots")
if not os.path.exists(self.plots_dir):
os.makedirs(self.plots_dir)
pretrained_weights = torch.load(cfg.model.pretrained_model)
pretrained_used_weights = {k: v for k, v in pretrained_weights['state_dict'].items() if "nerf." in k}
keys = [k for k in self.nerf.state_dict().keys()]
if "/" in cfg.dataset.scan_id:
dataset_name, scan_id = cfg.dataset.scan_id.split("/")
else:
scan_id = cfg.dataset.scan_id
dataset_name = "ScanNet"
if cfg.model.freeze_pretrained_weights:
override_weights(self, pretrained_used_weights, keys=keys)
# load volume
if "volume_path" in kwargs:
volume_path = kwargs['volume_path']
else:
volume_path = os.path.join(
cfg.model.volume_dir,
f"{scan_id}_fine_sparse_volume.pth"
)
volume = VolumeList(
cfg.model.feature_vector_size,
self.voxel_size,
kwargs['dimensions'],
cfg.model.min_pts_in_grid
)
volume.load(volume_path)
self.volume = volume
# load tsdf volume
if "tsdf_volume_dir" in kwargs:
tsdf_volume_dir = kwargs["tsdf_volume_dir"]
else:
tsdf_volume_dir = os.path.join(
"/home/kejie/repository/fast_sdf/render_out/tsdf_volume",
dataset_name
)
if not os.path.exists(tsdf_volume_dir):
pass
else:
tsdf_name = [f for f in os.listdir(tsdf_volume_dir) if (scan_id in f) and (f.endswith(".npy"))]
if len(tsdf_name) == 0:
tsdf_volume_dir = "/not_exist"
else:
tsdf_name = tsdf_name[0]
tsdf_volume_dir = os.path.join(tsdf_volume_dir, tsdf_name)
if not os.path.exists(tsdf_volume_dir):
print("[warning]: tsdf volume does not exist")
self.sdf_delta = None
else:
world_min_coords, world_max_coords, _ = \
voxel_utils.get_world_range(
kwargs['dimensions'], cfg.model.voxel_size
)
world_volume_resolution = np.ceil((world_max_coords - world_min_coords) / cfg.model.voxel_size).astype(np.int32)
new_sdf_delta = []
tsdf_volume = np.load(tsdf_volume_dir)
tsdf_volume = torch.from_numpy(tsdf_volume).float().unsqueeze(0).unsqueeze(0)
resized_tsdf_volume = F.interpolate(
tsdf_volume,
size=(
world_volume_resolution[0],
world_volume_resolution[1],
world_volume_resolution[2]
),
mode="trilinear",
align_corners=True)
resized_tsdf_volume = torch.clip(
resized_tsdf_volume, min=-self.truncated_dist, max=self.truncated_dist)
resized_tsdf_volume *= self.sdf_delta_weight
new_sdf_delta.append(resized_tsdf_volume.to("cuda"))
self.sdf_delta = new_sdf_delta
self.volume.fine_volume.features = nn.Parameter(self.volume.fine_volume.features)
self.automatic_optimization = False
def render_with_rays(
self,
frame,
volume,
rays,
weight_mask,
sdf_delta,
):
"""
rays:
uv [b, n, 2]:
gt_pts [b, n, 3]: gt world positions of rays
T_wc [b, 4, 4]:
intr_mat [b, 3, 3]:
"""
uv = rays['uv']
T_wcs = rays['T_wc']
intr_mats = rays['intr_mat']
ray_dirs, cam_loc = \
get_camera_params(uv, T_wcs, intr_mats)
gt_depths = torch.sqrt(torch.sum(
(rays['gt_pts'] - cam_loc.unsqueeze(1)) ** 2,
dim=-1
)) # [v, n_pts]
pts, dists = hierarchical_sampling(
self.truncated_units*2,
int(self.ray_max_dist*5), gt_depths, rays['gt_pts'],
ray_dirs, cam_loc, offset_distance=self.truncated_dist,
max_depth=self.ray_max_dist
)
assert weight_mask is None
pred_sdf = volume.decode_pts(
pts, self.nerf, sdf_delta=sdf_delta)
pred_sdf = pred_sdf[..., 0]
out = {
"cam_loc": cam_loc,
"ray_dirs": ray_dirs,
"sdf_on_rays": pred_sdf,
"pts_on_rays": pts,
}
return out
def compute_sdf_loss(self, rays, pred_sdf, pred_pts, cam_loc, num_valid_pixels):
gt_depths = torch.sqrt(torch.sum(
(rays['gt_pts'] - cam_loc.unsqueeze(1)) ** 2,
dim=-1
)).unsqueeze(-1) # [1, n_pts, 1]
depths = torch.sqrt(torch.sum(
(pred_pts - cam_loc.unsqueeze(1).unsqueeze(1)) ** 2,
dim=-1
)) # [1, n_pts, n_steps]
gt_sdf = gt_depths - depths
gt_sdf = torch.clip(
gt_sdf, min=-self.truncated_dist, max=self.truncated_dist)
valid_map = gt_sdf > max(-self.truncated_dist*0.5, -0.05)
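        # Supervise only samples that are not too far behind the observed surface:
        # samples whose clipped ground-truth SDF falls below roughly half the
        # truncation band are masked out of the loss.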
dists = torch.sqrt(
torch.sum(
(rays['neighbor_pts'].unsqueeze(2) - pred_pts.unsqueeze(3)) ** 2,
dim=-1)
) # [1, n_pts, n_steps, n_neighbors]
n_samples = pred_pts.shape[2]
neighbor_mask = rays['neighbor_masks'].unsqueeze(2).repeat(1, 1, n_samples, 1) # [1, n_pts, n_steps, n_neighbors]
dists = torch.where(neighbor_mask.bool(), dists, torch.ones_like(dists) * 10000)
# get the corrected SDF using the minimum of a neighborhood
gt_nearest_dists = torch.min(dists, dim=-1)[0]
sign = torch.where(gt_sdf > 0, torch.ones_like(gt_sdf), torch.ones_like(gt_sdf) * -1)
gt_nearest_signed_dists = gt_nearest_dists * sign
gt_nearest_signed_dists = torch.clip(
gt_nearest_signed_dists, min=-self.truncated_dist, max=self.truncated_dist)
depth_bce = F.l1_loss(
pred_sdf,
gt_nearest_signed_dists,
reduction='none'
) * valid_map
depth_bce = (depth_bce * rays['mask'].unsqueeze(-1)).sum() / num_valid_pixels
return depth_bce
def calculate_loss(
self,
frame,
volume,
rays,
weight_mask=None,
sdf_delta=None,
):
""" calculate RGB and occupancy loss given rays
geometry loss:
zero_leve loss: points on the surface should be occupied
ray bce loss: points before the gt_pts should be unoccupied
rgb loss: the rendered images given surface pts and view directions
rays:
uv [v, n, 2]:
gt_pts [v, n, 3]: gt world positions of rays
T_wc [v, 4, 4]:
intr [v, 4, 4]:
"""
object_mask = rays['mask']
num_valid_pixels = torch.sum(object_mask) + 1e-4
loss_output = {}
render_out = self.render_with_rays(
frame,
volume,
rays,
weight_mask,
sdf_delta,
)
sdf_loss = self.compute_sdf_loss(
rays,
render_out['sdf_on_rays'],
render_out['pts_on_rays'],
render_out['cam_loc'],
num_valid_pixels
)
loss_output['depth_bce_loss'] = sdf_loss
return loss_output
def decode_feature_grid_sparse(self, volume, sdf_delta, volume_resolution):
surface_pts, mesh = volume.meshlize(self.nerf, sdf_delta, volume_resolution)
return mesh
def forward(self, frame, rays, volume, backward):
batch_loss = {}
n_rays = rays['uv'].shape[1]
batch_loss = {}
for i, indx in enumerate(torch.split(
torch.arange(n_rays).cuda(), self.train_ray_splits, dim=0
)):
ray_splits = {
"uv": torch.index_select(rays['uv'], 1, indx),
"rgb": torch.index_select(rays['rgb'], 1, indx),
"gt_pts": torch.index_select(rays['gt_pts'], 1, indx),
"mask": torch.index_select(rays['mask'], 1, indx),
"neighbor_pts": torch.index_select(rays['neighbor_pts'], 1, indx),
"neighbor_masks": torch.index_select(rays['neighbor_masks'], 1, indx),
"T_wc": rays['T_wc'],
"intr_mat": rays['intr_mat'],
}
split_loss_out = \
self.calculate_loss(
frame,
volume,
ray_splits,
sdf_delta=self.sdf_delta,
)
loss_for_backward = 0
for k in split_loss_out:
if k[0] != "_":
try:
weight = getattr(self.loss_weight, k)
                except AttributeError:
                    weight = 1.0
                    log.info("[warning]: can't find loss weight, defaulting to 1.0")
loss_for_backward += split_loss_out[k] * weight
if k not in batch_loss:
batch_loss[k] = split_loss_out[k]
else:
batch_loss[k] += split_loss_out[k]
if backward:
self.manual_backward(loss_for_backward)
return batch_loss
def training_step(self, data, batch_idx):
"""
data:
frame: new frame information
volume: world feature volume
rays: rays for supervision
"""
# setup manual optim.
opt = self.optimizers()
opt.zero_grad()
frame, rays = data
if torch.any(torch.isnan(frame['T_wc'])):
return None
for k in frame.keys():
if isinstance(frame[k], torch.Tensor):
frame[k] = frame[k].float()
for k in rays.keys():
if isinstance(rays[k], torch.Tensor):
rays[k] = rays[k].float()
batch_loss = self.forward(
frame, rays, self.volume, backward=True)
for k in batch_loss:
self.log(f"train/{k}", batch_loss[k])
opt.step()
return frame
def validation_step(self, data, batch_idx):
frame, rays = data
for k in frame.keys():
if isinstance(frame[k], torch.Tensor):
frame[k] = frame[k].float()
for k in rays.keys():
if isinstance(rays[k], torch.Tensor):
rays[k] = rays[k].float()
if batch_idx != 0:
# return frame
return None
scene_id = frame['scene_id'][0]
frame_id = frame['frame_id'][0]
batch_loss = self.forward(
frame, rays, self.volume, backward=False)
val_loss = 0
for k in batch_loss:
val_loss += batch_loss[k]
self.log(f"val/{k}", batch_loss[k])
self.log("val_loss", val_loss)
mesh = self.decode_feature_grid_sparse(
self.volume,
self.sdf_delta,
frame['world_volume_resolution'].float()
)
scene_id = scene_id.split("/")[-1]
mesh_out_path = os.path.join(self.plots_dir, f"{scene_id}_{self.current_epoch}.ply")
if mesh is not None:
mesh.export(mesh_out_path)
def configure_optimizers(self):
from src.utils.import_utils import import_from
optimizers = []
if not self.cfg.model.freeze_pretrained_weights:
parameters = self.parameters()
optimizer = import_from(
module=self.cfg.optimizer._target_package_,
name=self.cfg.optimizer._class_
)(parameters, lr=self.cfg.optimizer.lr.initial)
optimizers.append(optimizer)
else:
optimizer = import_from(
module=self.cfg.optimizer._target_package_,
name=self.cfg.optimizer._class_
)(
[
# {"params": [self.sdf_delta], "lr": self.cfg.optimizer.lr.initial * 0.01},
{"params": [self.volume.fine_volume.features]},
],
lr=self.cfg.optimizer.lr.initial
)
optimizers.append(optimizer)
return optimizers
|
py | b408859128ebe078cdbf14e963942c5889c612fd | import functools
import io
import re
from typing import Optional
from github3.exceptions import NotFoundError
from github3.git import Tag
from github3.repos.repo import Repository
from cumulusci.core.config import BaseConfig
from cumulusci.core.config.project_config import BaseProjectConfig
from cumulusci.core.exceptions import DependencyResolutionError
from cumulusci.utils.yaml.cumulusci_yml import cci_safe_load
PACKAGE_TYPE_RE = re.compile(r"^package_type: (.*)$", re.MULTILINE)
VERSION_ID_RE = re.compile(r"^version_id: (04t[a-zA-Z0-9]{12,15})$", re.MULTILINE)
def get_repo(github: str, context: BaseProjectConfig) -> Repository:
try:
repo = context.get_repo_from_url(github)
except NotFoundError:
repo = None
if repo is None:
        raise DependencyResolutionError(
            f"We are unable to find the repository at {github}. Please make sure the URL is correct, that your GitHub user has read access to the repository, and that your GitHub personal access token includes the “repo” scope."
        )
return repo
@functools.lru_cache(50)
def get_remote_project_config(repo: Repository, ref: str) -> BaseConfig:
contents = repo.file_contents("cumulusci.yml", ref=ref)
contents_io = io.StringIO(contents.decoded.decode("utf-8"))
contents_io.url = f"cumulusci.yml from {repo.owner}/{repo.name}" # for logging
return BaseConfig(cci_safe_load(contents_io))
def get_package_data(config: BaseConfig):
namespace = config.project__package__namespace
package_name = (
config.project__package__name_managed
or config.project__package__name
or "Package"
)
return package_name, namespace
def get_2gp_version_id_from_tag(tag: Tag) -> Optional[str]:
message = tag.message
version_id = VERSION_ID_RE.search(message)
if version_id:
version_id = version_id.group(1)
package_type = PACKAGE_TYPE_RE.search(message)
if package_type:
package_type = package_type.group(1)
if package_type == "2GP" and version_id:
return version_id
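# Illustrative sketch (hypothetical values) of the tag annotation format parsed
# above: a tag message containing lines such as
#   package_type: 2GP
#   version_id: 04t000000000000
# yields "04t000000000000"; if either line is missing, or package_type is not
# "2GP", get_2gp_version_id_from_tag() returns None.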
|
py | b4088660fcd1b5a8f515334a2f891d4da154682f | import unittest
import mock
from mock import MagicMock,patch
import os.path
import logging
import sys,os
from MockData import Emailid,Password,Jobdetails,Messages5,Messages2
from Test_Config import *
sys.path.append(os.path.abspath(os.path.join('..', 'extensions/')))
import extensions
sys.path.append(os.path.abspath(os.path.join('..', 'LoggingDatabase/')))
import LoggingErrorsinDatabase
sys.path.append(os.path.abspath(os.path.join('..', 'Databaselayer/')))
import InsertMyJob
class Test_InsertMyJobAd(unittest.TestCase):
def test_changeMyProfilePassword_DBL_1(self):
insertmyjob = InsertMyJob.InsertMyJob(mysql,Jobdetails[0],Jobdetails[1],Jobdetails[2],Jobdetails[3],Jobdetails[4],Jobdetails[5],Jobdetails[6],Messages2[0])
result = insertmyjob.insertMyJob()
assert result == 'pass'
def test_changeMyProfilePassword_DBL_2(self):
insertmyjob = InsertMyJob.InsertMyJob(mysql,Jobdetails[0],Jobdetails[1],Jobdetails[2],Jobdetails[3],Jobdetails[4],Jobdetails[5],Emailid[0],Messages2[1])
result = insertmyjob.insertMyJob()
assert result == 'fail'
if __name__ == '__main__':
unittest.main()
|
py | b408887a105d32f2d9c90e70b9de98d8faa2a4ac | """
"""
from multiprocessing import Queue
from processsrc.acqWorker import SerialProcess, DisplayProcess
from processsrc.graphWorker import GraphProcess
from processsrc.csvWorker import CSVProcess
from processsrc.parserWorker import ParserProcess
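# NOTE: multiprocessing's Process.join() takes a timeout in seconds, so the
# value below waits up to 1000 s despite the "_ms" suffix in its name.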
process_join_timeout_ms = 1000
DEFAULT_SAMPLES = 10
class MainWorker:
def __init__(self, pidobj, is_manmode = True, export=False, port=None, speed=None):
self._is_manmode = is_manmode
self._is_play = False
self._is_q1_on = False
self._is_q2_on = False
self._export = export
self._port = port
self._speed = speed
self._graphs = None
self._curves = None
self._pid = pidobj
self._temp_queue = Queue()
self._parameters = None
self._setpoint = None
self._output = 0.0
self._label = []
self._template = []
self._spoints = DEFAULT_SAMPLES
self._winsize = 5
self._dist = 0
self._mode = 0
self._disp_process = None
self._acq_contr_process = None
self._parser_process = None
self._csv_process = None
self._graphic_process = None
def set_lines(self, graphs, curves, legends):
self._graphs = graphs
self._curves = curves
self._legends = legends
def set_mode(self, mode):
if mode == "Manual":
self._is_manmode = True
self._mode = 0
else:
self._is_manmode = False
self._mode = 1
def set_arduino(self, port, brate):
self._port = port
self._speed = brate
def set_save(self, enable: bool):
self._export = enable
def set_winsize(self, mintime):
self._winsize = mintime
print("duty cycle: {}".format(mintime))
def set_label(self, label, template):
self._label = label
self._template = template
def set_samples(self, points):
self._spoints = points
print("point buffer : {}".format(points))
def toggleplay(self):
if self._is_play:
self._stop()
return False
else:
if self._start():
return True
else:
return False
def _start(self):
self._graphic_process = GraphProcess(self._graphs, self._curves, self._legends)
self._graphic_process.reset_buffers(self._spoints)
print("Spoint: {}".format(self._spoints))
self._disp_process = DisplayProcess(self._label, self._template)
if self._export:
self._csv_process =CSVProcess(manmode=self._is_manmode)
self._parser_process = ParserProcess(self._temp_queue,self._graphic_process, store_reference=self._csv_process)
else:
self._parser_process = ParserProcess(self._temp_queue, self._graphic_process)
self._acq_contr_process = SerialProcess(parser_process=self._parser_process, display_process=self._disp_process, mode_process= self._mode)
if self._acq_contr_process.open(self._port, self._speed):
self._parser_process.start()
if self._export:
self._csv_process.start()
self._acq_contr_process.add([0.0, self._winsize, self._mode, 0]) # initial value
self._acq_contr_process.start()
self._disp_process.start()
self._graphic_process.start()
self._is_play = True
return True
else:
return False
def update(self):
if self._is_manmode:
self._calc_manual()
else:
self._calc_pid()
print("pid calculate")
self._graphic_process.update()
self._disp_process.update()
print("update")
def _calc_pid(self):
while not self._temp_queue.empty() and self._is_play:
temp = self._temp_queue.get()
self._pid.update(temp)
self._output = self._pid.get_output
print("pid calculation: {}".format(self._output))
self._parameters = self._pid.get_parameters
self._setpoint = self._pid.get_setpoint
self._acq_contr_process.add([self._output, self._winsize, self._mode, self._dist])
if self._export:
self._csv_process.addinter(self._output, self._setpoint, self._parameters)
self._graphic_process.addinter(self._setpoint, self._output)
def _calc_manual(self):
self._consume_queue()
self._winsize = 0.0
self._acq_contr_process.add([self._output, self._winsize, self._mode, self._dist])
def togglemain(self):
if self._is_q1_on:
self._stop_main()
return False
else:
self._start_main()
return True
def _start_main(self):
self._output = 100.0
self._is_q1_on = True
def _stop_main(self):
self._output = 0.0
self._is_q1_on = False
def toggledist(self):
if self._is_q2_on:
self._stop_dist()
return False
else:
self._start_dist()
return True
def _start_dist(self):
self._dist = 1
self._is_q2_on = True
def _stop_dist(self):
self._dist = 0
self._is_q2_on = False
def _stop(self):
self._consume_queue()
for process in [self._acq_contr_process, self._parser_process, self._graphic_process, self._csv_process, self._disp_process]:
if process is not None and process.is_alive():
process.stop()
process.join(process_join_timeout_ms)
self._clear_value()
self._is_play = False
def force_stop(self):
self._stop()
self._stop_dist()
if self._is_manmode:
self._stop_main()
def _consume_queue(self):
if not self._temp_queue.empty():
self._temp_queue.get()
def _clear_value(self):
self._export = False
self._parameters = None
self._setpoint = None
self._output = None
self._winsize = 11
self._main = 0
self._dist = 0
def disp(self, label):
param = self._pid.get_parameters
if label == 'Setpoint':
return self._pid.get_setpoint
elif label == 'kC':
return param[0]
elif label == 'tauI':
return param[1]
else:
return param[2]
|
py | b408888dc525906fcd824c9d11402da8375008a2 | """
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import six
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class UpdateReplacePolicy(CloudFormationLintRule):
"""Check Base Resource Configuration"""
id = 'E3036'
shortdesc = 'Check UpdateReplacePolicy values for Resources'
description = 'Check that the UpdateReplacePolicy values are valid'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatereplacepolicy.html'
tags = ['resources', 'updatereplacepolicy']
def check_value(self, key, path, res_type):
"""Check resource names for UpdateReplacePolicy"""
matches = []
valid_values = [
'Delete',
'Retain',
'Snapshot'
]
valid_snapshot_types = [
'AWS::EC2::Volume',
'AWS::ElastiCache::CacheCluster',
'AWS::ElastiCache::ReplicationGroup',
'AWS::Neptune::DBCluster',
'AWS::RDS::DBCluster',
'AWS::RDS::DBInstance',
'AWS::Redshift::Cluster',
]
if not isinstance(key, (six.text_type, six.string_types)):
            message = 'UpdateReplacePolicy values should be a string at {0}'
matches.append(RuleMatch(path, message.format('/'.join(map(str, path)))))
return matches
if key not in valid_values:
message = 'UpdateReplacePolicy should be only one of {0} at {1}'
matches.append(RuleMatch(
path,
message.format(', '.join(map(str, valid_values)),
'/'.join(map(str, path)))))
if key == 'Snapshot' and res_type not in valid_snapshot_types:
message = 'UpdateReplacePolicy cannot be Snapshot for resources of type {0} at {1}'
matches.append(RuleMatch(
path,
message.format(res_type,
'/'.join(map(str, path)))))
return matches
def match(self, cfn):
"""Check CloudFormation Resources"""
matches = []
resources = cfn.get_resources()
for resource_name, resource_values in resources.items():
updatereplace_policies = resource_values.get('UpdateReplacePolicy')
if updatereplace_policies:
path = ['Resources', resource_name, 'UpdateReplacePolicy']
res_type = resource_values.get('Type')
self.logger.debug('Validating UpdateReplacePolicy for %s base configuration', resource_name)
if isinstance(updatereplace_policies, list):
message = 'Only one UpdateReplacePolicy allowed per resource at {0}'
matches.append(RuleMatch(path, message.format('/'.join(map(str, path)))))
else:
matches.extend(self.check_value(updatereplace_policies, path, res_type))
return matches
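# Illustrative sketch (hypothetical template fragment) of what this rule flags:
#   MyVolume:
#     Type: AWS::EC2::Volume
#     UpdateReplacePolicy: Snapshot   # accepted: volumes support snapshots
#   MyBucket:
#     Type: AWS::S3::Bucket
#     UpdateReplacePolicy: Snapshot   # flagged: Snapshot is not valid for this type
# Any value other than Delete, Retain or Snapshot is flagged for every resource.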
|
py | b4088a25e4c5aefaf25dd899dfc2bd3c81050efa | # -*- coding: utf-8 -*-
"""
shepherd.__init__
~~~~~
Provides main API functionality for recording and emulation with shepherd.
:copyright: (c) 2019 Networked Embedded Systems Lab, TU Dresden.
:license: MIT, see LICENSE for more details.
"""
import datetime
import logging
import time
import sys
from logging import NullHandler
from pathlib import Path
from contextlib import ExitStack
import invoke
import signal
from shepherd.datalog import LogReader
from shepherd.datalog import LogWriter
from shepherd.datalog import ExceptionRecord
from shepherd.eeprom import EEPROM
from shepherd.eeprom import CapeData
from shepherd.calibration import CalibrationData
from shepherd.shepherd_io import ShepherdIOException
from shepherd.shepherd_io import ShepherdIO
from shepherd import commons
from shepherd import sysfs_interface
# Set default logging handler to avoid "No handler found" warnings.
logging.getLogger(__name__).addHandler(NullHandler())
logger = logging.getLogger(__name__)
class Recorder(ShepherdIO):
"""API for recording data with shepherd.
Provides an easy to use, high-level interface for recording data with
shepherd. Configures all hardware and initializes the communication
with kernel module and PRUs.
Args:
mode (str): Should be either 'harvesting' to record harvesting data
or 'load' to record target consumption data.
load (str): Selects, which load should be used for recording.
Should be one of 'artificial' or 'node'.
harvesting_voltage (float): Fixed reference voltage for boost
converter input.
ldo_voltage (float): Pre-charge capacitor to this voltage before
starting recording.
ldo_mode (str): Selects if LDO should just pre-charge capacitor or run
continuously.
"""
def __init__(
self,
mode: str = "harvesting",
load: str = "artificial",
harvesting_voltage: float = None,
ldo_voltage: float = None,
ldo_mode: str = "pre-charge",
):
super().__init__(mode, load)
if ldo_voltage is None:
if mode == "load":
self.ldo_voltage = 3.0
else:
self.ldo_voltage = 0.0
else:
self.ldo_voltage = ldo_voltage
self.harvesting_voltage = harvesting_voltage
self.ldo_mode = ldo_mode
def __enter__(self):
super().__enter__()
if self.harvesting_voltage is not None:
self.set_mppt(False)
self.set_harvesting_voltage(self.harvesting_voltage)
else:
self.set_mppt(True)
# In 'load' mode, the target is supplied from constant voltage reg
if self.mode == "load":
self.set_ldo_voltage(self.ldo_voltage)
elif self.mode == "harvesting":
self.set_harvester(True)
if self.ldo_voltage > 0.0:
self.set_ldo_voltage(self.ldo_voltage)
if self.ldo_mode == "pre-charge":
time.sleep(1)
self.set_ldo_voltage(False)
logger.debug("Disabling LDO")
# Give the PRU empty buffers to begin with
for i in range(self.n_buffers):
time.sleep(0.2 * float(self.buffer_period_ns) / 1e9)
self.return_buffer(i)
logger.debug(f"sent empty buffer {i}")
return self
def return_buffer(self, index: int):
"""Returns a buffer to the PRU
After reading the content of a buffer and potentially filling it with
emulation data, we have to release the buffer to the PRU to avoid it
running out of buffers.
Args:
index (int): Index of the buffer. 0 <= index < n_buffers
"""
self._return_buffer(index)
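# Typical use of Recorder (a sketch only; record() further below wires this up
# completely, including calibration, HDF5 logging and error handling):
#   with Recorder(mode="harvesting") as recorder:
#       recorder.start(start_time, wait_blocking=False)
#       idx, buf = recorder.get_buffer()
#       recorder.return_buffer(idx)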
class Emulator(ShepherdIO):
"""API for emulating data with shepherd.
Provides an easy to use, high-level interface for emulating data with
shepherd. Configures all hardware and initializes the communication
with kernel module and PRUs.
Args:
calibration_recording (CalibrationData): Shepherd calibration data
belonging to the IV data that is being emulated
calibration_emulation (CalibrationData): Shepherd calibration data
belonging to the cape used for emulation
load (str): Selects, which load should be used for recording.
Should be one of 'artificial' or 'node'.
ldo_voltage (float): Pre-charge the capacitor to this voltage before
starting recording.
"""
def __init__(
self,
initial_buffers: list = None,
calibration_recording: CalibrationData = None,
calibration_emulation: CalibrationData = None,
load: str = "node",
ldo_voltage: float = 0.0
):
shepherd_mode = "emulation"
self.ldo_voltage = ldo_voltage
super().__init__(shepherd_mode, load)
if calibration_emulation is None:
calibration_emulation = CalibrationData.from_default()
logger.warning(
"No emulation calibration data provided - using defaults"
)
if calibration_recording is None:
calibration_recording = CalibrationData.from_default()
logger.warning(
"No recording calibration data provided - using defaults"
)
self.transform_coeffs = {"voltage": dict(), "current": dict()}
# Values from recording are binary ADC values. We have to send binary
# DAC values to the DAC for emulation. To directly convert ADC to DAC
# values, we precalculate the 'transformation coefficients' based on
# calibration data from the recorder and the emulator.
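        # Concretely (sketching the algebra implemented below), per channel:
        #   dac_value = adc_value * (gain_rec * gain_emu)
        #               + (gain_emu * offset_rec + offset_emu)
        # i.e. "ADC counts -> physical value" (recording calibration) composed
        # with "physical value -> DAC counts" (emulation calibration).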
for channel in ["voltage", "current"]:
self.transform_coeffs[channel]["gain"] = (
calibration_recording["harvesting"][channel]["gain"]
* calibration_emulation["emulation"][channel]["gain"]
)
self.transform_coeffs[channel]["offset"] = (
calibration_emulation["emulation"][channel]["gain"]
* calibration_recording["harvesting"][channel]["offset"]
+ calibration_emulation["emulation"][channel]["offset"]
)
self._initial_buffers = initial_buffers
self._calibration_emulation = calibration_emulation
def __enter__(self):
super().__enter__()
if self.ldo_voltage > 0.0:
logger.debug(f"Precharging capacitor to {self.ldo_voltage}V")
self.set_ldo_voltage(self.ldo_voltage)
time.sleep(1)
self.set_ldo_voltage(False)
# Disconnect harvester to avoid leakage in or out of the harvester
self.set_harvester(False)
# We will dynamically generate the reference voltage for the boost
# converter. This only takes effect if MPPT is disabled.
self.set_mppt(False)
# Preload emulator with some data
for idx, buffer in enumerate(self._initial_buffers):
time.sleep(0.2 * float(self.buffer_period_ns) / 1e9)
self.return_buffer(idx, buffer)
return self
def return_buffer(self, index, buffer):
ts_start = time.time()
# Convert binary ADC recordings to binary DAC values
voltage_transformed = (
buffer.voltage * self.transform_coeffs["voltage"]["gain"]
+ self.transform_coeffs["voltage"]["offset"]
).astype("u4")
current_transformed = (
buffer.current * self.transform_coeffs["current"]["gain"]
+ self.transform_coeffs["current"]["offset"]
).astype("u4")
''' TODO: could be later replaced by (when cal-values are properly scaled)
v_gain = 1e6 * self._cal_recording["harvesting"]["adc_voltage"]["gain"]
v_offset = 1e6 * self._cal_recording["harvesting"]["adc_voltage"]["offset"]
i_gain = 1e9 * self._cal_recording["harvesting"]["adc_current"]["gain"]
i_offset = 1e9 * self._cal_recording["harvesting"]["adc_current"]["offset"]
# Convert raw ADC data to SI-Units -> the virtual-source-emulator in PRU expects uV and nV
voltage_transformed = (buffer.voltage * v_gain + v_offset).astype("u4")
current_transformed = (buffer.current * i_gain + i_offset).astype("u4")
'''
self.shared_mem.write_buffer(index, voltage_transformed, current_transformed)
self._return_buffer(index)
logger.debug(
(
f"Returning buffer #{ index } to PRU took "
f"{ round(1e3 * (time.time()-ts_start), 2) } ms"
)
)
class ShepherdDebug(ShepherdIO):
"""API for direct access to ADC and DAC.
For debugging purposes, running the GUI or for retrieving calibration
values, we need to directly read values from the ADC and set voltage using
the DAC. This class allows to put the underlying PRUs and kernel module in
a mode, where they accept 'debug messages' that allow to directly interface
with the ADC and DAC.
"""
def __init__(self):
super().__init__("debug", "artificial")
def adc_read(self, channel: str):
"""Reads value from specified ADC channel.
Args:
channel (str): Specifies the channel to read from, e.g., 'v_in' for
harvesting voltage or 'i_out' for load current
Returns:
Binary ADC value read from corresponding channel
"""
if channel.lower() == "v_in":
channel_no = 0
elif channel.lower() == "v_out":
channel_no = 1
elif channel.lower() in ["a_in", "i_in"]:
channel_no = 2
elif channel.lower() in ["a_out", "i_out"]:
channel_no = 3
else:
raise ValueError(f"ADC channel { channel } unknown")
self._send_msg(commons.MSG_DEP_DBG_ADC, channel_no)
msg_type, value = self._get_msg(3.0)
if msg_type != commons.MSG_DEP_DBG_ADC:
raise ShepherdIOException(
(
f"Expected msg type { commons.MSG_DEP_DBG_ADC } "
f"got type={ msg_type } value={ value }"
)
)
return value
def dac_write(self, channel: str, value: int):
"""Writes value to specified DAC channel
Args:
channel (str): Specifies the channel to write to, e.g., 'current'
for current channel or 'v' for voltage channel
value (int): Binary DAC value to be sent to corresponding channel
"""
# For a mapping of DAC channel to command refer to TI DAC8562T
# datasheet Table 17
if channel.lower() in ["current", "i", "a"]:
dac_command = value
elif channel.lower() in ["voltage", "v"]:
# The DAC 'voltage' channel is on channel B
dac_command = value | (1 << 16)
else:
raise ValueError(f"DAC channel { channel } unknown")
self._send_msg(commons.MSG_DEP_DBG_DAC, dac_command)
def get_buffer(self, timeout=None):
raise NotImplementedError("Method not implemented for debugging mode")
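# Minimal usage sketch for the debug interface (assumes shepherd hardware and
# the kernel module are available and relies on the same context-manager setup
# that Recorder/Emulator use above; the numeric value is a placeholder):
#   with ShepherdDebug() as dbg:
#       raw = dbg.adc_read("v_in")        # binary ADC reading, harvesting voltage
#       dbg.dac_write("voltage", 40000)   # binary DAC value for the voltage channel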
def record(
output_path: Path,
mode: str = "harvesting",
duration: float = None,
force_overwrite: bool = False,
no_calib: bool = False,
harvesting_voltage: float = None,
load: str = "artificial",
ldo_voltage: float = None,
ldo_mode: str = "pre-charge",
start_time: float = None,
warn_only: bool = False,
):
"""Starts recording.
Args:
output_path (Path): Path of hdf5 file where IV measurements should be
stored
mode (str): 'harvesting' for recording harvesting data, 'load' for
recording load consumption data.
duration (float): Maximum time duration of emulation in seconds
force_overwrite (bool): True to overwrite existing file under output path,
False to store under different name
no_calib (bool): True to use default calibration values, False to
read calibration data from EEPROM
harvesting_voltage (float): Sets a fixed reference voltage for the
input of the boost converter. Alternative to MPPT algorithm.
load (str): Type of load. 'artificial' for dummy, 'node' for sensor
node
        ldo_voltage (float): Sets the voltage of the LDO that pre-charges the
            capacitor (see ldo_mode)
ldo_mode (str): Selects if LDO should just pre-charge capacitor or run
continuously.
        start_time (float): Desired start time of recording in unix epoch time
warn_only (bool): Set true to continue recording after recoverable
error
"""
if no_calib:
calib = CalibrationData.from_default()
else:
try:
with EEPROM() as eeprom:
calib = eeprom.read_calibration()
except ValueError:
logger.warning("Couldn't read calibration from EEPROM (val). Falling back to default values.")
calib = CalibrationData.from_default()
except FileNotFoundError:
logger.warning("Couldn't read calibration from EEPROM (FS). Falling back to default values.")
calib = CalibrationData.from_default()
if start_time is None:
start_time = time.time() + 15
if not output_path.is_absolute():
output_path = output_path.absolute()
if output_path.is_dir():
timestamp = datetime.datetime.fromtimestamp(start_time)
timestamp = timestamp.strftime("%Y-%m-%d_%H-%M-%S") # closest to ISO 8601, avoid ":"
store_path = output_path / f"rec_{timestamp}.h5"
else:
store_path = output_path
recorder = Recorder(
mode=mode,
load=load,
harvesting_voltage=harvesting_voltage,
ldo_voltage=ldo_voltage,
ldo_mode=ldo_mode,
)
log_writer = LogWriter(
store_path=store_path, calibration_data=calib, mode=mode, force_overwrite=force_overwrite
)
with ExitStack() as stack:
stack.enter_context(recorder)
stack.enter_context(log_writer)
# in_stream has to be disabled to avoid trouble with pytest
res = invoke.run("hostname", hide=True, warn=True, in_stream=False)
log_writer["hostname"] = res.stdout
recorder.start(start_time, wait_blocking=False)
logger.info(f"waiting {start_time - time.time():.2f} s until start")
recorder.wait_for_start(start_time - time.time() + 15)
logger.info("shepherd started!")
def exit_gracefully(signum, frame):
stack.close()
sys.exit(0)
signal.signal(signal.SIGTERM, exit_gracefully) # TODO: should be inserted earlier
signal.signal(signal.SIGINT, exit_gracefully)
if duration is None:
ts_end = sys.float_info.max
else:
ts_end = time.time() + duration
while time.time() < ts_end:
try:
idx, buf = recorder.get_buffer()
except ShepherdIOException as e:
logger.error(
f"ShepherdIOException(ID={e.id}, val={e.value}): {str(e)}"
)
err_rec = ExceptionRecord(
int(time.time() * 1e9), str(e), e.value
)
log_writer.write_exception(err_rec)
if not warn_only:
raise
log_writer.write_buffer(buf)
recorder.return_buffer(idx)
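# Minimal invocation sketch for record(); the output directory below is purely
# illustrative. When a directory is passed, a timestamped file name is derived
# automatically as shown above.
def _record_sketch():
    record(
        output_path=Path("/var/shepherd/recordings"),
        mode="harvesting",
        duration=30.0,
        force_overwrite=True,
    )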
def emulate(
input_path: Path,
output_path: Path = None,
duration: float = None,
force_overwrite: bool = False,
no_calib: bool = False,
load: str = "artificial",
ldo_voltage: float = None,
start_time: float = None,
warn_only: bool = False,
):
""" Starts emulation.
Args:
input_path (Path): path of hdf5 file containing recorded
harvesting data
output_path (Path): Path of hdf5 file where load measurements should
be stored
duration (float): Maximum time duration of emulation in seconds
force_overwrite (bool): True to overwrite existing file under output,
False to store under different name
no_calib (bool): True to use default calibration values, False to
read calibration data from EEPROM
load (str): Type of load. 'artificial' for dummy, 'node' for sensor
node
ldo_voltage (float): Pre-charge capacitor to this voltage before
starting emulation
start_time (float): Desired start time of emulation in unix epoch time
warn_only (bool): Set true to continue emulation after recoverable
error
"""
if no_calib:
calib = CalibrationData.from_default()
else:
try:
with EEPROM() as eeprom:
calib = eeprom.read_calibration()
except ValueError:
logger.warning("Couldn't read calibration from EEPROM (val). Falling back to default values.")
calib = CalibrationData.from_default()
except FileNotFoundError:
logger.warning("Couldn't read calibration from EEPROM (FS). Falling back to default values.")
calib = CalibrationData.from_default()
if start_time is None:
start_time = time.time() + 15
if output_path is not None:
if not output_path.is_absolute():
output_path = output_path.absolute()
if output_path.is_dir():
timestamp = datetime.datetime.fromtimestamp(start_time)
timestamp = timestamp.strftime("%Y-%m-%d_%H-%M-%S") # closest to ISO 8601, avoid ":"
store_path = output_path / f"emu_{timestamp}.h5"
else:
store_path = output_path
log_writer = LogWriter(
store_path=store_path,
force_overwrite=force_overwrite,
mode="load",
calibration_data=calib,
)
if isinstance(input_path, str):
input_path = Path(input_path)
if input_path is None:
raise ValueError("No Input-File configured for emulation")
if not input_path.exists():
raise ValueError("Input-File does not exist")
log_reader = LogReader(input_path, 10_000)
with ExitStack() as stack:
if output_path is not None:
stack.enter_context(log_writer)
stack.enter_context(log_reader)
emu = Emulator(
calibration_recording=log_reader.get_calibration_data(),
calibration_emulation=calib,
initial_buffers=log_reader.read_buffers(end=64),
ldo_voltage=ldo_voltage,
load=load,
)
stack.enter_context(emu)
emu.start(start_time, wait_blocking=False)
logger.info(f"waiting {start_time - time.time():.2f} s until start")
emu.wait_for_start(start_time - time.time() + 15)
logger.info("shepherd started!")
def exit_gracefully(signum, frame):
stack.close()
sys.exit(0)
signal.signal(signal.SIGTERM, exit_gracefully)
signal.signal(signal.SIGINT, exit_gracefully)
if duration is None:
ts_end = sys.float_info.max
else:
ts_end = time.time() + duration
for hrvst_buf in log_reader.read_buffers(start=64):
try:
idx, emu_buf = emu.get_buffer(timeout=1)
except ShepherdIOException as e:
logger.error(
f"ShepherdIOException(ID={e.id}, val={e.value}): {str(e)}"
)
if output_path is not None:
err_rec = ExceptionRecord(
int(time.time() * 1e9), str(e), e.value
)
log_writer.write_exception(err_rec)
if not warn_only:
raise
if output_path is not None:
log_writer.write_buffer(emu_buf)
emu.return_buffer(idx, hrvst_buf)
if time.time() > ts_end:
break
# Read all remaining buffers from PRU
while True:
try:
idx, emu_buf = emu.get_buffer(timeout=1)
if output_path is not None:
log_writer.write_buffer(emu_buf)
except ShepherdIOException as e:
# We're done when the PRU has processed all emulation data buffers
if e.id == commons.MSG_DEP_ERR_NOFREEBUF:
break
else:
if not warn_only:
raise
|
py | b4088a5b7a45627734f641f00fb76cae0caf1c00 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pandas as pd
import tempfile
import os
from bigdl.chronos.forecaster.prophet_forecaster import ProphetForecaster
from unittest import TestCase
import pytest
def create_data():
seq_len = 400
data = pd.DataFrame(pd.date_range('20130101', periods=seq_len), columns=['ds'])
data.insert(1, 'y', np.random.rand(seq_len))
horizon = np.random.randint(2, 50)
validation_data = pd.DataFrame(pd.date_range('20140426', periods=horizon), columns=['ds'])
validation_data.insert(1, 'y', np.random.rand(horizon))
return data, validation_data
class TestChronosModelProphetForecaster(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_prophet_forecaster_fit_eval_pred(self):
data, validation_data = create_data()
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
train_loss = forecaster.fit(data, validation_data)
test_pred = forecaster.predict(validation_data.shape[0])
assert test_pred.shape[0] == validation_data.shape[0]
test_mse = forecaster.evaluate(validation_data)
def test_prophet_forecaster_save_restore(self):
data, validation_data = create_data()
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
train_loss = forecaster.fit(data, validation_data)
with tempfile.TemporaryDirectory() as tmp_dir_name:
ckpt_name = os.path.join(tmp_dir_name, "json")
test_pred_save = forecaster.predict(validation_data.shape[0])
forecaster.save(ckpt_name)
forecaster.restore(ckpt_name)
test_pred_restore = forecaster.predict(validation_data.shape[0])
assert (test_pred_save['yhat'] == test_pred_restore['yhat']).all()
def test_prophet_forecaster_runtime_error(self):
data, validation_data = create_data()
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
with pytest.raises(Exception,
match="You must call fit or restore first before calling predict!"):
forecaster.predict(horizon=validation_data.shape[0])
with pytest.raises(Exception,
match="You must call fit or restore first before calling save!"):
model_file = "tmp.json"
forecaster.save(model_file)
def test_prophet_forecaster_shape_error(self):
data, validation_data = create_data()
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
with pytest.raises(AssertionError):
forecaster.fit(data[['ds']], validation_data)
with pytest.raises(AssertionError):
forecaster.fit(data, validation_data[['ds']])
|
py | b4088a657bad3b04aeda71102e0150f181181524 | # -*- coding: utf-8 -*-
ACCOUNT_SCHEMA = {
'firstname': {
'type': 'string'
},
'lastname': {
'type': 'string'
},
'username': {
'type': 'string',
'required': True,
'unique': True,
},
'password': {
'type': 'string',
'required': True,
},
'roles': {
'type': 'list',
'allowed': ['user', 'admin'],
'required': True,
},
}
ACCOUNT_ENDPOINT = {
'item_title': 'account',
'additional_lookup': {
'url': r'regex("[\w]+")',
'field': 'username',
},
# We also disable endpoint caching as we don't want client apps to
# cache account data.
'cache_control': '',
'cache_expires': 0,
'allowed_roles': ['admin'],
'schema': ACCOUNT_SCHEMA,
}
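# Sketch of how this resource definition could be plugged into an Eve app; the
# resource name 'accounts' and the inline settings dict are assumptions made for
# illustration and are not part of this module.
def _example_eve_app():
    from eve import Eve
    settings = {
        'DOMAIN': {
            'accounts': ACCOUNT_ENDPOINT,
        },
    }
    return Eve(settings=settings)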
|
py | b4088b903e4d6e1435a63e7cf4724043e8ae9567 | # coding=utf-8
########################
# Player Blueprint
#
# Full route of this blueprint: /music/player/
########################
import re
import logging
from quart import Blueprint, request
from .web_utilities import with_status, get_json_from_request, process_time, dictify_YoutubeAudio
from ._bp_types import StatusType
from ..core.player import Player
from ..core.exceptions import MediaNotLoaded, QueueException, PlayerException, \
YoutubeException, OutsideTimeBounds
app = Blueprint("player", __name__)
player = Player()
# OTHER
log = logging.getLogger(__name__)
# regex: hh:mm:ss[.ms]
time_regex = re.compile(r"([0-9]+:?){1,3}(\.[0-9]+)?")
@app.route("/quickQueue", methods=["POST"])
async def player_quick_queue():
"""
Full route: /music/player/quickQueue
Request (JSON):
song: string
type: Types.PlayType
Queue a song to play (at the end of the queue or next).
"""
json = await get_json_from_request(request)
url = json.get("song")
if not url:
return with_status({"message": "Missing 'song' field"}, 400, StatusType.BAD_REQUEST)
try:
await player.player_queue(url)
except YoutubeException:
return with_status(None, 400, StatusType.ERROR)
else:
return with_status(None, 200, StatusType.OK)
@app.route("/getCurrentSong")
async def player_get_current():
"""
Full route: /music/player/getCurrentSong
Request (JSON): None
:return: info about the current song.
"""
current_song = player._queue.current_audio
if current_song is None:
return with_status(None, 440, StatusType.NOOP)
else:
is_playing = player.player_is_playing()
data = {
"current_song": dictify_YoutubeAudio(current_song),
"is_playing": is_playing,
"time": await player.player_get_time() if is_playing else None
}
return with_status(data, 200, StatusType.OK)
@app.route("/play", methods=["POST"])
async def player_play():
"""
    Full route: /music/player/play
Request (JSON): None
Play the current song.
"""
# no json expected
try:
await player.player_play()
except QueueException:
return with_status(None, 441, StatusType.ERROR)
except PlayerException:
return with_status(None, 444, StatusType.INTERNAL_ERROR)
else:
return with_status(None, 200, StatusType.OK)
@app.route("/pause", methods=["POST"])
async def player_pause():
"""
Full route: /music/player/pause
Request (JSON): None
Pause the current song.
"""
# no json expected
did_pause = await player.player_pause()
if did_pause:
return with_status(None, 200, StatusType.OK)
else:
return with_status(None, 440, StatusType.NOOP)
@app.route("/resume", methods=["POST"])
async def player_resume():
"""
Full route: /music/player/resume
Request (JSON): None
Resume the current song.
"""
# no json expected
try:
did_resume = await player.player_resume()
except MediaNotLoaded:
return with_status(None, 441, StatusType.ERROR)
else:
if did_resume:
return with_status(None, 200, StatusType.OK)
else:
return with_status(None, 440, StatusType.NOOP)
@app.route("/stop", methods=["POST"])
async def player_stop():
"""
    Full route: /music/player/stop
Request (JSON): None
Stop (unload) the current song.
"""
# no json expected
was_playing = await player.player_stop()
if was_playing:
return with_status(None, 200, StatusType.OK)
else:
return with_status(None, 440, StatusType.NOOP)
@app.route("/next", methods=["POST"])
async def player_next():
"""
Full route: /music/player/next
Request (JSON): None
Play the next song in queue.
"""
# no json expected
try:
await player.player_next()
except QueueException:
return with_status(None, 441, StatusType.ERROR)
except PlayerException:
return with_status(None, 444, StatusType.INTERNAL_ERROR)
else:
return with_status(None, 200, StatusType.OK)
@app.route("/previous", methods=["POST"])
async def player_previous():
"""
Full route: /music/player/previous
Request (JSON): None
Play the previous song in queue.
"""
# no json expected
try:
await player.player_previous()
except QueueException:
return with_status(None, 441, StatusType.ERROR)
except PlayerException:
return with_status(None, 444, StatusType.INTERNAL_ERROR)
else:
return with_status(None, 200, StatusType.OK)
async def player_get_time():
# Return a 441 if no song is loaded
if not player.player_is_song_loaded():
return with_status(None, 441, StatusType.ERROR)
# Otherwise, return the data
data = {
"time": await player.player_get_time() or 0,
"total_length": player._queue.current_audio.length,
"is_playing": player.player_is_playing()
}
return with_status(data, 200, StatusType.OK)
async def player_set_time():
json = await get_json_from_request(request)
audio_time = json.get("time")
if not audio_time:
return with_status({"message": "Missing 'time' field"}, 400, StatusType.BAD_REQUEST)
try:
time_in_float = int(audio_time)
    except (TypeError, ValueError):
return with_status({"message": "Invalid 'time' format"}, 400, StatusType.BAD_REQUEST)
try:
log.debug(f"Parsed time: {time_in_float}")
did_change = await player.player_set_time(time_in_float)
except RuntimeError:
return with_status(None, 444, StatusType.INTERNAL_ERROR)
except OutsideTimeBounds:
return with_status(None, 441, StatusType.ERROR)
else:
if did_change:
return with_status(None, 200, StatusType.OK)
else:
return with_status(None, 440, StatusType.NOOP)
@app.route("/audioTime", methods=["GET", "PATCH"])
async def player_audio_time():
"""
Full route /music/player/audioTime (GET and PATCH)
GET: Get the current audio time.
PATCH: Move to the specified time in the song.
Request (JSON):
time: integer
"""
if request.method == "GET":
return await player_get_time()
elif request.method == "PATCH":
return await player_set_time()
else:
return with_status(None, 400, StatusType.BAD_REQUEST)
async def player_volume_get():
# no json
data = {
"volume": player.get_volume()
}
return with_status(data, 200, StatusType.OK)
async def player_volume_set():
json = await get_json_from_request(request)
volume = json.get("volume")
# Validate data
if volume is None:
return with_status({"message": "Missing 'volume' field."}, 400, StatusType.BAD_REQUEST)
try:
volume = int(volume)
    except (TypeError, ValueError):
return with_status({"message": "'volume' is not a number"}, 400, StatusType.BAD_REQUEST)
if not (0 <= volume <= 100):
return with_status({"message": "'volume' not in range 0-100"}, 400, StatusType.BAD_REQUEST)
player.set_volume(volume)
return with_status(None, 200, StatusType.OK)
@app.route("/audioVolume", methods=["GET", "POST"])
async def player_audio_volume():
"""
Full route /music/player/audioVolume (GET and POST)
GET: Get the current volume
POST: Set the volume (0-100)
Request (JSON):
volume: integer
"""
if request.method == "GET":
return await player_volume_get()
elif request.method == "POST":
return await player_volume_set()
else:
return with_status(None, 400, StatusType.BAD_REQUEST)
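# Registration sketch: the application factory argument below is hypothetical;
# the url_prefix matches the "/music/player/" prefix documented at the top of
# this blueprint (the module-level `app` is the Blueprint instance, not the
# Quart application).
def _register_player_blueprint(quart_app):
    quart_app.register_blueprint(app, url_prefix="/music/player")
    return quart_app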
|
py | b4088bde8b88527165761eac2cc408870cc15281 | import json
import django
from django.db import models
from django.utils import timezone
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _, ugettext
# from django.core.urlresolvers import NoReverseMatch, reverse
from django.urls import NoReverseMatch, reverse
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.base import ModelBase
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.db.models.signals import post_migrate
from django.contrib.auth.models import Permission
import datetime
import decimal
from xadmin.util import quote
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def add_view_permissions(sender, **kwargs):
"""
    This syncdb hook takes care of adding a view permission to all our
content types.
"""
# for each of our content types
for content_type in ContentType.objects.all():
# build our permission slug
codename = "view_%s" % content_type.model
# if it doesn't exist..
if not Permission.objects.filter(content_type=content_type, codename=codename):
# add it
Permission.objects.create(content_type=content_type,
codename=codename,
name="Can view %s" % content_type.name)
#print "Added view permission for %s" % content_type.name
# check for all our view permissions after a syncdb
post_migrate.connect(add_view_permissions)
@python_2_unicode_compatible
class Bookmark(models.Model):
title = models.CharField(_(u'Title'), max_length=128)
user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user"), blank=True, null=True,on_delete=models.CASCADE)
url_name = models.CharField(_(u'Url Name'), max_length=64)
content_type = models.ForeignKey(ContentType,on_delete=models.CASCADE)
query = models.CharField(_(u'Query String'), max_length=1000, blank=True)
is_share = models.BooleanField(_(u'Is Shared'), default=False)
@property
def url(self):
base_url = reverse(self.url_name)
if self.query:
base_url = base_url + '?' + self.query
return base_url
def __str__(self):
return self.title
class Meta:
verbose_name = _(u'Bookmark')
verbose_name_plural = _('Bookmarks')
class JSONEncoder(DjangoJSONEncoder):
def default(self, o):
if isinstance(o, datetime.datetime):
return o.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(o, datetime.date):
return o.strftime('%Y-%m-%d')
elif isinstance(o, decimal.Decimal):
return str(o)
elif isinstance(o, ModelBase):
return '%s.%s' % (o._meta.app_label, o._meta.model_name)
else:
try:
return super(JSONEncoder, self).default(o)
except Exception:
return smart_text(o)
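# Small illustration of the encoder above (values made up): datetimes and
# Decimals serialize to strings instead of raising TypeError, e.g.
#   json.dumps({'when': datetime.datetime(2020, 1, 1, 12, 0),
#               'price': decimal.Decimal('9.99')}, cls=JSONEncoder)
#   -> '{"when": "2020-01-01 12:00:00", "price": "9.99"}'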
@python_2_unicode_compatible
class UserSettings(models.Model):
user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user"),on_delete=models.CASCADE)
key = models.CharField(_('Settings Key'), max_length=256)
value = models.TextField(_('Settings Content'))
def json_value(self):
return json.loads(self.value)
def set_json(self, obj):
self.value = json.dumps(obj, cls=JSONEncoder, ensure_ascii=False)
def __str__(self):
return "%s %s" % (self.user, self.key)
class Meta:
verbose_name = _(u'User Setting')
verbose_name_plural = _('User Settings')
@python_2_unicode_compatible
class UserWidget(models.Model):
user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user"),on_delete=models.CASCADE)
page_id = models.CharField(_(u"Page"), max_length=256)
widget_type = models.CharField(_(u"Widget Type"), max_length=50)
value = models.TextField(_(u"Widget Params"))
def get_value(self):
value = json.loads(self.value)
value['id'] = self.id
value['type'] = self.widget_type
return value
def set_value(self, obj):
self.value = json.dumps(obj, cls=JSONEncoder, ensure_ascii=False)
def save(self, *args, **kwargs):
created = self.pk is None
super(UserWidget, self).save(*args, **kwargs)
if created:
try:
portal_pos = UserSettings.objects.get(
user=self.user, key="dashboard:%s:pos" % self.page_id)
portal_pos.value = "%s,%s" % (self.pk, portal_pos.value) if portal_pos.value else self.pk
portal_pos.save()
except Exception:
pass
def __str__(self):
return "%s %s widget" % (self.user, self.widget_type)
class Meta:
verbose_name = _(u'User Widget')
verbose_name_plural = _('User Widgets')
@python_2_unicode_compatible
class Log(models.Model):
action_time = models.DateTimeField(
_('action time'),
default=timezone.now,
editable=False,
)
user = models.ForeignKey(
AUTH_USER_MODEL,
models.CASCADE,
verbose_name=_('user'),
)
ip_addr = models.GenericIPAddressField(_('action ip'), blank=True, null=True)
content_type = models.ForeignKey(
ContentType,
models.SET_NULL,
verbose_name=_('content type'),
blank=True, null=True,
)
object_id = models.TextField(_('object id'), blank=True, null=True)
object_repr = models.CharField(_('object repr'), max_length=200)
action_flag = models.CharField(_('action flag'), max_length=32)
message = models.TextField(_('change message'), blank=True)
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
ordering = ('-action_time',)
def __repr__(self):
return smart_text(self.action_time)
def __str__(self):
if self.action_flag == 'create':
return ugettext('Added "%(object)s".') % {'object': self.object_repr}
elif self.action_flag == 'change':
return ugettext('Changed "%(object)s" - %(changes)s') % {
'object': self.object_repr,
'changes': self.message,
}
elif self.action_flag == 'delete' and self.object_repr:
return ugettext('Deleted "%(object)s."') % {'object': self.object_repr}
return self.message
def get_edited_object(self):
"Returns the edited object represented by this log entry"
return self.content_type.get_object_for_this_type(pk=self.object_id)
|
py | b4088c1f9d98e2719e6dc3655ec713811edf8d02 | #
# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import sys, json, os
sys.path.insert(0, '../common')
from acados_template import AcadosOcp, AcadosOcpSolver, acados_dae_model_json_dump, get_acados_path
from pendulum_model import export_pendulum_ode_model
import numpy as np
import scipy.linalg
from utils import plot_pendulum
TOL = 1e-7
def main(discretization='shooting_nodes'):
# create ocp object to formulate the OCP
ocp = AcadosOcp()
# set model
model = export_pendulum_ode_model()
ocp.model = model
integrator_type = 'LIFTED_IRK' # ERK, IRK, GNSF, LIFTED_IRK
if integrator_type == 'GNSF':
acados_dae_model_json_dump(model)
# structure detection in Matlab/Octave -> produces 'pendulum_ode_gnsf_functions.json'
status = os.system('octave detect_gnsf_from_json.m')
# load gnsf from json
with open(model.name + '_gnsf_functions.json', 'r') as f:
gnsf_dict = json.load(f)
ocp.gnsf_model = gnsf_dict
Tf = 1.0
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
ny_e = nx
N = 15
# discretization
ocp.dims.N = N
# shooting_nodes = np.linspace(0, Tf, N+1)
time_steps = np.linspace(0, 1, N)
time_steps = Tf * time_steps / sum(time_steps)
shooting_nodes = np.zeros((N+1,))
for i in range(len(time_steps)):
shooting_nodes[i+1] = shooting_nodes[i] + time_steps[i]
# nonuniform discretizations can be defined either by shooting_nodes or time_steps:
if discretization == 'shooting_nodes':
ocp.solver_options.shooting_nodes = shooting_nodes
elif discretization == 'time_steps':
ocp.solver_options.time_steps = time_steps
else:
raise NotImplementedError(f"discretization type {discretization} not supported.")
# set num_steps
ocp.solver_options.sim_method_num_steps = 2*np.ones((N,))
ocp.solver_options.sim_method_num_steps[0] = 3
# set num_stages
ocp.solver_options.sim_method_num_stages = 2*np.ones((N,))
ocp.solver_options.sim_method_num_stages[0] = 4
# set cost
Q = 2*np.diag([1e3, 1e3, 1e-2, 1e-2])
R = 2*np.diag([1e-2])
ocp.cost.W_e = Q
ocp.cost.W = scipy.linalg.block_diag(Q, R)
ocp.cost.cost_type = 'LINEAR_LS'
ocp.cost.cost_type_e = 'LINEAR_LS'
ocp.cost.Vx = np.zeros((ny, nx))
ocp.cost.Vx[:nx,:nx] = np.eye(nx)
Vu = np.zeros((ny, nu))
Vu[4,0] = 1.0
ocp.cost.Vu = Vu
ocp.cost.Vx_e = np.eye(nx)
ocp.cost.yref = np.zeros((ny, ))
ocp.cost.yref_e = np.zeros((ny_e, ))
# set constraints
Fmax = 80
ocp.constraints.lbu = np.array([-Fmax])
ocp.constraints.ubu = np.array([+Fmax])
x0 = np.array([0.0, np.pi, 0.0, 0.0])
ocp.constraints.x0 = x0
ocp.constraints.idxbu = np.array([0])
ocp.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM' # FULL_CONDENSING_QPOASES
ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'
ocp.solver_options.integrator_type = integrator_type
ocp.solver_options.print_level = 0
ocp.solver_options.nlp_solver_type = 'SQP' # SQP_RTI, SQP
# set prediction horizon
ocp.solver_options.tf = Tf
ocp.solver_options.initialize_t_slacks = 1
# Set additional options for Simulink interface:
acados_path = get_acados_path()
json_path = os.path.join(acados_path, 'interfaces/acados_template/acados_template')
with open(json_path + '/simulink_default_opts.json', 'r') as f:
simulink_opts = json.load(f)
ocp_solver = AcadosOcpSolver(ocp, json_file = 'acados_ocp.json', simulink_opts = simulink_opts)
# ocp_solver = AcadosOcpSolver(ocp, json_file = 'acados_ocp.json')
simX = np.ndarray((N+1, nx))
simU = np.ndarray((N, nu))
# change options after creating ocp_solver
ocp_solver.options_set("step_length", 0.99999)
ocp_solver.options_set("globalization", "fixed_step") # fixed_step, merit_backtracking
ocp_solver.options_set("tol_eq", TOL)
ocp_solver.options_set("tol_stat", TOL)
ocp_solver.options_set("tol_ineq", TOL)
ocp_solver.options_set("tol_comp", TOL)
# initialize solver
for i in range(N):
ocp_solver.set(i, "x", x0)
status = ocp_solver.solve()
if status not in [0, 2]:
raise Exception('acados returned status {}. Exiting.'.format(status))
# get primal solution
for i in range(N):
simX[i,:] = ocp_solver.get(i, "x")
simU[i,:] = ocp_solver.get(i, "u")
simX[N,:] = ocp_solver.get(N, "x")
print("inequality multipliers at stage 1")
print(ocp_solver.get(1, "lam")) # inequality multipliers at stage 1
print("slack values at stage 1")
print(ocp_solver.get(1, "t")) # slack values at stage 1
print("multipliers of dynamic conditions between stage 1 and 2")
print(ocp_solver.get(1, "pi")) # multipliers of dynamic conditions between stage 1 and 2
# initialize ineq multipliers and slacks at stage 1
ocp_solver.set(1, "lam", np.zeros(2,))
ocp_solver.set(1, "t", np.zeros(2,))
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
# timings
time_tot = ocp_solver.get_stats("time_tot")
time_lin = ocp_solver.get_stats("time_lin")
time_sim = ocp_solver.get_stats("time_sim")
time_qp = ocp_solver.get_stats("time_qp")
print(f"timings OCP solver: total: {1e3*time_tot}ms, lin: {1e3*time_lin}ms, sim: {1e3*time_sim}ms, qp: {1e3*time_qp}ms")
# print("simU", simU)
# print("simX", simX)
iterate_filename = f'final_iterate_{discretization}.json'
ocp_solver.store_iterate(filename=iterate_filename, overwrite=True)
plot_pendulum(shooting_nodes, Fmax, simU, simX, latexify=False)
del ocp_solver
if __name__ == "__main__":
discretizations = ['shooting_nodes', 'time_steps']
for discretization in discretizations:
main(discretization=discretization)
import json
# compare iterates
iterate_filename = f'final_iterate_shooting_nodes.json'
with open(iterate_filename, 'r') as f:
iterate0 = json.load(f)
iterate_filename = f'final_iterate_time_steps.json'
with open(iterate_filename, 'r') as f:
iterate1 = json.load(f)
assert iterate1.keys() == iterate0.keys()
for k in iterate0:
assert(len(iterate0[k]) == len(iterate1[k]))
if len(iterate0[k]) > 0:
diff = np.max(np.abs(np.array(iterate0[k]) - np.array(iterate1[k])))
if diff > TOL:
raise Exception(f'results for {k}, do not match up to accuracy {TOL}, diff is {diff}')
print(f'Check passed: discretization formulations are equivalent: {discretizations}') |
py | b4088c34d2f5494f23938dd405984f74275168dd | # coding: utf-8
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from joeynmt.helpers import freeze_params
from joeynmt.transformer_layers import \
TransformerEncoderLayer, PositionalEncoding
#pylint: disable=abstract-method
class Encoder(nn.Module):
"""
Base encoder class
"""
@property
def output_size(self):
"""
Return the output size
:return:
"""
return self._output_size
class RecurrentEncoder(Encoder):
"""Encodes a sequence of word embeddings"""
#pylint: disable=unused-argument
def __init__(self,
rnn_type: str = "gru",
hidden_size: int = 1,
emb_size: int = 1,
num_layers: int = 1,
dropout: float = 0.,
emb_dropout: float = 0.,
bidirectional: bool = True,
freeze: bool = False,
**kwargs) -> None:
"""
Create a new recurrent encoder.
:param rnn_type: RNN type: `gru` or `lstm`.
:param hidden_size: Size of each RNN.
:param emb_size: Size of the word embeddings.
:param num_layers: Number of encoder RNN layers.
:param dropout: Is applied between RNN layers.
:param emb_dropout: Is applied to the RNN input (word embeddings).
:param bidirectional: Use a bi-directional RNN.
:param freeze: freeze the parameters of the encoder during training
:param kwargs:
"""
super().__init__()
self.emb_dropout = torch.nn.Dropout(p=emb_dropout, inplace=False)
self.type = rnn_type
self.emb_size = emb_size
rnn = nn.GRU if rnn_type == "gru" else nn.LSTM
self.rnn = rnn(
emb_size, hidden_size, num_layers, batch_first=True,
bidirectional=bidirectional,
dropout=dropout if num_layers > 1 else 0.)
self._output_size = 2 * hidden_size if bidirectional else hidden_size
if freeze:
freeze_params(self)
#pylint: disable=invalid-name, unused-argument
def _check_shapes_input_forward(self, embed_src: Tensor, src_length: Tensor,
mask: Tensor) -> None:
"""
        Make sure the shapes of the inputs to `self.forward` are correct.
Same input semantics as `self.forward`.
:param embed_src: embedded source tokens
:param src_length: source length
:param mask: source mask
"""
assert embed_src.shape[0] == src_length.shape[0]
assert embed_src.shape[2] == self.emb_size
# assert mask.shape == embed_src.shape
assert len(src_length.shape) == 1
#pylint: disable=arguments-differ
def forward(self, embed_src: Tensor, src_length: Tensor, mask: Tensor) \
-> (Tensor, Tensor):
"""
Applies a bidirectional RNN to sequence of embeddings x.
The input mini-batch x needs to be sorted by src length.
x and mask should have the same dimensions [batch, time, dim].
:param embed_src: embedded src inputs,
shape (batch_size, src_len, embed_size)
:param src_length: length of src inputs
(counting tokens before padding), shape (batch_size)
:param mask: indicates padding areas (zeros where padding), shape
(batch_size, src_len, embed_size)
:return:
- output: hidden states with
shape (batch_size, max_length, directions*hidden),
- hidden_concat: last hidden state with
shape (batch_size, directions*hidden)
"""
self._check_shapes_input_forward(embed_src=embed_src,
src_length=src_length,
mask=mask)
# apply dropout to the rnn input
embed_src = self.emb_dropout(embed_src)
packed = pack_padded_sequence(embed_src, src_length, batch_first=True)
output, hidden = self.rnn(packed)
#pylint: disable=unused-variable
if isinstance(hidden, tuple):
hidden, memory_cell = hidden
output, _ = pad_packed_sequence(output, batch_first=True)
# hidden: dir*layers x batch x hidden
# output: batch x max_length x directions*hidden
batch_size = hidden.size()[1]
# separate final hidden states by layer and direction
hidden_layerwise = hidden.view(self.rnn.num_layers,
2 if self.rnn.bidirectional else 1,
batch_size, self.rnn.hidden_size)
# final_layers: layers x directions x batch x hidden
# concatenate the final states of the last layer for each directions
# thanks to pack_padded_sequence final states don't include padding
fwd_hidden_last = hidden_layerwise[-1:, 0]
bwd_hidden_last = hidden_layerwise[-1:, 1]
# only feed the final state of the top-most layer to the decoder
#pylint: disable=no-member
hidden_concat = torch.cat(
[fwd_hidden_last, bwd_hidden_last], dim=2).squeeze(0)
# final: batch x directions*hidden
return output, hidden_concat
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.rnn)
class TransformerEncoder(Encoder):
"""
Transformer Encoder
"""
#pylint: disable=unused-argument
def __init__(self,
hidden_size: int = 512,
ff_size: int = 2048,
num_layers: int = 8,
num_heads: int = 4,
dropout: float = 0.1,
emb_dropout: float = 0.1,
freeze: bool = False,
**kwargs):
"""
Initializes the Transformer.
:param hidden_size: hidden size and size of embeddings
:param ff_size: position-wise feed-forward layer size.
(Typically this is 2*hidden_size.)
:param num_layers: number of layers
:param num_heads: number of heads for multi-headed attention
:param dropout: dropout probability for Transformer layers
:param emb_dropout: Is applied to the input (word embeddings).
:param freeze: freeze the parameters of the encoder during training
:param kwargs:
"""
super().__init__()
# build all (num_layers) layers
self.layers = nn.ModuleList([
TransformerEncoderLayer(size=hidden_size, ff_size=ff_size,
num_heads=num_heads, dropout=dropout)
for _ in range(num_layers)])
self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-6)
self.pe = PositionalEncoding(hidden_size)
self.emb_dropout = nn.Dropout(p=emb_dropout)
self._output_size = hidden_size
if freeze:
freeze_params(self)
#pylint: disable=arguments-differ
def forward(self,
embed_src: Tensor,
src_length: Tensor,
mask: Tensor) -> (Tensor, Tensor):
"""
Pass the input (and mask) through each layer in turn.
Applies a Transformer encoder to sequence of embeddings x.
        The input mini-batch x does not need to be sorted by src length.
x and mask should have the same dimensions [batch, time, dim].
:param embed_src: embedded src inputs,
shape (batch_size, src_len, embed_size)
:param src_length: length of src inputs
(counting tokens before padding), shape (batch_size)
:param mask: indicates padding areas (zeros where padding), shape
(batch_size, src_len, embed_size)
:return:
            - output: hidden states with
                shape (batch_size, max_length, hidden_size),
            - None: no separate final hidden state is returned
"""
x = self.pe(embed_src) # add position encoding to word embeddings
x = self.emb_dropout(x)
for layer in self.layers:
x = layer(x, mask)
return self.layer_norm(x), None
def __repr__(self):
return "%s(num_layers=%r, num_heads=%r)" % (
self.__class__.__name__, len(self.layers),
self.layers[0].src_src_att.num_heads)
|
py | b4088cc92b113b26e401d234efa38210ea8fede3 | from gensim import utils
from gensim.models.doc2vec import TaggedDocument
from gensim.models import Doc2Vec
from random import shuffle
import nltk
path = 'ordered_lines.txt'
WINDOW_SIZE = 20
lines = [line for line in open(path, encoding="ISO-8859-1").readlines()]
class LabeledLineSentence(object):
def __init__(self, sources):
self.sources = sources
flipped = {}
# make sure that keys are unique
for key, value in sources.items():
if value not in flipped:
flipped[value] = [key]
else:
raise Exception('Non-unique prefix encountered')
def to_array(self):
self.sentences = []
for source, prefix in self.sources.items():
text = source
tokenizer = nltk.tokenize.TweetTokenizer()
words = tokenizer.tokenize(utils.to_unicode(text))
words = [word.lower() for word in words]
self.sentences.append(TaggedDocument(words=words, tags=[prefix]))
return self.sentences
def sentences_perm(self):
shuffle(self.sentences)
return self.sentences
sources = {}
for i in range(len(lines)):
line = lines[i]
ident = str(i)
sources[line] = 'LINES_' + str(ident)
sentences = LabeledLineSentence(sources)
model = Doc2Vec(window=WINDOW_SIZE, size=100, sample=1e-4, negative=5, workers=4, dm=0)
model.build_vocab(sentences.to_array())
for epoch in range(0, 151):
print('Training epoch ' + str(epoch) + '.')
model.train(sentences.sentences_perm(), total_examples=model.corpus_count, epochs=model.iter)
if epoch % 50 == 0:
model.save('./lines-' + str(epoch) + '.d2v')
|
py | b4088cfcb870d73fb1fc85c78d580cc98613fe6f | #!/usr/bin/python3
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
#tokens = {}
#tokens['app_id'] = 'YOUR_APP_ID'
#tokens['app_key'] = 'YOUR_APP_KEY'
bdb_root_url= 'http://localhost:9984/'
#bdb = BigchainDB('https://test.bigchaindb.com', headers=tokens)
bdb = BigchainDB(bdb_root_url)
alice = generate_keypair()
claim_1 = {'data': {'ipfs_file_hash': 'QmRRKg7vw1zDyNos29dUB3YNem1c3WVLFFbAbtpty5AnqY'},}
claim_2 = {'data': {'ipfs_file_hash': 'QmNunE3njVPRzDQRmgrX5FBKCTuxFvSiXVXcG2qj2frUaq'},}
claim_3 = {'data': {'ipfs_file_hash': 'QmaQ5MKCSpz9jaKrtQzpLBzUzmrv7BFKDYRfRg8ZEmob4B'},}
claim_4 = {'data': {'ipfs_file_hash': 'QmbytFc55UB1B1Egs33t1FvwmRmhsjttRpmGE6Yp97MiMh'},}
claim_5 = {'data': {'ipfs_file_hash': 'QmX6RkEgF4RfsfiaDZLHFJQNoTSCgFGru2aW5UJYGx7RSu'},}
# Until implementing login-logout functionality, It is okay to keep all ooe's as altest
claims = [claim_1, claim_2, claim_3, claim_4, claim_5]
metadata ={'project-name' : 'ipfs'}
for claim in claims:
prepared_creation_tx = bdb.transactions.prepare(
operation='CREATE',
signers=alice.public_key,
asset=claim,
metadata=metadata
)
fulfilled_creation_tx = bdb.transactions.fulfill(prepared_creation_tx, private_keys=alice.private_key)
print("Inserting Claim {}".format(claim));
bdb.transactions.send_commit(fulfilled_creation_tx)
|
py | b4088d96d4365a6d553dba24c0a456686be89b20 | import numpy as np
from ...transformation.view import View
class Camera(View):
def __init__(self, pos, target_pos=(0, 0, 0), camera_up=(0, 1, 0)):
super().__init__(pos, target_pos, camera_up)
def save(self, file_path):
np.save(file_path, self.matrix4)
@staticmethod
def load(file_path):
matrix = np.load(file_path)
camera = Camera((0, 0, 0))
camera._matrix = matrix
return camera |
py | b4088d9740e3943e2ded9444f9002fc0891e7049 | # -*- coding: utf-8 -*-
from mixem.distribution import Distribution
import numpy as np
import scipy.special
def l2norm(x,axis=None):
res = np.sum(x**2,axis=axis)**0.5
return res
# class vmfDistribution(mixem.distribution.Distribution):
class vmfDistribution(Distribution):
"""Von-mises Fisher distribution with parameters (mu, kappa).
Ref: Clustering on the Unit Hypersphere using von Mises-Fisher Distributions
http://www.jmlr.org/papers/volume6/banerjee05a/banerjee05a.pdf
"""
def __init__(self, mu,
kappa = None,
):
mu = np.array(mu)
assert len(mu.shape) == 1, "Expect mu to be 1D vector!"
if all(mu==0):
self.dummy = True
else:
self.dummy = False
if kappa is not None:
assert len(np.shape(kappa)) == 0,"Expect kappa to be 0D vector"
kappa = float(kappa)
self.kappa = kappa
self.mu = mu
self.radius = np.sum(self.mu ** 2) ** 0.5
self.D = len(mu)
def log_density(self, data):
# L2 = np.sum(np.square(data),axis=1,keepdims=1)
# return np.dot(data, self.mu) * L2 / L2
logP = np.dot(data, self.mu)
if self.kappa is not None:
normTerm = ( - np.log(scipy.special.iv(self.D/2. -1., self.kappa ))
+ np.log(self.kappa) * (self.D/2. - 1.)
- np.log(2*np.pi) * self.D/2.
)
logP = logP * self.kappa + normTerm
return logP
def estimate_parameters(self, data, weights):
if not self.dummy:
L2 = np.sum(np.square(data),axis=1,keepdims=1)
L2sqrt = np.sqrt(L2)
# fct = np.exp(L2sqrt)/L2sqrt
# fct = np.exp(L2sqrt)
fct = 1.
wdata = data * fct
wwdata = wdata * weights[:, np.newaxis]
rvct = np.sum(wwdata, axis=0) / np.sum(weights)
rnorm = l2norm(rvct)
self.mu = rvct / rnorm * self.radius
if self.kappa is not None:
r = rnorm
self.kappa = (r * self.D - r **3 )/(1. - r **2)
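    # Worked example of the kappa update above (Banerjee et al., Sec. 4.2.1),
    # with made-up numbers: for D = 3 dimensions and a mean resultant length
    # r = l2norm(sum_i w_i x_i / sum_i w_i) = 0.9, the approximation gives
    #   kappa ~ (r * D - r**3) / (1 - r**2) = (2.7 - 0.729) / 0.19 ~ 10.4,
    # i.e. tightly concentrated samples (r close to 1) imply a large kappa.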
def __repr__(self):
po = np.get_printoptions()
np.set_printoptions(precision=3)
try:
result = "MultiNorm[ฮผ={mu},]".format(mu=self.mu,)
finally:
np.set_printoptions(**po)
return result |
py | b4088e6413595582cc67b83fde4b0c319939693f | peso = float(input('Qual รฉ seu peso ? (Kg)'))
altura = float(input('Qual รฉ sua altura? (m)'))
imc = peso / (altura **2) #calculo do IMC
if imc < 18.5:
print('VOCร ESTร MUITO ABAIXO DO PESO !')
elif 18.5 <= imc < 25:
print('VOCร ESTA NA FAIXA DE PESO NORMAL')
elif 25 <= imc < 30:
print('VOCร ESTร EM SOBREPESO')
elif 30 <= imc < 35:
print('VOCร TEM OBESIDADE ')
elif 35 <= imc < 40:
print('VOCร TEM OBESIDADE SEVERA')
else:
print('OBESIDADE MORBIDA')
print('Seu IMC รฉ {:.2f}'.format(imc))
|
py | b4088f692d224fd4da95afebfd814b778ddab42a | import os
from zipmeta.matchlist import getMatchingZippedFilesMetadata
# Build a dictionary containing metadata about DICOM files in a
# .PvDatasets file from a Bruker BioSpec MRI scanner.
#
# The zipFileName argument should be a PvDatasets file, but does not
# need to have the usual file extension.
#
# Return a tuple containing:
#
# [0]: a dictionary containing metadata about DICOM files in a zip file.
# [1]: An error message string if an error occurs, or None otherwise.
#
# The dictionary contains:
#
# ['numFiles']
# The total number of DICOM files found in all directories.
#
# ['numBytes']
# The total size in bytes of all DICOM files found in all directories.
#
# ['numDirs']
# The total number of directories containing one or more DICOM
# files (non-recursive).
#
# ['dirs']
# A dictionary with one item for each directory that directly contains
# one or more child DICOM files (non-recursive). The key is the
# directory path. The value is a dictionary with two keys:
# 'numFiles' and 'numBytes'. The values are the number of DICOM
# files and the total size of the DICOM files within the directory
# (non-recursive).
def findDicomFiles(zipFileName):
zipMeta = getMatchingZippedFilesMetadata(zipFileName, ['[^/]\.dcm$'])
if zipMeta[1] != None:
return ({}, zipMeta[1])
dicomFileCount = 0
dicomDirCount = 0
dicomBytesCount = 0
dicomDirs = {} # key: dir name, value: (fileCount, byteCount)
numFilesKey = 'numFiles'
numBytesKey = 'numBytes'
for meta in zipMeta[0]:
# sys.stdout.write('{0:8} {1}\n'.format(meta[1], meta[0]))
dicomFileCount += 1
dicomBytesCount += meta[1]
(dirName, fileName) = os.path.split(meta[0])
if dirName in dicomDirs:
dirMeta = dicomDirs[dirName]
dicomDirs[dirName] = [ dirMeta[0] + 1, dirMeta[1] + meta[1] ]
else:
dicomDirCount += 1
dicomDirs[dirName] = [ 1, meta[1] ]
# sys.stdout.write('\ntotal {0:4} dirs {1:6} files {2:12} bytes\n\n'
# .format(dicomDirCount, dicomFileCount, dicomBytesCount))
resultDirs = {}
for dirName, dirMeta in dicomDirs.items():
# sys.stdout.write('{0:3} files {1:8} bytes {2}\n'
# .format(dirMeta[0], dirMeta[1], dirName))
resultDirs[dirName] = {
numFilesKey: dirMeta[0],
numBytesKey: dirMeta[1]
}
result = {
numFilesKey: dicomFileCount,
numBytesKey: dicomBytesCount,
'numDirs': dicomDirCount,
'dirs': resultDirs
}
return (result, None)
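# Illustrative call (the archive path is hypothetical); the returned dictionary
# follows the layout documented in the comment block above.
def _find_dicom_files_sketch():
    meta, error = findDicomFiles('/data/scans/session_42.PvDatasets')
    if error is not None:
        return
    print('{} dirs, {} files, {} bytes'.format(
        meta['numDirs'], meta['numFiles'], meta['numBytes']))
    for dir_name, dir_meta in meta['dirs'].items():
        print('{:3} files {:8} bytes {}'.format(
            dir_meta['numFiles'], dir_meta['numBytes'], dir_name))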
|
py | b40890ef2bd3d3305996d7d02935ed48b3830516 | #!/usr/bin/env python3
import numpy as np
from .base_converter import BaseConverter
from ..egograph import EgoGraph
from typing import Tuple, Dict
NID = '_NID'
EID = '_EID'
UID = '_UID'
VID = '_VID'
class Ego2Tensor(BaseConverter):
"""
    An object that converts an ego-graph into tensors.
:param graph: the graph to lookup nodes & edges attributes
:param include_edge: whether or not lookup edges attributes
"""
def __init__(self, graph, include_edge=False):
self._graph = graph
self._include_edge = include_edge
def convert(self, egograph: EgoGraph, **kwargs) \
-> Tuple[Tuple[Tuple[Dict[str, np.ndarray],
Dict[str, np.ndarray],
Dict[str, np.ndarray], np.ndarray], ...], ...]:
"""
Fetch weight & attributes of nodes and edges in ego-graph and
        convert the ego-graph into a format that can be fed into tensorflow
:param egograph: :class:`graph2tensor.egograph.EgoGraph` object
:return: A tuple representation of sub-graph as (path#1, path#2, ..., path#N).
`path#N` is a tuple, representing a path in ego-graph, whose element -- `hop`
is triplet as (src, edge, dst, offset) and `src`/`edge`/`dst` is a dict
with attribute name as key and attribute in `numpy.ndarray` format as value.
Besides normal attributes, there are several reserved keys in node & edge attributes dict, for node
attributes there will always be a reserved key named `graph2tensor.NID` which store the
ids of nodes, for edge attributes, there will be 3 reserved keys, `graph2tensor.EID` - ids of edges,
`graph2tensor.UID` - source ids of edges and `graph2tensor.VID` - destination ids of edges.
`segment_ids` is a `np.1darray`, it record the match-up from `dst` & `edge` to `src`,
`[0, 0, 1, 2, 2, 3, 3, 3]` means the 1st & 2nd `dst` node & `edge` belong to 1st `src`
node, 3rd `dst` node & `edge` to 2nd `src`, 4th & 5th `dst` node & `edge` to 3rd `src`,
6th, 7th and 8th `dst` node & `edge` to 4th `src`. see
`Segmentation <https://www.tensorflow.org/api_docs/python/tf/math#Segmentation>`__ for
more details.
e.g.
.. code-block::
(
# path#1
((src, edge, dst, segment_ids), #hop#1
(src, edge, dst, segment_ids), #hop#2
(src, edge, dst, segment_ids)), #hop#3
# path#2
((src, edge, dst, segment_ids), #hop#1
(src, edge, dst, segment_ids), #hop#2
(src, edge, dst, segment_ids)), #hop#3
# path#N
...
)
"""
rst = []
centre_attrs = self._graph.lookup_nodes(egograph.centre_nodes)
centre_attrs[NID] = egograph.centre_nodes.ids
for path in egograph.paths:
hops = []
dst_attrs = centre_attrs
for edge, dst_node in path:
edge_attrs = {
EID: edge.edge_ids,
UID: edge.src_ids,
VID: edge.dst_ids
}
if self._include_edge:
edge_attrs.update(self._graph.lookup_edges(edge))
src_attrs = dst_attrs
dst_attrs = self._graph.lookup_nodes(dst_node)
dst_attrs[NID] = dst_node.ids
segment_ids = np.repeat(np.arange(dst_node.offset.shape[0]), dst_node.offset)
hops.append((src_attrs, edge_attrs, dst_attrs, segment_ids))
rst.append(tuple(hops))
return tuple(rst)
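# Worked illustration of the segment_ids convention described in the docstring
# above (numbers made up): if a hop expands 4 src nodes into 8 dst nodes with
# offset = [2, 1, 2, 3], then
#   segment_ids = np.repeat(np.arange(4), [2, 1, 2, 3]) = [0, 0, 1, 2, 2, 3, 3, 3]
# so e.g. tf.math.segment_sum(dst_feature, segment_ids) aggregates each src
# node's neighbours back onto that src node.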
|
py | b4089163a48bb97dd58c5ad4f2afa0ac2ab376e5 | from sklearn import datasets
from sklearn import linear_model
from sklearn import metrics
from sklearn import model_selection
from sklearn import tree
import starboost as sb
X, y = datasets.load_boston(return_X_y=True)
X_fit, X_val, y_fit, y_val = model_selection.train_test_split(X, y, test_size=0.2, random_state=42)
def rmse(y_true, y_pred):
return metrics.mean_squared_error(y_true, y_pred) ** 0.5
model = sb.BoostingRegressor(
loss=sb.losses.L2Loss(),
base_estimator=tree.DecisionTreeRegressor(max_depth=3, presort=True),
base_estimator_is_tree=True,
n_estimators=30,
init_estimator=linear_model.LinearRegression(),
learning_rate=0.1,
row_sampling=0.8,
col_sampling=0.8,
eval_metric=rmse,
early_stopping_rounds=5,
random_state=42
)
model = model.fit(X_fit, y_fit, eval_set=(X_val, y_val))
y_pred = model.predict(X_val)
print(rmse(y_val, y_pred))
|
py | b4089218412aaae8a5dc43116be9e58d9ede4f6e | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2015 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. [email protected]
# ****************************************************************************
# This module is part of the package GTW.OMP.Auth.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# GTW.OMP.Auth.UI_Spec
#
# Purpose
# UI specification for E_Types defined by GTW.OMP.Auth
#
# Revision Dates
# 26-Feb-2010 (CT) Creation
# 30-Apr-2010 (MG) Adapted to new form's
# 2-May-2010 (MG) Simplified
# 6-May-2010 (MG) Switch to render mode rendering
# 18-Nov-2011 (CT) Import `unicode_literals` from `__future__`
# 24-Jan-2012 (CT) Remove `Form_args`, `*_completer`,
# i.e., stuff related to non-AFS forms
# 22-May-2012 (CT) Fix typo (`Account_P = dict`, not `Account = dict`)
# 26-Jul-2012 (CT) Import `_GTW._RST.Permission`, not `_GTW._NAV.Permission`
# 16-Jan-2013 (CT) Add `Certificate`
# 16-Dec-2015 (CT) Change to `UI_Spec`
#    ««revision-date»»···
#--
from _GTW import GTW
from _TFL import TFL
from _GTW._RST.Permission import Is_Superuser
import _GTW._OMP._PAP
import _TFL.Sorted_By
class UI_Spec (object) :
"""UI specification for E_Types defined by GTW.OMP.Auth"""
Account = dict \
( permission = Is_Superuser ()
)
Certificate = dict \
( permission = Is_Superuser ()
)
Group = dict \
( permission = Is_Superuser ()
)
Account_in_Group = dict \
( permission = Is_Superuser ()
)
# end class UI_Spec
if __name__ != "__main__" :
GTW.OMP.Auth._Export ("UI_Spec")
### __END__ GTW.OMP.Auth.UI_Spec
|
py | b4089271511a2fcb2f3a786942d1691e19ab55c7 |
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 Michael Purcaro, Henry Pratt, Jill Moore, Zhiping Weng
import sys
import os
from models.gwas import Gwas
sys.path.append(os.path.join(os.path.dirname(__file__), '../common/'))
from common.pg_gwas import PGgwas
sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))
from config import Config
class GwasWebServiceWrapper:
def __init__(self, args, ps, cacheW, staticDir):
def makeWS(assembly):
return GwasWebService(args, ps, cacheW[assembly], staticDir, assembly)
self.assemblies = Config.assemblies
self.wss = {a: makeWS(a) for a in self.assemblies}
def process(self, j, args, kwargs):
if "assembly" not in j:
raise Exception("assembly not defined")
if j["assembly"] not in self.assemblies:
raise Exception("invalid assembly")
return self.wss[j["assembly"]].process(j, args, kwargs)
class GwasWebService(object):
def __init__(self, args, ps, cache, staticDir, assembly):
self.args = args
self.ps = ps
self.cache = cache
self.staticDir = staticDir
self.assembly = assembly
self.actions = {"search": self._initialLoad,
"main": self._mainTable,
"cres": self._cres}
def process(self, j, args, kwargs):
action = args[0]
if action not in self.actions:
raise Exception("gwas_ws: invalid action: " + action)
try:
return self.actions[action](j, args[1:])
except:
raise
def _initialLoad(self, j, args):
g = Gwas(self.assembly, self.ps, self.assembly)
return {"gwas": {"studies": g.studies,
"byStudy": g.byStudy},
"gwas_study": "",
"ct": "",
"assembly": self.assembly}
def _mainTable(self, j, args):
g = Gwas(self.assembly, self.ps, self.cache)
self.gwas_study = j["gwas_study"]
if not g.checkStudy(self.gwas_study):
raise Exception("invalid gwas study")
return g.mainTable(self.gwas_study)
def _cres(self, j, args):
g = Gwas(self.assembly, self.ps, self.cache)
self.gwas_study = j["gwas_study"]
if not g.checkStudy(self.gwas_study):
raise Exception("invalid gwas study")
ct = j["cellType"]
# TODO: check ct!
return g.cres(self.gwas_study, ct)
|
py | b408931b85528c32676d7322f57f31f6d333dbb5 | import requests
import ssl
import re
from requests.adapters import HTTPAdapter
from urllib3.poolmanager import PoolManager
from html.parser import HTMLParser
GOOGLE_API_URL = 'https://content.googleapis.com/drive/v2/files/'
GOOGLE_API_PARAMS = {'key': 'AIzaSyAa8yy0GdcGPHdtD083HiGGx_S0vMPScDM'}
GOOGLE_API_HEADERS = {
'X-Origin': 'https://explorer.apis.google.com',
'X-Referer': 'https://explorer.apis.google.com'
}
# Modified regular expression from: https://github.com/circulosmeos/gdown.pl/blob/master/gdown.pl
REG_EXs = [
'^https?://drive.google.com/file/d/([^/]+)',
'id=([^/&]+)'
]
class TitleParser(HTMLParser):
def __init__(self):
super().__init__()
        self.recording = False
        self.title = None
def get_title(self):
return self.title
def handle_starttag(self, tag, attrs):
if(tag.lower() == 'title'):
self.recording = True
def handle_endtag(self, tag):
if(tag.lower() == 'title'):
self.recording = False
def handle_data(self, data):
if(self.recording):
self.title = data
def get_html_title(html_page):
title_parser = TitleParser()
title_parser.feed(html_page)
return title_parser.get_title()
def get_file_id(url):
for i in REG_EXs:
match = re.findall(i, url)
if(match):
return match[0]
return None
def __get_direct_url(file_id):
return 'https://drive.google.com/uc?export=download&id={}'.format(file_id)
def get_confirmed_url(session, file_id):
url = __get_direct_url(file_id)
head = session.head(url).headers
location = head.get('Location') or head.get('location')
if(location):
return location, None
res = session.get(url).content.decode()
for i in res.split('&'):
if i.startswith('confirm'):
return url+'&'+i, None
return None, get_html_title(res)
def get_file_info(file_id):
res = requests.get(GOOGLE_API_URL + file_id, headers = GOOGLE_API_HEADERS, params=GOOGLE_API_PARAMS).json()
if(res.get('error')):
return None, None, True
file_name = res.get('title')
file_size = res.get('fileSize')
file_size = int(file_size) if(file_size) else file_size
return file_name, file_size, False
class TLSAdapter(HTTPAdapter):
# Copyright (C) 2007 Free Software Foundation
# Class is copied from: https://github.com/coursera-dl/coursera-dl/blob/master/coursera/cookies.py , Under LGPLv3+ License
# You may get a copy of the license here: https://github.com/coursera-dl/coursera-dl/blob/master/LICENSE
"""
A customized HTTP Adapter which uses TLS v1.2 for encrypted
connections.
"""
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl.PROTOCOL_TLSv1_2)
def get_session():
session = requests.Session()
session.mount('https://', TLSAdapter())
return session
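# End-to-end sketch; the share link below is a fabricated example, not a real file.
def _gdrive_download_sketch():
    url = 'https://drive.google.com/file/d/1A2b3C4d5E6f7G8h9/view?usp=sharing'
    file_id = get_file_id(url)
    name, size, failed = get_file_info(file_id)
    if failed:
        return None
    session = get_session()
    direct_url, html_title = get_confirmed_url(session, file_id)
    return name, size, direct_url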
|
py | b40893aea52ce387568e706627cf0905917b1002 | # development-specific settings
from .base import *
DEBUG = True
ALLOWED_HOSTS = []
ROOT_URLCONF = 'core.urls.dev'
INSTALLED_APPS = [
'debug_toolbar',
'whitenoise.runserver_nostatic',
] + INSTALLED_APPS
MIDDLEWARE += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INTERNAL_IPS = ('127.0.0.1',)
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(ROOT_DIR, 'db.sqlite3'),
}
}
# cache config
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# send emails to the console
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# logging config
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'static/bundles/',
'STATS_FILE': os.path.join(ROOT_DIR, 'webpack-stats.json'),
}
}
|
py | b40894257a1d2a8aec4f6e324e2bfa15d4fa38c2 | #!/usr/bin/env python3
#author [email protected]
"""RabbitMQ helper class.
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
# pylint: disable=R0903, R0913
import os
import ssl
import logging
import json
from abc import ABC, abstractmethod
import pika
import pycloudmessenger.utils as utils
__rabbit_helper_version_info__ = ('0', '1', '2')
LOGGER = logging.getLogger(__package__)
class RabbitContext():
"""
Holds connection details for a RabbitMQ service
"""
def __init__(self, args: dict, user: str = None, password: str = None):
self.cert_file = None
self.args = args.copy()
#First set up some defaults
if 'broker_timeout' not in self.args:
self.args['broker_timeout'] = 60.0
self.args['broker_user'] = user if user else self.arg_value(self.args, ['broker_user', 'broker_guest_user', 'client_user'])
self.args['broker_password'] = password if password else self.arg_value(self.args, ['broker_password', 'broker_guest_password', 'client_pwd'])
if 'broker_cert_b64' in self.args:
self.cert_file = utils.Certificate(args['broker_cert_b64'])
self.args['broker_pem_path'] = self.cert_file.filename
self.args['broker_tls'] = True
else:
self.args['broker_tls'] = False
#Now check that all required fields are present
cfg = ['broker_host', 'broker_port', 'broker_vhost',
'broker_user', 'broker_password']
for key in cfg:
if not self.args.get(key):
raise Exception(f'{key} is missing from RabbitContext initialisation.')
def __str__(self):
return json.dumps(self.args)
def arg_value(self, args, possibilities):
for p in possibilities:
val = args.get(p)
if val:
return val
raise Exception(f'{possibilities} missing from arguments.')
@classmethod
    def from_credentials_file(cls, cred_file: str, user: str = None, password: str = None):
with open(cred_file) as creds:
args = json.load(creds)
#First, we need to support legacy credential formats
if 'broker' in args:
args['broker_host'] = args.pop('broker')
args['broker_port'] = args.pop('port')
args['broker_vhost'] = args.pop('vhost')
args['broker_user'] = args.pop('client_user')
args['broker_password'] = args.pop('client_pwd')
args['broker_cert_b64'] = args.pop('cert_b64')
        return cls(args, user, password)
def get(self, key: str):
try:
return self.args[key]
except:
return None
def user(self):
return self.get('broker_user')
def pwd(self):
return self.get('broker_password')
def host(self):
return self.get('broker_host')
def port(self):
return self.get('broker_port')
def vhost(self):
return self.get('broker_vhost')
def cert(self):
return self.get('broker_pem_path')
def ssl(self):
return self.get('broker_tls')
def feeds(self):
return self.get('broker_request_queue')
def replies(self):
return self.get('broker_response_queue')
def timeout(self):
return self.get('broker_timeout')
class RabbitQueue():
"""
Holds configuration details for a RabbitMQ Queue
"""
def __init__(self, queue: str = None, auto_delete: bool = False, durable: bool = False, purge: bool = False, prefetch: int = 1):
self.durable = durable
self.auto_delete = auto_delete
self.purge = purge
self.prefetch = prefetch
#If no queue specified, create a temporary, exclusive queue
#This will force a server generated queue name like 'amq.gen....'
if queue:
self.name = queue
self.exclusive = False
else:
self.name = ''
self.exclusive = True
self.name = self.name.strip()
class AbstractRabbitMessenger(ABC):
"""
Communicates with a RabbitMQ service
"""
def __init__(self, context: RabbitContext):
self.context = context
self.pub_queue = None
self.sub_queue = None
self.inbound = 0
self.outbound = 0
self.connection = None
self.channel = None
self.cancel_on_close = False
self.credentials = pika.PlainCredentials(self.context.user(), self.context.pwd())
self.ssl_options = {}
if self.context.ssl():
self.ssl_options['ssl_version'] = ssl.PROTOCOL_TLSv1_2
if self.context.cert():
self.ssl_options['ca_certs'] = self.context.cert()
self.ssl_options['cert_reqs'] = ssl.CERT_REQUIRED
def __enter__(self):
return self
def __exit__(self, *args):
self.stop()
def declare_queue(self, queue: RabbitQueue) -> RabbitQueue:
"""
Declare a queue, creating if required
Throws:
An exception if connection attempt is not successful
Returns:
None
"""
if queue.exclusive or queue.durable:
#Will not raise an exception if access rights insufficient on the queue
#Exception only raised when channel consume takes place
result = self.channel.queue_declare(
queue=queue.name,
exclusive=queue.exclusive,
auto_delete=queue.auto_delete,
durable=queue.durable)
queue.name = result.method.queue
#Useful when testing - clear the queue
if queue.purge:
self.channel.queue_purge(queue=queue.name)
return queue
def establish_connection(self, parameters: pika.ConnectionParameters):
"""
Connect to RabbitMQ service
Throws:
An exception if connection attempt is not successful
Returns:
None
"""
self.connection = pika.BlockingConnection(parameters)
self.channel = self.connection.channel()
def connect(self, connection_attempts: int, retry_delay: int):
"""
Setup connection settings to RabbitMQ service
Throws:
An exception if connection attempt is not successful
Returns:
None
"""
parameters = pika.ConnectionParameters(
self.context.host(), self.context.port(), self.context.vhost(),
self.credentials, ssl=self.context.ssl(), ssl_options=self.ssl_options,
connection_attempts=connection_attempts,
retry_delay=retry_delay)
self.establish_connection(parameters)
def publish(self, message, queue: str, exchange: str = '', mode: int = 1):
"""
Publish a message to a queue
Throws:
Exception - maybe access rights are insufficient on the queue
Returns:
None
"""
self.channel.basic_publish(
exchange=exchange, routing_key=queue, body=message,
properties=pika.BasicProperties(delivery_mode=mode)
)
self.outbound += 1
def stop(self):
"""
Closes open channels and connections
Throws:
Nothing
Returns:
None
"""
try:
if self.channel:
if self.cancel_on_close:
self.channel.cancel()
self.channel.close()
if self.connection:
self.connection.close()
except:
pass
@abstractmethod
def receive(self, handler, timeout: int, max_messages: int):
pass
@abstractmethod
def start(self, publish: RabbitQueue = None, subscribe: RabbitQueue = None, connection_attempts: int = 10, retry_delay: int = 1):
pass
class RabbitTimedOutException(Exception):
pass
class RabbitConsumerException(Exception):
pass
class RabbitClient(AbstractRabbitMessenger):
"""
Communicates with a RabbitMQ service
"""
def start(self, publish: RabbitQueue = None, subscribe: RabbitQueue = None, connection_attempts: int = 10, retry_delay: int = 1):
if publish:
self.pub_queue = publish
if subscribe:
self.sub_queue = subscribe
self.connect(connection_attempts, retry_delay)
def get_subscribe_queue(self):
return self.sub_queue.name if self.sub_queue else None
def establish_connection(self, parameters: pika.ConnectionParameters):
super(RabbitClient, self).establish_connection(parameters)
if self.pub_queue:
self.declare_queue(self.pub_queue)
if self.sub_queue:
self.declare_queue(self.sub_queue)
        #Ensure the consumer only gets 'prefetch' unacknowledged messages at a time
self.channel.basic_qos(prefetch_count=self.sub_queue.prefetch)
def publish(self, message, queue: RabbitQueue = None, exchange: str = '', mode: int = 1):
if not queue:
queue = self.pub_queue
super(RabbitClient, self).publish(message, queue.name, exchange, mode)
def receive(self, handler=None, timeout: int = 30, max_messages: int = 0) -> str:
"""
Start receiving messages, up to max_messages
Throws:
Exception if consume fails
Returns:
The last message received
"""
msgs = 0
body = None
try:
for msg in self.channel.consume(
self.sub_queue.name,
exclusive=self.sub_queue.exclusive,
inactivity_timeout=timeout):
method_frame, properties, body = msg
if not method_frame and not properties and not body:
raise RabbitTimedOutException("Operation timeout reached.")
msgs += 1
self.inbound += 1
self.channel.basic_ack(method_frame.delivery_tag)
if handler:
#body is of type 'bytes' in Python 3+
handler(body)
elif not max_messages:
break
#Stop consuming if message limit reached
if msgs == max_messages:
break
except pika.exceptions.AMQPError as exc:
LOGGER.error(exc)
finally:
self.channel.cancel()
if not msgs:
raise RabbitConsumerException('Consumer cancelled prior to timeout.')
return body
class RabbitDualClient():
"""
Communicates with a RabbitMQ service
"""
def __init__(self, context):
"""
Class initializer
"""
self.context = context
self.subscriber = None
self.publisher = None
self.last_recv_msg = None
def start_subscriber(self, queue: RabbitQueue, client=RabbitClient):
"""
Connect to Castor service and create a queue
Throws:
An exception if connection attempt is not successful
Returns:
Nothing
"""
self.subscriber = client(self.context)
self.subscriber.start(subscribe=queue)
def get_subscribe_queue(self):
return self.subscriber.get_subscribe_queue()
def start_publisher(self, queue: RabbitQueue, client=RabbitClient):
"""
Connect to Castor service and create a queue
Throws:
An exception if connection attempt is not successful
Returns:
Nothing
"""
self.publisher = client(self.context)
self.publisher.start(publish=queue)
def send_message(self, message, queue: RabbitQueue = None):
"""
Publish a message to Castor service
Throws:
An exception if publish is not successful
Returns:
Nothing
"""
self.publisher.publish(message, queue)
def receive_message(self, handler, timeout: int, max_messages: int):
"""
Receive messages from Castor service
Throws:
An exception if receive is not successful
Returns:
Nothing
"""
self.subscriber.receive(handler, timeout, max_messages)
def internal_handler(self, message):
"""
Handler for invoke_service method
Throws:
Nothing
Returns:
Nothing
"""
self.last_recv_msg = message
def invoke_service(self, message, timeout: int = 30):
"""
Publish a message and receive a reply
Throws:
An exception if not successful or timedout
Returns:
The reply dictionary
"""
self.last_recv_msg = None
LOGGER.debug(f"Sending message: {message}")
self.send_message(message)
LOGGER.debug("Waiting for reply...")
#Now wait for the reply
self.subscriber.receive(self.internal_handler, timeout, 1)
LOGGER.debug(f"Received: {self.last_recv_msg}")
return self.last_recv_msg
def stop(self):
"""
Close connection to service
Throws:
An exception if not successful
Returns:
Nothing
"""
self.subscriber.stop()
self.publisher.stop()
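# Illustrative request/reply round trip (a sketch, not part of the original module).
# The credentials file name and the JSON payload are assumptions for the example;
# queue names are taken from the credentials where available.
def _example_invoke_service(cred_file='credentials.json', timeout=30):
    context = RabbitContext.from_credentials_file(cred_file)
    client = RabbitDualClient(context)
    # Publish requests to the service feed queue, listen for replies on the response queue
    client.start_publisher(RabbitQueue(context.feeds()))
    client.start_subscriber(RabbitQueue(context.replies()))
    reply = client.invoke_service(json.dumps({'serviceRequest': 'ping'}), timeout=timeout)
    client.stop()
    return reply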
|
py | b4089489d60ee3b73dd5f1b5035999e6c4f72303 | from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils.validation import check_is_fitted
from feature_engine.dataframe_checks import (
_is_dataframe,
_check_input_matches_training_df,
)
from feature_engine.variable_manipulation import (
_find_numerical_variables,
_define_variables,
)
class DropCorrelatedFeatures(BaseEstimator, TransformerMixin):
"""
DropCorrelatedFeatures finds and removes correlated features.
    Features are removed on a first-found, first-removed basis, with no further
    analysis of which feature in a correlated group is the most informative.
DropCorrelatedFeatures() works only with numerical variables. Categorical variables
will need to be encoded to numerical or will be excluded from the analysis.
Parameters
----------
variables: list, default=None
The list of variables to evaluate. If None, the transformer will evaluate all
numerical variables in the dataset.
method: string, default='pearson'
        Can take 'pearson', 'spearman' or 'kendall'. It refers to the correlation method
to be used to identify the correlated features.
pearson : standard correlation coefficient
kendall : Kendall Tau correlation coefficient
spearman : Spearman rank correlation
See
https://pandas.pydata.org/pandas-docs/stable/reference/api/
pandas.DataFrame.corr.html
for more details.
threshold: float, default=0.8
The correlation threshold above which a feature will be deemed correlated with
another one and removed from the dataset.
Attributes
----------
correlated_features_: set
The correlated features.
correlated_feature_sets_: list
Groups of correlated features. Or in other words, features that are
correlated with each other. Each list represents a group of correlated
features.
    correlated_matrix_: pandas dataframe
        The correlation matrix used to evaluate the features.
Methods
-------
fit: finds the correlated features
transform: removes correlated features
fit_transform: finds and removes correlated features
"""
def __init__(self, variables=None, method="pearson", threshold=0.8):
if method not in ["pearson", "spearman", "kendall"]:
raise ValueError(
"correlation method takes only values 'pearson', 'spearman', 'kendall'"
)
        if not isinstance(threshold, float) or threshold < 0 or threshold > 1:
            raise ValueError("threshold must be a float between 0 and 1")
self.variables = _define_variables(variables)
self.method = method
self.threshold = threshold
def fit(self, X, y=None):
"""
Finds the correlated features
Args:
X: pandas dataframe of shape = [n_samples, n_features]
The training input samples.
Can be the entire dataframe, not just the variables to transform.
y: It is not needed in this transformer. Defaults to None.
            Alternatively takes a pandas Series.
Returns:
self
"""
# check input dataframe
X = _is_dataframe(X)
# find all numerical variables or check those entered are in the dataframe
self.variables = _find_numerical_variables(X, self.variables)
# set to collect features that are correlated
self.correlated_features_ = set()
# create tuples of correlated feature groups
self.correlated_feature_sets_ = []
# the correlation matrix
self.correlated_matrix_ = X[self.variables].corr(method=self.method)
# create set of examined features, helps to determine feature combinations
# to evaluate below
_examined_features = set()
# for each feature in the dataset (columns of the correlation matrix)
for feature in self.correlated_matrix_.columns:
if feature not in _examined_features:
# append so we can exclude when we create the combinations
_examined_features.add(feature)
# here we collect potentially correlated features
# we need this for the correlated groups sets
_temp_set = set([feature])
# features that have not been examined, are not currently examined and
# were not found correlated
_features_to_compare = [
f
for f in self.correlated_matrix_.columns
if f not in _examined_features
]
# create combinations:
for f2 in _features_to_compare:
# if the correlation is higher than the threshold
# we are interested in absolute correlation coefficient value
if abs(self.correlated_matrix_.loc[f2, feature]) > self.threshold:
# add feature (f2) to our correlated set
self.correlated_features_.add(f2)
_temp_set.add(f2)
_examined_features.add(f2)
# if there are correlated features
if len(_temp_set) > 1:
self.correlated_feature_sets_.append(_temp_set)
self.input_shape_ = X.shape
return self
def transform(self, X):
"""
Drops the correlated features from a dataframe.
Args:
X: pandas dataframe of shape = [n_samples, n_features].
The input samples.
Returns:
X_transformed: pandas dataframe
shape = [n_samples, n_features - (correlated features)]
The transformed dataframe with the remaining subset of variables.
"""
# check if fit is performed prior to transform
check_is_fitted(self)
# check if input is a dataframe
X = _is_dataframe(X)
# check if number of columns in test dataset matches to train dataset
_check_input_matches_training_df(X, self.input_shape_[1])
        # drop the correlated features and return the remaining ones
X = X.drop(columns=self.correlated_features_)
return X
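# Illustrative usage (a sketch, not part of the original module): 'b' is an exact
# multiple of 'a', so one of the pair is flagged as correlated and dropped, while the
# independent column 'c' is kept. The column names and sizes are assumptions.
def _example_drop_correlated_features():
    import numpy as np
    import pandas as pd

    rng = np.random.RandomState(0)
    X = pd.DataFrame({'a': rng.normal(size=100), 'c': rng.normal(size=100)})
    X['b'] = 2.0 * X['a']  # perfectly correlated with 'a'
    transformer = DropCorrelatedFeatures(method='pearson', threshold=0.8)
    return transformer.fit_transform(X)  # returns a dataframe without column 'b'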
|
py | b40895a16c18a9c620a7569c7c93ad0045ee6aa7 | import os
from flask import Flask, render_template
from flask.ext import assets
from flask.ext.assets import Environment, Bundle
#from flask.ext.scss import Scss
#from flask.ext.sass import sass
from webassets.filter import get_filter
app = Flask(__name__)
#Scss(app, static_dir='static', asset_dir='assets/stylesheets')
#sass(app, input_dir='assets/stylesheets', output_dir='static')
# debug mode - switch to False for production
app.config['ASSETS_DEBUG'] = True
env = assets.Environment(app)
# debug mode - switch to True for production
env.config['cache'] = False
env.config['manifest'] = False
env.config['sass_bin'] = '/usr/local/bin/sass'
# Tell flask-assets where to look for our sass files.
env.load_path = [
os.path.join(os.path.dirname(__file__), 'assets/stylesheets'),
os.path.join(os.path.dirname(__file__), 'assets'),
os.path.join(os.path.dirname(__file__), 'assets/stylesheets/stylesheets/govuk_frontend_toolkit'),
os.path.join(os.path.dirname(__file__), 'assets/stylesheets/govuk_template')
]
scss = get_filter('scss', as_output=True)
env.register(
'css_all',
assets.Bundle(
'main.scss',
filters='scss',
output='css_all.css'
)
)
env.register(
'css_govuk-template',
assets.Bundle(
'govuk_template/govuk-template.scss',
filters='scss',
output='stylesheets/govuk-template.css',
depends='*.scss'
)
)
env.register(
'css_govuk-template-ie6',
assets.Bundle(
'govuk_template/govuk-template-ie6.scss',
filters='scss',
output='stylesheets/govuk-template-ie6.css',
depends='*.scss'
)
)
env.register(
'css_govuk-template-ie7',
assets.Bundle(
'govuk_template/govuk-template-ie7.scss',
filters='scss',
output='stylesheets/govuk-template-ie7.css',
depends='*.scss'
)
)
env.register(
'css_govuk-template-ie8',
assets.Bundle(
'govuk_template/govuk-template-ie8.scss',
filters='scss',
output='stylesheets/govuk-template-ie8.css',
depends='*.scss'
)
)
env.register(
'css_govuk-template-print',
assets.Bundle(
'govuk_template/govuk-template-print.scss',
filters='scss',
output='stylesheets/govuk-template-print.css',
depends='*.scss'
)
)
@app.route("/")
def index():
return render_template('index.html')
@app.route("/govuk")
def govuk():
return render_template('govuk_template.html')
@app.route("/helloworld")
def helloworld():
return render_template('hello-world.html')
if __name__ == "__main__":
app.run(debug=True) |
py | b40895dc4fe10856f80236e7ed1407d4511f447a | # TRAIN_ON_SMALL=True python3 -m torch.distributed.launch --nproc_per_node=2 train_nq.py
from transformers import BigBirdForQuestionAnswering, BigBirdTokenizer
from transformers import TrainingArguments, Trainer
from datasets import load_dataset
import torch_xla.distributed.xla_multiprocessing as xmp
import torch
import torch.nn as nn
import numpy as np
import wandb
import os
os.environ['WANDB_WATCH'] = "false"
os.environ['WANDB_PROJECT'] = "bigbird-tpu"
TRAIN_ON_SMALL = eval(os.environ.pop("TRAIN_ON_SMALL", "False"))
from params import (
SCHEDULER,
WARMUP_STEPS,
MODEL_ID,
SEED,
GROUP_BY_LENGTH,
LEARNING_RATE,
MAX_EPOCHS,
FP16,
)
RESUME_TRAINING = None
def collate_fn(features, pad_id=0, threshold=1024):
def pad_elems(ls, pad_id, maxlen):
while len(ls)<maxlen:
ls.append(pad_id)
return ls
# maxlen = max([len(x['input_ids']) for x in features])
maxlen = 4096 # TPU static-padding
# avoid attention_type switching
# if maxlen < threshold:
# maxlen = threshold
# dynamic padding
input_ids = [pad_elems(x['input_ids'], pad_id, maxlen) for x in features]
input_ids = torch.tensor(input_ids, dtype=torch.long)
# padding mask
attention_mask = input_ids.clone()
attention_mask[attention_mask != pad_id] = 1
attention_mask[attention_mask == pad_id] = 0
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"start_positions": torch.tensor([x['start_token'] for x in features], dtype=torch.long),
"end_positions": torch.tensor([x['end_token'] for x in features], dtype=torch.long),
"pooler_label": torch.tensor([x["category"] for x in features]),
}
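# Illustrative smoke test for collate_fn (a sketch, not part of the original script);
# the toy token ids, answer spans and categories below are assumptions.
def _collate_fn_example():
    features = [
        {"input_ids": [65, 101, 102, 66], "start_token": 1, "end_token": 2, "category": 0},
        {"input_ids": [65, 103, 66], "start_token": 0, "end_token": 0, "category": 3},
    ]
    batch = collate_fn(features)
    # Every example is padded to the static 4096-token length expected on TPU,
    # and the attention mask keeps only the real (non-pad) tokens.
    return batch["input_ids"].shape, batch["attention_mask"].sum(-1)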
class BigBirdForNaturalQuestions(BigBirdForQuestionAnswering):
""" BigBirdForQuestionAnswering with CLS Head over the top for predicting category """
def __init__(self, config):
super().__init__(config, add_pooling_layer=True)
self.cls = nn.Linear(config.hidden_size, 5)
def forward(self, input_ids, attention_mask=None, start_positions=None, end_positions=None, pooler_label=None):
outputs = super().forward(input_ids, attention_mask=attention_mask)
cls_out = self.cls(outputs.pooler_output)
loss = None
if start_positions is not None and end_positions is not None:
loss_fct = nn.CrossEntropyLoss()
            # If the positions carry an extra dimension (e.g. on multi-GPU), squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
start_loss = loss_fct(outputs.start_logits, start_positions)
end_loss = loss_fct(outputs.end_logits, end_positions)
if pooler_label is not None:
cls_loss = loss_fct(cls_out, pooler_label)
loss = (start_loss + end_loss + cls_loss) / 3
else:
loss = (start_loss + end_loss) / 2
return {
"loss": loss,
"start_logits": outputs.start_logits,
"end_logits": outputs.end_logits,
"cls_out": cls_out,
}
def main():
# "nq-training.jsonl" & "nq-validation.jsonl" are obtained from running `prepare_nq.py`
    tr_dataset = load_dataset("json", data_files="data/nq-training.jsonl")['train']
val_dataset = load_dataset("json", data_files="data/nq-validation.jsonl")['train']
if TRAIN_ON_SMALL:
# this will run for ~1 day
np.random.seed(SEED)
indices = np.random.randint(0, 298152, size=8000)
tr_dataset = tr_dataset.select(indices)
np.random.seed(SEED)
indices = np.random.randint(0, 9000, size=1000)
val_dataset = val_dataset.select(indices)
print(tr_dataset, val_dataset)
tokenizer = BigBirdTokenizer.from_pretrained(MODEL_ID)
model = BigBirdForNaturalQuestions.from_pretrained(MODEL_ID, gradient_checkpointing=False)
args = TrainingArguments(
output_dir="bigbird-nq-complete-tuning",
overwrite_output_dir=False,
do_train=True,
do_eval=True,
evaluation_strategy="epoch",
# eval_steps=4000,
per_device_train_batch_size=1,
per_device_eval_batch_size=1,
gradient_accumulation_steps=4,
# group_by_length=GROUP_BY_LENGTH,
learning_rate=LEARNING_RATE,
warmup_steps=WARMUP_STEPS,
lr_scheduler_type=SCHEDULER,
num_train_epochs=MAX_EPOCHS,
tpu_num_cores=8,
logging_strategy="no",
# logging_steps=500,
save_strategy="steps",
save_steps=250,
run_name="bigbird-nq-complete-tuning",
disable_tqdm=False,
# load_best_model_at_end=True,
report_to="wandb",
remove_unused_columns=False,
fp16=FP16,
label_names=["pooler_label", "start_positions", "end_positions"], # it's important to log eval_loss
)
print("Batch Size", args.train_batch_size)
print("Parallel Mode", args.parallel_mode)
trainer = Trainer(
model=model,
args=args,
data_collator=collate_fn,
train_dataset=tr_dataset,
eval_dataset=val_dataset,
)
try:
trainer.train(resume_from_checkpoint=RESUME_TRAINING)
trainer.save_model("final-model")
except KeyboardInterrupt:
trainer.save_model("interrupted-natural-questions")
wandb.finish()
def _mp_fn(index):
main()
if __name__ == "__main__":
# xmp.spawn(_mp_fn, args=()) # not working right now :(
main() |
py | b40897ca0b4b0bb459c0770e1aefcf51e98c14f3 | from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage.color import rgb2gray
from skimage.morphology import watershed, disk
from skimage import data
from skimage.filters import rank
from skimage.util import img_as_ubyte
image = img_as_ubyte(rgb2gray(imread("lena.png")))
# denoise image
denoised = rank.median(image, disk(2))
# find continuous region (low gradient -
# where less than 10 for this image) --> markers
# disk(5) is used here to get a smoother image
markers = rank.gradient(denoised, disk(5)) < 10
markers = ndi.label(markers)[0]
# local gradient (disk(2) is used to keep edges thin)
gradient = rank.gradient(denoised, disk(2))
# process the watershed
labels = watershed(gradient, markers)
# display results
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 8),
sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title("Original")
ax[1].imshow(gradient, cmap=plt.cm.nipy_spectral, interpolation='nearest')
ax[1].set_title("Local Gradient")
ax[2].imshow(markers, cmap=plt.cm.nipy_spectral, interpolation='nearest')
ax[2].set_title("Markers")
ax[3].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[3].imshow(labels, cmap=plt.cm.nipy_spectral, interpolation='nearest', alpha=.7)
ax[3].set_title("Segmented")
for a in ax:
a.axis('off')
fig.tight_layout()
plt.show() |
py | b408982ecf233762550c52480cb307d640642169 | from django import template
from ..models import References
from django.db.models import Count
from django.utils.safestring import mark_safe
import markdown
register = template.Library()
@register.inclusion_tag('references/references/latest_references.html')
def show_latest_references(count=5):
latest_references = References.published.order_by('-titleRf')[:count]
return {'latest_references' : latest_references }
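# Illustrative template usage (a sketch; the name the tag library is loaded under is
# an assumption -- it is the file name of this module):
#   {% load references_tags %}
#   {% show_latest_references 3 %}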
|
py | b4089844de39c75ad7bf362c6f58fa5605a5884c | from django import views
class DispatchView(views.View):
def get_http_handler(self, method):
return dict(
put=self.put if hasattr(self, 'put') else self.http_method_not_allowed,
patch=self.patch if hasattr(self, 'patch') else self.http_method_not_allowed,
delete=self.delete if hasattr(self, 'delete') else self.http_method_not_allowed,
).get(method, None)
def dispatch(self, request, *args, **kwargs):
http_method = request.POST.get('http_method', '')
handler = self.get_http_handler(http_method)
if not handler:
return super().dispatch(request, *args, **kwargs)
return handler(request, *args, **kwargs)
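# Illustrative usage (a sketch, not part of the original module): a subclass adds the
# verb handlers it needs, and a plain HTML form tunnels the real verb through the
# hidden "http_method" field read by dispatch() above. Names below are assumptions.
#
#   class ArticleView(DispatchView):
#       def delete(self, request, *args, **kwargs):
#           ...  # delete the object, then redirect
#
#   <form method="post">
#     {% csrf_token %}
#     <input type="hidden" name="http_method" value="delete">
#     <button type="submit">Delete</button>
#   </form>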
|
py | b40899862fe4acc98966ba11ab5090c08863654e | """OAuth support functionality
"""
from __future__ import unicode_literals
# Try importing the Python 3 packages first, falling back to 2.x packages when it fails.
try:
from http import server as http_server
except ImportError:
import BaseHTTPServer as http_server
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
import logging
import random
import os.path
import sys
import six
from requests_toolbelt import MultipartEncoder
import requests
from requests_oauthlib import OAuth1
from . import sockutil, exceptions, html
from .exceptions import FlickrError
class OAuthTokenHTTPHandler(http_server.BaseHTTPRequestHandler):
def do_GET(self):
# /?oauth_token=72157630789362986-5405f8542b549e95&oauth_verifier=fe4eac402339100e
qs = urllib_parse.urlsplit(self.path).query
url_vars = urllib_parse.parse_qs(qs)
oauth_token = url_vars['oauth_token'][0]
oauth_verifier = url_vars['oauth_verifier'][0]
if six.PY2:
self.server.oauth_token = oauth_token.decode('utf-8')
self.server.oauth_verifier = oauth_verifier.decode('utf-8')
else:
self.server.oauth_token = oauth_token
self.server.oauth_verifier = oauth_verifier
assert (isinstance(self.server.oauth_token, six.string_types))
assert (isinstance(self.server.oauth_verifier, six.string_types))
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(html.auth_okay_html)
class OAuthTokenHTTPServer(http_server.HTTPServer):
"""HTTP server on a random port, which will receive the OAuth verifier."""
def __init__(self):
self.log = logging.getLogger('%s.%s' % (self.__class__.__module__, self.__class__.__name__))
self.local_addr = self.listen_port()
self.log.info('Creating HTTP server at %s', self.local_addr)
http_server.HTTPServer.__init__(self, self.local_addr, OAuthTokenHTTPHandler)
self.oauth_verifier = None
def listen_port(self):
"""Returns the hostname and TCP/IP port number to listen on.
By default finds a random free port between 1100 and 20000.
"""
# Find a random free port
local_addr = ('localhost', int(random.uniform(1100, 20000)))
self.log.debug('Finding free port starting at %s', local_addr)
# return local_addr
return sockutil.find_free_port(local_addr)
def wait_for_oauth_verifier(self, timeout=None):
"""Starts the HTTP server, waits for the OAuth verifier."""
if self.oauth_verifier is None:
self.timeout = timeout
self.handle_request()
if self.oauth_verifier:
self.log.info('OAuth verifier: %s' % self.oauth_verifier)
return self.oauth_verifier
@property
def oauth_callback_url(self):
return 'http://localhost:%i/' % (self.local_addr[1],)
class FlickrAccessToken(object):
"""Flickr access token.
Contains the token, token secret, and the user's full name, username and NSID.
"""
levels = (u'read', u'write', u'delete')
def __init__(self, token, token_secret, access_level,
fullname=u'', username=u'', user_nsid=u''):
assert isinstance(token, six.text_type), 'token should be unicode text'
assert isinstance(token_secret, six.text_type), 'token_secret should be unicode text'
assert isinstance(access_level, six.text_type), 'access_level should be unicode text, is %r' % type(
access_level)
assert isinstance(fullname, six.text_type), 'fullname should be unicode text'
assert isinstance(username, six.text_type), 'username should be unicode text'
assert isinstance(user_nsid, six.text_type), 'user_nsid should be unicode text'
access_level = access_level.lower()
assert access_level in self.levels, 'access_level should be one of %r' % (self.levels,)
self.token = token
self.token_secret = token_secret
self.access_level = access_level
self.fullname = fullname
self.username = username
self.user_nsid = user_nsid
    def __str__(self):
        text = self.__unicode__()
        # Python 2 expects bytes from __str__, Python 3 expects text.
        return text.encode('utf-8') if six.PY2 else text
def __unicode__(self):
return 'FlickrAccessToken(token=%s, fullname=%s, username=%s, user_nsid=%s)' % (
self.token, self.fullname, self.username, self.user_nsid)
def __repr__(self):
return str(self)
def has_level(self, access_level):
"""Returns True iff the token's access level implies the given access level."""
my_idx = self.levels.index(self.access_level)
q_idx = self.levels.index(access_level)
return q_idx <= my_idx
class OAuthFlickrInterface(object):
"""Interface object for handling OAuth-authenticated calls to Flickr."""
session = requests.Session()
REQUEST_TOKEN_URL = "https://www.flickr.com/services/oauth/request_token"
AUTHORIZE_URL = "https://www.flickr.com/services/oauth/authorize"
ACCESS_TOKEN_URL = "https://www.flickr.com/services/oauth/access_token"
def __init__(self, api_key, api_secret, oauth_token=None, default_timeout=None):
self.log = logging.getLogger('%s.%s' % (self.__class__.__module__, self.__class__.__name__))
assert isinstance(api_key, six.text_type), 'api_key must be unicode string'
assert isinstance(api_secret, six.text_type), 'api_secret must be unicode string'
token = None
secret = None
        if oauth_token is not None and oauth_token.token:
token = oauth_token.token.token
secret = oauth_token.token.token_secret
self.oauth = OAuth1(api_key, api_secret, token, secret, signature_type='auth_header')
self.oauth_token = oauth_token
self.auth_http_server = None
self.requested_permissions = None
self.default_timeout = default_timeout
@property
def key(self):
"""Returns the OAuth key"""
return self.oauth.client.client_key
@property
def resource_owner_key(self):
"""Returns the OAuth resource owner key"""
return self.oauth.client.resource_owner_key
@resource_owner_key.setter
def resource_owner_key(self, new_key):
"""Stores the OAuth resource owner key"""
self.oauth.client.resource_owner_key = new_key
@property
def resource_owner_secret(self):
"""Returns the OAuth resource owner secret"""
return self.oauth.client.resource_owner_secret
@resource_owner_secret.setter
def resource_owner_secret(self, new_secret):
"""Stores the OAuth resource owner secret"""
self.oauth.client.resource_owner_secret = new_secret
@property
def verifier(self):
"""Returns the OAuth verifier."""
return self.oauth.client.verifier
@verifier.setter
def verifier(self, new_verifier):
"""Sets the OAuth verifier"""
assert isinstance(new_verifier, six.text_type), 'verifier must be unicode text type'
self.oauth.client.verifier = new_verifier
@property
def token(self):
return self.oauth_token
@token.setter
def token(self, new_token):
if new_token is None:
self.oauth_token = None
self.oauth.client.resource_owner_key = None
self.oauth.client.resource_owner_secret = None
self.oauth.client.verifier = None
self.requested_permissions = None
return
assert isinstance(new_token, FlickrAccessToken), new_token
self.oauth_token = new_token
self.oauth.client.resource_owner_key = new_token.token
self.oauth.client.resource_owner_secret = new_token.token_secret
self.oauth.client.verifier = None
self.requested_permissions = new_token.access_level
def _find_cache_dir(self):
"""Returns the appropriate directory for the HTTP cache."""
if sys.platform.startswith('win'):
return os.path.expandvars('%APPDATA%/flickrapi/cache')
return os.path.expanduser('~/.flickrapi/cache')
def do_request(self, url, params=None, timeout=None):
"""Performs the HTTP request, signed with OAuth.
:param timeout: optional request timeout, in seconds.
:type timeout: float
@return: the response content
"""
req = self.session.post(url,
data=params,
auth=self.oauth,
timeout=timeout or self.default_timeout)
# check the response headers / status code.
if req.status_code != 200:
self.log.error('do_request: Status code %i received, content:', req.status_code)
for part in req.text.split('&'):
self.log.error(' %s', urllib_parse.unquote(part))
raise exceptions.FlickrError('do_request: Status code %s received' % req.status_code)
return req.content
def do_upload(self, filename, url, params=None, fileobj=None, timeout=None):
"""Performs a file upload to the given URL with the given parameters, signed with OAuth.
:param timeout: optional request timeout, in seconds.
:type timeout: float
@return: the response content
"""
# work-around to allow non-ascii characters in file name
# Flickr doesn't store the name but does use it as a default title
if 'title' not in params:
params['title'] = os.path.basename(filename).encode('utf8')
# work-around for Flickr expecting 'photo' to be excluded
# from the oauth signature:
# 1. create a dummy request without 'photo'
# 2. create real request and use auth headers from the dummy one
dummy_req = requests.Request('POST', url, data=params,
auth=self.oauth)
prepared = dummy_req.prepare()
headers = prepared.headers
self.log.debug('do_upload: prepared headers = %s', headers)
if not fileobj:
fileobj = open(filename, 'rb')
params['photo'] = ('dummy name', fileobj)
m = MultipartEncoder(fields=params)
auth = {'Authorization': headers.get('Authorization'),
'Content-Type': m.content_type}
self.log.debug('POST %s', auth)
req = self.session.post(url, data=m, headers=auth, timeout=timeout or self.default_timeout)
# check the response headers / status code.
if req.status_code != 200:
self.log.error('do_upload: Status code %i received, content:', req.status_code)
for part in req.text.split('&'):
self.log.error(' %s', urllib_parse.unquote(part))
raise exceptions.FlickrError('do_upload: Status code %s received' % req.status_code)
return req.content
@staticmethod
def parse_oauth_response(data):
"""Parses the data string as OAuth response, returning it as a dict.
The keys and values of the dictionary will be text strings (i.e. not binary strings).
"""
if isinstance(data, six.binary_type):
data = data.decode('utf-8')
qsl = urllib_parse.parse_qsl(data)
resp = {}
for key, value in qsl:
resp[key] = value
return resp
def _start_http_server(self):
"""Starts the HTTP server, if it wasn't started already."""
if self.auth_http_server is not None: return
self.auth_http_server = OAuthTokenHTTPServer()
def _stop_http_server(self):
"""Stops the HTTP server, if one was started."""
if self.auth_http_server is None: return
self.auth_http_server = None
def get_request_token(self, oauth_callback=None):
"""Requests a new request token.
Updates this OAuthFlickrInterface object to use the request token on the following
authentication calls.
@param oauth_callback: the URL the user is sent to after granting the token access.
If the callback is None, a local web server is started on a random port, and the
callback will be http://localhost:randomport/
If you do not have a web-app and you also do not want to start a local web server,
pass oauth_callback='oob' and have your application accept the verifier from the
user instead.
"""
self.log.debug('get_request_token(oauth_callback=%s):', oauth_callback)
if oauth_callback is None:
self._start_http_server()
oauth_callback = self.auth_http_server.oauth_callback_url
params = {
'oauth_callback': oauth_callback,
}
token_data = self.do_request(self.REQUEST_TOKEN_URL, params)
self.log.debug('Token data: %s', token_data)
# Parse the token data
request_token = self.parse_oauth_response(token_data)
self.log.debug('Request token: %s', request_token)
self.oauth.client.resource_owner_key = request_token['oauth_token']
self.oauth.client.resource_owner_secret = request_token['oauth_token_secret']
def auth_url(self, perms=u'read'):
"""Returns the URL the user should visit to authenticate the given oauth Token.
Use this method in webapps, where you can redirect the user to the returned URL.
After authorization by the user, the browser is redirected to the callback URL,
which will contain the OAuth verifier. Set the 'verifier' property on this object
in order to use it.
In stand-alone apps, use open_browser_for_authentication instead.
"""
if self.oauth.client.resource_owner_key is None:
raise FlickrError('No resource owner key set, you probably forgot to call get_request_token(...)')
if perms not in (u'read', u'write', u'delete'):
raise ValueError('Invalid parameter perms=%r' % perms)
self.requested_permissions = perms
return "%s?oauth_token=%s&perms=%s" % (self.AUTHORIZE_URL, self.oauth.client.resource_owner_key, perms)
def auth_via_browser(self, perms=u'read'):
"""Opens the webbrowser to authenticate the given request request_token, sets the verifier.
Use this method in stand-alone apps. In webapps, use auth_url(...) instead,
and redirect the user to the returned URL.
Updates the given request_token by setting the OAuth verifier.
"""
import webbrowser
# The HTTP server may have been started already, but we're not sure. Just start
# it if it needs to be started.
self._start_http_server()
url = self.auth_url(perms)
if not webbrowser.open_new_tab(url):
raise exceptions.FlickrError('Unable to open a browser to visit %s' % url)
self.verifier = self.auth_http_server.wait_for_oauth_verifier()
# We're now done with the HTTP server, so close it down again.
self._stop_http_server()
def auth_via_console(self, perms=u'read'):
"""Waits for the user to authenticate the app, sets the verifier.
Use this method in stand-alone apps. In webapps, use auth_url(...) instead,
and redirect the user to the returned URL.
Updates the given request_token by setting the OAuth verifier.
"""
# The HTTP server may have been started already, but we're not sure. Just start
# it if it needs to be started.
self._start_http_server()
auth_url = self.auth_url(perms=perms)
print("Go to the following link in your browser to authorize this application:")
print(auth_url)
print()
self.verifier = self.auth_http_server.wait_for_oauth_verifier()
# We're now done with the HTTP server, so close it down again.
self._stop_http_server()
def auth_for_test(self, perms=u'read'):
"""Doesn't wait for anything, sets the verifier to something silly.
Only use this in unit tests.
"""
auth_url = self.auth_url(perms=perms)
# Normally we would direct the user to this URL. Now we don't.
self.verifier = u'test'
def get_access_token(self):
"""Exchanges the request token for an access token.
Also stores the access token in 'self' for easy authentication of subsequent calls.
@return: Access token, a FlickrAccessToken object.
"""
if self.oauth.client.resource_owner_key is None:
raise FlickrError('No resource owner key set, you probably forgot to call get_request_token(...)')
if self.oauth.client.verifier is None:
raise FlickrError('No token verifier set, you probably forgot to set %s.verifier' % self)
if self.requested_permissions is None:
raise FlickrError('Requested permissions are unknown.')
content = self.do_request(self.ACCESS_TOKEN_URL)
# parse the response
access_token_resp = self.parse_oauth_response(content)
self.oauth_token = FlickrAccessToken(access_token_resp['oauth_token'],
access_token_resp['oauth_token_secret'],
self.requested_permissions,
access_token_resp.get('fullname', ''),
access_token_resp['username'],
access_token_resp['user_nsid'])
self.oauth.client.resource_owner_key = access_token_resp['oauth_token']
self.oauth.client.resource_owner_secret = access_token_resp['oauth_token_secret']
self.oauth.client.verifier = None
return self.oauth_token
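# Illustrative console authentication dance (a sketch, not part of the original module).
# It assumes `oauth_interface` is an already-constructed OAuthFlickrInterface with a
# valid API key and secret.
def _example_console_auth(oauth_interface, perms=u'read'):
    # 1. Obtain a request token; with no callback given, a local HTTP server is started.
    oauth_interface.get_request_token()
    # 2. Print the authorization URL and block until Flickr redirects back with a verifier.
    oauth_interface.auth_via_console(perms=perms)
    # 3. Exchange the verified request token for a long-lived access token.
    return oauth_interface.get_access_token()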
|
py | b40899e807ecce5a5b8a5e0167c293a36002e61d | import sys
from setuptools import setup
from setuptools import find_packages
version = '0.20.0.dev0'
# Please update tox.ini when modifying dependency version requirements
install_requires = [
'acme=={0}'.format(version),
'certbot=={0}'.format(version),
# 1.5 is the first version that supports oauth2client>=2.0
'google-api-python-client>=1.5',
'mock',
# for oauth2client.service_account.ServiceAccountCredentials
'oauth2client>=2.0',
# For pkg_resources. >=1.0 so pip resolves it to a version cryptography
# will tolerate; see #2599:
'setuptools>=1.0',
'zope.interface',
# already a dependency of google-api-python-client, but added for consistency
'httplib2'
]
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
]
setup(
name='certbot-dns-google',
version=version,
description="Google Cloud DNS Authenticator plugin for Certbot",
url='https://github.com/certbot/certbot',
author="Certbot Project",
author_email='[email protected]',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Plugins',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
},
entry_points={
'certbot.plugins': [
'dns-google = certbot_dns_google.dns_google:Authenticator',
],
},
test_suite='certbot_dns_google',
)
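# For reference (an assumption about typical usage, not part of this setup script): once
# installed, the plugin is selected roughly like
#   certbot --dns-google --dns-google-credentials /path/to/service-account.json -d example.com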
|
py | b40899e929222461de326d96f977ffce4d13863c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import argparse
import subprocess
import sys
from os import getcwd
from os.path import dirname, basename, splitext, join, exists
try:
import pandas as pd
except ImportError:
print ('Please install pandas. See http://pandas.pydata.org/pandas-docs/stable/')
sys.exit(1)
FREQ_SAMPLE = 0.001
# low pass filter
def lowpass_filter(data, cutoff_freq=2, order=1, dt=FREQ_SAMPLE):
tau = 1.0 / (2 * np.pi * cutoff_freq)
for _ in range(order):
for i in range(1,len(data)):
data[i] = (tau / (tau + dt) * data[i-1] + dt / (tau + dt) * data[i])
return data
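# Illustrative check of lowpass_filter (a sketch, not part of the original script):
# a high-frequency ripple on a constant signal should be strongly attenuated. The
# signal parameters are assumptions.
def _lowpass_filter_example():
    t = np.arange(0.0, 1.0, FREQ_SAMPLE)
    noisy = 1.0 + 0.1 * np.sin(2.0 * np.pi * 50.0 * t)  # 50 Hz ripple on a constant
    smoothed = lowpass_filter(noisy.copy(), cutoff_freq=2, order=1)
    return smoothed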
def rel2abs(path):
'''
Return absolute path from relative path input
'''
return join(getcwd(), path)
def rosbag_to_csv(path, topic_name):
name = splitext(basename(path))[0]
suffix = topic_name.replace('/', '-')
output_path = join(dirname(path), name + '_' + suffix + '.csv')
if exists(output_path):
return output_path
else:
command = "rostopic echo -b {0} -p /{1} | sed -e 's/,/ /g' > {2}".format(path, topic_name, output_path)
print (command)
subprocess.check_call(command, shell=True)
return output_path
def getActValue(df, speed_type):
tm = np.array(list(df['%time'])) * 1e-9
# Unit Conversion
if speed_type:
val = np.array(list(df['field'])) / 3.6
else:
val = np.array(list(df['field']))
# Calc differential
dval = (val[2:] - val[:-2]) / (tm[2:] - tm[:-2])
return tm[1:-1], val[1:-1], dval
def getCmdValueWithDelay(df, delay):
tm = np.array(list(df['%time'])) * 1e-9
val = np.array(list(df['field']))
return tm + delay, val
def getLinearInterpolate(_tm, _val, _index, ti):
tmp_t = _tm[_index]
tmp_nextt = _tm[_index + 1]
tmp_val = _val[_index]
tmp_nextval = _val[_index + 1]
val_i = tmp_val + (tmp_nextval - tmp_val) / (tmp_nextt - tmp_t) * (ti - tmp_t)
return val_i
def getFittingTimeConstantParam(cmd_data, act_data, \
delay, args, speed_type = False):
tm_cmd, cmd_delay = getCmdValueWithDelay(cmd_data, delay)
tm_act, act, dact = getActValue(act_data, speed_type)
_t_min = max(tm_cmd[0], tm_act[0])
_t_max = min(tm_cmd[-1], tm_act[-1])
tm_cmd = tm_cmd - _t_min
tm_act = tm_act - _t_min
MAX_CNT = int((_t_max - _t_min - args.cutoff_time) / FREQ_SAMPLE)
dact_samp = [None] * MAX_CNT
diff_actcmd_samp = [None] * MAX_CNT
ind_cmd = 0
ind_act = 0
for ind in range(MAX_CNT):
ti = ind * FREQ_SAMPLE + args.cutoff_time
while (tm_cmd[ind_cmd + 1] < ti):
ind_cmd += 1
cmd_delay_i = getLinearInterpolate(tm_cmd, cmd_delay, ind_cmd, ti)
while (tm_act[ind_act + 1] < ti):
ind_act += 1
act_i = getLinearInterpolate(tm_act, act, ind_act, ti)
dact_i = getLinearInterpolate(tm_act, dact, ind_act, ti)
dact_samp[ind] = dact_i
diff_actcmd_samp[ind] = act_i - cmd_delay_i
dact_samp = np.array(dact_samp)
diff_actcmd_samp = np.array(diff_actcmd_samp)
if args.cutoff_freq > 0:
dact_samp = lowpass_filter(dact_samp, cutoff_freq=args.cutoff_freq)
diff_actcmd_samp = lowpass_filter(diff_actcmd_samp, cutoff_freq=args.cutoff_freq)
dact_samp = dact_samp.reshape(1,-1)
diff_actcmd_samp = diff_actcmd_samp.reshape(1,-1)
tau = -np.dot(diff_actcmd_samp, np.linalg.pinv(dact_samp))[0,0]
error = np.linalg.norm(diff_actcmd_samp + tau * dact_samp) / dact_samp.shape[1]
return tau, error
def getFittingParam(cmd_data, act_data, args, speed_type = False):
delay_range = int((args.max_delay - args.min_delay) / args.delay_incr)
delays = [args.min_delay + i * args.delay_incr for i in range(delay_range + 1)]
error_min = 1.0e10
delay_opt = -1
tau_opt = -1
for delay in delays:
tau, error = getFittingTimeConstantParam(cmd_data, act_data, delay, args, speed_type=speed_type)
if tau > 0:
if error < error_min:
error_min = error
delay_opt = delay
tau_opt = tau
else:
break
return tau_opt, delay_opt, error_min
if __name__ == '__main__':
topics = [ 'vehicle_cmd/ctrl_cmd/steering_angle', 'vehicle_status/angle', \
'vehicle_cmd/ctrl_cmd/linear_velocity', 'vehicle_status/speed']
pd_data = [None] * len(topics)
    parser = argparse.ArgumentParser(description='Parameter fitting for Input Delay Model (First Order System with Dead Time) with rosbag file input')
parser.add_argument('--bag_file', '-b', required=True, type=str, help='rosbag file', metavar='file')
parser.add_argument('--cutoff_time', default=1.0, type=float, help='Cutoff time[sec], Parameter fitting will only consider data from t= cutoff_time to the end of the bag file (default is 1.0)')
parser.add_argument('--cutoff_freq', default=0.1, type=float, help='Cutoff freq for low-pass filter[Hz], negative value will disable low-pass filter (default is 0.1)')
parser.add_argument('--min_delay', default=0.1, type=float, help='Min value for searching delay loop (default is 0.1)')
parser.add_argument('--max_delay', default=1.0, type=float, help='Max value for searching delay loop (default is 1.0)')
parser.add_argument('--delay_incr', default=0.01, type=float, help='Step value for searching delay loop (default is 0.01)')
args = parser.parse_args()
for i, topic in enumerate(topics):
csv_log = rosbag_to_csv(rel2abs(args.bag_file), topic)
pd_data[i] = pd.read_csv(csv_log, sep=' ')
tau_opt, delay_opt, error = getFittingParam(pd_data[0], pd_data[1], args, speed_type=False)
print ('Steer angle: tau_opt = %2.4f, delay_opt = %2.4f, error = %2.4e' %(tau_opt, delay_opt, error))
tau_opt, delay_opt, error = getFittingParam(pd_data[2], pd_data[3], args, speed_type=True)
print ('Velocity : tau_opt = %2.4f, delay_opt = %2.4f, error = %2.4e' %(tau_opt, delay_opt, error))
|
py | b4089a38809b293a986651300a781f3739c9c44c | # Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal dispatcher for training loops."""
import os.path
import pprint
import time
from typing import Any, Callable, Dict, List, Optional
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
def create_if_not_exists(path):
try:
tf.io.gfile.makedirs(path)
except tf.errors.OpError:
logging.info('Skipping creation of directory [%s], already exists', path)
def _setup_outputs(root_output_dir, experiment_name):
"""Set up directories for experiment loops, write hyperparameters to disk."""
if not experiment_name:
raise ValueError('experiment_name must be specified.')
create_if_not_exists(root_output_dir)
checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)
create_if_not_exists(checkpoint_dir)
checkpoint_mngr = tff.simulation.FileCheckpointManager(checkpoint_dir)
results_dir = os.path.join(root_output_dir, 'results', experiment_name)
create_if_not_exists(results_dir)
csv_file = os.path.join(results_dir, 'experiment.metrics.csv')
metrics_mngr = tff.simulation.CSVMetricsManager(csv_file)
summary_logdir = os.path.join(root_output_dir, 'logdir', experiment_name)
tb_mngr = tff.simulation.TensorBoardManager(summary_dir=summary_logdir)
logging.info('Writing...')
logging.info(' checkpoints to: %s', checkpoint_dir)
logging.info(' metrics csv to: %s', metrics_mngr.metrics_filename)
logging.info(' summaries to: %s', summary_logdir)
return checkpoint_mngr, metrics_mngr, tb_mngr
def _write_metrics(metrics_mngr, tb_mngr, metrics, round_num):
"""Atomic metrics writer which inlines logic from MetricsHook class."""
if not isinstance(metrics, dict):
raise TypeError('metrics should be type `dict`.')
if not isinstance(round_num, int):
raise TypeError('round_num should be type `int`.')
logging.info('Metrics at round {:d}:\n{!s}'.format(round_num,
pprint.pformat(metrics)))
metrics_mngr.save_metrics(round_num, metrics)
tb_mngr.save_metrics(round_num, metrics)
def run(iterative_process: tff.templates.IterativeProcess,
client_datasets_fn: Callable[[int], List[tf.data.Dataset]],
validation_fn: Callable[[Any, int], Dict[str, float]],
total_rounds: int,
experiment_name: str,
test_fn: Optional[Callable[[Any], Dict[str, float]]] = None,
root_output_dir: Optional[str] = '/tmp/fed_opt',
rounds_per_eval: Optional[int] = 1,
rounds_per_checkpoint: Optional[int] = 50):
"""Runs federated training for a given `tff.templates.IterativeProcess`.
We assume that the iterative process has the following functional type
signatures:
* `initialize`: `( -> S@SERVER)` where `S` represents the server state.
* `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
represents the server state, `{B*}` represents the client datasets,
and `T` represents a python `Mapping` object.
Args:
iterative_process: A `tff.templates.IterativeProcess` instance to run.
client_datasets_fn: Function accepting an integer argument (the round
number) and returning a list of client datasets to use as federated data
for that round.
validation_fn: A callable accepting the current state of `iterative_process`
and the current round number, and returning a dict of evaluation metrics.
Used to compute validation metrics throughout the training process.
total_rounds: The number of federated training rounds to perform.
experiment_name: The name of the experiment being run. This will be appended
to the `root_output_dir` for purposes of writing outputs.
test_fn: An optional callable accepting the current state of
`iterative_process` and returning a dict of test set metrics. Used to
compute test metrics at the end of the training process.
root_output_dir: The name of the root output directory for writing
experiment outputs.
rounds_per_eval: How often to compute validation metrics.
rounds_per_checkpoint: How often to checkpoint the iterative process state.
If you expect the job to restart frequently, this should be small. If no
interruptions are expected, this can be made larger.
Returns:
The final `state` of the iterative process after training.
"""
if not isinstance(iterative_process, tff.templates.IterativeProcess):
raise TypeError(
'iterative_process must be a `tff.templates.IterativeProcess`.')
if not callable(client_datasets_fn):
raise TypeError('client_datasets_fn should be callable.')
if not callable(validation_fn):
raise TypeError('validation_fn should be callable.')
if test_fn is not None and not callable(test_fn):
raise TypeError('test_fn should be callable.')
logging.info('Starting iterative_process training loop...')
initial_state = iterative_process.initialize()
checkpoint_mngr, metrics_mngr, tb_mngr = _setup_outputs(
root_output_dir, experiment_name)
logging.info('Asking checkpoint manager to load checkpoint.')
state, round_num = checkpoint_mngr.load_latest_checkpoint(initial_state)
if state is None:
logging.info('Initializing experiment from scratch.')
state = initial_state
round_num = 0
else:
logging.info('Restarted from checkpoint round %d', round_num)
round_num += 1 # Increment to avoid overwriting current checkpoint
metrics_mngr.clear_metrics(round_num)
loop_start_time = time.time()
loop_start_round = round_num
while round_num < total_rounds:
data_prep_start_time = time.time()
federated_train_data = client_datasets_fn(round_num)
train_metrics = {
'prepare_datasets_secs': time.time() - data_prep_start_time
}
training_start_time = time.time()
state, round_metrics = iterative_process.next(state, federated_train_data)
train_metrics['training_secs'] = time.time() - training_start_time
train_metrics.update(round_metrics)
loop_time = time.time() - loop_start_time
loop_rounds = (round_num - loop_start_round + 1)
logging.info('Round {:2d}, {:.2f}s per round in average.'.format(
round_num, loop_time / loop_rounds))
if (round_num % rounds_per_checkpoint == 0 or
round_num == total_rounds - 1):
save_checkpoint_start_time = time.time()
checkpoint_mngr.save_checkpoint(state, round_num)
train_metrics['save_checkpoint_secs'] = (
time.time() - save_checkpoint_start_time)
metrics = {'train': train_metrics}
if round_num % rounds_per_eval == 0:
# Compute validation metrics
evaluate_start_time = time.time()
validation_metrics = validation_fn(state, round_num)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
_write_metrics(metrics_mngr, tb_mngr, metrics, round_num)
round_num += 1
# Final metrics evaluation once the training has completed
metrics = {}
# Validation metrics
evaluate_start_time = time.time()
validation_metrics = validation_fn(state, round_num)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
# Test set metrics
if test_fn:
test_start_time = time.time()
test_metrics = test_fn(state)
test_metrics['evaluate_secs'] = time.time() - test_start_time
metrics['test'] = test_metrics
_write_metrics(metrics_mngr, tb_mngr, metrics, total_rounds)
return state
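# Illustrative wiring of `run` (a sketch, not part of this module). The sampling policy,
# round count and experiment name below are assumptions standing in for a real
# federated training setup.
def example_training_loop(iterative_process, train_client_datasets, evaluation_fn):
  """Runs `run` with a fixed subset of client datasets per round."""

  def client_datasets_fn(round_num):
    del round_num  # Deterministic sampling keeps the sketch simple.
    return train_client_datasets[:10]

  def validation_fn(state, round_num):
    del round_num
    return evaluation_fn(state)

  return run(
      iterative_process=iterative_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=validation_fn,
      total_rounds=100,
      experiment_name='example_experiment')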
|
py | b4089a50b0f56bf0133c6f0d034b66198ab973ee | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Command-line interface to inspect and execute a graph in a SavedModel.
For detailed usages and examples, please refer to:
https://www.tensorflow.org/guide/saved_model#cli_to_inspect_and_execute_savedmodel
"""
import argparse
import ast
import os
import re
import sys
from absl import app # pylint: disable=unused-import
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import meta_graph as meta_graph_lib
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_spec
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.tools import saved_model_aot_compile
from tensorflow.python.tools import saved_model_utils
from tensorflow.python.tpu import tpu
from tensorflow.python.util.compat import collections_abc
_XLA_DEBUG_OPTIONS_URL = (
'https://github.com/tensorflow/tensorflow/blob/master/'
'tensorflow/compiler/xla/debug_options_flags.cc')
# Set of ops to denylist.
_OP_DENYLIST = set(['WriteFile', 'ReadFile', 'PrintV2'])
def _show_tag_sets(saved_model_dir):
"""Prints the tag-sets stored in SavedModel directory.
Prints all the tag-sets for MetaGraphs stored in SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
"""
tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir)
print('The given SavedModel contains the following tag-sets:')
for tag_set in sorted(tag_sets):
print('%r' % ', '.join(sorted(tag_set)))
def _show_signature_def_map_keys(saved_model_dir, tag_set):
"""Prints the keys for each SignatureDef in the SignatureDef map.
Prints the list of SignatureDef keys from the SignatureDef map specified by
the given tag-set and SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
tag_set: Group of tag(s) of the MetaGraphDef to get SignatureDef map from,
      in string format, separated by ','. If the tag-set contains multiple
      tags, all tags must be passed in.
"""
signature_def_map = get_signature_def_map(saved_model_dir, tag_set)
print('The given SavedModel MetaGraphDef contains SignatureDefs with the '
'following keys:')
for signature_def_key in sorted(signature_def_map.keys()):
print('SignatureDef key: \"%s\"' % signature_def_key)
def _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def,
signature_def_key):
"""Gets TensorInfo for all inputs of the SignatureDef.
Returns a dictionary that maps each input key to its TensorInfo for the given
signature_def_key in the meta_graph_def
Args:
meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
look up SignatureDef key.
signature_def_key: A SignatureDef key string.
Returns:
A dictionary that maps input tensor keys to TensorInfos.
Raises:
ValueError if `signature_def_key` is not found in the MetaGraphDef.
"""
if signature_def_key not in meta_graph_def.signature_def:
raise ValueError(
f'Could not find signature "{signature_def_key}". Please choose from: '
f'{", ".join(meta_graph_def.signature_def.keys())}')
return meta_graph_def.signature_def[signature_def_key].inputs
def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def,
signature_def_key):
"""Gets TensorInfos for all outputs of the SignatureDef.
Returns a dictionary that maps each output key to its TensorInfo for the given
signature_def_key in the meta_graph_def.
Args:
    meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
      look up signature_def_key.
signature_def_key: A SignatureDef key string.
Returns:
A dictionary that maps output tensor keys to TensorInfos.
"""
return meta_graph_def.signature_def[signature_def_key].outputs
def _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key, indent=0):
"""Prints input and output TensorInfos.
Prints the details of input and output TensorInfos for the SignatureDef mapped
by the given signature_def_key.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
    tag_set: Group of tag(s) of the MetaGraphDef, in string format, separated by
      ','. If the tag-set contains multiple tags, all tags must be passed in.
signature_def_key: A SignatureDef key string.
indent: How far (in increments of 2 spaces) to indent each line of output.
"""
meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,
tag_set)
inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
indent_str = ' ' * indent
def in_print(s):
print(indent_str + s)
in_print('The given SavedModel SignatureDef contains the following input(s):')
for input_key, input_tensor in sorted(inputs_tensor_info.items()):
in_print(' inputs[\'%s\'] tensor_info:' % input_key)
_print_tensor_info(input_tensor, indent+1)
in_print('The given SavedModel SignatureDef contains the following '
'output(s):')
for output_key, output_tensor in sorted(outputs_tensor_info.items()):
in_print(' outputs[\'%s\'] tensor_info:' % output_key)
_print_tensor_info(output_tensor, indent+1)
in_print('Method name is: %s' %
meta_graph_def.signature_def[signature_def_key].method_name)
def _show_defined_functions(saved_model_dir):
"""Prints the callable concrete and polymorphic functions of the Saved Model.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
"""
meta_graphs = saved_model_utils.read_saved_model(saved_model_dir).meta_graphs
has_object_graph_def = False
for meta_graph_def in meta_graphs:
has_object_graph_def |= meta_graph_def.HasField('object_graph_def')
if not has_object_graph_def:
return
with ops_lib.Graph().as_default():
trackable_object = load.load(saved_model_dir)
print('\nConcrete Functions:', end='')
children = list(
save._AugmentedGraphView(trackable_object) # pylint: disable=protected-access
.list_children(trackable_object))
children = sorted(children, key=lambda x: x.name)
for name, child in children:
concrete_functions = []
if isinstance(child, defun.ConcreteFunction):
concrete_functions.append(child)
elif isinstance(child, def_function.Function):
concrete_functions.extend(
child._list_all_concrete_functions_for_serialization()) # pylint: disable=protected-access
else:
continue
print('\n Function Name: \'%s\'' % name)
concrete_functions = sorted(concrete_functions, key=lambda x: x.name)
for index, concrete_function in enumerate(concrete_functions, 1):
args, kwargs = None, None
if concrete_function.structured_input_signature:
args, kwargs = concrete_function.structured_input_signature
elif concrete_function._arg_keywords: # pylint: disable=protected-access
# For pure ConcreteFunctions we might have nothing better than
# _arg_keywords.
args = concrete_function._arg_keywords # pylint: disable=protected-access
if args:
print(' Option #%d' % index)
print(' Callable with:')
_print_args(args, indent=4)
if kwargs:
_print_args(kwargs, 'Named Argument', indent=4)
def _print_args(arguments, argument_type='Argument', indent=0):
"""Formats and prints the argument of the concrete functions defined in the model.
Args:
arguments: Arguments to format print.
argument_type: Type of arguments.
indent: How far (in increments of 2 spaces) to indent each line of
output.
"""
indent_str = ' ' * indent
def _maybe_add_quotes(value):
is_quotes = '\'' * isinstance(value, str)
return is_quotes + str(value) + is_quotes
def in_print(s, end='\n'):
print(indent_str + s, end=end)
for index, element in enumerate(arguments, 1):
if indent == 4:
in_print('%s #%d' % (argument_type, index))
if isinstance(element, six.string_types):
in_print(' %s' % element)
elif isinstance(element, tensor_spec.TensorSpec):
print((indent + 1) * ' ' + '%s: %s' % (element.name, repr(element)))
elif (isinstance(element, collections_abc.Iterable) and
not isinstance(element, dict)):
in_print(' DType: %s' % type(element).__name__)
in_print(' Value: [', end='')
for value in element:
print('%s' % _maybe_add_quotes(value), end=', ')
print('\b\b]')
elif isinstance(element, dict):
in_print(' DType: %s' % type(element).__name__)
in_print(' Value: {', end='')
for (key, value) in element.items():
print('\'%s\': %s' % (str(key), _maybe_add_quotes(value)), end=', ')
print('\b\b}')
else:
in_print(' DType: %s' % type(element).__name__)
in_print(' Value: %s' % str(element))
def _print_tensor_info(tensor_info, indent=0):
"""Prints details of the given tensor_info.
Args:
tensor_info: TensorInfo object to be printed.
indent: How far (in increments of 2 spaces) to indent each line output
"""
indent_str = ' ' * indent
def in_print(s):
print(indent_str + s)
in_print(' dtype: ' +
{value: key
for (key, value) in types_pb2.DataType.items()}[tensor_info.dtype])
# Display shape as tuple.
if tensor_info.tensor_shape.unknown_rank:
shape = 'unknown_rank'
else:
dims = [str(dim.size) for dim in tensor_info.tensor_shape.dim]
shape = ', '.join(dims)
shape = '(' + shape + ')'
in_print(' shape: ' + shape)
in_print(' name: ' + tensor_info.name)
def _show_all(saved_model_dir):
"""Prints tag-set, SignatureDef and Inputs/Outputs information in SavedModel.
Prints all tag-set, SignatureDef and Inputs/Outputs information stored in
SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
"""
tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir)
for tag_set in sorted(tag_sets):
print("\nMetaGraphDef with tag-set: '%s' "
"contains the following SignatureDefs:" % ', '.join(tag_set))
tag_set = ','.join(tag_set)
signature_def_map = get_signature_def_map(saved_model_dir, tag_set)
for signature_def_key in sorted(signature_def_map.keys()):
print('\nsignature_def[\'' + signature_def_key + '\']:')
_show_inputs_outputs(saved_model_dir, tag_set, signature_def_key,
indent=1)
_show_defined_functions(saved_model_dir)
def get_meta_graph_def(saved_model_dir, tag_set):
"""DEPRECATED: Use saved_model_utils.get_meta_graph_def instead.
Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given
tag-set and SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect or execute.
tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,
      separated by ','. If the tag-set contains multiple tags, all tags must be
      passed in.
Raises:
RuntimeError: An error when the given tag-set does not exist in the
SavedModel.
Returns:
A MetaGraphDef corresponding to the tag-set.
"""
return saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)
def get_signature_def_map(saved_model_dir, tag_set):
"""Gets SignatureDef map from a MetaGraphDef in a SavedModel.
Returns the SignatureDef map for the given tag-set in the SavedModel
directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect or execute.
tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
      string format, separated by ','. If the tag-set contains multiple tags,
      all tags must be passed in.
Returns:
A SignatureDef map that maps from string keys to SignatureDefs.
"""
meta_graph = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)
return meta_graph.signature_def
def scan_meta_graph_def(meta_graph_def):
"""Scans meta_graph_def and reports if there are ops on denylist.
Print ops if they are on black list, or print success if no denylisted ops
found.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
"""
all_ops_set = set(
meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))
denylisted_ops = _OP_DENYLIST & all_ops_set
if denylisted_ops:
# TODO(yifeif): print more warnings
print(
'MetaGraph with tag set %s contains the following denylisted ops:' %
meta_graph_def.meta_info_def.tags, denylisted_ops)
else:
print('MetaGraph with tag set %s does not contain denylisted ops.' %
meta_graph_def.meta_info_def.tags)
def run_saved_model_with_feed_dict(saved_model_dir,
tag_set,
signature_def_key,
input_tensor_key_feed_dict,
outdir,
overwrite_flag,
worker=None,
init_tpu=False,
use_tfrt=False,
tf_debug=False):
"""Runs SavedModel and fetch all outputs.
Runs the input dictionary through the MetaGraphDef within a SavedModel
  specified by the given tag_set and SignatureDef. Also saves the outputs to
  files if outdir is not None.
Args:
saved_model_dir: Directory containing the SavedModel to execute.
tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
      string format, separated by ','. If the tag-set contains multiple tags,
      all tags must be passed in.
signature_def_key: A SignatureDef key string.
    input_tensor_key_feed_dict: A dictionary that maps input keys to numpy
      ndarrays.
outdir: A directory to save the outputs to. If the directory doesn't exist,
it will be created.
overwrite_flag: A boolean flag to allow overwrite output file if file with
the same name exists.
worker: If provided, the session will be run on the worker. Valid worker
specification is a bns or gRPC path.
init_tpu: If true, the TPU system will be initialized after the session
is created.
use_tfrt: If true, TFRT session will be used.
tf_debug: A boolean flag to use TensorFlow Debugger (TFDBG) to observe the
intermediate Tensor values and runtime GraphDefs while running the
SavedModel.
Raises:
ValueError: When any of the input tensor keys is not valid.
RuntimeError: An error when output file already exists and overwrite is not
enabled.
"""
# Get a list of output tensor names.
meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,
tag_set)
# Re-create feed_dict based on input tensor name instead of key as session.run
# uses tensor name.
inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
# Check if input tensor keys are valid.
for input_key_name in input_tensor_key_feed_dict.keys():
if input_key_name not in inputs_tensor_info:
raise ValueError(
'"%s" is not a valid input key. Please choose from %s, or use '
'--show option.' %
(input_key_name, '"' + '", "'.join(inputs_tensor_info.keys()) + '"'))
inputs_feed_dict = {
inputs_tensor_info[key].name: tensor
for key, tensor in input_tensor_key_feed_dict.items()
}
# Get outputs
outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
# Sort to preserve order because we need to go from value to key later.
output_tensor_keys_sorted = sorted(outputs_tensor_info.keys())
output_tensor_names_sorted = [
outputs_tensor_info[tensor_key].name
for tensor_key in output_tensor_keys_sorted
]
config = None
if use_tfrt:
logging.info('Using TFRT session.')
config = config_pb2.ConfigProto(
experimental=config_pb2.ConfigProto.Experimental(use_tfrt=True))
with session.Session(worker, graph=ops_lib.Graph(), config=config) as sess:
if init_tpu:
print('Initializing TPU System ...')
# This is needed for freshly started worker, or if the job
# restarts after a preemption.
sess.run(tpu.initialize_system())
loader.load(sess, tag_set.split(','), saved_model_dir)
if tf_debug:
sess = local_cli_wrapper.LocalCLIDebugWrapperSession(sess)
outputs = sess.run(output_tensor_names_sorted, feed_dict=inputs_feed_dict)
for i, output in enumerate(outputs):
output_tensor_key = output_tensor_keys_sorted[i]
print('Result for output key %s:\n%s' % (output_tensor_key, output))
# Only save if outdir is specified.
if outdir:
# Create directory if outdir does not exist
if not os.path.isdir(outdir):
os.makedirs(outdir)
output_full_path = os.path.join(outdir, output_tensor_key + '.npy')
# If overwrite not enabled and file already exist, error out
if not overwrite_flag and os.path.exists(output_full_path):
raise RuntimeError(
'Output file %s already exists. Add \"--overwrite\" to overwrite'
' the existing output files.' % output_full_path)
np.save(output_full_path, output)
print('Output %s is saved to %s' % (output_tensor_key,
output_full_path))
def preprocess_inputs_arg_string(inputs_str):
"""Parses input arg into dictionary that maps input to file/variable tuple.
  Parses an input string in the format of, for example,
  "input1=filename1[variable_name1];input2=filename2" into a
  dictionary that looks like
  {'input1': (filename1, variable_name1),
   'input2': (filename2, None)},
  which maps input keys to a tuple of file name and variable name (None if
  empty).
Args:
    inputs_str: A string that specifies where to load inputs. Inputs are
separated by semicolons.
* For each input key:
'<input_key>=<filename>' or
'<input_key>=<filename>[<variable_name>]'
* The optional 'variable_name' key will be set to None if not specified.
Returns:
A dictionary that maps input keys to a tuple of file name and variable name.
Raises:
RuntimeError: An error when the given input string is in a bad format.
"""
input_dict = {}
inputs_raw = inputs_str.split(';')
for input_raw in filter(bool, inputs_raw): # skip empty strings
    # Format: input=filename[variable_name]
match = re.match(r'([^=]+)=([^\[\]]+)\[([^\[\]]+)\]$', input_raw)
if match:
input_dict[match.group(1)] = match.group(2), match.group(3)
else:
      # Format: input=filename
match = re.match(r'([^=]+)=([^\[\]]+)$', input_raw)
if match:
input_dict[match.group(1)] = match.group(2), None
else:
raise RuntimeError(
'--inputs "%s" format is incorrect. Please follow'
'"<input_key>=<filename>", or'
'"<input_key>=<filename>[<variable_name>]"' % input_raw)
return input_dict
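# Illustrative parse result for the function above (comment only; the paths
# are made-up examples):
#   preprocess_inputs_arg_string('x=/tmp/a.npz[arr_0];y=/tmp/b.npy')
#   returns {'x': ('/tmp/a.npz', 'arr_0'), 'y': ('/tmp/b.npy', None)}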
def preprocess_input_exprs_arg_string(input_exprs_str, safe=True):
"""Parses input arg into dictionary that maps input key to python expression.
Parses input string in the format of 'input_key=<python expression>' into a
dictionary that maps each input_key to its python expression.
Args:
input_exprs_str: A string that specifies python expression for input keys.
Each input is separated by semicolon. For each input key:
'input_key=<python expression>'
safe: Whether to evaluate the python expression as literals or allow
arbitrary calls (e.g. numpy usage).
Returns:
A dictionary that maps input keys to their values.
Raises:
RuntimeError: An error when the given input string is in a bad format.
"""
input_dict = {}
for input_raw in filter(bool, input_exprs_str.split(';')):
    if '=' not in input_raw:
      raise RuntimeError('--input_exprs "%s" format is incorrect. Please follow '
                         '"<input_key>=<python expression>"' % input_exprs_str)
input_key, expr = input_raw.split('=', 1)
if safe:
try:
input_dict[input_key] = ast.literal_eval(expr)
      except Exception as exc:
        raise RuntimeError(
            f'Expression "{expr}" is not a valid python literal.') from exc
else:
# ast.literal_eval does not work with numpy expressions
input_dict[input_key] = eval(expr) # pylint: disable=eval-used
return input_dict
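# Illustrative parse result for the function above (comment only):
#   preprocess_input_exprs_arg_string("x=[1.0, 2.0];y=3", safe=True)
#   returns {'x': [1.0, 2.0], 'y': 3}
# With safe=False, expressions may also use numpy, e.g. "x=np.ones((2, 2))".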
def preprocess_input_examples_arg_string(input_examples_str):
"""Parses input into dict that maps input keys to lists of tf.Example.
Parses input string in the format of 'input_key1=[{feature_name:
feature_list}];input_key2=[{feature_name:feature_list}];' into a dictionary
that maps each input_key to its list of serialized tf.Example.
Args:
input_examples_str: A string that specifies a list of dictionaries of
feature_names and their feature_lists for each input.
      Each input is separated by a semicolon. For each input key:
      'input=[{feature_name1: feature_list1, feature_name2: feature_list2}]'.
      Items in a feature_list can be of type float, int, str or bytes.
Returns:
A dictionary that maps input keys to lists of serialized tf.Example.
Raises:
ValueError: An error when the given tf.Example is not a list.
"""
input_dict = preprocess_input_exprs_arg_string(input_examples_str)
for input_key, example_list in input_dict.items():
if not isinstance(example_list, list):
raise ValueError(
'tf.Example input must be a list of dictionaries, but "%s" is %s' %
(example_list, type(example_list)))
input_dict[input_key] = [
_create_example_string(example) for example in example_list
]
return input_dict
def _create_example_string(example_dict):
"""Create a serialized tf.example from feature dictionary."""
example = example_pb2.Example()
for feature_name, feature_list in example_dict.items():
if not isinstance(feature_list, list):
raise ValueError('feature value must be a list, but %s: "%s" is %s' %
(feature_name, feature_list, type(feature_list)))
if isinstance(feature_list[0], float):
example.features.feature[feature_name].float_list.value.extend(
feature_list)
elif isinstance(feature_list[0], str):
example.features.feature[feature_name].bytes_list.value.extend(
[f.encode('utf8') for f in feature_list])
elif isinstance(feature_list[0], bytes):
example.features.feature[feature_name].bytes_list.value.extend(
feature_list)
elif isinstance(feature_list[0], six.integer_types):
example.features.feature[feature_name].int64_list.value.extend(
feature_list)
else:
raise ValueError(
'Type %s for value %s is not supported for tf.train.Feature.' %
(type(feature_list[0]), feature_list[0]))
return example.SerializeToString()
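# Illustrative behavior of the function above (comment only): a feature
# dictionary such as {"id": [26], "weights": [0.5, 0.5]} becomes a
# tf.train.Example with an int64_list feature "id" and a float_list feature
# "weights", serialized via SerializeToString().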
def load_inputs_from_input_arg_string(inputs_str, input_exprs_str,
input_examples_str):
"""Parses input arg strings and create inputs feed_dict.
Parses '--inputs' string for inputs to be loaded from file, and parses
  '--input_exprs' string for inputs to be evaluated from python expressions,
  and parses '--input_examples' string for inputs to be created from
  tf.Example feature dictionary lists.
Args:
    inputs_str: A string that specifies where to load inputs. Each input is
separated by semicolon.
* For each input key:
'<input_key>=<filename>' or
'<input_key>=<filename>[<variable_name>]'
* The optional 'variable_name' key will be set to None if not specified.
* File specified by 'filename' will be loaded using numpy.load. Inputs
can be loaded from only .npy, .npz or pickle files.
* The "[variable_name]" key is optional depending on the input file type
        as described in more detail below.
When loading from a npy file, which always contains a numpy ndarray, the
content will be directly assigned to the specified input tensor. If a
variable_name is specified, it will be ignored and a warning will be
issued.
When loading from a npz zip file, user can specify which variable within
the zip file to load for the input tensor inside the square brackets. If
nothing is specified, this function will check that only one file is
included in the zip and load it for the specified input tensor.
When loading from a pickle file, if no variable_name is specified in the
square brackets, whatever that is inside the pickle file will be passed
to the specified input tensor, else SavedModel CLI will assume a
dictionary is stored in the pickle file and the value corresponding to
the variable_name will be used.
input_exprs_str: A string that specifies python expressions for inputs.
* In the format of: '<input_key>=<python expression>'.
* numpy module is available as np.
input_examples_str: A string that specifies tf.Example with dictionary.
* In the format of: '<input_key>=<[{feature:value list}]>'
Returns:
A dictionary that maps input tensor keys to numpy ndarrays.
Raises:
RuntimeError: An error when a key is specified, but the input file contains
multiple numpy ndarrays, none of which matches the given key.
RuntimeError: An error when no key is specified, but the input file contains
      more than one numpy ndarray.
"""
tensor_key_feed_dict = {}
inputs = preprocess_inputs_arg_string(inputs_str)
input_exprs = preprocess_input_exprs_arg_string(input_exprs_str, safe=False)
input_examples = preprocess_input_examples_arg_string(input_examples_str)
for input_tensor_key, (filename, variable_name) in inputs.items():
data = np.load(file_io.FileIO(filename, mode='rb'), allow_pickle=True) # pylint: disable=unexpected-keyword-arg
# When a variable_name key is specified for the input file
if variable_name:
# if file contains a single ndarray, ignore the input name
if isinstance(data, np.ndarray):
logging.warn(
'Input file %s contains a single ndarray. Name key \"%s\" ignored.'
% (filename, variable_name))
tensor_key_feed_dict[input_tensor_key] = data
else:
if variable_name in data:
tensor_key_feed_dict[input_tensor_key] = data[variable_name]
else:
raise RuntimeError(
'Input file %s does not contain variable with name \"%s\".' %
(filename, variable_name))
# When no key is specified for the input file.
else:
# Check if npz file only contains a single numpy ndarray.
if isinstance(data, np.lib.npyio.NpzFile):
variable_name_list = data.files
if len(variable_name_list) != 1:
raise RuntimeError(
              'Input file %s contains more than one ndarray. Please specify '
              'the name of the ndarray to use.' % filename)
tensor_key_feed_dict[input_tensor_key] = data[variable_name_list[0]]
else:
tensor_key_feed_dict[input_tensor_key] = data
# When input is a python expression:
for input_tensor_key, py_expr_evaluated in input_exprs.items():
if input_tensor_key in tensor_key_feed_dict:
logging.warn(
'input_key %s has been specified with both --inputs and --input_exprs'
' options. Value in --input_exprs will be used.' % input_tensor_key)
tensor_key_feed_dict[input_tensor_key] = py_expr_evaluated
# When input is a tf.Example:
for input_tensor_key, example in input_examples.items():
if input_tensor_key in tensor_key_feed_dict:
logging.warn(
'input_key %s has been specified in multiple options. Value in '
'--input_examples will be used.' % input_tensor_key)
tensor_key_feed_dict[input_tensor_key] = example
return tensor_key_feed_dict
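# Illustrative precedence of the function above (comment only): when the same
# input key appears in more than one flag, a value from --input_exprs
# overrides one from --inputs, and a value from --input_examples overrides
# both; a warning is logged for each override.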
def show(args):
"""Function triggered by show command.
Args:
args: A namespace parsed from command line.
"""
  # If --all is specified, display all information.
if args.all:
_show_all(args.dir)
else:
    # If no tag is specified, display all tag-sets; if no signature_def key is
    # specified, display all SignatureDef keys; otherwise show the input/output
    # tensor information corresponding to the given SignatureDef key.
if args.tag_set is None:
_show_tag_sets(args.dir)
else:
if args.signature_def is None:
_show_signature_def_map_keys(args.dir, args.tag_set)
else:
_show_inputs_outputs(args.dir, args.tag_set, args.signature_def)
def run(args):
"""Function triggered by run command.
Args:
args: A namespace parsed from command line.
Raises:
AttributeError: An error when neither --inputs nor --input_exprs is passed
to run command.
"""
if not args.inputs and not args.input_exprs and not args.input_examples:
raise AttributeError(
        'At least one of --inputs, --input_exprs or --input_examples must be '
        'provided')
tensor_key_feed_dict = load_inputs_from_input_arg_string(
args.inputs, args.input_exprs, args.input_examples)
run_saved_model_with_feed_dict(
args.dir,
args.tag_set,
args.signature_def,
tensor_key_feed_dict,
args.outdir,
args.overwrite,
worker=args.worker,
init_tpu=args.init_tpu,
use_tfrt=args.use_tfrt,
tf_debug=args.tf_debug)
def scan(args):
"""Function triggered by scan command.
Args:
args: A namespace parsed from command line.
"""
if args.tag_set:
scan_meta_graph_def(
saved_model_utils.get_meta_graph_def(args.dir, args.tag_set))
else:
saved_model = saved_model_utils.read_saved_model(args.dir)
for meta_graph_def in saved_model.meta_graphs:
scan_meta_graph_def(meta_graph_def)
def convert_with_tensorrt(args):
"""Function triggered by 'convert tensorrt' command.
Args:
args: A namespace parsed from command line.
"""
# Import here instead of at top, because this will crash if TensorRT is
# not installed
from tensorflow.python.compiler.tensorrt import trt_convert as trt # pylint: disable=g-import-not-at-top
if not args.convert_tf1_model:
params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
max_workspace_size_bytes=args.max_workspace_size_bytes,
precision_mode=args.precision_mode,
minimum_segment_size=args.minimum_segment_size)
converter = trt.TrtGraphConverterV2(
input_saved_model_dir=args.dir,
input_saved_model_tags=args.tag_set.split(','),
**params._asdict())
try:
converter.convert()
except Exception as e:
raise RuntimeError(
'{}. Try passing "--convert_tf1_model=True".'.format(e))
converter.save(output_saved_model_dir=args.output_dir)
else:
trt.create_inference_graph(
None,
None,
max_batch_size=1,
max_workspace_size_bytes=args.max_workspace_size_bytes,
precision_mode=args.precision_mode,
minimum_segment_size=args.minimum_segment_size,
is_dynamic_op=True,
input_saved_model_dir=args.dir,
input_saved_model_tags=args.tag_set.split(','),
output_saved_model_dir=args.output_dir)
def freeze_model(args):
"""Function triggered by freeze_model command.
Args:
args: A namespace parsed from command line.
"""
checkpoint_path = (
args.checkpoint_path
or os.path.join(args.dir, 'variables/variables'))
if not args.variables_to_feed:
variables_to_feed = []
elif args.variables_to_feed.lower() == 'all':
variables_to_feed = None # We will identify them after.
else:
variables_to_feed = args.variables_to_feed.split(',')
saved_model_aot_compile.freeze_model(
checkpoint_path=checkpoint_path,
meta_graph_def=saved_model_utils.get_meta_graph_def(
args.dir, args.tag_set),
signature_def_key=args.signature_def_key,
variables_to_feed=variables_to_feed,
output_prefix=args.output_prefix)
def aot_compile_cpu(args):
"""Function triggered by aot_compile_cpu command.
Args:
args: A namespace parsed from command line.
"""
checkpoint_path = (
args.checkpoint_path
or os.path.join(args.dir, 'variables/variables'))
if not args.variables_to_feed:
variables_to_feed = []
elif args.variables_to_feed.lower() == 'all':
variables_to_feed = None # We will identify them after.
else:
variables_to_feed = args.variables_to_feed.split(',')
saved_model_aot_compile.aot_compile_cpu_meta_graph_def(
checkpoint_path=checkpoint_path,
meta_graph_def=saved_model_utils.get_meta_graph_def(
args.dir, args.tag_set),
signature_def_key=args.signature_def_key,
variables_to_feed=variables_to_feed,
output_prefix=args.output_prefix,
target_triple=args.target_triple,
target_cpu=args.target_cpu,
cpp_class=args.cpp_class,
multithreading=args.multithreading.lower() not in ('f', 'false', '0'))
def add_show_subparser(subparsers):
"""Add parser for `show`."""
show_msg = (
'Usage examples:\n'
'To show all tag-sets in a SavedModel:\n'
'$saved_model_cli show --dir /tmp/saved_model\n\n'
'To show all available SignatureDef keys in a '
'MetaGraphDef specified by its tag-set:\n'
'$saved_model_cli show --dir /tmp/saved_model --tag_set serve\n\n'
'For a MetaGraphDef with multiple tags in the tag-set, all tags must be '
      'passed in, separated by \',\':\n'
'$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu\n\n'
      'To show all input and output TensorInfos for a specific'
      ' SignatureDef specified by the SignatureDef key in a'
      ' MetaGraph:\n'
'$saved_model_cli show --dir /tmp/saved_model --tag_set serve'
' --signature_def serving_default\n\n'
'To show all available information in the SavedModel:\n'
'$saved_model_cli show --dir /tmp/saved_model --all')
parser_show = subparsers.add_parser(
'show',
description=show_msg,
formatter_class=argparse.RawTextHelpFormatter)
parser_show.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to inspect')
parser_show.add_argument(
'--all',
action='store_true',
help='if set, will output all information in given SavedModel')
parser_show.add_argument(
'--tag_set',
type=str,
default=None,
help='tag-set of graph in SavedModel to show, separated by \',\'')
parser_show.add_argument(
'--signature_def',
type=str,
default=None,
metavar='SIGNATURE_DEF_KEY',
help='key of SignatureDef to display input(s) and output(s) for')
parser_show.set_defaults(func=show)
def add_run_subparser(subparsers):
"""Add parser for `run`."""
run_msg = ('Usage example:\n'
'To run input tensors from files through a MetaGraphDef and save'
' the output tensors to files:\n'
             '$saved_model_cli run --dir /tmp/saved_model --tag_set serve \\\n'
' --signature_def serving_default \\\n'
' --inputs input1_key=/tmp/124.npz[x],input2_key=/tmp/123.npy '
'\\\n'
' --input_exprs \'input3_key=np.ones(2)\' \\\n'
' --input_examples '
'\'input4_key=[{"id":[26],"weights":[0.5, 0.5]}]\' \\\n'
' --outdir=/out\n\n'
'For more information about input file format, please see:\n'
'https://www.tensorflow.org/guide/saved_model_cli\n')
parser_run = subparsers.add_parser(
'run', description=run_msg, formatter_class=argparse.RawTextHelpFormatter)
parser_run.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to execute')
parser_run.add_argument(
'--tag_set',
type=str,
required=True,
help='tag-set of graph in SavedModel to load, separated by \',\'')
parser_run.add_argument(
'--signature_def',
type=str,
required=True,
metavar='SIGNATURE_DEF_KEY',
help='key of SignatureDef to run')
  msg = ('Loading inputs from files, in the format of \'<input_key>=<filename>\''
         ' or \'<input_key>=<filename>[<variable_name>]\', separated by \';\'.'
         ' The file format can only be .npy, .npz or pickle.')
parser_run.add_argument('--inputs', type=str, default='', help=msg)
msg = ('Specifying inputs by python expressions, in the format of'
' "<input_key>=\'<python expression>\'", separated by \';\'. '
'numpy module is available as \'np\'. Please note that the expression '
'will be evaluated as-is, and is susceptible to code injection. '
'When this is set, the value will override duplicate input keys from '
'--inputs option.')
parser_run.add_argument('--input_exprs', type=str, default='', help=msg)
msg = (
'Specifying tf.Example inputs as list of dictionaries. For example: '
'<input_key>=[{feature0:value_list,feature1:value_list}]. Use ";" to '
'separate input keys. Will override duplicate input keys from --inputs '
'and --input_exprs option.')
parser_run.add_argument('--input_examples', type=str, default='', help=msg)
parser_run.add_argument(
'--outdir',
type=str,
default=None,
help='if specified, output tensor(s) will be saved to given directory')
parser_run.add_argument(
'--overwrite',
action='store_true',
help='if set, output file will be overwritten if it already exists.')
parser_run.add_argument(
'--tf_debug',
action='store_true',
help='if set, will use TensorFlow Debugger (tfdbg) to watch the '
'intermediate Tensors and runtime GraphDefs while running the '
'SavedModel.')
parser_run.add_argument(
'--worker',
type=str,
default=None,
help='if specified, a Session will be run on the worker. '
'Valid worker specification is a bns or gRPC path.')
parser_run.add_argument(
'--init_tpu',
action='store_true',
default=None,
help='if specified, tpu.initialize_system will be called on the Session. '
'This option should be only used if the worker is a TPU job.')
parser_run.add_argument(
'--use_tfrt',
action='store_true',
default=None,
help='if specified, TFRT session will be used, instead of TF1 session.')
parser_run.set_defaults(func=run)
def add_scan_subparser(subparsers):
"""Add parser for `scan`."""
scan_msg = ('Usage example:\n'
'To scan for denylisted ops in SavedModel:\n'
'$saved_model_cli scan --dir /tmp/saved_model\n'
'To scan a specific MetaGraph, pass in --tag_set\n')
parser_scan = subparsers.add_parser(
'scan',
description=scan_msg,
formatter_class=argparse.RawTextHelpFormatter)
parser_scan.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to execute')
parser_scan.add_argument(
'--tag_set',
type=str,
help='tag-set of graph in SavedModel to scan, separated by \',\'')
parser_scan.set_defaults(func=scan)
def add_convert_subparser(subparsers):
"""Add parser for `convert`."""
convert_msg = ('Usage example:\n'
                 'To convert the SavedModel to one that has TensorRT ops:\n'
'$saved_model_cli convert \\\n'
' --dir /tmp/saved_model \\\n'
' --tag_set serve \\\n'
' --output_dir /tmp/saved_model_trt \\\n'
' tensorrt \n')
parser_convert = subparsers.add_parser(
'convert',
description=convert_msg,
formatter_class=argparse.RawTextHelpFormatter)
parser_convert.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to convert')
parser_convert.add_argument(
'--output_dir',
type=str,
required=True,
help='output directory for the converted SavedModel')
parser_convert.add_argument(
'--tag_set',
type=str,
required=True,
help='tag-set of graph in SavedModel to convert, separated by \',\'')
convert_subparsers = parser_convert.add_subparsers(
title='conversion methods',
description='valid conversion methods',
help='the conversion to run with the SavedModel')
parser_convert_with_tensorrt = convert_subparsers.add_parser(
'tensorrt',
description='Convert the SavedModel with Tensorflow-TensorRT integration',
formatter_class=argparse.RawTextHelpFormatter)
parser_convert_with_tensorrt.add_argument(
'--max_workspace_size_bytes',
type=int,
default=2 << 20,
help=('the maximum GPU temporary memory which the TRT engine can use at '
'execution time'))
parser_convert_with_tensorrt.add_argument(
'--precision_mode',
type=str,
default='FP32',
help='one of FP32, FP16 and INT8')
parser_convert_with_tensorrt.add_argument(
'--minimum_segment_size',
type=int,
default=3,
      help=('the minimum number of nodes required for a subgraph to be replaced '
            'in a TensorRT node'))
parser_convert_with_tensorrt.add_argument(
'--convert_tf1_model',
type=bool,
default=False,
help='support TRT conversion for TF1 models')
parser_convert_with_tensorrt.set_defaults(func=convert_with_tensorrt)
def _parse_common_freeze_and_aot(parser_compile):
"""Parse arguments shared by freeze model and aot_compile."""
parser_compile.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to convert')
parser_compile.add_argument(
'--output_prefix',
type=str,
required=True,
help=('output directory + filename prefix for the resulting header(s) '
'and object file(s)'))
parser_compile.add_argument(
'--tag_set',
type=str,
required=True,
help='tag-set of graph in SavedModel to convert, separated by \',\'')
parser_compile.add_argument(
'--signature_def_key',
type=str,
default=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
help=('signature_def key to use. '
'default: DEFAULT_SERVING_SIGNATURE_DEF_KEY'))
parser_compile.add_argument(
'--checkpoint_path',
type=str,
default=None,
help='Custom checkpoint to use (default: use the SavedModel variables)')
parser_compile.add_argument(
'--variables_to_feed',
type=str,
default='',
help=('The names of variables that will be fed into the network. '
'Options are: empty (default; all variables are frozen, none may '
'be fed), \'all\' (all variables may be fed), or a '
'comma-delimited list of names of variables that may be fed. In '
'the last case, the non-fed variables will be frozen in the graph.'
'**NOTE** Any variables passed to `variables_to_feed` *must be set '
'by the user*. These variables will NOT be frozen and their '
'values will be uninitialized in the compiled object '
'(this applies to all input arguments from the signature as '
'well).'))
def add_freeze_model_subparser(subparsers):
"""Add parser for `freeze_model`."""
compile_msg = '\n'.join(
['Usage example:',
'To freeze a SavedModel in preparation for tfcompile:',
'$saved_model_cli freeze_model \\',
' --dir /tmp/saved_model \\',
' --tag_set serve \\',
' --output_prefix /tmp/saved_model_xla_aot',
])
parser_compile = subparsers.add_parser(
'freeze_model',
description=compile_msg,
formatter_class=argparse.RawTextHelpFormatter)
_parse_common_freeze_and_aot(parser_compile)
parser_compile.set_defaults(func=freeze_model)
def add_aot_compile_cpu_subparser(subparsers):
"""Add parser for `aot_compile_cpu`."""
compile_msg = '\n'.join(
['Usage example:',
'To compile a SavedModel signature via (CPU) XLA AOT:',
'$saved_model_cli aot_compile_cpu \\',
' --dir /tmp/saved_model \\',
' --tag_set serve \\',
' --output_dir /tmp/saved_model_xla_aot',
'', '',
'Note: Additional XLA compilation options are available by setting the ',
'XLA_FLAGS environment variable. See the XLA debug options flags for ',
'all the options: ',
' {}'.format(_XLA_DEBUG_OPTIONS_URL),
'',
'For example, to disable XLA fast math when compiling:',
'',
'XLA_FLAGS="--xla_cpu_enable_fast_math=false" $saved_model_cli '
'aot_compile_cpu ...',
'',
'Some possibly useful flags:',
' --xla_cpu_enable_fast_math=false',
' --xla_force_host_platform_device_count=<num threads>',
' (useful in conjunction with disabling multi threading)'
])
parser_compile = subparsers.add_parser(
'aot_compile_cpu',
description=compile_msg,
formatter_class=argparse.RawTextHelpFormatter)
_parse_common_freeze_and_aot(parser_compile)
parser_compile.add_argument(
'--target_triple',
type=str,
default='x86_64-pc-linux',
help=('Target triple for LLVM during AOT compilation. Examples: '
'x86_64-none-darwin, x86_64-apple-ios, arm64-none-ios, '
'armv7-none-android. More examples are available in tfcompile.bzl '
'in the tensorflow codebase.'))
parser_compile.add_argument(
'--target_cpu',
type=str,
default='',
help=('Target cpu name for LLVM during AOT compilation. Examples: '
'x86_64, skylake, haswell, westmere, <empty> (unknown). For '
'a complete list of options, run (for x86 targets): '
'`llc -march=x86 -mcpu=help`'))
parser_compile.add_argument(
'--cpp_class',
type=str,
required=True,
help=('The name of the generated C++ class, wrapping the generated '
'function. The syntax of this flag is '
'[[<optional_namespace>::],...]<class_name>. This mirrors the '
'C++ syntax for referring to a class, where multiple namespaces '
'may precede the class name, separated by double-colons. '
'The class will be generated in the given namespace(s), or if no '
'namespaces are given, within the global namespace.'))
parser_compile.add_argument(
'--multithreading',
type=str,
default='False',
help=('Enable multithreading in the compiled computation. '
'Note that if using this option, the resulting object files '
'may have external dependencies on multithreading libraries '
'like nsync.'))
parser_compile.set_defaults(func=aot_compile_cpu)
def create_parser():
"""Creates a parser that parse the command line arguments.
Returns:
A namespace parsed from command line arguments.
"""
parser = argparse.ArgumentParser(
description='saved_model_cli: Command-line interface for SavedModel')
parser.add_argument('-v', '--version', action='version', version='0.1.0')
subparsers = parser.add_subparsers(
title='commands', description='valid commands', help='additional help')
# show command
add_show_subparser(subparsers)
# run command
add_run_subparser(subparsers)
# scan command
add_scan_subparser(subparsers)
# tensorrt convert command
add_convert_subparser(subparsers)
# aot_compile_cpu command
add_aot_compile_cpu_subparser(subparsers)
# freeze_model command
add_freeze_model_subparser(subparsers)
return parser
def main():
logging.set_verbosity(logging.INFO)
parser = create_parser()
args = parser.parse_args()
if not hasattr(args, 'func'):
parser.error('too few arguments')
args.func(args)
if __name__ == '__main__':
sys.exit(main())
|
py | b4089b19873c6f6de05d3c3ebfed5bb3eeaa82c7 | # Generated by Django 3.1.5 on 2021-03-28 18:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('challenge', '0004_match_log_file_token'),
]
operations = [
migrations.AlterField(
model_name='match',
name='status',
field=models.CharField(choices=[('failed', 'Failed'), ('successful', 'Successful'), ('running', 'Running'), ('freeze', 'Freeze'), ('pending', 'pending')], default='pending', max_length=50),
),
]
|
py | b4089c0d41def904ae8df82832262b5440321c84 | #!/usr/bin/env python
# Copyright 2020 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch as th
import torch.nn as nn
from typing import Optional, Tuple, Dict
from aps.asr.xfmr.decoder import prep_sub_mask
from aps.asr.xfmr.impl import get_xfmr_encoder
from aps.asr.xfmr.pose import get_xfmr_pose
from aps.asr.base.attention import padding_mask
from aps.asr.base.layer import OneHotEmbedding, PyTorchRNN
class DecoderBase(nn.Module):
"""
Base class for RNNT decoders
"""
def __init__(self,
vocab_size: int,
embed_size: int = 512,
enc_dim: int = 512,
dec_dim: int = 512,
jot_dim: int = 512,
onehot_embed: bool = False) -> None:
super(DecoderBase, self).__init__()
if not onehot_embed:
self.vocab_embed = nn.Embedding(vocab_size, embed_size)
else:
self.vocab_embed = OneHotEmbedding(vocab_size)
self.enc_proj = nn.Linear(enc_dim, jot_dim, bias=False)
self.dec_proj = nn.Linear(dec_dim, jot_dim)
self.vocab_size = vocab_size
self.output = nn.Linear(jot_dim, vocab_size, bias=False)
def pred(self, enc_out: th.Tensor, dec_out: th.Tensor) -> th.Tensor:
"""
Joint network prediction
Args:
enc_out: N x Ti x D or N x D
dec_out: N x To+1 x D or N x D
Return:
output: N x Ti x To+1 x V or N x 1 x V
"""
# N x Ti x J or N x J
enc_out = self.enc_proj(enc_out)
# N x To+1 x J or N x J
dec_out = self.dec_proj(dec_out)
# N x Ti x To+1 x J or N x 1 x J
add_out = th.tanh(enc_out.unsqueeze(-2) + dec_out.unsqueeze(1))
# N x Ti x To+1 x V or N x 1 x V
return self.output(add_out)
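    # Shape note for ``pred`` above (illustrative): with the projected
    # enc_out of shape N x Ti x J and dec_out of shape N x To+1 x J,
    # enc_out.unsqueeze(-2) is N x Ti x 1 x J and dec_out.unsqueeze(1) is
    # N x 1 x To+1 x J, so their sum broadcasts to N x Ti x To+1 x J before
    # the final vocabulary projection.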
class PyTorchRNNDecoder(DecoderBase):
"""
Wrapper for pytorch's RNN Decoder
"""
def __init__(self,
vocab_size: int,
embed_size: int = 512,
enc_dim: int = 512,
jot_dim: int = 512,
dec_rnn: str = "lstm",
dec_layers: int = 3,
dec_hidden: int = 512,
dec_dropout: float = 0.0,
onehot_embed: bool = False) -> None:
super(PyTorchRNNDecoder, self).__init__(vocab_size,
embed_size=embed_size,
enc_dim=enc_dim,
dec_dim=dec_hidden,
jot_dim=jot_dim,
onehot_embed=onehot_embed)
# uni-dir RNNs
self.decoder = PyTorchRNN(dec_rnn,
embed_size,
dec_hidden,
dec_layers,
dropout=dec_dropout,
bidirectional=False)
def forward(self, enc_out: th.Tensor, tgt_pad: th.Tensor) -> th.Tensor:
"""
Args:
enc_out (Tensor): N x Ti x D
tgt_pad (Tensor): N x To+1 (padding blank at time = 0)
Return:
output: N x Ti x To+1 x V
"""
# N x To+1 x E
tgt_pad = self.vocab_embed(tgt_pad)
# N x To+1 x D
dec_out, _ = self.decoder(tgt_pad)
# N x Ti x To+1 x V
return self.pred(enc_out, dec_out)
def step(self, pred_prev, hidden=None):
"""
Make one step for decoder
"""
pred_prev_emb = self.vocab_embed(pred_prev) # 1 x 1 x E
dec_out, hidden = self.decoder(pred_prev_emb, hidden)
return dec_out[:, -1], hidden
class TorchTransformerDecoder(DecoderBase):
"""
Vanilla Transformer encoder as transducer decoder
"""
def __init__(self,
vocab_size: int,
enc_dim: Optional[int] = None,
jot_dim: int = 512,
att_dim: int = 512,
pose_kwargs: Dict = {},
arch_kwargs: Dict = {},
num_layers: int = 6,
onehot_embed: bool = False) -> None:
super(TorchTransformerDecoder,
self).__init__(vocab_size,
enc_dim=enc_dim if enc_dim else att_dim,
dec_dim=att_dim,
jot_dim=jot_dim,
onehot_embed=onehot_embed)
self.abs_pos_enc = get_xfmr_pose("abs", att_dim, **pose_kwargs)
self.decoder = get_xfmr_encoder("xfmr", "abs", num_layers, arch_kwargs)
def forward(self, enc_out: th.Tensor, tgt_pad: th.Tensor,
tgt_len: Optional[th.Tensor]) -> th.Tensor:
"""
Args:
enc_out (Tensor): N x Ti x D
            tgt_pad (Tensor): N x To+1 (padding blank at time = 0)
tgt_len (Tensor): N or None
Return:
output: N x Ti x To+1 x V
"""
# N x Ti
pad_mask = None if tgt_len is None else (padding_mask(tgt_len) == 1)
        # generate target masks (-inf/0)
tgt_mask = prep_sub_mask(tgt_pad.shape[-1], device=tgt_pad.device)
# To+1 x N x E
tgt_pad = self.abs_pos_enc(self.vocab_embed(tgt_pad))
# To+1 x N x D
dec_out = self.decoder(tgt_pad,
src_mask=tgt_mask,
src_key_padding_mask=pad_mask)
return self.pred(enc_out, dec_out.transpose(0, 1))
def step(self,
pred_prev: th.Tensor,
hidden: Optional[th.Tensor] = None) -> Tuple[th.Tensor, th.Tensor]:
"""
Make one step for decoder
Args:
pred_prev: 1 x 1
hidden: None or T x 1 x E
Return:
dec_out: 1 x D
"""
t = 0 if hidden is None else hidden.shape[0]
# 1 x 1 x E
pred_prev_emb = self.abs_pos_enc(self.vocab_embed(pred_prev), t=t)
hidden = pred_prev_emb if hidden is None else th.cat(
[hidden, pred_prev_emb], dim=0)
tgt_mask = prep_sub_mask(t + 1, device=pred_prev.device)
dec_out = self.decoder(hidden, mask=tgt_mask)
return dec_out[-1], hidden
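# Illustrative sketch (not part of the original module): a minimal greedy
# RNN-T style decode over a single utterance using the step()/pred()
# interface of the decoders above. The blank id (assumed to be 0 here) and
# the per-frame symbol cap are illustrative assumptions, not guarantees of
# this codebase.
def _example_greedy_decode(decoder: DecoderBase,
                           enc_out: th.Tensor,
                           blank: int = 0,
                           max_symbols: int = 3) -> list:
    """Greedy decode for enc_out of shape 1 x Ti x D (sketch only)."""
    hypo = []
    hidden = None
    # feed the blank token first to get an initial prediction-network output
    pred_prev = th.tensor([[blank]], dtype=th.int64, device=enc_out.device)
    dec_out, hidden = decoder.step(pred_prev, hidden)
    for t in range(enc_out.shape[1]):
        emitted = 0
        while emitted < max_symbols:
            # joint network over a single encoder frame: 1 x 1 x V -> V
            logits = decoder.pred(enc_out[:, t], dec_out)[0, 0]
            best = int(logits.argmax(-1))
            if best == blank:
                break
            hypo.append(best)
            emitted += 1
            pred_prev = th.tensor([[best]], dtype=th.int64,
                                  device=enc_out.device)
            dec_out, hidden = decoder.step(pred_prev, hidden)
    return hypo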
|
py | b4089d6187498ed10931539234f72ca0f74a5f62 | # coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A context manager to perform a series of tasks on a set of resources.
:class:`TaskManager` is a context manager, created on-demand to allow
synchronized access to a node and its resources.
The :class:`TaskManager` will, by default, acquire an exclusive lock on
a node for the duration that the TaskManager instance exists. You may
create a TaskManager instance without locking by passing "shared=True"
when creating it, but certain operations on the resources held by such
an instance of TaskManager will not be possible. Requiring this exclusive
lock guards against parallel operations interfering with each other.
A shared lock is useful when performing non-interfering operations,
such as validating the driver interfaces.
An exclusive lock is stored in the database to coordinate between
:class:`ironic.conductor.manager` instances, that are typically deployed on
different hosts.
:class:`TaskManager` methods, as well as driver methods, may be decorated to
determine whether their invocation requires an exclusive lock.
The TaskManager instance exposes certain node resources and properties as
attributes that you may access:
task.context
The context passed to TaskManager()
task.shared
False if Node is locked, True if it is not locked. (The
    'shared' kwarg of TaskManager())
task.node
The Node object
task.ports
Ports belonging to the Node
task.portgroups
Portgroups belonging to the Node
task.volume_connectors
Storage connectors belonging to the Node
task.volume_targets
Storage targets assigned to the Node
task.driver
The Driver for the Node, or the Driver based on the
'driver_name' kwarg of TaskManager().
Example usage:
::
with task_manager.acquire(context, node_id, purpose='power on') as task:
task.driver.power.power_on(task.node)
If you need to execute task-requiring code in a background thread, the
TaskManager instance provides an interface to handle this for you, making
sure to release resources when the thread finishes (successfully or if
an exception occurs). Common use of this is within the Manager like so:
::
with task_manager.acquire(context, node_id, purpose='some work') as task:
<do some work>
task.spawn_after(self._spawn_worker,
utils.node_power_action, task, new_state)
All exceptions that occur in the current GreenThread as part of the
spawn handling are re-raised. You can specify a hook to execute custom
code when such exceptions occur. For example, the hook is a more elegant
solution than wrapping the "with task_manager.acquire()" with a
try..except block. (Note that this hook does not handle exceptions
raised in the background thread.):
::
def on_error(e):
if isinstance(e, Exception):
...
with task_manager.acquire(context, node_id, purpose='some work') as task:
<do some work>
task.set_spawn_error_hook(on_error)
task.spawn_after(self._spawn_worker,
utils.node_power_action, task, new_state)
"""
import copy
import functools
import futurist
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
import retrying
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
from ironic.conductor import notification_utils as notify
from ironic import objects
from ironic.objects import fields
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def require_exclusive_lock(f):
"""Decorator to require an exclusive lock.
Decorated functions must take a :class:`TaskManager` as the first
parameter. Decorated class methods should take a :class:`TaskManager`
as the first parameter after "self".
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
# NOTE(dtantsur): this code could be written simpler, but then unit
# testing decorated functions is pretty hard, as we usually pass a Mock
# object instead of TaskManager there.
if len(args) > 1:
task = args[1] if isinstance(args[1], TaskManager) else args[0]
else:
task = args[0]
if task.shared:
raise exception.ExclusiveLockRequired()
# NOTE(lintan): This is a workaround to set the context of async tasks,
# which should contain an exclusive lock.
task.context.ensure_thread_contain_context()
return f(*args, **kwargs)
return wrapper
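# Illustrative usage of the decorator above (comment only; the interface and
# method names are hypothetical):
#
#     class MyPower(base.PowerInterface):
#
#         @task_manager.require_exclusive_lock
#         def set_power_state(self, task, power_state):
#             ...  # raises ExclusiveLockRequired when task.shared is True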
def acquire(context, *args, **kwargs):
"""Shortcut for acquiring a lock on a Node.
:param context: Request context.
:returns: An instance of :class:`TaskManager`.
"""
# NOTE(lintan): This is a workaround to set the context of periodic tasks.
context.ensure_thread_contain_context()
return TaskManager(context, *args, **kwargs)
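# Illustrative usage of acquire() with a shared lock (comment only; mirrors
# the module docstring, the validate call is just an example):
#
#     with task_manager.acquire(context, node_id, shared=True,
#                               purpose='validate power interface') as task:
#         task.driver.power.validate(task)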
class TaskManager(object):
"""Context manager for tasks.
This class wraps the locking, driver loading, and acquisition
of related resources (eg, Node and Ports) when beginning a unit of work.
"""
def __init__(self, context, node_id, shared=False,
purpose='unspecified action', retry=True,
load_driver=True):
"""Create a new TaskManager.
Acquire a lock on a node. The lock can be either shared or
exclusive. Shared locks may be used for read-only or
non-disruptive actions only, and must be considerate to what
other threads may be doing on the same node at the same time.
:param context: request context
:param node_id: ID or UUID of node to lock.
:param shared: Boolean indicating whether to take a shared or exclusive
lock. Default: False.
:param purpose: human-readable purpose to put to debug logs.
:param retry: whether to retry locking if it fails. Default: True.
:param load_driver: whether to load the ``driver`` object. Set this to
False if loading the driver is undesired or
impossible.
:raises: DriverNotFound
:raises: InterfaceNotFoundInEntrypoint
:raises: NodeNotFound
:raises: NodeLocked
"""
self._spawn_method = None
self._on_error_method = None
self.context = context
self._node = None
self.node_id = node_id
self.shared = shared
self._retry = retry
self.fsm = states.machine.copy()
self._purpose = purpose
self._debug_timer = timeutils.StopWatch()
# states and event for notification
self._prev_provision_state = None
self._prev_target_provision_state = None
self._event = None
self._saved_node = None
try:
node = objects.Node.get(context, node_id)
LOG.debug("Attempting to get %(type)s lock on node %(node)s (for "
"%(purpose)s)",
{'type': 'shared' if shared else 'exclusive',
'node': node.uuid, 'purpose': purpose})
if not self.shared:
self._lock()
else:
self._debug_timer.restart()
self.node = node
self.ports = objects.Port.list_by_node_id(context, self.node.id)
self.portgroups = objects.Portgroup.list_by_node_id(context,
self.node.id)
self.volume_connectors = objects.VolumeConnector.list_by_node_id(
context, self.node.id)
self.volume_targets = objects.VolumeTarget.list_by_node_id(
context, self.node.id)
if load_driver:
self.driver = driver_factory.build_driver_for_task(self)
else:
self.driver = None
except Exception:
with excutils.save_and_reraise_exception():
self.release_resources()
@property
def node(self):
return self._node
@node.setter
def node(self, node):
self._node = node
if node is not None:
self.fsm.initialize(start_state=self.node.provision_state,
target_state=self.node.target_provision_state)
def load_driver(self):
if self.driver is None:
self.driver = driver_factory.build_driver_for_task(self)
def _lock(self):
self._debug_timer.restart()
if self._retry:
attempts = CONF.conductor.node_locked_retry_attempts
else:
attempts = 1
# NodeLocked exceptions can be annoying. Let's try to alleviate
# some of that pain by retrying our lock attempts. The retrying
# module expects a wait_fixed value in milliseconds.
@retrying.retry(
retry_on_exception=lambda e: isinstance(e, exception.NodeLocked),
stop_max_attempt_number=attempts,
wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
def reserve_node():
self.node = objects.Node.reserve(self.context, CONF.host,
self.node_id)
LOG.debug("Node %(node)s successfully reserved for %(purpose)s "
"(took %(time).2f seconds)",
{'node': self.node.uuid, 'purpose': self._purpose,
'time': self._debug_timer.elapsed()})
self._debug_timer.restart()
reserve_node()
def upgrade_lock(self, purpose=None):
"""Upgrade a shared lock to an exclusive lock.
Also reloads node object from the database.
If lock is already exclusive only changes the lock purpose
when provided with one.
:param purpose: optionally change the purpose of the lock
:raises: NodeLocked if an exclusive lock remains on the node after
"node_locked_retry_attempts"
"""
if purpose is not None:
self._purpose = purpose
if self.shared:
LOG.debug('Upgrading shared lock on node %(uuid)s for %(purpose)s '
'to an exclusive one (shared lock was held %(time).2f '
'seconds)',
{'uuid': self.node.uuid, 'purpose': self._purpose,
'time': self._debug_timer.elapsed()})
self._lock()
self.shared = False
def spawn_after(self, _spawn_method, *args, **kwargs):
"""Call this to spawn a thread to complete the task.
The specified method will be called when the TaskManager instance
exits.
:param _spawn_method: a method that returns a GreenThread object
:param args: args passed to the method.
:param kwargs: additional kwargs passed to the method.
"""
self._spawn_method = _spawn_method
self._spawn_args = args
self._spawn_kwargs = kwargs
def set_spawn_error_hook(self, _on_error_method, *args, **kwargs):
"""Create a hook to handle exceptions when spawning a task.
Create a hook that gets called upon an exception being raised
from spawning a background thread to do a task.
        :param _on_error_method: a callable object; its first parameter
            should accept the Exception object that was raised.
:param args: additional args passed to the callable object.
:param kwargs: additional kwargs passed to the callable object.
"""
self._on_error_method = _on_error_method
self._on_error_args = args
self._on_error_kwargs = kwargs
def downgrade_lock(self):
"""Downgrade the lock to a shared one."""
if self.node is None:
raise RuntimeError("Cannot downgrade an already released lock")
if not self.shared:
objects.Node.release(self.context, CONF.host, self.node.id)
self.shared = True
self.node.refresh()
LOG.debug("Successfully downgraded lock for %(purpose)s "
"on node %(node)s",
{'purpose': self._purpose, 'node': self.node.uuid})
def release_resources(self):
"""Unlock a node and release resources.
If an exclusive lock is held, unlock the node. Reset attributes
to make it clear that this instance of TaskManager should no
longer be accessed.
"""
if not self.shared:
try:
if self.node:
objects.Node.release(self.context, CONF.host, self.node.id)
except exception.NodeNotFound:
# squelch the exception if the node was deleted
# within the task's context.
pass
if self.node:
LOG.debug("Successfully released %(type)s lock for %(purpose)s "
"on node %(node)s (lock was held %(time).2f sec)",
{'type': 'shared' if self.shared else 'exclusive',
'purpose': self._purpose, 'node': self.node.uuid,
'time': self._debug_timer.elapsed()})
self.node = None
self.driver = None
self.ports = None
self.portgroups = None
self.volume_connectors = None
self.volume_targets = None
self.fsm = None
def _write_exception(self, future):
"""Set node last_error if exception raised in thread."""
node = self.node
# do not rewrite existing error
if node and node.last_error is None:
method = self._spawn_args[0].__name__
try:
exc = future.exception()
except futurist.CancelledError:
LOG.exception("Execution of %(method)s for node %(node)s "
"was canceled.", {'method': method,
'node': node.uuid})
else:
if exc is not None:
msg = _("Async execution of %(method)s failed with error: "
"%(error)s") % {'method': method,
'error': str(exc)}
node.last_error = msg
try:
node.save()
except exception.NodeNotFound:
pass
def _notify_provision_state_change(self):
"""Emit notification about change of the node provision state."""
if self._event is None:
return
if self.node is None:
# Rare case if resource released before notification
task = copy.copy(self)
task.fsm = states.machine.copy()
task.node = self._saved_node
else:
task = self
node = task.node
state = node.provision_state
prev_state = self._prev_provision_state
new_unstable = state in states.UNSTABLE_STATES
prev_unstable = prev_state in states.UNSTABLE_STATES
level = fields.NotificationLevel.INFO
if self._event in ('fail', 'error'):
status = fields.NotificationStatus.ERROR
level = fields.NotificationLevel.ERROR
elif (prev_unstable, new_unstable) == (False, True):
status = fields.NotificationStatus.START
elif (prev_unstable, new_unstable) == (True, False):
status = fields.NotificationStatus.END
else:
status = fields.NotificationStatus.SUCCESS
notify.emit_provision_set_notification(
task, level, status, self._prev_provision_state,
self._prev_target_provision_state, self._event)
# reset saved event, avoiding duplicate notification
self._event = None
def _thread_release_resources(self, fut):
"""Thread callback to release resources."""
try:
self._write_exception(fut)
finally:
self.release_resources()
def process_event(self, event, callback=None, call_args=None,
call_kwargs=None, err_handler=None, target_state=None):
"""Process the given event for the task's current state.
:param event: the name of the event to process
:param callback: optional callback to invoke upon event transition
:param call_args: optional args to pass to the callback method
:param call_kwargs: optional kwargs to pass to the callback method
:param err_handler: optional error handler to invoke if the
            callback fails, e.g. because there are no workers available
(err_handler should accept arguments node, prev_prov_state, and
prev_target_state)
:param target_state: if specified, the target provision state for the
node. Otherwise, use the target state from the fsm
:raises: InvalidState if the event is not allowed by the associated
state machine
"""
# save previous states and event
self._prev_provision_state = self.node.provision_state
self._prev_target_provision_state = self.node.target_provision_state
self._event = event
# Advance the state model for the given event. Note that this doesn't
# alter the node in any way. This may raise InvalidState, if this event
# is not allowed in the current state.
self.fsm.process_event(event, target_state=target_state)
# stash current states in the error handler if callback is set,
# in case we fail to get a worker from the pool
if err_handler and callback:
self.set_spawn_error_hook(err_handler, self.node,
self.node.provision_state,
self.node.target_provision_state)
self.node.provision_state = self.fsm.current_state
# NOTE(lucasagomes): If there's no extra processing
# (callback) and we've moved to a stable state, make sure the
# target_provision_state is cleared
if not callback and self.fsm.is_stable(self.node.provision_state):
self.node.target_provision_state = states.NOSTATE
else:
self.node.target_provision_state = self.fsm.target_state
# set up the async worker
if callback:
# clear the error if we're going to start work in a callback
self.node.last_error = None
if call_args is None:
call_args = ()
if call_kwargs is None:
call_kwargs = {}
self.spawn_after(callback, *call_args, **call_kwargs)
# publish the state transition by saving the Node
self.node.save()
log_message = ('Node %(node)s moved to provision state "%(state)s" '
'from state "%(previous)s"; target provision state is '
'"%(target)s"' %
{'node': self.node.uuid,
'state': self.node.provision_state,
'target': self.node.target_provision_state,
'previous': self._prev_provision_state})
if (self.node.provision_state.endswith('failed')
or self.node.provision_state == 'error'):
LOG.error(log_message)
else:
LOG.info(log_message)
if callback is None:
self._notify_provision_state_change()
else:
# save the node, in case it is released before a notification is
# emitted at __exit__().
self._saved_node = self.node
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None and self._spawn_method is not None:
# Spawn a worker to complete the task
# The linked callback below will be called whenever:
# - background task finished with no errors.
# - background task has crashed with exception.
            #   - callback was added after the background task has
            #     finished or crashed. This is currently true because
            #     eventlet doesn't schedule the new thread until the
            #     current thread blocks for some reason.
# All of the above are asserted in tests such that we'll
# catch if eventlet ever changes this behavior.
fut = None
try:
fut = self._spawn_method(*self._spawn_args,
**self._spawn_kwargs)
# NOTE(comstud): Trying to use a lambda here causes
# the callback to not occur for some reason. This
# also makes it easier to test.
fut.add_done_callback(self._thread_release_resources)
# Don't unlock! The unlock will occur when the
# thread finishes.
# NOTE(yuriyz): A race condition with process_event()
# in callback is possible here if eventlet changes behavior.
# E.g., if the execution of the new thread (that handles the
# event processing) finishes before we get here, that new
# thread may emit the "end" notification before we emit the
# following "start" notification.
self._notify_provision_state_change()
return
except Exception as e:
with excutils.save_and_reraise_exception():
try:
# Execute the on_error hook if set
if self._on_error_method:
self._on_error_method(e, *self._on_error_args,
**self._on_error_kwargs)
except Exception:
LOG.warning("Task's on_error hook failed to "
"call %(method)s on node %(node)s",
{'method': self._on_error_method.__name__,
'node': self.node.uuid})
if fut is not None:
# This means the add_done_callback() failed for some
# reason. Nuke the thread.
fut.cancel()
self.release_resources()
self.release_resources()
|
py | b4089e7cf4eb9ac032bbc4a6b023e26fcecc6457 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import division
import copy
from fnmatch import translate
from math import isinf, isnan
from os.path import isfile
from re import compile
import requests
from prometheus_client.samples import Sample
from six import PY3, iteritems, string_types
from ...config import is_affirmative
from ...errors import CheckException
from ...utils.common import to_native_string
from ...utils.http import RequestsWrapper
from .. import AgentCheck
from ..libs.prometheus import text_fd_to_metric_families
try:
import datadog_agent
except ImportError:
from datadog_checks.base.stubs import datadog_agent
if PY3:
long = int
class OpenMetricsScraperMixin(object):
# pylint: disable=E1101
    # This class is not supposed to be used by itself; it provides scraping behavior but
    # needs to be used within a check in the end
# indexes in the sample tuple of core.Metric
SAMPLE_NAME = 0
SAMPLE_LABELS = 1
SAMPLE_VALUE = 2
MICROS_IN_S = 1000000
MINUS_INF = float("-inf")
TELEMETRY_GAUGE_MESSAGE_SIZE = "payload.size"
TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT = "metrics.blacklist.count"
TELEMETRY_COUNTER_METRICS_INPUT_COUNT = "metrics.input.count"
TELEMETRY_COUNTER_METRICS_IGNORE_COUNT = "metrics.ignored.count"
TELEMETRY_COUNTER_METRICS_PROCESS_COUNT = "metrics.processed.count"
METRIC_TYPES = ['counter', 'gauge', 'summary', 'histogram']
KUBERNETES_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
METRICS_WITH_COUNTERS = {"counter", "histogram", "summary"}
def __init__(self, *args, **kwargs):
# Initialize AgentCheck's base class
super(OpenMetricsScraperMixin, self).__init__(*args, **kwargs)
def create_scraper_configuration(self, instance=None):
"""
Creates a scraper configuration.
If instance does not specify a value for a configuration option, the value will default to the `init_config`.
Otherwise, the `default_instance` value will be used.
A default mixin configuration will be returned if there is no instance.
"""
if 'openmetrics_endpoint' in instance:
raise CheckException('The setting `openmetrics_endpoint` is only available for Agent version 7 or later')
# We can choose to create a default mixin configuration for an empty instance
if instance is None:
instance = {}
# Supports new configuration options
config = copy.deepcopy(instance)
# Set the endpoint
endpoint = instance.get('prometheus_url')
if instance and endpoint is None:
raise CheckException("You have to define a prometheus_url for each prometheus instance")
config['prometheus_url'] = endpoint
# `NAMESPACE` is the prefix metrics will have. Need to be hardcoded in the
# child check class.
namespace = instance.get('namespace')
# Check if we have a namespace
if instance and namespace is None:
if self.default_namespace is None:
raise CheckException("You have to define a namespace for each prometheus check")
namespace = self.default_namespace
config['namespace'] = namespace
# Retrieve potential default instance settings for the namespace
default_instance = self.default_instances.get(namespace, {})
def _get_setting(name, default):
return instance.get(name, default_instance.get(name, default))
# `metrics_mapper` is a dictionary where the keys are the metrics to capture
# and the values are the corresponding metrics names to have in datadog.
# Note: it is empty in the parent class but will need to be
# overloaded/hardcoded in the final check not to be counted as custom metric.
# Metrics are preprocessed if no mapping
metrics_mapper = {}
# We merge list and dictionaries from optional defaults & instance settings
metrics = default_instance.get('metrics', []) + instance.get('metrics', [])
for metric in metrics:
if isinstance(metric, string_types):
metrics_mapper[metric] = metric
else:
metrics_mapper.update(metric)
config['metrics_mapper'] = metrics_mapper
# `_wildcards_re` is a Pattern object used to match metric wildcards
config['_wildcards_re'] = None
wildcards = set()
for metric in config['metrics_mapper']:
if "*" in metric:
wildcards.add(translate(metric))
if wildcards:
config['_wildcards_re'] = compile('|'.join(wildcards))
        # `prometheus_metrics_prefix` allows specifying a prefix that all
        # prometheus metrics should have. This can be used when the prometheus
        # endpoint we are scraping allows adding a custom prefix to its
        # metrics.
config['prometheus_metrics_prefix'] = instance.get(
'prometheus_metrics_prefix', default_instance.get('prometheus_metrics_prefix', '')
)
# `label_joins` holds the configuration for extracting 1:1 labels from
# a target metric to all metric matching the label, example:
# self.label_joins = {
# 'kube_pod_info': {
# 'labels_to_match': ['pod'],
# 'labels_to_get': ['node', 'host_ip']
# }
# }
config['label_joins'] = default_instance.get('label_joins', {})
config['label_joins'].update(instance.get('label_joins', {}))
        # `_label_mapping` holds the additional label info to add for a specific
        # label value, example:
# self._label_mapping = {
# 'pod': {
# 'dd-agent-9s1l1': {
# "node": "yolo",
# "host_ip": "yey"
# }
# }
# }
config['_label_mapping'] = {}
# `_active_label_mapping` holds a dictionary of label values found during the run
# to cleanup the label_mapping of unused values, example:
# self._active_label_mapping = {
# 'pod': {
# 'dd-agent-9s1l1': True
# }
# }
config['_active_label_mapping'] = {}
# `_watched_labels` holds the sets of labels to watch for enrichment
config['_watched_labels'] = {}
config['_dry_run'] = True
        # Some metrics are ignored because they are duplicates or introduce a
        # very high cardinality. Metrics included in this list will be silently
        # skipped without an 'Unable to handle metric' debug line in the logs
config['ignore_metrics'] = instance.get('ignore_metrics', default_instance.get('ignore_metrics', []))
config['_ignored_metrics'] = set()
# `_ignored_re` is a Pattern object used to match ignored metric patterns
config['_ignored_re'] = None
ignored_patterns = set()
# Separate ignored metric names and ignored patterns in different sets for faster lookup later
for metric in config['ignore_metrics']:
if '*' in metric:
ignored_patterns.add(translate(metric))
else:
config['_ignored_metrics'].add(metric)
if ignored_patterns:
config['_ignored_re'] = compile('|'.join(ignored_patterns))
# Ignore metrics based on label keys or specific label values
config['ignore_metrics_by_labels'] = instance.get(
'ignore_metrics_by_labels', default_instance.get('ignore_metrics_by_labels', {})
)
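        # Illustrative instance configuration (a sketch; label names and
        # values are placeholders):
        #
        #     ignore_metrics_by_labels:
        #       version: ['*']          # drop any sample carrying a `version` label
        #       instance: ['10.0.0.1']  # drop samples where instance == 10.0.0.1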
# If you want to send the buckets as tagged values when dealing with histograms,
# set send_histograms_buckets to True, set to False otherwise.
config['send_histograms_buckets'] = is_affirmative(
instance.get('send_histograms_buckets', default_instance.get('send_histograms_buckets', True))
)
        # If you want the buckets to be non-cumulative and to come with upper/lower bound tags,
        # set non_cumulative_buckets to True. This is enabled automatically when distribution
        # metrics are enabled.
config['non_cumulative_buckets'] = is_affirmative(
instance.get('non_cumulative_buckets', default_instance.get('non_cumulative_buckets', False))
)
# Send histograms as datadog distribution metrics
config['send_distribution_buckets'] = is_affirmative(
instance.get('send_distribution_buckets', default_instance.get('send_distribution_buckets', False))
)
# Non cumulative buckets are mandatory for distribution metrics
if config['send_distribution_buckets'] is True:
config['non_cumulative_buckets'] = True
# If you want to send `counter` metrics as monotonic counts, set this value to True.
# Set to False if you want to instead send those metrics as `gauge`.
config['send_monotonic_counter'] = is_affirmative(
instance.get('send_monotonic_counter', default_instance.get('send_monotonic_counter', True))
)
# If you want `counter` metrics to be submitted as both gauges and monotonic counts. Set this value to True.
config['send_monotonic_with_gauge'] = is_affirmative(
instance.get('send_monotonic_with_gauge', default_instance.get('send_monotonic_with_gauge', False))
)
config['send_distribution_counts_as_monotonic'] = is_affirmative(
instance.get(
'send_distribution_counts_as_monotonic',
default_instance.get('send_distribution_counts_as_monotonic', False),
)
)
config['send_distribution_sums_as_monotonic'] = is_affirmative(
instance.get(
'send_distribution_sums_as_monotonic',
default_instance.get('send_distribution_sums_as_monotonic', False),
)
)
# If the `labels_mapper` dictionary is provided, the metrics labels names
# in the `labels_mapper` will use the corresponding value as tag name
# when sending the gauges.
config['labels_mapper'] = default_instance.get('labels_mapper', {})
config['labels_mapper'].update(instance.get('labels_mapper', {}))
# Rename bucket "le" label to "upper_bound"
config['labels_mapper']['le'] = 'upper_bound'
# `exclude_labels` is an array of label names to exclude. Those labels
# will just not be added as tags when submitting the metric.
config['exclude_labels'] = default_instance.get('exclude_labels', []) + instance.get('exclude_labels', [])
# `include_labels` is an array of label names to include. If these labels are not in
# the `exclude_labels` list, then they are added as tags when submitting the metric.
config['include_labels'] = default_instance.get('include_labels', []) + instance.get('include_labels', [])
# `type_overrides` is a dictionary where the keys are prometheus metric names
# and the values are a metric type (name as string) to use instead of the one
# listed in the payload. It can be used to force a type on untyped metrics.
# Note: it is empty in the parent class but will need to be
# overloaded/hardcoded in the final check not to be counted as custom metric.
config['type_overrides'] = default_instance.get('type_overrides', {})
config['type_overrides'].update(instance.get('type_overrides', {}))
# `_type_override_patterns` is a dictionary where we store Pattern objects
# that match metric names as keys, and their corresponding metric type overrides as values.
config['_type_override_patterns'] = {}
with_wildcards = set()
for metric, type in iteritems(config['type_overrides']):
if '*' in metric:
config['_type_override_patterns'][compile(translate(metric))] = type
with_wildcards.add(metric)
# cleanup metric names with wildcards from the 'type_overrides' dict
for metric in with_wildcards:
del config['type_overrides'][metric]
        # Some metrics are retrieved from different hosts, and often a label
        # holds this information; this setting transfers it to the hostname.
config['label_to_hostname'] = instance.get('label_to_hostname', default_instance.get('label_to_hostname', None))
        # In combination with label_to_hostname, allows adding a common suffix to the hostnames
        # submitted. This can be used, for instance, to discriminate hosts between clusters.
config['label_to_hostname_suffix'] = instance.get(
'label_to_hostname_suffix', default_instance.get('label_to_hostname_suffix', None)
)
# Add a 'health' service check for the prometheus endpoint
config['health_service_check'] = is_affirmative(
instance.get('health_service_check', default_instance.get('health_service_check', True))
)
# Can either be only the path to the certificate and thus you should specify the private key
# or it can be the path to a file containing both the certificate & the private key
config['ssl_cert'] = instance.get('ssl_cert', default_instance.get('ssl_cert', None))
# Needed if the certificate does not include the private key
#
# /!\ The private key to your local certificate must be unencrypted.
# Currently, Requests does not support using encrypted keys.
config['ssl_private_key'] = instance.get('ssl_private_key', default_instance.get('ssl_private_key', None))
# The path to the trusted CA used for generating custom certificates
config['ssl_ca_cert'] = instance.get('ssl_ca_cert', default_instance.get('ssl_ca_cert', None))
# Whether or not to validate SSL certificates
config['ssl_verify'] = is_affirmative(instance.get('ssl_verify', default_instance.get('ssl_verify', True)))
# Extra http headers to be sent when polling endpoint
config['extra_headers'] = default_instance.get('extra_headers', {})
config['extra_headers'].update(instance.get('extra_headers', {}))
# Timeout used during the network request
config['prometheus_timeout'] = instance.get(
'prometheus_timeout', default_instance.get('prometheus_timeout', 10)
)
# Authentication used when polling endpoint
config['username'] = instance.get('username', default_instance.get('username', None))
config['password'] = instance.get('password', default_instance.get('password', None))
# Custom tags that will be sent with each metric
config['custom_tags'] = instance.get('tags', [])
# Some tags can be ignored to reduce the cardinality.
# This can be useful for cost optimization in containerized environments
# when the openmetrics check is configured to collect custom metrics.
# Even when the Agent's Tagger is configured to add low-cardinality tags only,
# some tags can still generate unwanted metric contexts (e.g pod annotations as tags).
ignore_tags = instance.get('ignore_tags', default_instance.get('ignore_tags', []))
if ignore_tags:
ignored_tags_re = compile('|'.join(set(ignore_tags)))
config['custom_tags'] = [tag for tag in config['custom_tags'] if not ignored_tags_re.search(tag)]
# Additional tags to be sent with each metric
config['_metric_tags'] = []
# List of strings to filter the input text payload on. If any line contains
# one of these strings, it will be filtered out before being parsed.
# INTERNAL FEATURE, might be removed in future versions
config['_text_filter_blacklist'] = []
# Whether or not to use the service account bearer token for authentication.
# Can be explicitly set to true or false to send or not the bearer token.
# If set to the `tls_only` value, the bearer token will be sent only to https endpoints.
# If 'bearer_token_path' is not set, we use /var/run/secrets/kubernetes.io/serviceaccount/token
# as a default path to get the token.
bearer_token_auth = _get_setting('bearer_token_auth', False)
if bearer_token_auth == 'tls_only':
config['bearer_token_auth'] = config['prometheus_url'].startswith("https://")
else:
config['bearer_token_auth'] = is_affirmative(bearer_token_auth)
# Can be used to get a service account bearer token from files
# other than /var/run/secrets/kubernetes.io/serviceaccount/token
# 'bearer_token_auth' should be enabled.
config['bearer_token_path'] = instance.get('bearer_token_path', default_instance.get('bearer_token_path', None))
# The service account bearer token to be used for authentication
config['_bearer_token'] = self._get_bearer_token(config['bearer_token_auth'], config['bearer_token_path'])
config['telemetry'] = is_affirmative(instance.get('telemetry', default_instance.get('telemetry', False)))
# The metric name services use to indicate build information
config['metadata_metric_name'] = instance.get(
'metadata_metric_name', default_instance.get('metadata_metric_name')
)
# Map of metadata key names to label names
config['metadata_label_map'] = instance.get(
'metadata_label_map', default_instance.get('metadata_label_map', {})
)
config['_default_metric_transformers'] = {}
if config['metadata_metric_name'] and config['metadata_label_map']:
config['_default_metric_transformers'][config['metadata_metric_name']] = self.transform_metadata
# Whether or not to enable flushing of the first value of monotonic counts
config['_flush_first_value'] = False
# Whether to use process_start_time_seconds to decide if counter-like values should be flushed
# on first scrape.
config['use_process_start_time'] = is_affirmative(_get_setting('use_process_start_time', False))
return config
def get_http_handler(self, scraper_config):
"""
Get http handler for a specific scraper config.
The http handler is cached using `prometheus_url` as key.
"""
prometheus_url = scraper_config['prometheus_url']
if prometheus_url in self._http_handlers:
return self._http_handlers[prometheus_url]
# TODO: Deprecate this behavior in Agent 8
if scraper_config['ssl_ca_cert'] is False:
scraper_config['ssl_verify'] = False
# TODO: Deprecate this behavior in Agent 8
if scraper_config['ssl_verify'] is False:
scraper_config.setdefault('tls_ignore_warning', True)
http_handler = self._http_handlers[prometheus_url] = RequestsWrapper(
scraper_config, self.init_config, self.HTTP_CONFIG_REMAPPER, self.log
)
headers = http_handler.options['headers']
bearer_token = scraper_config['_bearer_token']
if bearer_token is not None:
headers['Authorization'] = 'Bearer {}'.format(bearer_token)
# TODO: Determine if we really need this
headers.setdefault('accept-encoding', 'gzip')
# Explicitly set the content type we accept
headers.setdefault('accept', 'text/plain')
return http_handler
def reset_http_config(self):
"""
You may need to use this when configuration is determined dynamically during every
check run, such as when polling an external resource like the Kubelet.
"""
self._http_handlers.clear()
def parse_metric_family(self, response, scraper_config):
"""
Parse the MetricFamily from a valid `requests.Response` object to provide a MetricFamily object.
The text format uses iter_lines() generator.
"""
if response.encoding is None:
response.encoding = 'utf-8'
input_gen = response.iter_lines(decode_unicode=True)
if scraper_config['_text_filter_blacklist']:
input_gen = self._text_filter_input(input_gen, scraper_config)
for metric in text_fd_to_metric_families(input_gen):
self._send_telemetry_counter(
self.TELEMETRY_COUNTER_METRICS_INPUT_COUNT, len(metric.samples), scraper_config
)
type_override = scraper_config['type_overrides'].get(metric.name)
if type_override:
metric.type = type_override
elif scraper_config['_type_override_patterns']:
for pattern, new_type in iteritems(scraper_config['_type_override_patterns']):
if pattern.search(metric.name):
metric.type = new_type
break
if metric.type not in self.METRIC_TYPES:
continue
metric.name = self._remove_metric_prefix(metric.name, scraper_config)
yield metric
def _text_filter_input(self, input_gen, scraper_config):
"""
Filters out the text input line by line to avoid parsing and processing
metrics we know we don't want to process. This only works on `text/plain`
payloads, and is an INTERNAL FEATURE implemented for the kubelet check
        :param input_gen: line generator
:output: generator of filtered lines
"""
for line in input_gen:
for item in scraper_config['_text_filter_blacklist']:
if item in line:
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT, 1, scraper_config)
break
else:
# No blacklist matches, passing the line through
yield line
def _remove_metric_prefix(self, metric, scraper_config):
prometheus_metrics_prefix = scraper_config['prometheus_metrics_prefix']
return metric[len(prometheus_metrics_prefix) :] if metric.startswith(prometheus_metrics_prefix) else metric
def scrape_metrics(self, scraper_config):
"""
Poll the data from Prometheus and return the metrics as a generator.
"""
response = self.poll(scraper_config)
if scraper_config['telemetry']:
if 'content-length' in response.headers:
content_len = int(response.headers['content-length'])
else:
content_len = len(response.content)
self._send_telemetry_gauge(self.TELEMETRY_GAUGE_MESSAGE_SIZE, content_len, scraper_config)
try:
# no dry run if no label joins
if not scraper_config['label_joins']:
scraper_config['_dry_run'] = False
elif not scraper_config['_watched_labels']:
watched = scraper_config['_watched_labels']
watched['sets'] = {}
watched['keys'] = {}
watched['singles'] = set()
for key, val in iteritems(scraper_config['label_joins']):
labels = []
if 'labels_to_match' in val:
labels = val['labels_to_match']
elif 'label_to_match' in val:
self.log.warning("`label_to_match` is being deprecated, please use `labels_to_match`")
if isinstance(val['label_to_match'], list):
labels = val['label_to_match']
else:
labels = [val['label_to_match']]
if labels:
s = frozenset(labels)
watched['sets'][key] = s
watched['keys'][key] = ','.join(s)
if len(labels) == 1:
watched['singles'].add(labels[0])
for metric in self.parse_metric_family(response, scraper_config):
yield metric
# Set dry run off
scraper_config['_dry_run'] = False
# Garbage collect unused mapping and reset active labels
for metric, mapping in list(iteritems(scraper_config['_label_mapping'])):
for key in list(mapping):
if (
metric in scraper_config['_active_label_mapping']
and key not in scraper_config['_active_label_mapping'][metric]
):
del scraper_config['_label_mapping'][metric][key]
scraper_config['_active_label_mapping'] = {}
finally:
response.close()
def process(self, scraper_config, metric_transformers=None):
"""
        Polls the data from Prometheus and submits it as Datadog metrics.
        `endpoint` is the metrics endpoint to use to poll metrics from Prometheus.
        Note that if the instance has a `tags` attribute, it will be pushed
        automatically as additional custom tags and added to the metrics.
"""
transformers = scraper_config['_default_metric_transformers'].copy()
if metric_transformers:
transformers.update(metric_transformers)
counter_buffer = []
agent_start_time = None
process_start_time = None
if not scraper_config['_flush_first_value'] and scraper_config['use_process_start_time']:
agent_start_time = datadog_agent.get_process_start_time()
for metric in self.scrape_metrics(scraper_config):
if agent_start_time is not None:
if metric.name == 'process_start_time_seconds' and metric.samples:
min_metric_value = min(s[self.SAMPLE_VALUE] for s in metric.samples)
if process_start_time is None or min_metric_value < process_start_time:
process_start_time = min_metric_value
if metric.type in self.METRICS_WITH_COUNTERS:
counter_buffer.append(metric)
continue
self.process_metric(metric, scraper_config, metric_transformers=transformers)
if agent_start_time and process_start_time and agent_start_time < process_start_time:
# If agent was started before the process, we assume counters were started recently from zero,
# and thus we can compute the rates.
scraper_config['_flush_first_value'] = True
for metric in counter_buffer:
self.process_metric(metric, scraper_config, metric_transformers=transformers)
scraper_config['_flush_first_value'] = True
def transform_metadata(self, metric, scraper_config):
labels = metric.samples[0][self.SAMPLE_LABELS]
for metadata_name, label_name in iteritems(scraper_config['metadata_label_map']):
if label_name in labels:
self.set_metadata(metadata_name, labels[label_name])
def _metric_name_with_namespace(self, metric_name, scraper_config):
namespace = scraper_config['namespace']
if not namespace:
return metric_name
return '{}.{}'.format(namespace, metric_name)
def _telemetry_metric_name_with_namespace(self, metric_name, scraper_config):
namespace = scraper_config['namespace']
if not namespace:
return '{}.{}'.format('telemetry', metric_name)
return '{}.{}.{}'.format(namespace, 'telemetry', metric_name)
def _send_telemetry_gauge(self, metric_name, val, scraper_config):
if scraper_config['telemetry']:
metric_name_with_namespace = self._telemetry_metric_name_with_namespace(metric_name, scraper_config)
# Determine the tags to send
custom_tags = scraper_config['custom_tags']
tags = list(custom_tags)
tags.extend(scraper_config['_metric_tags'])
self.gauge(metric_name_with_namespace, val, tags=tags)
def _send_telemetry_counter(self, metric_name, val, scraper_config, extra_tags=None):
if scraper_config['telemetry']:
metric_name_with_namespace = self._telemetry_metric_name_with_namespace(metric_name, scraper_config)
# Determine the tags to send
custom_tags = scraper_config['custom_tags']
tags = list(custom_tags)
tags.extend(scraper_config['_metric_tags'])
if extra_tags:
tags.extend(extra_tags)
self.count(metric_name_with_namespace, val, tags=tags)
def _store_labels(self, metric, scraper_config):
# If targeted metric, store labels
if metric.name not in scraper_config['label_joins']:
return
watched = scraper_config['_watched_labels']
matching_labels = watched['sets'][metric.name]
mapping_key = watched['keys'][metric.name]
labels_to_get = scraper_config['label_joins'][metric.name]['labels_to_get']
get_all = '*' in labels_to_get
match_all = mapping_key == '*'
for sample in metric.samples:
# metadata-only metrics that are used for label joins are always equal to 1
# this is required for metrics where all combinations of a state are sent
# but only the active one is set to 1 (others are set to 0)
# example: kube_pod_status_phase in kube-state-metrics
if sample[self.SAMPLE_VALUE] != 1:
continue
sample_labels = sample[self.SAMPLE_LABELS]
sample_labels_keys = sample_labels.keys()
if match_all or matching_labels.issubset(sample_labels_keys):
label_dict = dict()
if get_all:
for label_name, label_value in iteritems(sample_labels):
if label_name in matching_labels:
continue
label_dict[label_name] = label_value
else:
for label_name in labels_to_get:
if label_name in sample_labels:
label_dict[label_name] = sample_labels[label_name]
if match_all:
mapping_value = '*'
else:
mapping_value = ','.join([sample_labels[l] for l in matching_labels])
scraper_config['_label_mapping'].setdefault(mapping_key, {}).setdefault(mapping_value, {}).update(
label_dict
)
def _join_labels(self, metric, scraper_config):
# Filter metric to see if we can enrich with joined labels
if not scraper_config['label_joins']:
return
label_mapping = scraper_config['_label_mapping']
active_label_mapping = scraper_config['_active_label_mapping']
watched = scraper_config['_watched_labels']
sets = watched['sets']
keys = watched['keys']
singles = watched['singles']
for sample in metric.samples:
sample_labels = sample[self.SAMPLE_LABELS]
sample_labels_keys = sample_labels.keys()
# Match with wildcard label
# Label names are [a-zA-Z0-9_]*, so no risk of collision
if '*' in singles:
active_label_mapping.setdefault('*', {})['*'] = True
if '*' in label_mapping and '*' in label_mapping['*']:
sample_labels.update(label_mapping['*']['*'])
# Match with single labels
matching_single_labels = singles.intersection(sample_labels_keys)
for label in matching_single_labels:
mapping_key = label
mapping_value = sample_labels[label]
active_label_mapping.setdefault(mapping_key, {})[mapping_value] = True
if mapping_key in label_mapping and mapping_value in label_mapping[mapping_key]:
sample_labels.update(label_mapping[mapping_key][mapping_value])
# Match with tuples of labels
for key, mapping_key in iteritems(keys):
if mapping_key in matching_single_labels:
continue
matching_labels = sets[key]
if matching_labels.issubset(sample_labels_keys):
matching_values = [sample_labels[l] for l in matching_labels]
mapping_value = ','.join(matching_values)
active_label_mapping.setdefault(mapping_key, {})[mapping_value] = True
if mapping_key in label_mapping and mapping_value in label_mapping[mapping_key]:
sample_labels.update(label_mapping[mapping_key][mapping_value])
def _ignore_metrics_by_label(self, scraper_config, metric_name, sample):
ignore_metrics_by_label = scraper_config['ignore_metrics_by_labels']
sample_labels = sample[self.SAMPLE_LABELS]
for label_key, label_values in ignore_metrics_by_label.items():
if not label_values:
self.log.debug(
"Skipping filter label `%s` with an empty values list, did you mean to use '*' wildcard?", label_key
)
elif '*' in label_values:
# Wildcard '*' means all metrics with label_key will be ignored
self.log.debug("Detected wildcard for label `%s`", label_key)
if label_key in sample_labels.keys():
self.log.debug("Skipping metric `%s` due to label key matching: %s", metric_name, label_key)
return True
else:
for val in label_values:
if label_key in sample_labels and sample_labels[label_key] == val:
self.log.debug(
"Skipping metric `%s` due to label `%s` value matching: %s", metric_name, label_key, val
)
return True
return False
def process_metric(self, metric, scraper_config, metric_transformers=None):
"""
Handle a Prometheus metric according to the following flow:
- search `scraper_config['metrics_mapper']` for a prometheus.metric to datadog.metric mapping
- call check method with the same name as the metric
- log info if none of the above worked
`metric_transformers` is a dict of `<metric name>:<function to run when the metric name is encountered>`
"""
# If targeted metric, store labels
self._store_labels(metric, scraper_config)
if scraper_config['ignore_metrics']:
if metric.name in scraper_config['_ignored_metrics']:
self._send_telemetry_counter(
self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config
)
return # Ignore the metric
if scraper_config['_ignored_re'] and scraper_config['_ignored_re'].search(metric.name):
# Metric must be ignored
scraper_config['_ignored_metrics'].add(metric.name)
self._send_telemetry_counter(
self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config
)
return # Ignore the metric
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_PROCESS_COUNT, len(metric.samples), scraper_config)
if self._filter_metric(metric, scraper_config):
return # Ignore the metric
# Filter metric to see if we can enrich with joined labels
self._join_labels(metric, scraper_config)
if scraper_config['_dry_run']:
return
try:
self.submit_openmetric(scraper_config['metrics_mapper'][metric.name], metric, scraper_config)
except KeyError:
if metric_transformers is not None and metric.name in metric_transformers:
try:
# Get the transformer function for this specific metric
transformer = metric_transformers[metric.name]
transformer(metric, scraper_config)
except Exception as err:
self.log.warning('Error handling metric: %s - error: %s', metric.name, err)
return
# check for wildcards in transformers
for transformer_name, transformer in iteritems(metric_transformers):
if transformer_name.endswith('*') and metric.name.startswith(transformer_name[:-1]):
transformer(metric, scraper_config, transformer_name)
# try matching wildcards
if scraper_config['_wildcards_re'] and scraper_config['_wildcards_re'].search(metric.name):
self.submit_openmetric(metric.name, metric, scraper_config)
return
self.log.debug(
'Skipping metric `%s` as it is not defined in the metrics mapper, '
'has no transformer function, nor does it match any wildcards.',
metric.name,
)
def poll(self, scraper_config, headers=None):
"""
        Returns a valid `requests.Response`; otherwise raises requests.HTTPError if the status code
        of the response isn't valid - see `response.raise_for_status()`.
        The caller needs to close the requests.Response.
        Custom headers can be added to the default headers.
"""
endpoint = scraper_config.get('prometheus_url')
# Should we send a service check for when we make a request
health_service_check = scraper_config['health_service_check']
service_check_name = self._metric_name_with_namespace('prometheus.health', scraper_config)
service_check_tags = ['endpoint:{}'.format(endpoint)]
service_check_tags.extend(scraper_config['custom_tags'])
try:
response = self.send_request(endpoint, scraper_config, headers)
except requests.exceptions.SSLError:
self.log.error("Invalid SSL settings for requesting %s endpoint", endpoint)
raise
except IOError:
if health_service_check:
self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
raise
try:
response.raise_for_status()
if health_service_check:
self.service_check(service_check_name, AgentCheck.OK, tags=service_check_tags)
return response
except requests.HTTPError:
response.close()
if health_service_check:
self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
raise
def send_request(self, endpoint, scraper_config, headers=None):
kwargs = {}
if headers:
kwargs['headers'] = headers
http_handler = self.get_http_handler(scraper_config)
return http_handler.get(endpoint, stream=True, **kwargs)
def get_hostname_for_sample(self, sample, scraper_config):
"""
Expose the label_to_hostname mapping logic to custom handler methods
"""
return self._get_hostname(None, sample, scraper_config)
def submit_openmetric(self, metric_name, metric, scraper_config, hostname=None):
"""
For each sample in the metric, report it as a gauge with all labels as tags
except if a labels `dict` is passed, in which case keys are label names we'll extract
        and corresponding values are tag names we'll use (e.g. {'node': 'node'}).
Histograms generate a set of values instead of a unique metric.
`send_histograms_buckets` is used to specify if you want to
send the buckets as tagged values when dealing with histograms.
`custom_tags` is an array of `tag:value` that will be added to the
metric when sending the gauge to Datadog.
"""
if metric.type in ["gauge", "counter", "rate"]:
metric_name_with_namespace = self._metric_name_with_namespace(metric_name, scraper_config)
for sample in metric.samples:
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
# Determine the tags to send
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
if metric.type == "counter" and scraper_config['send_monotonic_counter']:
self.monotonic_count(
metric_name_with_namespace,
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_flush_first_value'],
)
elif metric.type == "rate":
self.rate(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
else:
self.gauge(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
# Metric is a "counter" but legacy behavior has "send_as_monotonic" defaulted to False
# Submit metric as monotonic_count with appended name
if metric.type == "counter" and scraper_config['send_monotonic_with_gauge']:
self.monotonic_count(
metric_name_with_namespace + '.total',
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_flush_first_value'],
)
elif metric.type == "histogram":
self._submit_gauges_from_histogram(metric_name, metric, scraper_config)
elif metric.type == "summary":
self._submit_gauges_from_summary(metric_name, metric, scraper_config)
else:
self.log.error("Metric type %s unsupported for metric %s.", metric.type, metric_name)
def _get_hostname(self, hostname, sample, scraper_config):
"""
If hostname is None, look at label_to_hostname setting
"""
if (
hostname is None
and scraper_config['label_to_hostname'] is not None
and sample[self.SAMPLE_LABELS].get(scraper_config['label_to_hostname'])
):
hostname = sample[self.SAMPLE_LABELS][scraper_config['label_to_hostname']]
suffix = scraper_config['label_to_hostname_suffix']
if suffix is not None:
hostname += suffix
return hostname
def _submit_gauges_from_summary(self, metric_name, metric, scraper_config, hostname=None):
"""
Extracts metrics from a prometheus summary metric and sends them as gauges
"""
for sample in metric.samples:
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
continue
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
if sample[self.SAMPLE_NAME].endswith("_sum"):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self._submit_distribution_count(
scraper_config['send_distribution_sums_as_monotonic'],
scraper_config['send_monotonic_with_gauge'],
"{}.sum".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_flush_first_value'],
)
elif sample[self.SAMPLE_NAME].endswith("_count"):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self._submit_distribution_count(
scraper_config['send_distribution_counts_as_monotonic'],
scraper_config['send_monotonic_with_gauge'],
"{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_flush_first_value'],
)
else:
try:
quantile = sample[self.SAMPLE_LABELS]["quantile"]
except KeyError:
# TODO: In the Prometheus spec the 'quantile' label is optional, but it's not clear yet
# what we should do in this case. Let's skip for now and submit the rest of metrics.
message = (
'"quantile" label not present in metric %r. '
'Quantile-less summary metrics are not currently supported. Skipping...'
)
self.log.debug(message, metric_name)
continue
sample[self.SAMPLE_LABELS]["quantile"] = str(float(quantile))
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self.gauge(
"{}.quantile".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
)
def _submit_gauges_from_histogram(self, metric_name, metric, scraper_config, hostname=None):
"""
Extracts metrics from a prometheus histogram and sends them as gauges
"""
if scraper_config['non_cumulative_buckets']:
self._decumulate_histogram_buckets(metric)
for sample in metric.samples:
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
continue
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
if sample[self.SAMPLE_NAME].endswith("_sum") and not scraper_config['send_distribution_buckets']:
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
self._submit_distribution_count(
scraper_config['send_distribution_sums_as_monotonic'],
scraper_config['send_monotonic_with_gauge'],
"{}.sum".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_flush_first_value'],
)
elif sample[self.SAMPLE_NAME].endswith("_count") and not scraper_config['send_distribution_buckets']:
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
if scraper_config['send_histograms_buckets']:
tags.append("upper_bound:none")
self._submit_distribution_count(
scraper_config['send_distribution_counts_as_monotonic'],
scraper_config['send_monotonic_with_gauge'],
"{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_flush_first_value'],
)
elif scraper_config['send_histograms_buckets'] and sample[self.SAMPLE_NAME].endswith("_bucket"):
if scraper_config['send_distribution_buckets']:
self._submit_sample_histogram_buckets(metric_name, sample, scraper_config, hostname)
elif "Inf" not in sample[self.SAMPLE_LABELS]["le"] or scraper_config['non_cumulative_buckets']:
sample[self.SAMPLE_LABELS]["le"] = str(float(sample[self.SAMPLE_LABELS]["le"]))
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
self._submit_distribution_count(
scraper_config['send_distribution_counts_as_monotonic'],
scraper_config['send_monotonic_with_gauge'],
"{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_flush_first_value'],
)
def _compute_bucket_hash(self, tags):
# we need the unique context for all the buckets
# hence we remove the "le" tag
return hash(frozenset(sorted((k, v) for k, v in iteritems(tags) if k != 'le')))
def _decumulate_histogram_buckets(self, metric):
"""
        Decumulates buckets in a given histogram metric and adds the lower_bound label (le being the upper_bound)
"""
bucket_values_by_context_upper_bound = {}
for sample in metric.samples:
if sample[self.SAMPLE_NAME].endswith("_bucket"):
context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
if context_key not in bucket_values_by_context_upper_bound:
bucket_values_by_context_upper_bound[context_key] = {}
bucket_values_by_context_upper_bound[context_key][float(sample[self.SAMPLE_LABELS]["le"])] = sample[
self.SAMPLE_VALUE
]
sorted_buckets_by_context = {}
for context in bucket_values_by_context_upper_bound:
sorted_buckets_by_context[context] = sorted(bucket_values_by_context_upper_bound[context])
# Tuples (lower_bound, upper_bound, value)
bucket_tuples_by_context_upper_bound = {}
for context in sorted_buckets_by_context:
for i, upper_b in enumerate(sorted_buckets_by_context[context]):
if i == 0:
if context not in bucket_tuples_by_context_upper_bound:
bucket_tuples_by_context_upper_bound[context] = {}
if upper_b > 0:
# positive buckets start at zero
bucket_tuples_by_context_upper_bound[context][upper_b] = (
0,
upper_b,
bucket_values_by_context_upper_bound[context][upper_b],
)
else:
# negative buckets start at -inf
bucket_tuples_by_context_upper_bound[context][upper_b] = (
self.MINUS_INF,
upper_b,
bucket_values_by_context_upper_bound[context][upper_b],
)
continue
tmp = (
bucket_values_by_context_upper_bound[context][upper_b]
- bucket_values_by_context_upper_bound[context][sorted_buckets_by_context[context][i - 1]]
)
bucket_tuples_by_context_upper_bound[context][upper_b] = (
sorted_buckets_by_context[context][i - 1],
upper_b,
tmp,
)
# modify original metric to inject lower_bound & modified value
for i, sample in enumerate(metric.samples):
if not sample[self.SAMPLE_NAME].endswith("_bucket"):
continue
context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
matching_bucket_tuple = bucket_tuples_by_context_upper_bound[context_key][
float(sample[self.SAMPLE_LABELS]["le"])
]
# Replacing the sample tuple
sample[self.SAMPLE_LABELS]["lower_bound"] = str(matching_bucket_tuple[0])
metric.samples[i] = Sample(sample[self.SAMPLE_NAME], sample[self.SAMPLE_LABELS], matching_bucket_tuple[2])
def _submit_sample_histogram_buckets(self, metric_name, sample, scraper_config, hostname=None):
if "lower_bound" not in sample[self.SAMPLE_LABELS] or "le" not in sample[self.SAMPLE_LABELS]:
self.log.warning(
"Metric: %s was not containing required bucket boundaries labels: %s",
metric_name,
sample[self.SAMPLE_LABELS],
)
return
sample[self.SAMPLE_LABELS]["le"] = str(float(sample[self.SAMPLE_LABELS]["le"]))
sample[self.SAMPLE_LABELS]["lower_bound"] = str(float(sample[self.SAMPLE_LABELS]["lower_bound"]))
if sample[self.SAMPLE_LABELS]["le"] == sample[self.SAMPLE_LABELS]["lower_bound"]:
# this can happen for -inf/-inf bucket that we don't want to send (always 0)
self.log.warning(
"Metric: %s has bucket boundaries equal, skipping: %s", metric_name, sample[self.SAMPLE_LABELS]
)
return
tags = self._metric_tags(metric_name, sample[self.SAMPLE_VALUE], sample, scraper_config, hostname)
self.submit_histogram_bucket(
self._metric_name_with_namespace(metric_name, scraper_config),
sample[self.SAMPLE_VALUE],
float(sample[self.SAMPLE_LABELS]["lower_bound"]),
float(sample[self.SAMPLE_LABELS]["le"]),
True,
hostname,
tags,
flush_first_value=scraper_config['_flush_first_value'],
)
def _submit_distribution_count(
self,
monotonic,
send_monotonic_with_gauge,
metric_name,
value,
tags=None,
hostname=None,
flush_first_value=False,
):
if monotonic:
self.monotonic_count(metric_name, value, tags=tags, hostname=hostname, flush_first_value=flush_first_value)
else:
self.gauge(metric_name, value, tags=tags, hostname=hostname)
if send_monotonic_with_gauge:
self.monotonic_count(
metric_name + ".total", value, tags=tags, hostname=hostname, flush_first_value=flush_first_value
)
def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None):
custom_tags = scraper_config['custom_tags']
_tags = list(custom_tags)
_tags.extend(scraper_config['_metric_tags'])
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name not in scraper_config['exclude_labels']:
if label_name in scraper_config['include_labels'] or len(scraper_config['include_labels']) == 0:
tag_name = scraper_config['labels_mapper'].get(label_name, label_name)
_tags.append('{}:{}'.format(to_native_string(tag_name), to_native_string(label_value)))
return self._finalize_tags_to_submit(
_tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname
)
def _is_value_valid(self, val):
return not (isnan(val) or isinf(val))
def _get_bearer_token(self, bearer_token_auth, bearer_token_path):
if bearer_token_auth is False:
return None
path = None
if bearer_token_path is not None:
if isfile(bearer_token_path):
path = bearer_token_path
else:
self.log.error("File not found: %s", bearer_token_path)
elif isfile(self.KUBERNETES_TOKEN_PATH):
path = self.KUBERNETES_TOKEN_PATH
if path is None:
self.log.error("Cannot get bearer token from bearer_token_path or auto discovery")
raise IOError("Cannot get bearer token from bearer_token_path or auto discovery")
try:
with open(path, 'r') as f:
return f.read().rstrip()
except Exception as err:
self.log.error("Cannot get bearer token from path: %s - error: %s", path, err)
raise
def _histogram_convert_values(self, metric_name, converter):
def _convert(metric, scraper_config=None):
for index, sample in enumerate(metric.samples):
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
continue
if sample[self.SAMPLE_NAME].endswith("_sum"):
lst = list(sample)
lst[self.SAMPLE_VALUE] = converter(val)
metric.samples[index] = tuple(lst)
elif sample[self.SAMPLE_NAME].endswith("_bucket") and "Inf" not in sample[self.SAMPLE_LABELS]["le"]:
sample[self.SAMPLE_LABELS]["le"] = str(converter(float(sample[self.SAMPLE_LABELS]["le"])))
self.submit_openmetric(metric_name, metric, scraper_config)
return _convert
def _histogram_from_microseconds_to_seconds(self, metric_name):
return self._histogram_convert_values(metric_name, lambda v: v / self.MICROS_IN_S)
def _histogram_from_seconds_to_microseconds(self, metric_name):
return self._histogram_convert_values(metric_name, lambda v: v * self.MICROS_IN_S)
def _summary_convert_values(self, metric_name, converter):
def _convert(metric, scraper_config=None):
for index, sample in enumerate(metric.samples):
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
continue
if sample[self.SAMPLE_NAME].endswith("_count"):
continue
else:
lst = list(sample)
lst[self.SAMPLE_VALUE] = converter(val)
metric.samples[index] = tuple(lst)
self.submit_openmetric(metric_name, metric, scraper_config)
return _convert
def _summary_from_microseconds_to_seconds(self, metric_name):
return self._summary_convert_values(metric_name, lambda v: v / self.MICROS_IN_S)
def _summary_from_seconds_to_microseconds(self, metric_name):
return self._summary_convert_values(metric_name, lambda v: v * self.MICROS_IN_S)
|
py | b4089ee281dd54f43a47631e929287d6a15e774a | from __future__ import print_function, absolute_import, division
import math
from functools import reduce
import numpy
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
from .imputils import (lower_builtin, lower_getattr, lower_getattr_generic,
lower_cast, iternext_impl,
impl_ret_borrowed, impl_ret_untracked)
from . import optional
from .. import typing, types, cgutils, utils
@lower_builtin('is not', types.Any, types.Any)
def generic_is_not(context, builder, sig, args):
"""
Implement `x is not y` as `not (x is y)`.
"""
is_impl = context.get_function('is', sig)
return builder.not_(is_impl(builder, args))
#-------------------------------------------------------------------------------
def _int_arith_flags(rettype):
"""
Return the modifier flags for integer arithmetic.
"""
if rettype.signed:
# Ignore the effects of signed overflow. This is important for
# optimization of some indexing operations. For example
# array[i+1] could see `i+1` trigger a signed overflow and
# give a negative number. With Python's indexing, a negative
# index is treated differently: its resolution has a runtime cost.
# Telling LLVM to ignore signed overflows allows it to optimize
# away the check for a negative `i+1` if it knows `i` is positive.
return ['nsw']
else:
return []
def int_add_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
res = builder.add(a, b, flags=_int_arith_flags(sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_sub_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
res = builder.sub(a, b, flags=_int_arith_flags(sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_mul_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
res = builder.mul(a, b, flags=_int_arith_flags(sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_divmod(context, builder, x, y):
"""
Reference Objects/intobject.c
xdivy = x / y;
xmody = (long)(x - (unsigned long)xdivy * y);
/* If the signs of x and y differ, and the remainder is non-0,
* C89 doesn't define whether xdivy is now the floor or the
* ceiling of the infinitely precise quotient. We want the floor,
* and we have it iff the remainder's sign matches y's.
*/
if (xmody && ((y ^ xmody) < 0) /* i.e. and signs differ */) {
xmody += y;
--xdivy;
assert(xmody && ((y ^ xmody) >= 0));
}
*p_xdivy = xdivy;
*p_xmody = xmody;
"""
assert x.type == y.type
xdivy = builder.sdiv(x, y)
xmody = builder.srem(x, y) # Intel has divmod instruction
ZERO = Constant.null(y.type)
ONE = Constant.int(y.type, 1)
y_xor_xmody_ltz = builder.icmp(lc.ICMP_SLT, builder.xor(y, xmody), ZERO)
xmody_istrue = builder.icmp(lc.ICMP_NE, xmody, ZERO)
cond = builder.and_(xmody_istrue, y_xor_xmody_ltz)
bb1 = builder.basic_block
with builder.if_then(cond):
xmody_plus_y = builder.add(xmody, y)
xdivy_minus_1 = builder.sub(xdivy, ONE)
bb2 = builder.basic_block
resdiv = builder.phi(y.type)
resdiv.add_incoming(xdivy, bb1)
resdiv.add_incoming(xdivy_minus_1, bb2)
resmod = builder.phi(x.type)
resmod.add_incoming(xmody, bb1)
resmod.add_incoming(xmody_plus_y, bb2)
return resdiv, resmod
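# Worked example (added comment): for x = -7 and y = 2, LLVM's sdiv/srem give -3 and
# -1 (truncation toward zero); the remainder is non-zero and its sign differs from
# y's, so the correction above yields -4 and 1, matching Python's -7 // 2 and -7 % 2.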
@lower_builtin('/?', types.Integer, types.Integer)
@lower_builtin('//', types.Integer, types.Integer)
def int_floordiv_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
res = cgutils.alloca_once(builder, a.type)
with builder.if_else(cgutils.is_scalar_zero(builder, b), likely=False
) as (if_zero, if_non_zero):
with if_zero:
if not context.error_model.fp_zero_division(
builder, ("integer division by zero",)):
# No exception raised => return 0
# XXX We should also set the FPU exception status, but
# there's no easy way to do that from LLVM.
builder.store(b, res)
with if_non_zero:
if sig.return_type.signed:
quot, _ = int_divmod(context, builder, a, b)
else:
quot = builder.udiv(a, b)
builder.store(quot, res)
return impl_ret_untracked(context, builder, sig.return_type,
builder.load(res))
@lower_builtin('/', types.Integer, types.Integer)
def int_truediv_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
with cgutils.if_zero(builder, b):
context.error_model.fp_zero_division(builder, ("division by zero",))
res = builder.fdiv(a, b)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin('%', types.Integer, types.Integer)
def int_rem_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
res = cgutils.alloca_once(builder, a.type)
with builder.if_else(cgutils.is_scalar_zero(builder, b), likely=False
) as (if_zero, if_non_zero):
with if_zero:
if not context.error_model.fp_zero_division(
builder, ("modulo by zero",)):
# No exception raised => return 0
# XXX We should also set the FPU exception status, but
# there's no easy way to do that from LLVM.
builder.store(b, res)
with if_non_zero:
if sig.return_type.signed:
_, rem = int_divmod(context, builder, a, b)
else:
rem = builder.urem(a, b)
builder.store(rem, res)
return impl_ret_untracked(context, builder, sig.return_type,
builder.load(res))
def int_power_impl(context, builder, sig, args):
"""
a ^ b, where a is an integer or real, and b an integer
"""
is_integer = isinstance(sig.args[0], types.Integer)
tp = sig.return_type
zerodiv_return = False
if is_integer and not context.error_model.raise_on_fp_zero_division:
# If not raising, return 0x8000... when computing 0 ** <negative number>
zerodiv_return = -1 << (tp.bitwidth - 1)
def int_power(a, b):
# Ensure computations are done with a large enough width
r = tp(1)
a = tp(a)
if b < 0:
invert = True
exp = -b
if exp < 0:
raise OverflowError
if is_integer:
if a == 0:
if zerodiv_return:
return zerodiv_return
else:
raise ZeroDivisionError("0 cannot be raised to a negative power")
if a != 1 and a != -1:
return 0
else:
invert = False
exp = b
if exp > 0x10000:
# Optimization cutoff: fallback on the generic algorithm
return math.pow(a, float(b))
while exp != 0:
if exp & 1:
r *= a
exp >>= 1
a *= a
return 1.0 / r if invert else r
res = context.compile_internal(builder, int_power, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
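# Worked example (added comment): int_power(3, 5) runs the square-and-multiply loop
# with exp = 5 (0b101): r picks up a at bit 0 (r = 3), a is squared to 9 and then 81,
# and r picks up a again at bit 2, giving r = 3 * 81 = 243 = 3 ** 5.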
def int_slt_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_SLT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_sle_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_SLE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_sgt_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_SGT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_sge_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_SGE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_ult_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_ULT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_ule_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_ULE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_ugt_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_UGT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_uge_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_UGE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_eq_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_EQ, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_ne_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_NE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_abs_impl(context, builder, sig, args):
[x] = args
ZERO = Constant.null(x.type)
ltz = builder.icmp(lc.ICMP_SLT, x, ZERO)
negated = builder.neg(x)
res = builder.select(ltz, negated, x)
return impl_ret_untracked(context, builder, sig.return_type, res)
def uint_abs_impl(context, builder, sig, args):
[x] = args
return impl_ret_untracked(context, builder, sig.return_type, x)
def int_shl_impl(context, builder, sig, args):
[valty, amtty] = sig.args
[val, amt] = args
val = context.cast(builder, val, valty, sig.return_type)
amt = context.cast(builder, amt, amtty, sig.return_type)
res = builder.shl(val, amt)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_shr_impl(context, builder, sig, args):
[valty, amtty] = sig.args
[val, amt] = args
val = context.cast(builder, val, valty, sig.return_type)
amt = context.cast(builder, amt, amtty, sig.return_type)
if sig.return_type.signed:
res = builder.ashr(val, amt)
else:
res = builder.lshr(val, amt)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_and_impl(context, builder, sig, args):
[at, bt] = sig.args
[av, bv] = args
cav = context.cast(builder, av, at, sig.return_type)
cbc = context.cast(builder, bv, bt, sig.return_type)
res = builder.and_(cav, cbc)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_or_impl(context, builder, sig, args):
[at, bt] = sig.args
[av, bv] = args
cav = context.cast(builder, av, at, sig.return_type)
cbc = context.cast(builder, bv, bt, sig.return_type)
res = builder.or_(cav, cbc)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_xor_impl(context, builder, sig, args):
[at, bt] = sig.args
[av, bv] = args
cav = context.cast(builder, av, at, sig.return_type)
cbc = context.cast(builder, bv, bt, sig.return_type)
res = builder.xor(cav, cbc)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_negate_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
# Negate before upcasting, for unsigned numbers
res = builder.neg(val)
res = context.cast(builder, res, typ, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_positive_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
res = context.cast(builder, val, typ, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_invert_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
# Invert before upcasting, for unsigned numbers
res = builder.xor(val, Constant.all_ones(val.type))
res = context.cast(builder, res, typ, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
def bool_invert_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
res = builder.sub(Constant.int(val.type, 1), val)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_sign_impl(context, builder, sig, args):
"""
np.sign(int)
"""
[x] = args
POS = Constant.int(x.type, 1)
NEG = Constant.int(x.type, -1)
ZERO = Constant.int(x.type, 0)
cmp_zero = builder.icmp(lc.ICMP_EQ, x, ZERO)
cmp_pos = builder.icmp(lc.ICMP_SGT, x, ZERO)
presult = cgutils.alloca_once(builder, x.type)
bb_zero = builder.append_basic_block(".zero")
bb_postest = builder.append_basic_block(".postest")
bb_pos = builder.append_basic_block(".pos")
bb_neg = builder.append_basic_block(".neg")
bb_exit = builder.append_basic_block(".exit")
builder.cbranch(cmp_zero, bb_zero, bb_postest)
with builder.goto_block(bb_zero):
builder.store(ZERO, presult)
builder.branch(bb_exit)
with builder.goto_block(bb_postest):
builder.cbranch(cmp_pos, bb_pos, bb_neg)
with builder.goto_block(bb_pos):
builder.store(POS, presult)
builder.branch(bb_exit)
with builder.goto_block(bb_neg):
builder.store(NEG, presult)
builder.branch(bb_exit)
builder.position_at_end(bb_exit)
res = builder.load(presult)
return impl_ret_untracked(context, builder, sig.return_type, res)
lower_builtin('==', types.boolean, types.boolean)(int_eq_impl)
lower_builtin('!=', types.boolean, types.boolean)(int_ne_impl)
lower_builtin('<', types.boolean, types.boolean)(int_ult_impl)
lower_builtin('<=', types.boolean, types.boolean)(int_ule_impl)
lower_builtin('>', types.boolean, types.boolean)(int_ugt_impl)
lower_builtin('>=', types.boolean, types.boolean)(int_uge_impl)
lower_builtin('~', types.boolean)(bool_invert_impl)
def _implement_integer_operators():
ty = types.Integer
lower_builtin('+', ty, ty)(int_add_impl)
lower_builtin('-', ty, ty)(int_sub_impl)
lower_builtin('*', ty, ty)(int_mul_impl)
lower_builtin('==', ty, ty)(int_eq_impl)
lower_builtin('!=', ty, ty)(int_ne_impl)
lower_builtin('<<', ty, ty)(int_shl_impl)
lower_builtin('>>', ty, ty)(int_shr_impl)
lower_builtin('&', ty, ty)(int_and_impl)
lower_builtin('|', ty, ty)(int_or_impl)
lower_builtin('^', ty, ty)(int_xor_impl)
lower_builtin('-', ty)(int_negate_impl)
lower_builtin('+', ty)(int_positive_impl)
lower_builtin('~', ty)(int_invert_impl)
lower_builtin('**', ty, ty)(int_power_impl)
lower_builtin(pow, ty, ty)(int_power_impl)
for ty in types.unsigned_domain:
lower_builtin('<', ty, ty)(int_ult_impl)
lower_builtin('<=', ty, ty)(int_ule_impl)
lower_builtin('>', ty, ty)(int_ugt_impl)
lower_builtin('>=', ty, ty)(int_uge_impl)
lower_builtin('**', types.float64, ty)(int_power_impl)
lower_builtin(pow, types.float64, ty)(int_power_impl)
lower_builtin(abs, ty)(uint_abs_impl)
for ty in types.signed_domain:
lower_builtin('<', ty, ty)(int_slt_impl)
lower_builtin('<=', ty, ty)(int_sle_impl)
lower_builtin('>', ty, ty)(int_sgt_impl)
lower_builtin('>=', ty, ty)(int_sge_impl)
lower_builtin('**', types.float64, ty)(int_power_impl)
lower_builtin(pow, types.float64, ty)(int_power_impl)
lower_builtin(abs, ty)(int_abs_impl)
_implement_integer_operators()
def optional_is_none(context, builder, sig, args):
"""Check if an Optional value is invalid
"""
[lty, rty] = sig.args
[lval, rval] = args
# Make sure None is on the right
if lty == types.none:
lty, rty = rty, lty
lval, rval = rval, lval
opt_type = lty
opt_val = lval
del lty, rty, lval, rval
opt = context.make_optional(opt_type)(context, builder, opt_val)
res = builder.not_(cgutils.as_bool_bit(builder, opt.valid))
return impl_ret_untracked(context, builder, sig.return_type, res)
def optional_is_not_none(context, builder, sig, args):
"""Check if an Optional value is valid
"""
res = builder.not_(optional_is_none(context, builder, sig, args))
return impl_ret_untracked(context, builder, sig.return_type, res)
# None is/not None
lower_builtin('is', types.none, types.none)(optional.always_return_true_impl)
# Optional is None
lower_builtin('is', types.Optional, types.none)(optional_is_none)
lower_builtin('is', types.none, types.Optional)(optional_is_none)
@lower_getattr_generic(types.Optional)
def optional_getattr(context, builder, typ, value, attr):
"""
Optional.__getattr__ => redirect to the wrapped type.
"""
inner_type = typ.type
val = context.cast(builder, value, typ, inner_type)
imp = context.get_getattr(inner_type, attr)
return imp(context, builder, inner_type, val, attr)
@lower_getattr_generic(types.DeferredType)
def deferred_getattr(context, builder, typ, value, attr):
"""
Deferred.__getattr__ => redirect to the actual type.
"""
inner_type = typ.get()
val = context.cast(builder, value, typ, inner_type)
imp = context.get_getattr(inner_type, attr)
return imp(context, builder, inner_type, val, attr)
@lower_cast(types.Any, types.DeferredType)
def any_to_deferred(context, builder, fromty, toty, val):
actual = context.cast(builder, val, fromty, toty.get())
model = context.data_model_manager[toty]
return model.set(builder, model.make_uninitialized(), actual)
@lower_cast(types.DeferredType, types.Any)
def deferred_to_any(context, builder, fromty, toty, val):
model = context.data_model_manager[fromty]
val = model.get(builder, val)
return context.cast(builder, val, fromty.get(), toty)
def real_add_impl(context, builder, sig, args):
res = builder.fadd(*args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_sub_impl(context, builder, sig, args):
res = builder.fsub(*args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_mul_impl(context, builder, sig, args):
res = builder.fmul(*args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_div_impl(context, builder, sig, args):
with cgutils.if_zero(builder, args[1]):
context.error_model.fp_zero_division(builder, ("division by zero",))
res = builder.fdiv(*args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_divmod(context, builder, x, y):
assert x.type == y.type
floatty = x.type
module = builder.module
fname = ".numba.python.rem.%s" % x.type
fnty = Type.function(floatty, (floatty, floatty, Type.pointer(floatty)))
fn = module.get_or_insert_function(fnty, fname)
if fn.is_declaration:
fn.linkage = lc.LINKAGE_LINKONCE_ODR
fnbuilder = lc.Builder.new(fn.append_basic_block('entry'))
fx, fy, pmod = fn.args
div, mod = real_divmod_func_body(context, fnbuilder, fx, fy)
fnbuilder.store(mod, pmod)
fnbuilder.ret(div)
pmod = cgutils.alloca_once(builder, floatty)
quotient = builder.call(fn, (x, y, pmod))
return quotient, builder.load(pmod)
def real_divmod_func_body(context, builder, vx, wx):
# Reference Objects/floatobject.c
#
# float_divmod(PyObject *v, PyObject *w)
# {
# double vx, wx;
# double div, mod, floordiv;
# CONVERT_TO_DOUBLE(v, vx);
# CONVERT_TO_DOUBLE(w, wx);
# mod = fmod(vx, wx);
# /* fmod is typically exact, so vx-mod is *mathematically* an
# exact multiple of wx. But this is fp arithmetic, and fp
# vx - mod is an approximation; the result is that div may
# not be an exact integral value after the division, although
# it will always be very close to one.
# */
# div = (vx - mod) / wx;
# if (mod) {
# /* ensure the remainder has the same sign as the denominator */
# if ((wx < 0) != (mod < 0)) {
# mod += wx;
# div -= 1.0;
# }
# }
# else {
# /* the remainder is zero, and in the presence of signed zeroes
# fmod returns different results across platforms; ensure
# it has the same sign as the denominator; we'd like to do
# "mod = wx * 0.0", but that may get optimized away */
# mod *= mod; /* hide "mod = +0" from optimizer */
# if (wx < 0.0)
# mod = -mod;
# }
# /* snap quotient to nearest integral value */
# if (div) {
# floordiv = floor(div);
# if (div - floordiv > 0.5)
# floordiv += 1.0;
# }
# else {
# /* div is zero - get the same sign as the true quotient */
# div *= div; /* hide "div = +0" from optimizers */
# floordiv = div * vx / wx; /* zero w/ sign of vx/wx */
# }
# return Py_BuildValue("(dd)", floordiv, mod);
# }
pmod = cgutils.alloca_once(builder, vx.type)
pdiv = cgutils.alloca_once(builder, vx.type)
pfloordiv = cgutils.alloca_once(builder, vx.type)
mod = builder.frem(vx, wx)
div = builder.fdiv(builder.fsub(vx, mod), wx)
builder.store(mod, pmod)
builder.store(div, pdiv)
ZERO = Constant.real(vx.type, 0)
ONE = Constant.real(vx.type, 1)
mod_istrue = builder.fcmp(lc.FCMP_ONE, mod, ZERO)
wx_ltz = builder.fcmp(lc.FCMP_OLT, wx, ZERO)
mod_ltz = builder.fcmp(lc.FCMP_OLT, mod, ZERO)
with builder.if_then(mod_istrue):
wx_ltz_ne_mod_ltz = builder.icmp(lc.ICMP_NE, wx_ltz, mod_ltz)
with builder.if_then(wx_ltz_ne_mod_ltz):
mod = builder.fadd(mod, wx)
div = builder.fsub(div, ONE)
builder.store(mod, pmod)
builder.store(div, pdiv)
del mod
del div
with cgutils.ifnot(builder, mod_istrue):
mod = builder.load(pmod)
mod = builder.fmul(mod, mod)
builder.store(mod, pmod)
del mod
with builder.if_then(wx_ltz):
mod = builder.load(pmod)
mod = builder.fsub(ZERO, mod)
builder.store(mod, pmod)
del mod
div = builder.load(pdiv)
div_istrue = builder.fcmp(lc.FCMP_ONE, div, ZERO)
with builder.if_then(div_istrue):
module = builder.module
floorfn = lc.Function.intrinsic(module, lc.INTR_FLOOR, [wx.type])
floordiv = builder.call(floorfn, [div])
floordivdiff = builder.fsub(div, floordiv)
floordivincr = builder.fadd(floordiv, ONE)
HALF = Constant.real(wx.type, 0.5)
pred = builder.fcmp(lc.FCMP_OGT, floordivdiff, HALF)
floordiv = builder.select(pred, floordivincr, floordiv)
builder.store(floordiv, pfloordiv)
with cgutils.ifnot(builder, div_istrue):
div = builder.fmul(div, div)
builder.store(div, pdiv)
floordiv = builder.fdiv(builder.fmul(div, vx), wx)
builder.store(floordiv, pfloordiv)
return builder.load(pfloordiv), builder.load(pmod)
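# Worked example (added comment): for vx = -7.0 and wx = 2.0, frem gives mod = -1.0
# and div = -3.0; mod is non-zero and its sign differs from wx, so mod becomes 1.0
# and div becomes -4.0, and the snap-to-integral step keeps floordiv = -4.0, matching
# Python's divmod(-7.0, 2.0) == (-4.0, 1.0).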
def real_mod_impl(context, builder, sig, args):
x, y = args
res = cgutils.alloca_once(builder, x.type)
with builder.if_else(cgutils.is_scalar_zero(builder, y), likely=False
) as (if_zero, if_non_zero):
with if_zero:
if not context.error_model.fp_zero_division(
builder, ("modulo by zero",)):
# No exception raised => compute the nan result,
# and set the FP exception word for Numpy warnings.
rem = builder.frem(x, y)
builder.store(rem, res)
with if_non_zero:
_, rem = real_divmod(context, builder, x, y)
builder.store(rem, res)
return impl_ret_untracked(context, builder, sig.return_type,
builder.load(res))
def real_floordiv_impl(context, builder, sig, args):
x, y = args
res = cgutils.alloca_once(builder, x.type)
with builder.if_else(cgutils.is_scalar_zero(builder, y), likely=False
) as (if_zero, if_non_zero):
with if_zero:
if not context.error_model.fp_zero_division(
builder, ("division by zero",)):
# No exception raised => compute the +/-inf or nan result,
# and set the FP exception word for Numpy warnings.
quot = builder.fdiv(x, y)
builder.store(quot, res)
with if_non_zero:
quot, _ = real_divmod(context, builder, x, y)
builder.store(quot, res)
return impl_ret_untracked(context, builder, sig.return_type,
builder.load(res))
def real_power_impl(context, builder, sig, args):
x, y = args
module = builder.module
if context.implement_powi_as_math_call:
imp = context.get_function(math.pow, sig)
res = imp(builder, args)
else:
fn = lc.Function.intrinsic(module, lc.INTR_POW, [y.type])
res = builder.call(fn, (x, y))
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_lt_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_OLT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_le_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_OLE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_gt_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_OGT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_ge_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_OGE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_eq_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_OEQ, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_ne_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_UNE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_abs_impl(context, builder, sig, args):
[ty] = sig.args
sig = typing.signature(ty, ty)
impl = context.get_function(math.fabs, sig)
return impl(builder, args)
def real_negate_impl(context, builder, sig, args):
from . import mathimpl
res = mathimpl.negate_real(builder, args[0])
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_positive_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
res = context.cast(builder, val, typ, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_sign_impl(context, builder, sig, args):
"""
np.sign(float)
"""
[x] = args
POS = Constant.real(x.type, 1)
NEG = Constant.real(x.type, -1)
ZERO = Constant.real(x.type, 0)
presult = cgutils.alloca_once(builder, x.type)
is_pos = builder.fcmp(lc.FCMP_OGT, x, ZERO)
is_neg = builder.fcmp(lc.FCMP_OLT, x, ZERO)
with builder.if_else(is_pos) as (gt_zero, not_gt_zero):
with gt_zero:
builder.store(POS, presult)
with not_gt_zero:
with builder.if_else(is_neg) as (lt_zero, not_lt_zero):
with lt_zero:
builder.store(NEG, presult)
with not_lt_zero:
# For both NaN and 0, the result of sign() is simply
# the input value.
builder.store(x, presult)
res = builder.load(presult)
return impl_ret_untracked(context, builder, sig.return_type, res)
ty = types.Float
lower_builtin('+', ty, ty)(real_add_impl)
lower_builtin('-', ty, ty)(real_sub_impl)
lower_builtin('*', ty, ty)(real_mul_impl)
lower_builtin('/?', ty, ty)(real_div_impl)
lower_builtin('//', ty, ty)(real_floordiv_impl)
lower_builtin('/', ty, ty)(real_div_impl)
lower_builtin('%', ty, ty)(real_mod_impl)
lower_builtin('**', ty, ty)(real_power_impl)
lower_builtin(pow, ty, ty)(real_power_impl)
lower_builtin('==', ty, ty)(real_eq_impl)
lower_builtin('!=', ty, ty)(real_ne_impl)
lower_builtin('<', ty, ty)(real_lt_impl)
lower_builtin('<=', ty, ty)(real_le_impl)
lower_builtin('>', ty, ty)(real_gt_impl)
lower_builtin('>=', ty, ty)(real_ge_impl)
lower_builtin(abs, ty)(real_abs_impl)
lower_builtin('-', ty)(real_negate_impl)
lower_builtin('+', ty)(real_positive_impl)
del ty
class Complex64(cgutils.Structure):
_fields = [('real', types.float32),
('imag', types.float32)]
class Complex128(cgutils.Structure):
_fields = [('real', types.float64),
('imag', types.float64)]
def get_complex_info(ty):
if ty == types.complex64:
cmplxcls = Complex64
elif ty == types.complex128:
cmplxcls = Complex128
else:
raise TypeError(ty)
return cmplxcls, ty.underlying_float
@lower_getattr(types.Complex, "real")
def complex_real_impl(context, builder, typ, value):
cplx_cls = context.make_complex(typ)
cplx = cplx_cls(context, builder, value=value)
res = cplx.real
return impl_ret_untracked(context, builder, typ, res)
@lower_getattr(types.Complex, "imag")
def complex_imag_impl(context, builder, typ, value):
cplx_cls = context.make_complex(typ)
cplx = cplx_cls(context, builder, value=value)
res = cplx.imag
return impl_ret_untracked(context, builder, typ, res)
@lower_builtin("complex.conjugate", types.Complex)
def complex_conjugate_impl(context, builder, sig, args):
from . import mathimpl
cplx_cls = context.make_complex(sig.args[0])
z = cplx_cls(context, builder, args[0])
z.imag = mathimpl.negate_real(builder, z.imag)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_real_impl(context, builder, typ, value):
return impl_ret_untracked(context, builder, typ, value)
def real_imag_impl(context, builder, typ, value):
res = cgutils.get_null_value(value.type)
return impl_ret_untracked(context, builder, typ, res)
def real_conjugate_impl(context, builder, sig, args):
return impl_ret_untracked(context, builder, sig.return_type, args[0])
for cls in (types.Float, types.Integer):
lower_getattr(cls, "real")(real_real_impl)
lower_getattr(cls, "imag")(real_imag_impl)
lower_builtin("complex.conjugate", cls)(real_conjugate_impl)
@lower_builtin("**", types.complex128, types.complex128)
@lower_builtin(pow, types.complex128, types.complex128)
def complex128_power_impl(context, builder, sig, args):
[ca, cb] = args
a = Complex128(context, builder, value=ca)
b = Complex128(context, builder, value=cb)
c = Complex128(context, builder)
module = builder.module
pa = a._getpointer()
pb = b._getpointer()
pc = c._getpointer()
    # Optimize for square because cpow loses a lot of precision
TWO = context.get_constant(types.float64, 2)
ZERO = context.get_constant(types.float64, 0)
b_real_is_two = builder.fcmp(lc.FCMP_OEQ, b.real, TWO)
b_imag_is_zero = builder.fcmp(lc.FCMP_OEQ, b.imag, ZERO)
b_is_two = builder.and_(b_real_is_two, b_imag_is_zero)
with builder.if_else(b_is_two) as (then, otherwise):
with then:
# Lower as multiplication
res = complex_mul_impl(context, builder, sig, (ca, ca))
cres = Complex128(context, builder, value=res)
c.real = cres.real
c.imag = cres.imag
with otherwise:
# Lower with call to external function
fnty = Type.function(Type.void(), [pa.type] * 3)
cpow = module.get_or_insert_function(fnty, name="numba.math.cpow")
builder.call(cpow, (pa, pb, pc))
res = builder.load(pc)
return impl_ret_untracked(context, builder, sig.return_type, res)
def complex_add_impl(context, builder, sig, args):
[cx, cy] = args
complexClass = context.make_complex(sig.args[0])
x = complexClass(context, builder, value=cx)
y = complexClass(context, builder, value=cy)
z = complexClass(context, builder)
a = x.real
b = x.imag
c = y.real
d = y.imag
z.real = builder.fadd(a, c)
z.imag = builder.fadd(b, d)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
def complex_sub_impl(context, builder, sig, args):
[cx, cy] = args
complexClass = context.make_complex(sig.args[0])
x = complexClass(context, builder, value=cx)
y = complexClass(context, builder, value=cy)
z = complexClass(context, builder)
a = x.real
b = x.imag
c = y.real
d = y.imag
z.real = builder.fsub(a, c)
z.imag = builder.fsub(b, d)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
def complex_mul_impl(context, builder, sig, args):
"""
(a+bi)(c+di)=(ac-bd)+i(ad+bc)
"""
[cx, cy] = args
complexClass = context.make_complex(sig.args[0])
x = complexClass(context, builder, value=cx)
y = complexClass(context, builder, value=cy)
z = complexClass(context, builder)
a = x.real
b = x.imag
c = y.real
d = y.imag
ac = builder.fmul(a, c)
bd = builder.fmul(b, d)
ad = builder.fmul(a, d)
bc = builder.fmul(b, c)
z.real = builder.fsub(ac, bd)
z.imag = builder.fadd(ad, bc)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
NAN = float('nan')
def complex_div_impl(context, builder, sig, args):
def complex_div(a, b):
# This is CPython's algorithm (in _Py_c_quot()).
areal = a.real
aimag = a.imag
breal = b.real
bimag = b.imag
if not breal and not bimag:
raise ZeroDivisionError("complex division by zero")
if abs(breal) >= abs(bimag):
# Divide tops and bottom by b.real
if not breal:
return complex(NAN, NAN)
ratio = bimag / breal
denom = breal + bimag * ratio
return complex(
(areal + aimag * ratio) / denom,
(aimag - areal * ratio) / denom)
else:
# Divide tops and bottom by b.imag
if not bimag:
return complex(NAN, NAN)
ratio = breal / bimag
denom = breal * ratio + bimag
return complex(
(a.real * ratio + a.imag) / denom,
(a.imag * ratio - a.real) / denom)
res = context.compile_internal(builder, complex_div, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
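# Worked example (added comment): (1+2j) / (3+4j) takes the |b.imag| > |b.real| branch
# with ratio = 3/4 = 0.75 and denom = 3 * 0.75 + 4 = 6.25, giving
# (1 * 0.75 + 2) / 6.25 + ((2 * 0.75 - 1) / 6.25)j = 0.44 + 0.08j; scaling by the
# ratio avoids the overflow a naive b.real**2 + b.imag**2 denominator could hit.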
def complex_negate_impl(context, builder, sig, args):
from . import mathimpl
[typ] = sig.args
[val] = args
cmplxcls = context.make_complex(typ)
cmplx = cmplxcls(context, builder, value=val)
res = cmplxcls(context, builder)
res.real = mathimpl.negate_real(builder, cmplx.real)
res.imag = mathimpl.negate_real(builder, cmplx.imag)
res = res._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
def complex_positive_impl(context, builder, sig, args):
[val] = args
return impl_ret_untracked(context, builder, sig.return_type, val)
def complex_eq_impl(context, builder, sig, args):
[cx, cy] = args
complexClass = context.make_complex(sig.args[0])
x = complexClass(context, builder, value=cx)
y = complexClass(context, builder, value=cy)
reals_are_eq = builder.fcmp(lc.FCMP_OEQ, x.real, y.real)
imags_are_eq = builder.fcmp(lc.FCMP_OEQ, x.imag, y.imag)
res = builder.and_(reals_are_eq, imags_are_eq)
return impl_ret_untracked(context, builder, sig.return_type, res)
def complex_ne_impl(context, builder, sig, args):
[cx, cy] = args
complexClass = context.make_complex(sig.args[0])
x = complexClass(context, builder, value=cx)
y = complexClass(context, builder, value=cy)
reals_are_ne = builder.fcmp(lc.FCMP_UNE, x.real, y.real)
imags_are_ne = builder.fcmp(lc.FCMP_UNE, x.imag, y.imag)
res = builder.or_(reals_are_ne, imags_are_ne)
return impl_ret_untracked(context, builder, sig.return_type, res)
def complex_abs_impl(context, builder, sig, args):
"""
abs(z) := hypot(z.real, z.imag)
"""
def complex_abs(z):
return math.hypot(z.real, z.imag)
res = context.compile_internal(builder, complex_abs, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
ty = types.Complex
lower_builtin("+", ty, ty)(complex_add_impl)
lower_builtin("-", ty, ty)(complex_sub_impl)
lower_builtin("*", ty, ty)(complex_mul_impl)
lower_builtin("/?", ty, ty)(complex_div_impl)
lower_builtin("/", ty, ty)(complex_div_impl)
lower_builtin("-", ty)(complex_negate_impl)
lower_builtin("+", ty)(complex_positive_impl)
# Complex modulo was removed in Python 3
lower_builtin('==', ty, ty)(complex_eq_impl)
lower_builtin('!=', ty, ty)(complex_ne_impl)
lower_builtin(abs, ty)(complex_abs_impl)
del ty
#------------------------------------------------------------------------------
def number_not_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
istrue = context.cast(builder, val, typ, sig.return_type)
res = builder.not_(istrue)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(bool, types.boolean)
def bool_as_bool(context, builder, sig, args):
[val] = args
return val
@lower_builtin(bool, types.Integer)
def int_as_bool(context, builder, sig, args):
[val] = args
return builder.icmp_unsigned('!=', val, ir.Constant(val.type, 0))
@lower_builtin(bool, types.Float)
def float_as_bool(context, builder, sig, args):
[val] = args
return builder.fcmp(lc.FCMP_UNE, val, ir.Constant(val.type, 0.0))
@lower_builtin(bool, types.Complex)
def complex_as_bool(context, builder, sig, args):
[typ] = sig.args
[val] = args
cmplx = context.make_complex(typ)(context, builder, val)
real, imag = cmplx.real, cmplx.imag
zero = ir.Constant(real.type, 0.0)
real_istrue = builder.fcmp(lc.FCMP_UNE, real, zero)
imag_istrue = builder.fcmp(lc.FCMP_UNE, imag, zero)
return builder.or_(real_istrue, imag_istrue)
for ty in (types.Integer, types.Float, types.Complex):
lower_builtin('not', ty)(number_not_impl)
lower_builtin('not', types.boolean)(number_not_impl)
#------------------------------------------------------------------------------
def make_pair(first_type, second_type):
return cgutils.create_struct_proxy(types.Pair(first_type, second_type))
@lower_builtin('getitem', types.CPointer, types.Integer)
def getitem_cpointer(context, builder, sig, args):
base_ptr, idx = args
elem_ptr = builder.gep(base_ptr, [idx])
res = builder.load(elem_ptr)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin('setitem', types.CPointer, types.Integer,
types.Any)
def setitem_cpointer(context, builder, sig, args):
base_ptr, idx, val = args
elem_ptr = builder.gep(base_ptr, [idx])
builder.store(val, elem_ptr)
#-------------------------------------------------------------------------------
@lower_builtin(max, types.VarArg(types.Any))
def max_impl(context, builder, sig, args):
argtys = sig.args
for a in argtys:
if a not in types.number_domain:
raise AssertionError("only implemented for numeric types")
def domax(a, b):
at, av = a
bt, bv = b
ty = context.typing_context.unify_types(at, bt)
cav = context.cast(builder, av, at, ty)
cbv = context.cast(builder, bv, bt, ty)
cmpsig = typing.signature(types.boolean, ty, ty)
ge = context.get_function(">=", cmpsig)
pred = ge(builder, (cav, cbv))
res = builder.select(pred, cav, cbv)
return ty, res
typvals = zip(argtys, args)
resty, resval = reduce(domax, typvals)
return impl_ret_borrowed(context, builder, sig.return_type, resval)
@lower_builtin(min, types.VarArg(types.Any))
def min_impl(context, builder, sig, args):
argtys = sig.args
for a in argtys:
if a not in types.number_domain:
raise AssertionError("only implemented for numeric types")
def domax(a, b):
at, av = a
bt, bv = b
ty = context.typing_context.unify_types(at, bt)
cav = context.cast(builder, av, at, ty)
cbv = context.cast(builder, bv, bt, ty)
cmpsig = typing.signature(types.boolean, ty, ty)
le = context.get_function("<=", cmpsig)
pred = le(builder, (cav, cbv))
res = builder.select(pred, cav, cbv)
return ty, res
typvals = zip(argtys, args)
resty, resval = reduce(domax, typvals)
return impl_ret_borrowed(context, builder, sig.return_type, resval)
def _round_intrinsic(tp):
# round() rounds half to even on Python 3, away from zero on Python 2.
if utils.IS_PY3:
return "llvm.rint.f%d" % (tp.bitwidth,)
else:
return "llvm.round.f%d" % (tp.bitwidth,)
@lower_builtin(round, types.Float)
def round_impl_unary(context, builder, sig, args):
fltty = sig.args[0]
llty = context.get_value_type(fltty)
module = builder.module
fnty = Type.function(llty, [llty])
fn = module.get_or_insert_function(fnty, name=_round_intrinsic(fltty))
res = builder.call(fn, args)
if utils.IS_PY3:
# unary round() returns an int on Python 3
res = builder.fptosi(res, context.get_value_type(sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(round, types.Float, types.Integer)
def round_impl_binary(context, builder, sig, args):
fltty = sig.args[0]
# Allow calling the intrinsic from the Python implementation below.
# This avoids the conversion to an int in Python 3's unary round().
_round = types.ExternalFunction(
_round_intrinsic(fltty), typing.signature(fltty, fltty))
def round_ndigits(x, ndigits):
if math.isinf(x) or math.isnan(x):
return x
if ndigits >= 0:
if ndigits > 22:
# pow1 and pow2 are each safe from overflow, but
# pow1*pow2 ~= pow(10.0, ndigits) might overflow.
pow1 = 10.0 ** (ndigits - 22)
pow2 = 1e22
else:
pow1 = 10.0 ** ndigits
pow2 = 1.0
y = (x * pow1) * pow2
if math.isinf(y):
return x
return (_round(y) / pow2) / pow1
else:
pow1 = 10.0 ** (-ndigits)
y = x / pow1
return _round(y) * pow1
res = context.compile_internal(builder, round_ndigits, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
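# Worked example (added comment): round(2.675, 2) scales by pow1 = 100.0 and
# pow2 = 1.0, rounds the scaled value with the intrinsic and divides back; because
# the closest double to 2.675 is slightly below it, the result is 2.67 rather than
# 2.68, which is also what CPython returns.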
#-------------------------------------------------------------------------------
# Numeric constructors
@lower_builtin(int, types.Any)
def int_impl(context, builder, sig, args):
[ty] = sig.args
[val] = args
res = context.cast(builder, val, ty, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(float, types.Any)
def float_impl(context, builder, sig, args):
[ty] = sig.args
[val] = args
res = context.cast(builder, val, ty, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(complex, types.VarArg(types.Any))
def complex_impl(context, builder, sig, args):
complex_type = sig.return_type
float_type = complex_type.underlying_float
complex_cls = context.make_complex(complex_type)
if len(sig.args) == 1:
[argty] = sig.args
[arg] = args
if isinstance(argty, types.Complex):
# Cast Complex* to Complex*
res = context.cast(builder, arg, argty, complex_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
else:
real = context.cast(builder, arg, argty, float_type)
imag = context.get_constant(float_type, 0)
elif len(sig.args) == 2:
[realty, imagty] = sig.args
[real, imag] = args
real = context.cast(builder, real, realty, float_type)
imag = context.cast(builder, imag, imagty, float_type)
cmplx = complex_cls(context, builder)
cmplx.real = real
cmplx.imag = imag
res = cmplx._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(types.NumberClass, types.Any)
def number_constructor(context, builder, sig, args):
"""
Call a number class, e.g. np.int32(...)
"""
if isinstance(sig.return_type, types.Array):
# Array constructor
impl = context.get_function(numpy.array, sig)
return impl(builder, args)
else:
# Scalar constructor
[val] = args
[valty] = sig.args
return context.cast(builder, val, valty, sig.return_type)
#-------------------------------------------------------------------------------
# Implicit casts between numerics
@lower_cast(types.Integer, types.Integer)
def integer_to_integer(context, builder, fromty, toty, val):
if toty.bitwidth == fromty.bitwidth:
# Just a change of signedness
return val
elif toty.bitwidth < fromty.bitwidth:
# Downcast
return builder.trunc(val, context.get_value_type(toty))
elif fromty.signed:
# Signed upcast
return builder.sext(val, context.get_value_type(toty))
else:
# Unsigned upcast
return builder.zext(val, context.get_value_type(toty))
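# Worked example (added comment): widening int8(-1) uses sext and stays -1 as an
# int32, widening uint8(255) uses zext and stays 255, and narrowing int32(300) to
# int8 truncates the bit pattern to 44 (300 mod 256).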
@lower_cast(types.Integer, types.voidptr)
def integer_to_voidptr(context, builder, fromty, toty, val):
return builder.inttoptr(val, context.get_value_type(toty))
@lower_cast(types.Float, types.Float)
def float_to_float(context, builder, fromty, toty, val):
lty = context.get_value_type(toty)
if fromty.bitwidth < toty.bitwidth:
return builder.fpext(val, lty)
else:
return builder.fptrunc(val, lty)
@lower_cast(types.Integer, types.Float)
def integer_to_float(context, builder, fromty, toty, val):
lty = context.get_value_type(toty)
if fromty.signed:
return builder.sitofp(val, lty)
else:
return builder.uitofp(val, lty)
@lower_cast(types.Float, types.Integer)
def float_to_integer(context, builder, fromty, toty, val):
lty = context.get_value_type(toty)
if toty.signed:
return builder.fptosi(val, lty)
else:
return builder.fptoui(val, lty)
@lower_cast(types.Float, types.Complex)
@lower_cast(types.Integer, types.Complex)
def non_complex_to_complex(context, builder, fromty, toty, val):
real = context.cast(builder, val, fromty, toty.underlying_float)
imag = context.get_constant(toty.underlying_float, 0)
cmplx = context.make_complex(toty)(context, builder)
cmplx.real = real
cmplx.imag = imag
return cmplx._getvalue()
@lower_cast(types.Complex, types.Complex)
def complex_to_complex(context, builder, fromty, toty, val):
srccls, srcty = get_complex_info(fromty)
dstcls, dstty = get_complex_info(toty)
src = srccls(context, builder, value=val)
dst = dstcls(context, builder)
dst.real = context.cast(builder, src.real, srcty, dstty)
dst.imag = context.cast(builder, src.imag, srcty, dstty)
return dst._getvalue()
@lower_cast(types.Any, types.Boolean)
def any_to_boolean(context, builder, fromty, toty, val):
return context.is_true(builder, fromty, val)
@lower_cast(types.Boolean, types.Any)
def boolean_to_any(context, builder, fromty, toty, val):
# Casting from boolean to anything first casts to int32
asint = builder.zext(val, Type.int())
return context.cast(builder, asint, types.int32, toty)
# -----------------------------------------------------------------------------
@lower_builtin(type, types.Any)
def type_impl(context, builder, sig, args):
"""
One-argument type() builtin.
"""
return context.get_dummy_value()
|
py | b4089f5d40c4a25d16316cdc90cb807bccad9ab2 | import roadrunner
import teplugins as tel
try:
pm = tel.createPluginManager()
if tel.loadPlugins(pm) == False:
        print(tel.getPluginLoadErrors(pm))
except Exception as e:
    print('Problem: ' + repr(e)) |
py | b4089fea9a67061e96044549297b4a2f0faf0a96 | # Model attributes
import json
import os
from pkg_resources import resource_filename
MODEL_NAME="persistence"
SELECTED_SUBMODEL_PARAMS_FILE=resource_filename("subseasonal_toolkit",
os.path.join("models",MODEL_NAME,"selected_submodel.json"))
def get_selected_submodel_name(gt_id, target_horizon):
"""Returns the name of the selected submodel for this model and given task
Args:
gt_id: ground truth identifier in {"contest_tmp2m", "contest_precip"}
target_horizon: string in {"34w", "56w"}
"""
# Read in selected model parameters for given task
with open(SELECTED_SUBMODEL_PARAMS_FILE, 'r') as params_file:
json_args = json.load(params_file)[f'{gt_id}_{target_horizon}']
# Return submodel name associated with these parameters
return get_submodel_name(**json_args)
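# Illustrative note (added comment, inferred from the code below): get_submodel_name()
# accepts no parameters, so the JSON entry for each task is assumed to be an empty
# dict and a call like get_selected_submodel_name("contest_tmp2m", "34w") simply
# returns MODEL_NAME, i.e. "persistence".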
def get_submodel_name():
"""Returns submodel name for a given setting of model parameters
"""
submodel_name = f"{MODEL_NAME}"
return submodel_name
|
py | b408a0839494314963eb0373c709e542155e6236 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libxcomposite(AutotoolsPackage):
"""libXcomposite - client library for the Composite extension to the
X11 protocol."""
homepage = "http://cgit.freedesktop.org/xorg/lib/libXcomposite"
url = "https://www.x.org/archive/individual/lib/libXcomposite-0.4.4.tar.gz"
version('0.4.4', 'af860b1554a423735d831e6f29ac1ef5')
depends_on('libx11')
depends_on('libxfixes')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type='build')
depends_on('util-macros', type='build')
|
py | b408a09eadb414b7923243fb2b77f44d2772a0da | """SCons.Tool.GettextCommon module
Used by several tools of `gettext` toolset.
"""
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/GettextCommon.py 2018/09/30 19:25:33 Sye"
import SCons.Warnings
import re
#############################################################################
class XgettextToolWarning(SCons.Warnings.Warning): pass
class XgettextNotFound(XgettextToolWarning): pass
class MsginitToolWarning(SCons.Warnings.Warning): pass
class MsginitNotFound(MsginitToolWarning): pass
class MsgmergeToolWarning(SCons.Warnings.Warning): pass
class MsgmergeNotFound(MsgmergeToolWarning): pass
class MsgfmtToolWarning(SCons.Warnings.Warning): pass
class MsgfmtNotFound(MsgfmtToolWarning): pass
#############################################################################
SCons.Warnings.enableWarningClass(XgettextToolWarning)
SCons.Warnings.enableWarningClass(XgettextNotFound)
SCons.Warnings.enableWarningClass(MsginitToolWarning)
SCons.Warnings.enableWarningClass(MsginitNotFound)
SCons.Warnings.enableWarningClass(MsgmergeToolWarning)
SCons.Warnings.enableWarningClass(MsgmergeNotFound)
SCons.Warnings.enableWarningClass(MsgfmtToolWarning)
SCons.Warnings.enableWarningClass(MsgfmtNotFound)
#############################################################################
#############################################################################
class _POTargetFactory(object):
""" A factory of `PO` target files.
  Factory defaults differ from those of `SCons.Node.FS.FS`. We set `precious`
  (this is required by the gettext builders and actions) and `noclean` flags by
  default for all produced nodes.
"""
def __init__(self, env, nodefault=True, alias=None, precious=True
, noclean=True):
""" Object constructor.
**Arguments**
- *env* (`SCons.Environment.Environment`)
- *nodefault* (`boolean`) - if `True`, produced nodes will be ignored
from default target `'.'`
- *alias* (`string`) - if provided, produced nodes will be automatically
added to this alias, and alias will be set as `AlwaysBuild`
- *precious* (`boolean`) - if `True`, the produced nodes will be set as
`Precious`.
    - *noclean* (`boolean`) - if `True`, the produced nodes will be excluded
from `Clean`.
"""
self.env = env
self.alias = alias
self.precious = precious
self.noclean = noclean
self.nodefault = nodefault
def _create_node(self, name, factory, directory=None, create=1):
""" Create node, and set it up to factory settings. """
import SCons.Util
node = factory(name, directory, create)
node.set_noclean(self.noclean)
node.set_precious(self.precious)
if self.nodefault:
self.env.Ignore('.', node)
if self.alias:
self.env.AlwaysBuild(self.env.Alias(self.alias, node))
return node
def Entry(self, name, directory=None, create=1):
""" Create `SCons.Node.FS.Entry` """
return self._create_node(name, self.env.fs.Entry, directory, create)
def File(self, name, directory=None, create=1):
""" Create `SCons.Node.FS.File` """
return self._create_node(name, self.env.fs.File, directory, create)
#############################################################################
#############################################################################
_re_comment = re.compile(r'(#[^\n\r]+)$', re.M)
_re_lang = re.compile(r'([a-zA-Z0-9_]+)', re.M)
#############################################################################
def _read_linguas_from_files(env, linguas_files=None):
""" Parse `LINGUAS` file and return list of extracted languages """
import SCons.Util
import SCons.Environment
global _re_comment
global _re_lang
if not SCons.Util.is_List(linguas_files) \
and not SCons.Util.is_String(linguas_files) \
and not isinstance(linguas_files, SCons.Node.FS.Base) \
and linguas_files:
    # If linguas_files is True (or another non-node truthy value), read the 'LINGUAS' file.
linguas_files = ['LINGUAS']
if linguas_files is None:
return []
fnodes = env.arg2nodes(linguas_files)
linguas = []
for fnode in fnodes:
contents = _re_comment.sub("", fnode.get_text_contents())
ls = [l for l in _re_lang.findall(contents) if l]
linguas.extend(ls)
return linguas
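# Illustrative example (added comment): a LINGUAS file containing the text
#   "# translated languages\nen pl\nde\n"
# has its comment line stripped by _re_comment and yields ['en', 'pl', 'de']
# once _re_lang extracts the language identifiers.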
#############################################################################
#############################################################################
from SCons.Builder import BuilderBase
#############################################################################
class _POFileBuilder(BuilderBase):
""" `PO` file builder.
This is multi-target single-source builder. In typical situation the source
is single `POT` file, e.g. `messages.pot`, and there are multiple `PO`
targets to be updated from this `POT`. We must run
`SCons.Builder.BuilderBase._execute()` separatelly for each target to track
dependencies separatelly for each target file.
**NOTE**: if we call `SCons.Builder.BuilderBase._execute(.., target, ...)`
with target being list of all targets, all targets would be rebuilt each time
one of the targets from this list is missing. This would happen, for example,
when new language `ll` enters `LINGUAS_FILE` (at this moment there is no
`ll.po` file yet). To avoid this, we override
  `SCons.Builder.BuilderBase._execute()` and call it separately for each
target. Here we also append to the target list the languages read from
`LINGUAS_FILE`.
"""
#
# * The argument for overriding _execute(): We must use environment with
# builder overrides applied (see BuilderBase.__init__(). Here it comes for
# free.
# * The argument against using 'emitter': The emitter is called too late
# by BuilderBase._execute(). If user calls, for example:
#
# env.POUpdate(LINGUAS_FILE = 'LINGUAS')
#
# the builder throws error, because it is called with target=None,
# source=None and is trying to "generate" sources or target list first.
# If user calls
#
# env.POUpdate(['foo', 'baz'], LINGUAS_FILE = 'LINGUAS')
#
# the env.BuilderWrapper() calls our builder with target=None,
# source=['foo', 'baz']. The BuilderBase._execute() then splits execution
  # and invokes self._execute(None, source[i]) iteratively (by recursion).
  # After that it calls the emitter (which is far too late). The emitter is
  # also called in each iteration, which makes things even worse.
def __init__(self, env, **kw):
if not 'suffix' in kw:
kw['suffix'] = '$POSUFFIX'
if not 'src_suffix' in kw:
kw['src_suffix'] = '$POTSUFFIX'
if not 'src_builder' in kw:
kw['src_builder'] = '_POTUpdateBuilder'
if not 'single_source' in kw:
kw['single_source'] = True
alias = None
if 'target_alias' in kw:
alias = kw['target_alias']
del kw['target_alias']
if not 'target_factory' in kw:
kw['target_factory'] = _POTargetFactory(env, alias=alias).File
BuilderBase.__init__(self, **kw)
def _execute(self, env, target, source, *args, **kw):
""" Execute builder's actions.
Here we append to `target` the languages read from `$LINGUAS_FILE` and
apply `SCons.Builder.BuilderBase._execute()` separatelly to each target.
The arguments and return value are same as for
`SCons.Builder.BuilderBase._execute()`.
"""
import SCons.Util
import SCons.Node
linguas_files = None
if 'LINGUAS_FILE' in env and env['LINGUAS_FILE']:
linguas_files = env['LINGUAS_FILE']
# This prevents endless recursion loop (we'll be invoked once for
# each target appended here, we must not extend the list again).
env['LINGUAS_FILE'] = None
linguas = _read_linguas_from_files(env, linguas_files)
if SCons.Util.is_List(target):
target.extend(linguas)
elif target is not None:
target = [target] + linguas
else:
target = linguas
if not target:
      # Let SCons.Builder.BuilderBase handle this pathological situation
return BuilderBase._execute(self, env, target, source, *args, **kw)
# The rest is ours
if not SCons.Util.is_List(target):
target = [target]
result = []
for tgt in target:
r = BuilderBase._execute(self, env, [tgt], source, *args, **kw)
result.extend(r)
if linguas_files is not None:
env['LINGUAS_FILE'] = linguas_files
return SCons.Node.NodeList(result)
#############################################################################
import SCons.Environment
#############################################################################
def _translate(env, target=None, source=SCons.Environment._null, *args, **kw):
""" Function for `Translate()` pseudo-builder """
if target is None: target = []
pot = env.POTUpdate(None, source, *args, **kw)
po = env.POUpdate(target, pot, *args, **kw)
return po
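# Hypothetical usage (added comment; names are illustrative, not taken from this file):
#   env.Translate(['en', 'pl'], ['src/foo.c', 'src/bar.c'], POAUTOINIT=1)
# would first update the POT template from the listed sources and then update
# (or, with POAUTOINIT, create) en.po and pl.po from that template.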
#############################################################################
#############################################################################
class RPaths(object):
""" Callable object, which returns pathnames relative to SCons current
working directory.
It seems like `SCons.Node.FS.Base.get_path()` returns absolute paths
for nodes that are outside of current working directory (`env.fs.getcwd()`).
Here, we often have `SConscript`, `POT` and `PO` files within `po/`
directory and source files (e.g. `*.c`) outside of it. When generating `POT`
template file, references to source files are written to `POT` template, so
a translator may later quickly jump to appropriate source file and line from
its `PO` editor (e.g. `poedit`). Relative paths in `PO` file are usually
interpreted by `PO` editor as paths relative to the place, where `PO` file
lives. The absolute paths would make resultant `POT` file nonportable, as
the references would be correct only on the machine, where `POT` file was
recently re-created. For such reason, we need a function, which always
returns relative paths. This is the purpose of `RPaths` callable object.
  The `__call__` method returns paths relative to current working directory, but
  we assume that *xgettext(1)* is run from the directory where the target file is
  going to be created.
  Note that this may not work for files distributed over several hosts or
  across different drives on Windows. We assume here that a single local
  filesystem holds both source files and target `POT` templates.
Intended use of `RPaths` - in `xgettext.py`::
def generate(env):
from GettextCommon import RPaths
...
sources = '$( ${_concat( "", SOURCES, "", __env__, XgettextRPaths, TARGET, SOURCES)} $)'
env.Append(
...
XGETTEXTCOM = 'XGETTEXT ... ' + sources,
...
XgettextRPaths = RPaths(env)
)
"""
# NOTE: This callable object returns pathnames of dirs/files relative to
# current working directory. The pathname remains relative also for entries
  # that are outside of current working directory (note that
  # SCons.Node.FS.File and siblings return an absolute path in such a case). For
# simplicity we compute path relative to current working directory, this
  # seems to be enough for our purposes (don't need TARGET variable and
# SCons.Defaults.Variable_Caller stuff).
def __init__(self, env):
""" Initialize `RPaths` callable object.
**Arguments**:
- *env* - a `SCons.Environment.Environment` object, defines *current
working dir*.
"""
self.env = env
  # FIXME: I'm not sure how it should be implemented (what the *args are in
# general, what is **kw).
def __call__(self, nodes, *args, **kw):
""" Return nodes' paths (strings) relative to current working directory.
**Arguments**:
- *nodes* ([`SCons.Node.FS.Base`]) - list of nodes.
- *args* - currently unused.
- *kw* - currently unused.
**Returns**:
- Tuple of strings, which represent paths relative to current working
directory (for given environment).
"""
import os
import SCons.Node.FS
rpaths = ()
cwd = self.env.fs.getcwd().get_abspath()
for node in nodes:
rpath = None
if isinstance(node, SCons.Node.FS.Base):
rpath = os.path.relpath(node.get_abspath(), cwd)
# FIXME: Other types possible here?
if rpath is not None:
rpaths += (rpath,)
return rpaths
#############################################################################
#############################################################################
def _init_po_files(target, source, env):
""" Action function for `POInit` builder. """
nop = lambda target, source, env: 0
if 'POAUTOINIT' in env:
autoinit = env['POAUTOINIT']
else:
autoinit = False
  # Well, if everything outside works well, this loop should do a single
  # iteration. Otherwise we are rebuilding all the targets even if just
# one has changed (but is this our fault?).
for tgt in target:
if not tgt.exists():
if autoinit:
action = SCons.Action.Action('$MSGINITCOM', '$MSGINITCOMSTR')
else:
msg = 'File ' + repr(str(tgt)) + ' does not exist. ' \
+ 'If you are a translator, you can create it through: \n' \
+ '$MSGINITCOM'
action = SCons.Action.Action(nop, msg)
status = action([tgt], source, env)
if status: return status
return 0
#############################################################################
#############################################################################
def _detect_xgettext(env):
""" Detects *xgettext(1)* binary """
if 'XGETTEXT' in env:
return env['XGETTEXT']
xgettext = env.Detect('xgettext')
if xgettext:
return xgettext
raise SCons.Errors.StopError(XgettextNotFound, "Could not detect xgettext")
#############################################################################
def _xgettext_exists(env):
return _detect_xgettext(env)
#############################################################################
#############################################################################
def _detect_msginit(env):
""" Detects *msginit(1)* program. """
if 'MSGINIT' in env:
return env['MSGINIT']
msginit = env.Detect('msginit')
if msginit:
return msginit
raise SCons.Errors.StopError(MsginitNotFound, "Could not detect msginit")
#############################################################################
def _msginit_exists(env):
return _detect_msginit(env)
#############################################################################
#############################################################################
def _detect_msgmerge(env):
""" Detects *msgmerge(1)* program. """
if 'MSGMERGE' in env:
return env['MSGMERGE']
msgmerge = env.Detect('msgmerge')
if msgmerge:
return msgmerge
raise SCons.Errors.StopError(MsgmergeNotFound, "Could not detect msgmerge")
#############################################################################
def _msgmerge_exists(env):
return _detect_msgmerge(env)
#############################################################################
#############################################################################
def _detect_msgfmt(env):
""" Detects *msgmfmt(1)* program. """
if 'MSGFMT' in env:
return env['MSGFMT']
msgfmt = env.Detect('msgfmt')
if msgfmt:
return msgfmt
raise SCons.Errors.StopError(MsgfmtNotFound, "Could not detect msgfmt")
return None
#############################################################################
def _msgfmt_exists(env):
return _detect_msgfmt(env)
#############################################################################
#############################################################################
def tool_list(platform, env):
""" List tools that shall be generated by top-level `gettext` tool """
return ['xgettext', 'msginit', 'msgmerge', 'msgfmt']
#############################################################################
|
py | b408a1033b1b17588fc124c5a48a741d06b5c53e | from random import choice as randchoice
from discord.ext import commands
rules = {
1: 'Do not talk about /b/',
2: 'Do NOT talk about /b/',
3: 'We are Anonymous',
4: 'Anonymous is legion',
5: 'Anonymous never forgives',
6: 'Anonymous can be a horrible, senseless, uncaring monster',
7: 'Anonymous is still able to deliver',
8: 'There are no real rules about posting',
9: 'There are no real rules about moderation either - enjoy your ban',
10: "If you enjoy any rival sites - DON'T",
11: 'All your carefully picked arguments can easily be ignored',
12: 'Anything you say can and will be used against you',
13: 'Anything you say can be turned into something else - fixed',
14: 'Do not argue with trolls - it means that they win',
15: 'The harder you try the harder you will fail',
16: 'If you fail in epic proportions, it may just become a winning'
' failure',
17: 'Every win fails eventually',
18: 'Everything that can be labeled can be hated',
19: 'The more you hate it the stronger it gets',
20: 'Nothing is to be taken seriously',
21: 'Original content is original only for a few seconds before getting'
' old',
22: 'Copypasta is made to ruin every last bit of originality',
23: 'Copypasta is made to ruin every last bit of originality',
24: 'Every repost is always a repost of a repost',
25: 'Relation to the original topic decreases with every single post',
26: 'Any topic can easily be turned into something totally unrelated',
27: "Always question a person's sexual prefrences without any real reason",
28: "Always question a person's gender - just incase it's really a man",
29: 'In the internet all girls are men and all kids are undercover FBI'
' agents',
30: 'There are no girls on the internet',
31: 'TITS or GTFO - the choice is yours',
32: 'You must have pictures to prove your statements',
33: "Lurk more - it's never enough",
34: 'There is porn of it, no exceptions',
35: 'If no porn is found at the moment, it will be made',
36: 'There will always be even more fucked up shit than what you just saw',
37: 'You can not divide by zero (just because the calculator says so)',
38: 'No real limits of any kind apply here - not even the sky',
39: 'CAPSLOCK IS CRUISE CONTROL FOR COOL',
40: 'EVEN WITH CRUISE CONTROL YOU STILL HAVE TO STEER',
41: "Desu isn't funny. Seriously guys. It's worse than Chuck Norris"
" jokes.",
42: 'Nothing is Sacred.',
43: 'The more beautiful and pure a thing is - the more satisfying it is'
' to corrupt it',
44: 'Even one positive comment about Japanese things can make you a'
' weaboo',
45: 'When one sees a lion, one must get into the car.',
46: 'There is always furry porn of it.',
47: 'The pool is always closed.'
}
class Rules:
def __init__(self, bot):
self.bot = bot
@commands.command()
async def roti(self, num: int=None):
"""ROTI"""
if num:
if num < 1:
await self.bot.say('LOL SO FUNNY')
return
if num > 47:
await self.bot.say('Not thaaaat high.')
return
await self.bot.say("RULE {}: {}".format(num, rules[num]))
return
rule = randchoice(list(rules.keys()))
await self.bot.say("RULE {}: {}".format(rule, rules[rule]))
def setup(bot):
n = Rules(bot)
bot.add_cog(n)
|
py | b408a2e680bdf01b6e7d0fa598d9593400213331 | from abc import ABC, abstractmethod
import copy
import time
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from cogdl.data import Dataset, Data
from cogdl.data.sampler import (
NodeSampler,
EdgeSampler,
RWSampler,
MRWSampler,
LayerSampler,
NeighborSampler
)
from cogdl.models.supervised_model import (
SupervisedHeterogeneousNodeClassificationModel,
SupervisedHomogeneousNodeClassificationModel,
)
from cogdl.trainers.supervised_trainer import SupervisedHeterogeneousNodeClassificationTrainer
class SampledTrainer(SupervisedHeterogeneousNodeClassificationTrainer):
@abstractmethod
def fit(self, model: SupervisedHeterogeneousNodeClassificationModel, dataset: Dataset):
raise NotImplementedError
@abstractmethod
def _train_step(self):
pass
@abstractmethod
def _test_step(self, split="val"):
pass
def __init__(self, args):
self.device = args.device_id[0] if not args.cpu else "cpu"
self.patience = args.patience
self.max_epoch = args.max_epoch
self.lr = args.lr
self.weight_decay = args.weight_decay
@classmethod
def build_trainer_from_args(cls, args):
return cls(args)
def train(self):
epoch_iter = tqdm(range(self.max_epoch))
patience = 0
best_score = 0
best_loss = np.inf
max_score = 0
min_loss = np.inf
best_model = copy.deepcopy(self.model)
for epoch in epoch_iter:
self._train_step()
train_acc, _ = self._test_step(split="train")
val_acc, val_loss = self._test_step(split="val")
epoch_iter.set_description(
f"Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}"
)
if val_loss <= min_loss or val_acc >= max_score:
if val_acc >= best_score: # SAINT loss is not accurate
best_loss = val_loss
best_score = val_acc
best_model = copy.deepcopy(self.model)
min_loss = np.min((min_loss, val_loss))
max_score = np.max((max_score, val_acc))
patience = 0
else:
patience += 1
if patience == self.patience:
self.model = best_model
epoch_iter.close()
break
return best_model
class SAINTTrainer(SampledTrainer):
def __init__(self, args):
super(SAINTTrainer, self).__init__(args)
self.args_sampler = self.sampler_from_args(args)
@classmethod
def build_trainer_from_args(cls, args):
return cls(args)
def sampler_from_args(self, args):
args_sampler = {
"sampler": args.sampler,
"sample_coverage": args.sample_coverage,
"size_subgraph": args.size_subgraph,
"num_walks": args.num_walks,
"walk_length": args.walk_length,
"size_frontier": args.size_frontier
}
return args_sampler
def fit(self, model: SupervisedHeterogeneousNodeClassificationModel, dataset: Dataset):
self.data = dataset.data
self.data.apply(lambda x: x.to(self.device))
self.model = model
if self.args_sampler["sampler"] == "node":
self.sampler = NodeSampler(self.data, self.args_sampler)
elif self.args_sampler["sampler"] == "edge":
self.sampler = EdgeSampler(self.data, self.args_sampler)
elif self.args_sampler["sampler"] == "rw":
self.sampler = RWSampler(self.data, self.args_sampler)
elif self.args_sampler["sampler"] == "mrw":
self.sampler = MRWSampler(self.data, self.args_sampler)
self.optimizer = torch.optim.Adam(
model.parameters(), lr=self.lr, weight_decay=self.weight_decay
)
best_model = self.train()
self.model = best_model
return self.model
def _train_step(self):
self.data = self.sampler.get_subgraph("train")
self.data.apply(lambda x: x.to(self.device))
self.model.train()
self.optimizer.zero_grad()
self.model.loss(self.data).backward()
self.optimizer.step()
def _test_step(self, split="val"):
self.data = self.sampler.get_subgraph(split)
self.data.apply(lambda x: x.to(self.device))
self.model.eval()
if split == "train":
mask = self.data.train_mask
elif split == "val":
mask = self.data.val_mask
else:
mask = self.data.test_mask
with torch.no_grad():
logits = self.model.predict(self.data)
loss = (torch.nn.NLLLoss(reduction="none")(logits[mask], self.data.y[mask]) * self.data.norm_loss[mask]).sum()
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
return acc, loss
class NeighborSamplingTrainer(SampledTrainer):
model: torch.nn.Module
def __init__(self, args):
super(NeighborSamplingTrainer, self).__init__(args)
self.hidden_size = args.hidden_size
self.sample_size = args.sample_size
self.batch_size = args.batch_size
self.num_workers = 4 if not hasattr(args, "num_workers") else args.num_workers
self.eval_per_epoch = 5
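# patience is counted in evaluation rounds: validation runs only every
# eval_per_epoch epochs, so the configured patience is scaled down below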
self.patience = self.patience // self.eval_per_epoch
def fit(self, model, dataset):
self.data = Data.from_pyg_data(dataset[0])
self.train_loader = NeighborSampler(
data=self.data,
mask=self.data.train_mask,
sizes=self.sample_size,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=True
)
self.test_loader = NeighborSampler(
data=self.data,
mask=None,
sizes=[-1],
batch_size=self.batch_size,
shuffle=False
)
self.model = model.to(self.device)
self.model.set_data_device(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
best_model = self.train()
self.model = best_model
acc, loss = self._test_step()
return dict(Acc=acc["test"], ValAcc=acc["val"])
def train(self):
epoch_iter = tqdm(range(self.max_epoch))
patience = 0
max_score = 0
min_loss = np.inf
best_model = copy.deepcopy(self.model)
for epoch in epoch_iter:
self._train_step()
if (epoch + 1) % self.eval_per_epoch == 0:
acc, loss = self._test_step()
train_acc = acc["train"]
val_acc = acc["val"]
val_loss = loss["val"]
epoch_iter.set_description(
f"Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}"
)
if val_loss <= min_loss or val_acc >= max_score:
if val_loss <= min_loss:
best_model = copy.deepcopy(self.model)
min_loss = np.min((min_loss, val_loss))
max_score = np.max((max_score, val_acc))
patience = 0
else:
patience += 1
if patience == self.patience:
self.model = best_model
epoch_iter.close()
break
return best_model
def _train_step(self):
self.model.train()
for target_id, n_id, adjs in self.train_loader:
self.optimizer.zero_grad()
x_src = self.data.x[n_id].to(self.device)
y = self.data.y[target_id].to(self.device)
loss = self.model.loss(x_src, adjs, y)
loss.backward()
self.optimizer.step()
def _test_step(self, split="val"):
self.model.eval()
masks = {
"train": self.data.train_mask,
"val": self.data.val_mask,
"test": self.data.test_mask
}
with torch.no_grad():
logits = self.model.inference(self.data.x, self.test_loader)
loss = {
key: F.nll_loss(logits[val], self.data.y[val])
for key, val in masks.items()
}
pred = {
key: logits[val].max(1)[1]
for key, val in masks.items()
}
acc = {
key: pred[key].eq(self.data.y[val]).sum().item() / val.sum().item()
for key, val in masks.items()
}
return acc, loss
@classmethod
def build_trainer_from_args(cls, args):
return cls(args)
"""
class LayerSampledTrainer(SampledTrainer):
def __init__(self, args):
self.device = torch.device('cpu' if args.cpu else 'cuda')
self.patience = args.patience
self.max_epoch = args.max_epoch
self.batch_size = args.batch_size
def fit(self, model: SamplingNodeClassificationModel, dataset: Dataset):
self.model = model.to(self.device)
self.data = dataset.data
self.data.apply(lambda x: x.to(self.device))
self.sampler = LayerSampler(self.data, self.model, {})
self.num_nodes = self.data.x.shape[0]
self.adj_list = self.data.edge_index.detach().cpu().numpy()
self.model.set_adj(self.adj_list, self.num_nodes)
self.optimizer = torch.optim.Adam(
self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
epoch_iter = tqdm(range(self.max_epoch))
patience = 0
best_score = 0
best_loss = np.inf
max_score = 0
min_loss = np.inf
for epoch in epoch_iter:
self._train_step()
train_acc, _ = self._test_step(split="train")
val_acc, val_loss = self._test_step(split="val")
epoch_iter.set_description(
f"Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}"
)
if val_loss <= min_loss or val_acc >= max_score:
if val_loss <= best_loss: # and val_acc >= best_score:
best_loss = val_loss
best_score = val_acc
best_model = copy.deepcopy(self.model)
min_loss = np.min((min_loss, val_loss))
max_score = np.max((max_score, val_acc))
patience = 0
else:
patience += 1
if patience == self.patience:
self.model = best_model
epoch_iter.close()
break
def _train_step(self):
self.model.train()
train_nodes = np.where(self.data.train_mask.detach().cpu().numpy())[0]
train_labels = self.data.y.detach().cpu().numpy()
for batch_nodes, batch_labels in get_batches(train_nodes, train_labels, batch_size=self.batch_size):
batch_nodes = torch.LongTensor(batch_nodes)
batch_labels = torch.LongTensor(batch_labels).to(self.device)
sampled_x, sampled_adj, var_loss = self.sampler.sampling(self.data.x, batch_nodes)
self.optimizer.zero_grad()
output = self.model(sampled_x, sampled_adj)
loss = F.nll_loss(output, batch_labels) + 0.5 * var_loss
loss.backward()
self.optimizer.step()
def _test_step(self, split="val"):
self.model.eval()
_, mask = list(self.data(f"{split}_mask"))[0]
test_nodes = np.where(mask.detach().cpu().numpy())[0]
test_labels = self.data.y.detach().cpu().numpy()
all_loss = []
all_acc = []
for batch_nodes, batch_labels in get_batches(test_nodes, test_labels, batch_size=self.batch_size):
batch_nodes = torch.LongTensor(batch_nodes)
batch_labels = torch.LongTensor(batch_labels).to(self.device)
sampled_x, sampled_adj, var_loss = self.model.sampling(self.data.x, batch_nodes)
with torch.no_grad():
logits = self.model(sampled_x, sampled_adj)
loss = F.nll_loss(logits, batch_labels)
pred = logits.max(1)[1]
acc = pred.eq(self.data.y[batch_nodes]).sum().item() / batch_nodes.shape[0]
all_loss.append(loss.item())
all_acc.append(acc)
return np.mean(all_acc), np.mean(all_loss)
"""
|
py | b408a38e8f2d01776164f2cc6f7742ab4e0c959c | days_in_feb = 28
# The print function can print numbers as well as strings
print(days_in_feb)
# The + operator can add two numbers or concatenate two strings.
# However, passing a number and a string to + raises a TypeError,
# so the following line is left commented out:
# print(days_in_feb + ' days in February')
# To print the result, the number must first be converted to a string.
# This code works:
print(str(days_in_feb) + ' days in February')
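# An alternative not shown in the original: an f-string performs the
# conversion implicitly and prints the same message:
# print(f'{days_in_feb} days in February')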
|
py | b408a413468345f3d35b9095b32ecb5c834c0b02 |
import sys
import copy
import moveit_commander
from moveit_commander.exception import MoveItCommanderException
from moveit_commander.conversions import list_to_pose, pose_to_list
import numpy as np
import tf.transformations as tr
from sami.interface import ArmIF
class MoveItPlug(ArmIF):
def __init__(self, options):
super(MoveItPlug, self).__init__()
moveit_commander.roscpp_initialize(sys.argv)
self.robot = moveit_commander.RobotCommander()
self.moveg = moveit_commander.MoveGroupCommander(options['group'])
self.ee_link = self.moveg.get_end_effector_link()
self.pframe = self.moveg.get_planning_frame()
self.numj = len(self.moveg.get_joints())
def move_joints(self, joints, velocity):
if velocity is None:
self.moveg.set_max_velocity_scaling_factor(self.velocity)
else:
self.moveg.set_max_velocity_scaling_factor(velocity)
try:
self.moveg.go(joints, wait=True)
self.moveg.stop()
except MoveItCommanderException as e:
self.last_error_msg = str(e)
return False
return True
def move_pose(self, pose, velocity):
if velocity is None:
self.moveg.set_max_velocity_scaling_factor(self.velocity)
else:
self.moveg.set_max_velocity_scaling_factor(velocity)
try:
self.moveg.set_pose_target(pose)
ok = self.moveg.go(wait=True)
self.moveg.stop()
self.moveg.clear_pose_targets()
if not ok:
self.last_error_msg = "No motion plan found."
return ok
except MoveItCommanderException as e:
self.last_error_msg = str(e)
self.moveg.clear_pose_targets()
return False
def move_pose_relative(self, dpose, velocity):
if velocity is None:
self.moveg.set_max_velocity_scaling_factor(self.velocity)
else:
self.moveg.set_max_velocity_scaling_factor(velocity)
pose = pose_to_list(self.moveg.get_current_pose().pose)
t_xform = np.dot(tr.translation_matrix(pose[0:3]), tr.quaternion_matrix(pose[3:]))
s_xform = np.dot(tr.translation_matrix(dpose[0:3]), tr.euler_matrix(*dpose[3:]))
xform = np.dot(t_xform, s_xform)
pose = tr.translation_from_matrix(xform).tolist() + list(tr.euler_from_matrix(xform))
wp = [list_to_pose(pose)] # waypoints
(plan, fraction) = self.moveg.compute_cartesian_path(wp, eef_step = 0.01, jump_threshold = 0.0)
if fraction < 1.0:
self.last_error_msg = "No motion plan found."
return False
v = self.velocity if velocity is None else velocity
plan = self.moveg.retime_trajectory(self.robot.get_current_state(), plan, v)
try:
self.moveg.execute(plan, wait=True)
self.moveg.stop()
except MoveItCommanderException as e:
self.last_error_msg = str(e)
return False
return True
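# Example usage (a minimal sketch; the planning group name, pose offset and
# velocity below are assumptions, not part of this module):
#
#   arm = MoveItPlug({'group': 'manipulator'})
#   # nudge the end effector 5 cm along z while keeping its orientation
#   arm.move_pose_relative([0.0, 0.0, 0.05, 0.0, 0.0, 0.0], velocity=0.1)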
|
py | b408a4afda406b84199314011e584aec4c747232 | from aws_cdk import core
import aws_cdk.aws_ec2 as ec2
class MwaaRedshiftVPC(core.Stack):
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# Create VPC network
self.vpc = ec2.Vpc(
self,
id="MWAA-RedShiftCluster-VPC",
cidr="10.192.0.0/16",
nat_gateways=1,
subnet_configuration=[
ec2.SubnetConfiguration(
name="public", cidr_mask=20,
reserved=False, subnet_type=ec2.SubnetType.PUBLIC),
ec2.SubnetConfiguration(
name="private", cidr_mask=20,
reserved=False, subnet_type=ec2.SubnetType.PRIVATE)
],
max_azs=2,
enable_dns_hostnames=True,
enable_dns_support=True
)
core.CfnOutput(
self,
id="VPCId",
value=self.vpc.vpc_id,
description="VPC ID",
export_name=f"{self.region}:{self.account}:{self.stack_name}:vpc-id"
)
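# Example (a minimal sketch; the app entry point and the `props` value below
# are assumptions, not part of this module):
#
#   app = core.App()
#   MwaaRedshiftVPC(app, "mwaa-redshift-vpc", props=None)
#   app.synth()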
|
py | b408a701b109261488de822959ade689b0e80090 | import ee
from ee.ee_exception import EEException
try:
ee.Initialize()
except EEException as e:
from oauth2client.service_account import ServiceAccountCredentials
credentials = ServiceAccountCredentials.from_p12_keyfile(
service_account_email='',
filename='',
private_key_password='notasecret',
scopes=ee.oauth.SCOPE + ' https://www.googleapis.com/auth/drive ')
ee.Initialize(credentials)
# This script processes a single Landsat 8 image of interest.
# WQ parameters such chlor-a, secchi depth, trophic state index are calculated
# How to use:
# (1) If a "geometry" variable exists in the imports window, delete it.
# (2) Within the map, select the point button near the top left side.
# (3) Create a new point by clicking on a location of interest.
# (4) Adjust the time frame you wish to browse by adding here:
geometry = ee.Geometry.Polygon([[[30.76171875, 0.9049611504960419],
[30.8935546875, -3.487377195492663],
[35.5517578125, -3.2680324702882952],
[35.5517578125, 1.9593043032313748]]])
# begin date
iniDate = '2015-05-01'
# end date
endDate = '2018-03-31'
# (5) Adjust a cloud % threshold here:
oliCloudPerc = 5
# (6) Click Run
# (7) The "available imagery" ImageCollection within the console displays all available imagery. If you wish to investigate a single
# image from this collection, find the FILE_ID within the features and copy and paste it here:
l8 = ee.Image('LANDSAT/LC08/C01/T1/LC08_170060_20171226')
# (8) Click "Run"
# (9) Export each image by clicking on the run button within the "tasks" tab.
# Author: Benjamin Page #
# Citations:
# Page, B.P. and Mishra, D.R., 2018. A modified atmospheric correction when coupling sentinel-2 and landsat-8 for inland water quality monitoring, In Review
#########################################################/
#########################################################/
#########################################################/
# Import Collections #
# landsat 8 raw dn
OLI_DN = ee.ImageCollection('LANDSAT/LC08/C01/T1')
# landsat-8 surface reflactance product (for masking purposes)
SRP = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
# toms / omi
ozone = ee.ImageCollection('TOMS/MERGED')
#########################################################/
#########################################################/
#########################################################/
# Filtering Collection and Masking #
pi = ee.Image(3.141592)
# water mask
startMonth = 5
endMonth = 9
startYear = 2013
endYear = 2017
forMask = SRP.filterBounds(geometry).select('B6').filterMetadata('CLOUD_COVER', "less_than", 10).filter \
(ee.Filter.calendarRange(startMonth, endMonth, 'month')).filter(ee.Filter.calendarRange(startYear, endYear, 'year'))
mask = ee.Image(forMask.select('B6').median().lt(600))
mask = mask.updateMask(mask)
# filter landsat 8 collection
FC_OLI = OLI_DN.filterDate(iniDate, endDate).filterBounds(geometry).filterMetadata('CLOUD_COVER', "less_than", oliCloudPerc)
# oli image date
oliDate = l8.date()
footprint = l8.geometry()
#########################################################/
#########################################################/
#########################################################/
################################################################################
# single OLI image ATMOSPHERIC CORRECTION #
# dem
DEM_OLI = ee.Image('USGS/SRTMGL1_003').clip(footprint)
# ozone
DU_OLI = ee.Image(ozone.filterDate(iniDate ,endDate).filterBounds(footprint).mean())
# Julian Day
imgDate_OLI = ee.Date(l8.get('system:time_start'))
FOY_OLI = ee.Date.fromYMD(imgDate_OLI.get('year') ,1 ,1)
JD_OLI = imgDate_OLI.difference(FOY_OLI ,'day').int().add(1)
# Earth-Sun distance
d_OLI = ee.Image.constant(l8.get('EARTH_SUN_DISTANCE'))
# Sun elevation
SunEl_OLI = ee.Image.constant(l8.get('SUN_ELEVATION'))
# Sun azimuth
SunAz_OLI = ee.Image.constant(l8.get('SUN_AZIMUTH'))
# Satellite zenith
SatZe_OLI = ee.Image(0.0)
cosdSatZe_OLI = (SatZe_OLI).multiply(pi.divide(ee.Image(180))).cos()
sindSatZe_OLI = (SatZe_OLI).multiply(pi.divide(ee.Image(180))).sin()
# Satellite azimuth
SatAz_OLI = ee.Image(0.0)
# Sun zenith
SunZe_OLI = ee.Image(90).subtract(SunEl_OLI)
cosdSunZe_OLI = SunZe_OLI.multiply(pi.divide(ee.Image.constant(180))).cos() # in degrees
sindSunZe_OLI = SunZe_OLI.multiply(pi.divide(ee.Image(180))).sin() # in degrees
# Relative azimuth
RelAz_OLI = ee.Image(SunAz_OLI)
cosdRelAz_OLI = RelAz_OLI.multiply(pi.divide(ee.Image(180))).cos()
# Pressure calculation
P_OLI = ee.Image(101325).multiply(ee.Image(1).subtract(ee.Image(0.0000225577).multiply(DEM_OLI)).pow(5.25588)).multiply \
(0.01)
Po_OLI = ee.Image(1013.25)
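# (standard barometric formula: P = 101325 * (1 - 2.25577e-5 * h)^5.25588 Pa,
# multiplied by 0.01 to convert to hPa; Po is the reference pressure in hPa)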
# Radiometric Calibration #
# define bands to be converted to radiance
bands_OLI = ['B1' ,'B2' ,'B3' ,'B4' ,'B5' ,'B6' ,'B7']
# radiance_mult_bands
rad_mult_OLI = ee.Image(ee.Array([ee.Image(l8.get('RADIANCE_MULT_BAND_1')),
ee.Image(l8.get('RADIANCE_MULT_BAND_2')),
ee.Image(l8.get('RADIANCE_MULT_BAND_3')),
ee.Image(l8.get('RADIANCE_MULT_BAND_4')),
ee.Image(l8.get('RADIANCE_MULT_BAND_5')),
ee.Image(l8.get('RADIANCE_MULT_BAND_6')),
ee.Image(l8.get('RADIANCE_MULT_BAND_7'))]
)).toArray(1)
# radiance add band
rad_add_OLI = ee.Image(ee.Array([ee.Image(l8.get('RADIANCE_ADD_BAND_1')),
ee.Image(l8.get('RADIANCE_ADD_BAND_2')),
ee.Image(l8.get('RADIANCE_ADD_BAND_3')),
ee.Image(l8.get('RADIANCE_ADD_BAND_4')),
ee.Image(l8.get('RADIANCE_ADD_BAND_5')),
ee.Image(l8.get('RADIANCE_ADD_BAND_6')),
ee.Image(l8.get('RADIANCE_ADD_BAND_7'))]
)).toArray(1)
# create an empty image to save new radiance bands to
imgArr_OLI = l8.select(bands_OLI).toArray().toArray(1)
Ltoa_OLI = imgArr_OLI.multiply(rad_mult_OLI).add(rad_add_OLI)
# esun
ESUN_OLI = ee.Image.constant(197.24790954589844)\
.addBands(ee.Image.constant(201.98426818847656))\
.addBands(ee.Image.constant(186.12677001953125))\
.addBands(ee.Image.constant(156.95257568359375))\
.addBands(ee.Image.constant(96.04714965820312))\
.addBands(ee.Image.constant(23.8833221450863))\
.addBands(ee.Image.constant(8.04995873449635)).toArray().toArray(1)
ESUN_OLI = ESUN_OLI.multiply(ee.Image(1))
ESUNImg_OLI = ESUN_OLI.arrayProject([0]).arrayFlatten([bands_OLI])
# Ozone Correction #
# Ozone coefficients
koz_OLI = ee.Image.constant(0.0039).addBands(ee.Image.constant(0.0218))\
.addBands(ee.Image.constant(0.1078))\
.addBands(ee.Image.constant(0.0608))\
.addBands(ee.Image.constant(0.0019))\
.addBands(ee.Image.constant(0))\
.addBands(ee.Image.constant(0))\
.toArray().toArray(1)
# Calculate ozone optical thickness
Toz_OLI = koz_OLI.multiply(DU_OLI).divide(ee.Image.constant(1000))
# Calculate TOA radiance in the absense of ozone
Lt_OLI = Ltoa_OLI.multiply(((Toz_OLI)).multiply
((ee.Image.constant(1).divide(cosdSunZe_OLI)).add(ee.Image.constant(1).divide(cosdSatZe_OLI))).exp())
# Rayleigh optical thickness
bandCenter_OLI = ee.Image(443).divide(1000).addBands(ee.Image(483).divide(1000))\
.addBands(ee.Image(561).divide(1000))\
.addBands(ee.Image(655).divide(1000))\
.addBands(ee.Image(865).divide(1000))\
.addBands(ee.Image(1609).divide(1000))\
.addBands(ee.Number(2201).divide(1000))\
.toArray().toArray(1)
# create an empty image to save new Tr values to
Tr_OLI = (P_OLI.divide(Po_OLI)).multiply(ee.Image(0.008569).multiply(bandCenter_OLI.pow(-4))).multiply((ee.Image(1).add\
(ee.Image(0.0113).multiply(bandCenter_OLI.pow(-2))).add(ee.Image(0.00013).multiply(bandCenter_OLI.pow(-4)))))
# Fresnel Reflection #
# Specular reflection (s- and p- polarization states)
theta_V_OLI = ee.Image(0.0000000001)
sin_theta_j_OLI = sindSunZe_OLI.divide(ee.Image(1.333))
theta_j_OLI = sin_theta_j_OLI.asin().multiply(ee.Image(180).divide(pi))
theta_SZ_OLI = SunZe_OLI
R_theta_SZ_s_OLI = (((theta_SZ_OLI.multiply(pi.divide(ee.Image(180)))).subtract
(theta_j_OLI.multiply(pi.divide(ee.Image(180))))).sin().pow(2)).divide((((theta_SZ_OLI.multiply
(pi.divide(ee.Image(180)))).add(theta_j_OLI.multiply(pi.divide(ee.Image(180))))).sin().pow(2)))
R_theta_V_s_OLI = ee.Image(0.0000000001)
R_theta_SZ_p_OLI = (((theta_SZ_OLI.multiply(pi.divide(180))).subtract(theta_j_OLI.multiply(pi.divide(180)))).tan().pow(2)).\
divide((((theta_SZ_OLI.multiply(pi.divide(180))).add(theta_j_OLI.multiply(pi.divide(180)))).tan().pow(2)))
R_theta_V_p_OLI = ee.Image(0.0000000001)
R_theta_SZ_OLI = ee.Image(0.5).multiply(R_theta_SZ_s_OLI.add(R_theta_SZ_p_OLI))
R_theta_V_OLI = ee.Image(0.5).multiply(R_theta_V_s_OLI.add(R_theta_V_p_OLI))
# Rayleigh scattering phase function #
# Sun-sensor geometry
theta_neg_OLI = ((cosdSunZe_OLI.multiply(ee.Image(-1))).multiply(cosdSatZe_OLI)).\
subtract((sindSunZe_OLI).multiply(sindSatZe_OLI).multiply(cosdRelAz_OLI))
theta_neg_inv_OLI = theta_neg_OLI.acos().multiply(ee.Image(180).divide(pi))
theta_pos_OLI = (cosdSunZe_OLI.multiply(cosdSatZe_OLI)).\
subtract(sindSunZe_OLI.multiply(sindSatZe_OLI).multiply(cosdRelAz_OLI))
theta_pos_inv_OLI = theta_pos_OLI.acos().multiply(ee.Image(180).divide(pi))
cosd_tni_OLI = theta_neg_inv_OLI.multiply(pi.divide(180)).cos() # in degrees
cosd_tpi_OLI = theta_pos_inv_OLI.multiply(pi.divide(180)).cos() # in degrees
Pr_neg_OLI = ee.Image(0.75).multiply((ee.Image(1).add(cosd_tni_OLI.pow(2))))
Pr_pos_OLI = ee.Image(0.75).multiply((ee.Image(1).add(cosd_tpi_OLI.pow(2))))
# Rayleigh scattering phase function
Pr_OLI = Pr_neg_OLI.add((R_theta_SZ_OLI.add(R_theta_V_OLI)).multiply(Pr_pos_OLI))
# Calulate Lr,
denom_OLI = ee.Image(4).multiply(pi).multiply(cosdSatZe_OLI)
Lr_OLI = (ESUN_OLI.multiply(Tr_OLI)).multiply(Pr_OLI.divide(denom_OLI))
# Rayleigh corrected radiance
Lrc_OLI = (Lt_OLI.divide(ee.Image(10))).subtract(Lr_OLI)
LrcImg_OLI = Lrc_OLI.arrayProject([0]).arrayFlatten([bands_OLI])
# Rayleigh corrected reflectance
prc_OLI = Lrc_OLI.multiply(pi).multiply(d_OLI.pow(2)).divide(ESUN_OLI.multiply(cosdSunZe_OLI))
prcImg_OLI = prc_OLI.arrayProject([0]).arrayFlatten([bands_OLI])
# Aerosol Correction #
# Bands in nm
bands_nm_OLI = ee.Image(443).addBands(ee.Image(483))\
.addBands(ee.Image(561))\
.addBands(ee.Image(655))\
.addBands(ee.Image(865))\
.addBands(ee.Image(0))\
.addBands(ee.Image(0))\
.toArray().toArray(1)
# Lam in SWIR bands
Lam_6_OLI = LrcImg_OLI.select('B6')
Lam_7_OLI = LrcImg_OLI.select('B7')
# Calculate aerosol type
eps_OLI = (((((Lam_7_OLI).divide(ESUNImg_OLI.select('B7'))).log()).\
subtract(((Lam_6_OLI).divide(ESUNImg_OLI.select('B6'))).log())).divide(ee.Image(2201).subtract(ee.Image(1609))))\
.multiply(mask)
# Calculate multiple scattering of aerosols for each band
Lam_OLI = (Lam_7_OLI).multiply(((ESUN_OLI).divide(ESUNImg_OLI.select('B7'))))\
.multiply((eps_OLI.multiply(ee.Image(-1))).multiply((bands_nm_OLI.divide(ee.Image(2201)))).exp())
# diffuse transmittance
trans_OLI = Tr_OLI.multiply(ee.Image(-1)).divide(ee.Image(2)).multiply(ee.Image(1).divide(cosdSatZe_OLI)).exp()
# Compute water-leaving radiance
Lw_OLI = Lrc_OLI.subtract(Lam_OLI).divide(trans_OLI)
# water-leaving reflectance
pw_OLI = (Lw_OLI.multiply(pi).multiply(d_OLI.pow(2)).divide(ESUN_OLI.multiply(cosdSunZe_OLI)))
pwImg_OLI = pw_OLI.arrayProject([0]).arrayFlatten([bands_OLI])
# Rrs
Rrs = (pw_OLI.divide(pi).arrayProject([0]).arrayFlatten([bands_OLI]).slice(0, 5))
###########################################################/
# Models #
# Surface water temperature
TIRS_1 = l8.select('B10')
b10_add_band = ee.Number(TIRS_1.get('RADIANCE_ADD_BAND_10'))
b10_mult_band = ee.Number(TIRS_1.get('RADIANCE_MULT_BAND_10'))
Oi = ee.Number(0.29)
TIRS_cal = (TIRS_1.multiply(b10_mult_band).add(b10_add_band).subtract(Oi)) # .multiply(lakes)
K1_b10 = ee.Number(TIRS_1.get('K1_CONSTANT_BAND_10'))
K2_b10 = ee.Number(TIRS_1.get('K2_CONSTANT_BAND_10'))
LST = ((ee.Image(K2_b10).divide(((ee.Image(K1_b10).divide(ee.Image(TIRS_cal))).add(ee.Image(1))).log()))\
.subtract(ee.Image(273))).multiply(mask) # Celsius
LST = (ee.Image(0.7745).multiply(LST)).add(ee.Image(9.6502)) # calibration R2 = 0.8599
# chlor_a
# Chlorophyll-a OC3
a0 = ee.Image(0.2412)
a1 = ee.Image(-2.0546)
a2 = ee.Image(1.1776)
a3 = ee.Image(-0.5538)
a4 = ee.Image(-0.4570)
log_BG = (Rrs.select('B1').divide(Rrs.select('B3'))).log10()
a1a = a1.multiply(log_BG.pow(1))
a2a = a2.multiply(log_BG.pow(2))
a3a = a3.multiply(log_BG.pow(3))
a4a = a4.multiply(log_BG.pow(4))
sum = a1a.add(a2a).add(a3a).add(a4a)
log10_chlor_a = a0.add(sum)
chlor_a = ee.Image(10).pow(log10_chlor_a)
chlor_a_cal = ee.Image(4.0752).multiply(chlor_a).subtract(ee.Image(3.9617))
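# (OC3-style band-ratio algorithm: log10(chl) = a0 + sum_i a_i * X^i with
# X = log10(Rrs(blue)/Rrs(green)), followed by a linear calibration step)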
# SD
ln_BlueRed = (Rrs.select('B2').divide(Rrs.select('B4'))).log()
lnMOSD = (ee.Image(1.4856).multiply(ln_BlueRed)).add(ee.Image(0.2734)) # R2 = 0.8748 with in-situ
MOSD = ee.Image(10).pow(lnMOSD) # log space to (m)
SD = (ee.Image(0.1777).multiply(MOSD)).add(ee.Image(1.0813))
# tsi (Carlson trophic state index: TSI_c = 9.81*ln(chl) + 30.6, TSI_s = 60 - 14.41*ln(SD))
TSI_c = ee.Image(9.81).multiply(chlor_a_cal.log()).add(ee.Image(30.6))
TSI_s = ee.Image(60.0).subtract(ee.Image(14.41).multiply(SD.log()))
TSI = (TSI_c.add(TSI_s)).divide(ee.Image(2))
# Reclassify TSI
# Create conditions
mask1 = TSI.lt(30) # (1)
mask2 = TSI.gte(30).And(TSI.lt(40)) # (2)
mask3 = TSI.gte(40).And(TSI.lt(50)) # (3)
mask4 = TSI.gte(50).And(TSI.lt(60)) # (4)
mask5 = TSI.gte(60).And(TSI.lt(70)) # (5)
mask6 = TSI.gte(70).And(TSI.lt(80)) # (6)
mask7 = TSI.gte(80) # (7)
# Reclassify conditions into new values
img1 = TSI.where(mask1.eq(1), 1).mask(mask1)
img2 = TSI.where(mask2.eq(1), 2).mask(mask2)
img3 = TSI.where(mask3.eq(1), 3).mask(mask3)
img4 = TSI.where(mask4.eq(1), 4).mask(mask4)
img5 = TSI.where(mask5.eq(1), 5).mask(mask5)
img6 = TSI.where(mask6.eq(1), 6).mask(mask6)
img7 = TSI.where(mask7.eq(1), 7).mask(mask7)
# Ouput of reclassified image
TSI_R = img1.unmask(img2).unmask(img3).unmask(img4).unmask(img5).unmask(img6).unmask(img7)
l8Lyr = l8.multiply(mask)
l8VisParams = {'bands': 'B4,B3,B2', 'min': 0, 'max': 15000}
l8MapId = l8Lyr.getMapId(l8VisParams)
LSTvisParams = {'min': 20, 'max': 30, 'palette': 'darkblue,blue,white,red,darkred'}
LSTMapId = LST.getMapId(LSTvisParams)
chlor_a_cal_visParams = {'min': 0, 'max': 80, 'palette':'blue,cyan,limegreen,yellow,orange,darkred'}
chlor_a_cal_mapid = chlor_a_cal.getMapId(chlor_a_cal_visParams)
SDvisParams = {'min': 0, 'max': 3, 'palette': 'darkred,orange,yellow,limegreen,cyan,blue'}
SDMapId = SD.getMapId(SDvisParams)
TSIvisParams = {'min': 30, 'max': 70, 'palette': 'blue,cyan,limegreen,yellow,orange,darkred'}
TSIMapId = TSI.getMapId(TSIvisParams)
TSI_R_visParams = {'min': 1, 'max': 7, 'palette': 'purple,blue,limegreen,yellow,orange,orangered,darkred'}
TSI_R_MapId = TSI_R.getMapId(TSI_R_visParams)
print 'L8',l8MapId
print 'LST',LSTMapId
print 'Chlor A',chlor_a_cal_mapid
print 'SD',SDMapId
print 'TSI',TSIMapId
print 'TSI R',TSI_R_MapId
# Map Layers
# Map.addLayer(l8.multiply(mask), {bands: ['B4', 'B3', 'B2'], min: 0, max: 15000}, 'rgb', false) # rgb
# Map.addLayer(LST, {min: 20, max: 30, palette: ['darkblue', 'blue', 'white', 'red', 'darkred']}, 'LST', false)
# Map.addLayer(chlor_a_cal, {min: 0, max: 80, palette: ['blue', 'cyan', 'limegreen', 'yellow', 'orange', 'darkred']},
# 'chlor_a', false)
# Map.addLayer(SD, {min: 0, max: 3, palette: ['darkred', 'orange', 'yellow', 'limegreen', 'cyan', 'blue']}, 'SD', false)
# Map.addLayer(TSI, {min: 30, max: 70, palette: ['blue', 'cyan', 'limegreen', 'yellow', 'orange', 'darkred']}, 'TSI',
# false)
# Map.addLayer(TSI_R,
# {min: 1, max: 7, palette: ['purple', 'blue', 'limegreen', 'yellow', 'orange', 'orangered', 'darkred']},
# 'TSI_R', true)
#
#
|
py | b408a8524445898f576d64fc0576b37762f2b369 | #!/usr/bin/python
#
# Copyright 2011-2013 Software freedom conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.common.exceptions import WebDriverException
from .service import Service
from .options import Options
class WebDriver(RemoteWebDriver):
"""
Controls the ChromeDriver and allows you to drive the browser.
You will need to download the ChromeDriver executable from
http://chromedriver.storage.googleapis.com/index.html
"""
def __init__(self, executable_path="chromedriver", port=0,
chrome_options=None, service_args=None,
desired_capabilities=None, service_log_path=None):
"""
Creates a new instance of the chrome driver.
Starts the service and then creates new instance of chrome driver.
:Args:
- executable_path - path to the executable. If the default is used it assumes the executable is in the $PATH
- port - port you would like the service to run, if left as 0, a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
- chrome_options: this takes an instance of ChromeOptions
"""
if chrome_options is None:
# desired_capabilities stays as passed in
if desired_capabilities is None:
desired_capabilities = Options().to_capabilities()
else:
if desired_capabilities is None:
desired_capabilities = chrome_options.to_capabilities()
else:
desired_capabilities.update(chrome_options.to_capabilities())
self.service = Service(executable_path, port=port,
service_args=service_args, log_path=service_log_path)
self.service.start()
try:
RemoteWebDriver.__init__(self,
command_executor=self.service.service_url,
desired_capabilities=desired_capabilities,
keep_alive=True)
except:
self.quit()
raise
self._is_remote = False
def quit(self):
"""
Closes the browser and shuts down the ChromeDriver executable
that is started when starting the ChromeDriver
"""
try:
RemoteWebDriver.quit(self)
except:
# We don't care about the message because something probably has gone wrong
pass
finally:
self.service.stop()
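# Example usage (a minimal sketch; the chromedriver path and URL below are
# assumptions, not part of this module):
#
#   from selenium.webdriver.chrome.options import Options
#   opts = Options()
#   driver = WebDriver(executable_path='/usr/local/bin/chromedriver',
#                      chrome_options=opts)
#   driver.get('https://example.com')
#   driver.quit()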
|
py | b408a8a84a5cb25da6fcd28f10322d7611bd8215 | import matplotlib.pyplot as plt
import numpy as np
fh = open('epoch_cost_acc_50.txt')
#fh = open('epoch_cost_acc_50_.txt')
epochs, costs, accs, batchs = [], [], [], []
for line in fh:
epoch, cost, acc, batch = line.split(',')
epochs.append(int(epoch))
costs.append(float(cost))
accs.append(float(acc))
batchs.append(float(batch))
print "minimal cost", min(costs), np.argmin(costs)
print "maximal acc", max(accs), np.argmax(accs)
print "num", len(accs)
plt.plot(costs, 'b')
plt.xlabel("training step")
plt.ylabel("Cross entropy cost per event")
plt.title("Cost of validation per 100 batches of 15 event set of 500 random events")
plt.show()
plt.ylim(0,1)
plt.plot(accs, 'b')
plt.ylabel("Accuracy")
plt.xlabel("training step")
plt.title("Acc of validation per 100 batches of 15 event set of 500 random events")
plt.show()
plt.plot(epochs)
plt.show()
|
py | b408a8c75bf971f15dfb21674016bac555af0d65 | import os
import re
from Utils.release_notes_generator import (get_release_notes_dict,
generate_release_notes_summary,
get_pack_entities,
read_and_format_release_note,
merge_version_blocks,
EMPTY_LINES_REGEX,
get_new_entity_record,
construct_entities_block,
aggregate_release_notes,
aggregate_release_notes_for_marketplace)
TEST_DATA_PATH = 'Tests/scripts/infrastructure_tests/tests_data/RN_tests_data'
VERSION = 'VERSION'
ASSET_ID = 'ASSET_ID'
class TestReadAndFormatReleaseNote:
def test_sanity(self):
"""
Given
- A release note file with 2 Integrations:
- FakePack1_Integration1
- FakePack1_Integration2
When
- Formatting a release notes file.
Then
- Ensure both integration appear in the formatted string
"""
rn_file = os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '1_1_0.md')
formatted_text = read_and_format_release_note(rn_file)
assert 'FakePack1_Integration1' in formatted_text
assert 'FakePack1_Integration2' in formatted_text
def test_ignored_release_notes_block(self):
"""
Given
- A release note file with an Integration and a Script:
- FakePack4_Script1
- FakePack4_Integration1 - should be ignored
When
- Formatting a release notes file.
Then
- Ensure only the script appears in the formatted string
"""
rn_file = os.path.join(TEST_DATA_PATH, 'FakePack4', 'ReleaseNotes', '1_1_0.md')
formatted_text = read_and_format_release_note(rn_file)
assert 'FakePack4_Script1' in formatted_text
assert 'FakePack4_Integration1' not in formatted_text
def test_ignored_entire_release_note(self):
"""
Given
- A release note file with an Integration and a Script:
- FakePack4_Script1
- FakePack4_Integration1
When
- Formatting a release notes file.
Then
- Ensure formatted string is empty.
"""
rn_file = os.path.join(TEST_DATA_PATH, 'FakePack4', 'ReleaseNotes', '1_0_1.md')
formatted_text = read_and_format_release_note(rn_file)
assert formatted_text == ''
class TestGenerateReleaseNotesSummary:
def setup(self):
self._version = VERSION
self._asset_id = ASSET_ID
self._outfile = 'temp.md'
def test_added_pack(self):
"""
Given
- A repository of two new packs:
- FakePack3 version 1.0.0
- FakePack4 version 1.0.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the release notes summary contains two packs:
- FakePack3 with version 1.0.0
- FakePack4 with version 1.0.0
"""
new_packs_rn = {
'FakePack3': get_pack_entities(os.path.join(TEST_DATA_PATH, 'FakePack3')),
'FakePack4': get_pack_entities(os.path.join(TEST_DATA_PATH, 'FakePack4')),
}
packs_metadta_dict = {
'FakePack3': {},
'FakePack4': {}
}
rn_summary = generate_release_notes_summary(
new_packs_rn, {}, packs_metadta_dict, self._version, self._asset_id, 'temp.md')
assert '## New: FakePack3 Pack v1.0.0' in rn_summary
assert '## New: FakePack4 Pack v1.0.0' in rn_summary
def test_added_partner_pack(self):
"""
Given
- A repository of two new packs:
- FakePack3 version 1.0.0, metadata "supports" field has value "partner"
- FakePack4 version 1.0.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the release notes summary contains two packs:
- FakePack3 with version 1.0.0 and has the string "(Partner Supported)" after the version
- FakePack4 with version 1.0.0 dose not have the string "(Partner Supported)" after the version
"""
new_packs_rn = {
'FakePack3': get_pack_entities(os.path.join(TEST_DATA_PATH, 'FakePack3')),
'FakePack4': get_pack_entities(os.path.join(TEST_DATA_PATH, 'FakePack4')),
}
packs_metadta_dict = {
'FakePack3': {'support': 'partner'},
'FakePack4': {'support': 'xsoar'}
}
rn_summary = generate_release_notes_summary(
new_packs_rn, {}, packs_metadta_dict, self._version, self._asset_id, 'temp.md')
assert '## New: FakePack3 Pack v1.0.0 (Partner Supported)' in rn_summary
assert '## New: FakePack4 Pack v1.0.0' in rn_summary
assert '## New: FakePack4 Pack v1.0.0 (Partner Supported)' not in rn_summary
def test_two_packs(self):
"""
Given
- A repository of two packs updates and release notes:
- FakePack1 with versions 1.1.0 and 2.0.0
- FakePack2 version 1.1.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the output of get_release_notes_dict() is a valid dict of (pack_name, dict(pack_version, release_note)).
- the release notes summary contains two packs with 3 updates:
- FakePack1 with versions 1.1.0 and 2.0.0
- FakePack2 with versions 1.1.0
"""
release_notes_files = [
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '1_1_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '2_0_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack2', 'ReleaseNotes', '1_1_0.md'),
]
rn_dict, _ = get_release_notes_dict(release_notes_files)
packs_metadta_dict = {
'FakePack1': {},
'FakePack2': {}
}
assert '1.1.0' in rn_dict['FakePack1'].keys()
assert '2.0.0' in rn_dict['FakePack1'].keys()
assert '1.1.0' in rn_dict['FakePack2'].keys()
rn_summary = generate_release_notes_summary({}, rn_dict, packs_metadta_dict, self._version, self._asset_id, self._outfile)
assert VERSION in rn_summary and ASSET_ID in rn_summary # summary title
assert '### FakePack1 Pack v2.0.0' in rn_summary
assert '##### FakePack1_Integration1' in rn_summary
assert 'This is a fake1 minor release note.' in rn_summary
assert 'This is a fake1 major release note.' in rn_summary
assert '### FakePack2 Pack v1.1.0' in rn_summary
assert '##### FakePack2_Script1' in rn_summary
assert 'This is a fake2 major release note.' in rn_summary
def test_updated_partner_pack(self):
"""
Given
- A repository of two packs updates and release notes:
- FakePack1 with version 2.0.0 metadata "supports" field has value "partner"
- FakePack2 version 1.1.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the output of get_release_notes_dict() is a valid dict of (pack_name, dict(pack_version, release_note)).
- the release notes summary contains two packs with the flowing:
- FakePack1 with version 2.0.0 and has the string "(Partner Supported)" after the version
- FakePack2 with version 1.1.0 dose not have the string "(Partner Supported)" after the version
"""
release_notes_files = [
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '1_1_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '2_0_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack2', 'ReleaseNotes', '1_1_0.md'),
]
rn_dict, _ = get_release_notes_dict(release_notes_files)
packs_metadta_dict = {
'FakePack1': {'support': 'partner'},
'FakePack2': {'support': 'xsoar'}
}
assert '2.0.0' in rn_dict['FakePack1'].keys()
assert '1.1.0' in rn_dict['FakePack2'].keys()
rn_summary = generate_release_notes_summary({}, rn_dict, packs_metadta_dict, self._version, self._asset_id, self._outfile)
assert VERSION in rn_summary and ASSET_ID in rn_summary # summary title
assert '### FakePack1 Pack v2.0.0 (Partner Supported)' in rn_summary
assert '### FakePack2 Pack v1.1.0' in rn_summary
assert '### FakePack2 Pack v1.1.0 (Partner Supported)' not in rn_summary
def test_release_notes_summary_with_empty_lines_in_rn(self):
"""
Given
- A repository contains a FakePack3 update with ignored release notes.
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the output of get_release_notes_dict() is a dict of (pack_name, dict(pack_version, release_note)).
- empty lines (with dashes) are removed from the release notes summary.
"""
release_notes_files = [
os.path.join(TEST_DATA_PATH, 'FakePack3', 'ReleaseNotes', '1_0_1.md')
]
packs_metadta_dict = {
'FakePack3': {}
}
rn_dict, _ = get_release_notes_dict(release_notes_files)
assert '1.0.1' in rn_dict['FakePack3'].keys()
assert len(rn_dict) == 1
rn_summary = generate_release_notes_summary({}, rn_dict, packs_metadta_dict, self._version, self._asset_id, self._outfile)
print(rn_summary)
match = re.search(EMPTY_LINES_REGEX, rn_summary)
assert match is None
def test_release_notes_summary_with_ignored_rns(self):
"""
Given
- A repository of a packs update and release notes:
- FakePack4 with versions 1.0.1 and 1.1.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the output of get_release_notes_dict() is a valid dict of (pack_name, dict(pack_version, release_note))
- the release notes summary contains one packs with 1 updates:
- FakePack4 version 1.1.0
- the summary does not contain release notes 1.0.1, because it is ignored.
"""
release_notes_files = [
os.path.join(TEST_DATA_PATH, 'FakePack4', 'ReleaseNotes', '1_0_1.md'),
os.path.join(TEST_DATA_PATH, 'FakePack4', 'ReleaseNotes', '1_1_0.md'),
]
packs_metadta_dict = {
'FakePack4': {}
}
rn_dict, _ = get_release_notes_dict(release_notes_files)
assert '1.1.0' in rn_dict['FakePack4'].keys()
assert len(rn_dict) == 1
rn_summary = generate_release_notes_summary({}, rn_dict, packs_metadta_dict, self._version, self._asset_id, self._outfile)
assert '### FakePack4 Pack v1.1.0' in rn_summary
assert '##### FakePack4_Script1' in rn_summary
class TestMergeVersionBlocks:
def test_aggregate_release_notes_for_marketplace(self):
"""
Given
- Two release notes files with content entity instance wrapped with ** and entity type contains spaces.
When
- Merging the two release notes files into one file.
Then
- Ensure that the content entity instance is wrapped with **.
- Ensure that the content entity type contains whitespace.
- Ensure that the content of both RN files appears in the result file.
"""
release_notes_paths = [
os.path.join(TEST_DATA_PATH, 'FakePack6', 'ReleaseNotes', '1_0_1.md'),
os.path.join(TEST_DATA_PATH, 'FakePack6', 'ReleaseNotes', '1_0_2.md'),
]
pack_versions_dict = {}
for path in release_notes_paths:
with open(path) as file_:
pack_versions_dict[os.path.basename(os.path.splitext(path)[0])] = file_.read()
rn_block = aggregate_release_notes_for_marketplace(pack_versions_dict)
assert 'Incident Fields' in rn_block
assert '**XDR Alerts**' in rn_block
assert 'First' in rn_block
assert 'Second' in rn_block
assert rn_block.endswith('\n')
assert rn_block.startswith('\n')
def test_spaced_content_entity_and_old_format(self):
"""
Given
- Two release notes files with content entity instance wrapped with ** and entity type contains spaces.
When
- Merging the two release notes files into one file.
Then
- Ensure that the content entity instance is wrapped with **.
- Ensure that the content entity type contains whitespace.
- Ensure that the content of both RN files appears in the result file.
"""
release_notes_paths = [
os.path.join(TEST_DATA_PATH, 'FakePack6', 'ReleaseNotes', '1_0_1.md'),
os.path.join(TEST_DATA_PATH, 'FakePack6', 'ReleaseNotes', '1_0_2.md'),
]
pack_versions_dict = {}
for path in release_notes_paths:
with open(path) as file_:
pack_versions_dict[os.path.basename(os.path.splitext(path)[0])] = file_.read()
rn_block, latest_version = merge_version_blocks(pack_versions_dict)
assert 'Incident Fields' in rn_block
assert '**XDR Alerts**' in rn_block
assert 'First' in rn_block
assert 'Second' in rn_block
assert latest_version == '1_0_2'
def test_sanity(self):
"""
Given
two changes in foreign content types
When
two pack versions that modified different items.
Then
type sections appears one after the other
"""
release_notes_paths = [
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '1_1_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '2_1_0.md'),
]
pack_versions_dict = {}
for path in release_notes_paths:
with open(path) as file_:
pack_versions_dict[os.path.basename(os.path.splitext(path)[0])] = file_.read()
rn_block = aggregate_release_notes('FakePack', pack_versions_dict, {})
assert 'FakePack1_Playbook1' in rn_block
assert 'FakePack1_Playbook2' in rn_block
assert 'FakePack1_Integration1' in rn_block
assert 'FakePack1_Integration2' in rn_block
assert 'v2_1_0' in rn_block
assert 'v1_1_0' not in rn_block
def test_similiar_entities(self):
"""
Given
two changes in similar content entities
When
two pack versions that modified the same items.
Then
one integration section appears
one entity title for each one with two comments
"""
release_notes_paths = [
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '1_1_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '2_0_0.md'),
]
pack_versions_dict = {}
for path in release_notes_paths:
with open(path) as file_:
pack_versions_dict[os.path.basename(os.path.splitext(path)[0])] = file_.read()
rn_block = aggregate_release_notes('FakePack', pack_versions_dict, {})
assert rn_block.count('Integrations') == 1
assert rn_block.count('FakePack1_Integration1') == 1
assert rn_block.count('FakePack1_Integration2') == 1
assert 'v2_0_0' in rn_block
assert 'v1_1_0' not in rn_block
assert 'fake1 minor' in rn_block
assert 'fake2 minor' in rn_block
assert 'fake1 major' in rn_block
assert 'fake2 major' in rn_block
def test_get_new_entity_record_integration(self):
"""
Given
fake integration path.
When
getting entity record for integration.
Then
Ensure the method is valid and returns the integration name and description.
"""
name, description = get_new_entity_record(os.path.join(TEST_DATA_PATH,
'FakePack5', 'Integrations', 'fake_integration.yml'))
assert name == 'fake_integration'
assert description == 'Use the Zoom integration manage your Zoom users and meetings'
def test_get_new_entity_record_layout(self):
"""
Given
fake layout path.
When
getting entity record for layout.
Then
Ensure the method is valid and returns the layout name and description.
"""
name, description = get_new_entity_record(os.path.join(TEST_DATA_PATH,
'FakePack5', 'Layouts', 'fake_layout.json'))
assert name == 'Fake layout - Close'
assert description == ''
def test_get_new_entity_record_classifier(self):
"""
Given
fake classifier path.
When
getting entity record for classifier.
Then
Ensure the method is valid and returns the classifier name and description.
"""
name, description = get_new_entity_record(os.path.join(TEST_DATA_PATH,
'FakePack5', 'Classifiers', 'fake_classifier.json'))
assert name == 'Fake classifier'
assert description == 'Maps incoming Prisma Cloud event fields.'
def test_construct_entities_block_integration(self):
"""
Given
integration entities_data.
When
generates pack release note block for integration.
Then
Ensure the method is valid and the release note block contains Tanium integration.
"""
entities_data = {'Integrations': {'Tanium': 'Tanium endpoint security and systems management'}}
rn = construct_entities_block(entities_data)
assert '### Integrations' in rn
assert '##### Tanium' in rn
assert 'Tanium endpoint security and systems management' in rn
def test_construct_entities_block_indicator_types(self):
"""
Given
indicator type entities_data.
When
generates pack release note block for indicator type.
Then
Ensure the method is valid and the release note block contains accountRep indicator.
"""
entities_data = {'IndicatorTypes': {'accountRep': ''}}
rn = construct_entities_block(entities_data)
assert '### Indicator Types' in rn
assert '- **accountRep**' in rn
|
py | b408ab51815c85241ee173165d1f795263b76235 | from vtk import *
source = vtkRandomGraphSource()
source.DirectedOff()
source.SetNumberOfVertices(100)
source.SetEdgeProbability(0.1)
source.SetUseEdgeProbability(True)
source.AllowParallelEdgesOn()
source.AllowSelfLoopsOn()
source.SetStartWithTree(True)
# Connect to the centrality filter.
centrality = vtkBoostBrandesCentrality ()
centrality.SetInputConnection(source.GetOutputPort())
# Find the minimal spanning tree
mstTreeSelection = vtkBoostKruskalMinimumSpanningTree()
mstTreeSelection.SetInputConnection(centrality.GetOutputPort())
mstTreeSelection.SetEdgeWeightArrayName("centrality")
mstTreeSelection.NegateEdgeWeightsOn()
mstTreeSelection.Update()
# Take selection and extract a graph
extract_graph = vtkExtractSelectedGraph()
extract_graph.AddInputConnection(centrality.GetOutputPort())
extract_graph.SetSelectionConnection(mstTreeSelection.GetOutputPort())
# Extract a tree from the graph
extract_tree = vtkBoostBreadthFirstSearchTree()
extract_tree.AddInputConnection(extract_graph.GetOutputPort())
# Create a graph layout view
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(centrality.GetOutputPort())
view.SetVertexLabelArrayName("centrality")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("centrality")
view.SetColorVertices(True)
view.SetEdgeColorArrayName("centrality")
view.SetColorEdges(True)
view.SetLayoutStrategyToSimple2D()
# Setup a couple layout strategies so we can switch
# them out for comparison
treeStrat = vtkTreeLayoutStrategy();
treeStrat.RadialOn()
treeStrat.SetAngle(120)
treeStrat.SetLogSpacingValue(1)
forceStrat = vtkSimple2DLayoutStrategy()
forceStrat.SetEdgeWeightField("centrality")
# Create an HGV
view2 = vtkHierarchicalGraphView()
view2.SetHierarchyFromInputConnection(extract_tree.GetOutputPort())
view2.SetGraphFromInputConnection(centrality.GetOutputPort())
view2.SetVertexColorArrayName("centrality")
view2.SetColorVertices(True)
view2.SetVertexLabelArrayName("centrality")
view2.SetVertexLabelVisibility(True)
view2.SetEdgeColorArrayName("centrality")
view2.SetColorEdges(True)
view2.SetBundlingStrength(.75)
view2.SetLayoutStrategy(forceStrat)
#view2.SetLayoutStrategy(treeStrat)
# Make sure all views are using a pedigree id selection
view.SetSelectionType(2)
view2.SetSelectionType(2)
# Create a selection link and set both view to use it
#selectionLink = vtkSelectionLink()
#view.GetRepresentation(0).SetSelectionLink(selectionLink)
#view2.GetRepresentation(0).SetSelectionLink(selectionLink)
# Set the selection to be the MST
view.GetRepresentation(0).GetSelectionLink().SetSelection(mstTreeSelection.GetOutput())
# Set the selection to be the MST
view2.GetGraphRepresentation().GetSelectionLink().SetSelection(mstTreeSelection.GetOutput())
# Set the theme on the view
theme = vtkViewTheme.CreateMellowTheme()
theme.SetLineWidth(4)
theme.SetPointSize(8)
theme.SetSelectedCellColor(1,0,1)
theme.SetSelectedPointColor(1,0,1)
view.ApplyViewTheme(theme)
theme.SetLineWidth(1)
view2.ApplyViewTheme(theme)
window = vtkRenderWindow()
window.SetSize(600, 600)
view.SetupRenderWindow(window)
view.GetRenderer().ResetCamera()
window2 = vtkRenderWindow()
window2.SetSize(600, 600)
view2.SetupRenderWindow(window2)
view2.GetRenderer().ResetCamera()
window.GetInteractor().Start()
|
py | b408ac18e813a25af5f99664b6fed3782a8bfe0a | import requests
import json
import paramiko
from collections import defaultdict, OrderedDict
import time
import sys
import itertools
from math import ceil
import threading
import concurrent.futures
import subprocess
from random import randint
server_aca_repo_path = ''
aca_data_destination_path = '/test/gtest/aca_data.json'
aca_data_local_path = './aca_data.json'
ips_ports_ip_prefix = "123."
mac_port_prefix = "6c:dd:ee:"
# Transfer a local file to the ACA nodes via SFTP
def upload_file_aca(host, user, password, server_path, local_path, timeout=600):
"""
:param host
:param user
:param password
:param server_path: /root/alcor-control-agent/test/gtest
:param local_path: ./text.txt
:param timeout
:return: bool
"""
try:
for host_ip in host:
t = paramiko.Transport((host_ip, 22))
t.banner_timeout = timeout
t.connect(username=user, password=password)
sftp = paramiko.SFTPClient.from_transport(t)
sftp.put(local_path, server_path)
t.close()
return True
except Exception as e:
print(e)
return False
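# A minimal usage sketch for the helper above (host list, credentials and paths are
# illustrative placeholders, not values taken from zeta_data.json):
# upload_file_aca(['192.168.20.92'], 'user', 'password',
#                 '/root/alcor-control-agent/test/gtest/aca_data.json',
#                 './aca_data.json')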
# Execute remote SSH commands
def exec_sshCommand_aca(host, user, password, cmd, timeout=60, output=True):
"""
:param host
:param user
:param password
:param cmd
:param seconds
:return: dict
"""
result = {'status': [], 'data': [], 'error': False} # Record return result
try:
        # Create an SSHClient instance
ssh = paramiko.SSHClient()
ssh.banner_timeout = timeout
# set host key
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Connect to remote server
ssh.connect(host, 22, user, password, timeout=timeout)
for command in cmd:
# execute command
print(f'executing command: {command}')
stdin, stdout, stderr = ssh.exec_command(
command, get_pty=True, timeout=timeout)
            # If the command uses sudo, send the password on stdin
            if 'sudo' in command:
                stdin.write(password + '\n')
            # result of execution; readlines() would return a list
            # out1 = stdout.readlines()
            out2 = stdout.read()
            # exit status: 0 means success, non-zero means failure
channel = stdout.channel
status = channel.recv_exit_status()
result['status'].append(status)
decoded_output = out2.decode()
result['data'].append(decoded_output)
if output:
print(f'Output: {decoded_output}')
ssh.close() # close ssh connection
return result
except Exception as e:
print(f'Exception when executing command:{e}')
result['error'] = True
return result
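# A minimal usage sketch for the helper above (host and credentials are illustrative):
# result = exec_sshCommand_aca('192.168.20.92', 'user', 'password', ['uname -a'], timeout=30)
# if not result['error'] and result['status'][0] == 0:
#     print(result['data'][0])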
def talk_to_zeta(file_path, zgc_api_url, zeta_data, port_api_upper_limit, time_interval_between_calls_in_seconds, ports_to_send_to_aca):
headers = {'Content-type': 'application/json'}
# create ZGC
ZGC_data = zeta_data["ZGC_data"]
print(f'ZGC_data: \n{ZGC_data}')
zgc_response = requests.post(
zgc_api_url + "/zgcs", data=json.dumps(ZGC_data), headers=headers)
print(f'zgc creation response: \n{zgc_response.text}')
if zgc_response.status_code >= 300:
print('Failed to create zgc, pseudo controller will stop now.')
return False
zgc_id = zgc_response.json()['zgc_id']
# add Nodes
for node in zeta_data["NODE_data"]:
node_data = node
node_data['zgc_id'] = zgc_id
print(f'node_data: \n{node_data}')
node_response_data = requests.post(
zgc_api_url + "/nodes", data=json.dumps(node_data), headers=headers)
print(f'Response for adding node: {node_response_data.text}')
if node_response_data.status_code >= 300:
print('Failed to create nodes, pseudo controller will stop now.')
return False
json_content_for_aca = dict()
json_content_for_aca['vpc_response'] = {}
json_content_for_aca['port_response'] = {}
# first delay
# TODO: Check if this can be removed.
print('Sleep 10 seconds after the Nodes call')
time.sleep(10)
# add VPC
for item in zeta_data["VPC_data"]:
VPC_data = item
print(f'VPC_data: \n{VPC_data}')
vpc_response = requests.post(
zgc_api_url + "/vpcs", data=json.dumps(VPC_data), headers=headers)
print(f'Response for adding VPC: {vpc_response.text}')
if vpc_response.status_code >= 300:
print('Failed to create vpc, pseudo controller will stop now.')
return False
json_content_for_aca['vpc_response'] = (vpc_response.json())
# second delay
# TODO: Check if this can be removed.
print('Sleep 10 seconds after the VPC call')
time.sleep(10)
print('Start calling /ports API')
# notify ZGC the ports created on each ACA
PORT_data = zeta_data["PORT_data"]
amount_of_ports = len(PORT_data)
all_post_responses = []
all_ports_start_time = time.time()
print(f'Port_data length: \n{amount_of_ports}')
should_sleep = True
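    # Post the ports in batches of at most port_api_upper_limit; end_idx is clamped
    # to the total port count, and the sleep after the final batch is skipped
    # (should_sleep is set to False on the last batch).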
for i in range(ceil(len(PORT_data) / port_api_upper_limit)):
start_idx = i * port_api_upper_limit
end_idx = start_idx
if end_idx + port_api_upper_limit >= amount_of_ports:
end_idx = amount_of_ports
else:
end_idx = end_idx + port_api_upper_limit
if start_idx == end_idx:
end_idx = end_idx + 1
if end_idx == amount_of_ports:
should_sleep = False
print(
f'In this /ports POST call, we are calling with port from {start_idx} to {end_idx}')
one_call_start_time = time.time()
port_response = requests.post(
zgc_api_url + "/ports", data=json.dumps(PORT_data[start_idx: end_idx]), headers=headers)
if port_response.status_code >= 300:
print(
f'Call failed for index {start_idx} to {end_idx}, \nstatus code: {port_response.status_code}, \ncontent: {port_response.content}\nExiting')
return False
one_call_end_time = time.time()
print(
f'ONE PORT post call ended, for {end_idx - start_idx} ports creation it took: {one_call_end_time - one_call_start_time} seconds')
all_post_responses.append(port_response.json())
if should_sleep:
time.sleep(time_interval_between_calls_in_seconds)
all_ports_end_time = time.time()
print(
f'ALL PORT post call ended, for {amount_of_ports} ports creation it took: {all_ports_end_time - all_ports_start_time} seconds')
json_content_for_aca['port_response'] = list(
itertools.chain.from_iterable(all_post_responses))[:ports_to_send_to_aca]
print(
f'Amount of ports to send to aca: {len(json_content_for_aca["port_response"])}')
with open('aca_data.json', 'w') as outfile:
json.dump(json_content_for_aca, outfile)
print(f'The aca data is exported to {aca_data_local_path}')
return json_content_for_aca
# The port info below is based on the PORT_data in zeta_data.json; modify it accordingly to suit your needs
def get_port_template(i):
if i % 2 == 0:
return {
"port_id": "333d4fae-7dec-11d0-a765-00a0c9341120",
"vpc_id": "3dda2801-d675-4688-a63f-dcda8d327f61",
"ips_port": [
{
"ip": "10.10.0.92",
"vip": ""
}
],
"mac_port": "cc:dd:ee:ff:11:22",
"ip_node": "192.168.20.92",
# "ip_node": "172.16.150.221",
"mac_node": "e8:bd:d1:01:77:ec"
# "mac_node": "64:6e:97:0d:80:a9"
}
return {
"port_id": "99976feae-7dec-11d0-a765-00a0c9342230",
"vpc_id": "3dda2801-d675-4688-a63f-dcda8d327f61",
"ips_port": [
{
"ip": "10.10.0.93",
"vip": ""
}
],
"mac_port": "6c:dd:ee:ff:11:32",
"ip_node": "192.168.20.93",
# "ip_node": "172.16.150.222",
"mac_node": "e8:bd:d1:01:72:c8"
# "mac_node": "64:6e:97:1c:8e:65"
}
def generate_ports(ports_to_create):
print(f'Need to generate {ports_to_create} ports')
node_data = {}
    all_ports_generated = []  # indices where i % 10 == 0 are skipped below
i = 0
while len(all_ports_generated) != ports_to_create:
if i % 10 != 0:
port_template_to_use = get_port_template(i)
port_id = '{0:07d}ae-7dec-11d0-a765-00a0c9341120'.format(i)
ip_2nd_octet = str((i // 10000))
ip_3rd_octet = str((i % 10000 // 100))
ip_4th_octet = str((i % 100))
ip = ips_ports_ip_prefix + ip_2nd_octet + \
"." + ip_3rd_octet + "." + ip_4th_octet
mac = mac_port_prefix + ip_2nd_octet + ":" + ip_3rd_octet + ":" + ip_4th_octet
port_template_to_use['port_id'] = port_id
port_template_to_use['ips_port'][0]['ip'] = ip
port_template_to_use['mac_port'] = mac
all_ports_generated.append(port_template_to_use)
i = i + 1
return all_ports_generated
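# Worked example of the generation above (using the prefixes defined at the top of
# this file): for i == 12345 the generated port gets
#   port_id  '0012345ae-7dec-11d0-a765-00a0c9341120'
#   ip       '123.1.23.45'   (octets 12345 // 10000, 12345 % 10000 // 100, 12345 % 100)
#   mac      '6c:dd:ee:1:23:45'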
# To run the pseudo controller, either run it without specifying how many ports to create, which creates 2 ports,
# or specify the amount of ports to create (up to one million ports) with 'python3 run.py amount_of_ports_to_create', and the controller will create that many ports.
# Also, three more params are supported.
# First is port_api_upper_limit, which should not exceed 4000; it is the batch size for each /ports POST call.
# Second is time_interval_between_calls_in_seconds; it is the time the pseudo controller sleeps after each /ports POST call, except for the last call.
# Third is how many ports to send to aca; this defaults to 2 and, if specified, no more than amount_of_ports_to_create will be sent to aca. However, we suggest not setting this number to more than 10, as it may significantly slow down the aca nodes, since the amount of ports (and thus the amount of containers to be created on the aca nodes) increases.
# After ports are created and aca data is sent to the aca nodes, testcase DISABLED_zeta_scale_container will be called on the aca nodes to create the aca data, construct the goalstate accordingly, and spin up containers that represent the ports.
# After that, 3 ping tests will be performed from the aca parent node to the aca child node with randomly selected ports, followed by another 3 similar ping tests from the aca child node to the aca parent node.
# So if you only want to run the two-node test, you can simply run 'python3 run.py'
# If you want to try the scale test, you can run 'python3 run.py total_amount_of_ports how_many_ports_each_batch how_many_seconds_controller_sleeps_after_each_call how_many_port_to_send_to_aca' (see the example invocations below).
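# Example invocations (sketch; the numbers are illustrative):
#   python3 run.py                     -> creates 2 ports, sends 2 to ACA
#   python3 run.py 10000 1000 10 2     -> 10,000 ports total, 1,000 ports per /ports call,
#                                         10 s sleep between calls, 2 ports sent to ACA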
def run():
# rebuild zgc nodes kvm and cleanup zeta data
subprocess.call(
['/home/user/ws/zzxgzgz/zeta/deploy/zeta_deploy.sh', '-d', 'lab'])
port_api_upper_limit = 1000
time_interval_between_calls_in_seconds = 10
ports_to_create = 2
    # the first command-line argument is how many ports to generate; optional arguments follow below
arguments = sys.argv
print(f'Arguments: {arguments}')
file_path = './data/zeta_data.json'
# file_path = './data/zeta_data_sdn.json'
zeta_data = {}
with open(file_path, 'r', encoding='utf8')as fp:
zeta_data = json.loads(fp.read())
server_aca_repo_path = zeta_data['server_aca_repo_path']
print(f'Server aca repo path: {server_aca_repo_path}')
zgc_api_url = zeta_data["zeta_api_ip"]
testcases_to_run = ['DISABLED_zeta_scale_container',
'DISABLED_zeta_scale_container']
execute_ping = True
# second argument should be amount of ports to be generated
if len(arguments) > 1:
ports_to_create = int(arguments[1])
if ports_to_create > 1000000:
print(
f'You tried to create {ports_to_create} ports, but the pseudo controller only supports up to 1,000,000 ports, sorry.')
return
print("Has arguments, need to generate some ports!")
if ports_to_create >= 2:
print(f'Trying to create {ports_to_create} ports.')
zeta_data['PORT_data'] = generate_ports(ports_to_create)
execute_ping = True
print(
f'After generating ports, we now have {len(zeta_data["PORT_data"])} entries in the PORT_data')
elif ports_to_create < 2:
            print('Too few ports to create; please enter a larger number')
if len(arguments) > 2:
arg2 = int(arguments[2])
if arg2 <= 4000:
port_api_upper_limit = arg2
print(f'Set the amount of ports in each port call to be {arg2}')
else:
print(
                f'You are trying to call the /ports API with more than {arg2} entries per call, which is too much. Please enter a number no more than 4000.')
return
if len(arguments) > 3:
arg3 = int(arguments[3])
time_interval_between_calls_in_seconds = arg3
print(
            f'Set time interval between /ports POST calls to be {arg3} seconds.')
ports_to_send_to_aca = 2
if len(arguments) > 4:
arg4 = int(arguments[4])
ports_to_send_to_aca = arg4
print(
f'Set amount of ports to sent to aca to be: {ports_to_send_to_aca}')
json_content_for_aca = talk_to_zeta(file_path, zgc_api_url, zeta_data,
port_api_upper_limit, time_interval_between_calls_in_seconds, ports_to_send_to_aca)
    if json_content_for_aca is False:
        print('Failed to talk to Zeta, pseudo controller will exit now.')
        return
aca_nodes_data = zeta_data["aca_nodes"]
aca_nodes_ip = aca_nodes_data['ip']
res = upload_file_aca(aca_nodes_data['ip'], aca_nodes_data['username'], aca_nodes_data['password'],
server_aca_repo_path + aca_data_destination_path, aca_data_local_path)
    if not res:
        print("Failed to upload file %s" % aca_data_local_path)
        return
    else:
        print("Uploaded file %s successfully" % aca_data_local_path)
print('Before the Ping test, remove previously created containers on aca nodes, if any.')
remove_container_cmd = [
'docker rm -f $(docker ps --filter "label=test=zeta" -aq)']
aca_nodes = aca_nodes_ip
exec_sshCommand_aca(
host=aca_nodes[0], user=aca_nodes_data['username'], password=aca_nodes_data['password'], cmd=remove_container_cmd, timeout=20)
exec_sshCommand_aca(
host=aca_nodes[1], user=aca_nodes_data['username'], password=aca_nodes_data['password'], cmd=remove_container_cmd, timeout=20)
test_start_time = time.time()
    # Execute remote command, use the transferred file to change the information in aca_test_ovs_util.cpp, recompile using 'make', and perform aca_test
cmd_child = [
f'cd {server_aca_repo_path};sudo ./build/tests/aca_tests --gtest_also_run_disabled_tests --gtest_filter=*{testcases_to_run[0]}']
cmd_parent = [
f'cd {server_aca_repo_path};sudo ./build/tests/aca_tests --gtest_also_run_disabled_tests --gtest_filter=*{testcases_to_run[1]}']
with concurrent.futures.ThreadPoolExecutor() as executor:
future_child = executor.submit(
exec_sshCommand_aca, aca_nodes[1], aca_nodes_data['username'], aca_nodes_data['password'], cmd_child, 1500, False)
future_parent = executor.submit(
exec_sshCommand_aca, aca_nodes[0], aca_nodes_data['username'], aca_nodes_data['password'], cmd_parent, 1500, False)
result_child = future_child.result()
result_parent = future_parent.result()
text_file_child = open("output_child.log", "w")
text_file_child.write(result_child['data'][0])
text_file_child.close()
text_file_parent = open("output_parent.log", "w")
text_file_parent.write(result_parent['data'][0])
text_file_parent.close()
print("Port set up finished")
test_end_time = time.time()
print(
f'Time took for the tests of ACA nodes are {test_end_time - test_start_time} seconds.')
if execute_ping:
print('Time for the Ping test')
parent_ports = [port for port in json_content_for_aca['port_response'] if (
port['ip_node'].split('.'))[3] == (zeta_data['aca_nodes']['ip'][0].split('.'))[3]]
parent_node_containers_names_string = ""
for port in parent_ports:
parent_node_containers_names_string = parent_node_containers_names_string + \
f' con-{port["ips_port"][0]["ip"]}'
child_ports = [port for port in json_content_for_aca['port_response'] if (
port['ip_node'].split('.'))[3] == (zeta_data['aca_nodes']['ip'][1].split('.'))[3]]
child_node_containers_names_string = ""
for port in child_ports:
child_node_containers_names_string = child_node_containers_names_string + \
f' con-{port["ips_port"][0]["ip"]}'
ping_result = {}
if len(parent_ports) > 0 and len(child_ports) > 0:
ping_times = 3
print(
f"*************Doing ping from parent: {aca_nodes[0]} to child: {aca_nodes[1]}*************")
for i in range(ping_times):
dump_flow_cmd = ['sudo ovs-ofctl dump-flows br-tun']
br_tun_before_ping = exec_sshCommand_aca(
host=aca_nodes[0], user=aca_nodes_data['username'], password=aca_nodes_data['password'], cmd=dump_flow_cmd, timeout=20)
pinger = parent_ports[randint(0,
len(parent_ports)-1)]["ips_port"][0]["ip"]
pingee = child_ports[randint(0,
len(child_ports)-1)]["ips_port"][0]["ip"]
ping_cmd = [f'docker exec con-{pinger} ping -c1 {pingee}']
print(f'Command for ping: {ping_cmd[0]}')
ping_result = exec_sshCommand_aca(
host=aca_nodes[0], user=aca_nodes_data['username'], password=aca_nodes_data['password'], cmd=ping_cmd, timeout=20)
br_tun_after_ping = exec_sshCommand_aca(
host=aca_nodes[0], user=aca_nodes_data['username'], password=aca_nodes_data['password'], cmd=dump_flow_cmd, timeout=20)
print(f'Ping succeeded: {ping_result["status"][0] == 0}')
print(
f"*************Doing ping from child: {aca_nodes[1]} to parent: {aca_nodes[0]}*************")
for i in range(ping_times):
dump_flow_cmd = ['sudo ovs-ofctl dump-flows br-tun']
br_tun_before_ping = exec_sshCommand_aca(
host=aca_nodes[1], user=aca_nodes_data['username'], password=aca_nodes_data['password'], cmd=dump_flow_cmd, timeout=20)
pinger = child_ports[randint(0,
len(child_ports)-1)]["ips_port"][0]["ip"]
pingee = parent_ports[randint(0,
len(parent_ports)-1)]["ips_port"][0]["ip"]
ping_cmd = [f'docker exec con-{pinger} ping -c1 {pingee}']
print(f'Command for ping: {ping_cmd[0]}')
ping_result = exec_sshCommand_aca(
host=aca_nodes[1], user=aca_nodes_data['username'], password=aca_nodes_data['password'], cmd=ping_cmd, timeout=20)
br_tun_after_ping = exec_sshCommand_aca(
host=aca_nodes[1], user=aca_nodes_data['username'], password=aca_nodes_data['password'], cmd=dump_flow_cmd, timeout=20)
print(f'Ping succeeded: {ping_result["status"][0] == 0}')
else:
            print('Either parent or child does not have any ports, something is wrong.')
print('This is the end of the pseudo controller, goodbye.')
if __name__ == '__main__':
run()
|
py | b408ad4b753c56486f0f8f23d4c9690d5fd3bfde | import pyautogui as gui
import time
wait=time.sleep
from playsound import playsound
import threading
noisy=False
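# Note: loop() below polls this module-level 'noisy' flag; the class attribute of the
# same name inside 'alarm' is never read, since stop()/play() write the global via
# 'global noisy'.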
class alarm:
noisy=False
def stop():
global noisy
noisy=False
def play():
global noisy
noisy=True
def loop():
while True:
if noisy:
playsound('P:/ROBLOX/Noafk/alarm.mp3')
pass
threading.Thread(target=loop).start()
while True:
showgui=True
while showgui:
        what=gui.confirm("You are about to be AFK kicked","Antiafk",["mute","start"])
if what=="mute":
alarm.stop()
elif what=="start":
alarm.stop()
showgui=False
wait(15*60)
alarm.play() |
py | b408ad5b12d804a39dee343698bf00b45eda2ce6 | #
# Copyright (c) 2009-2016, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El, math, time
m = 10
cutoff = 1000
output = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Construct s and z in the (product) cone
# =======================================
def ConstructPrimalDual(m):
s = El.DistMultiVec()
z = El.DistMultiVec()
orders = El.DistMultiVec(El.iTag)
firstInds = El.DistMultiVec(El.iTag)
sampleRad = 1./math.sqrt(1.*m)
El.Uniform( s, 3*m, 1, 0, sampleRad )
El.Uniform( z, 3*m, 1, 0, sampleRad )
s.Set( 0, 0, 2. )
s.Set( m, 0, 3. )
s.Set( 2*m, 0, 4. )
z.Set( 0, 0, 5. )
z.Set( m, 0, 6. )
z.Set( 2*m, 0, 7. )
El.Zeros( orders, 3*m, 1 )
El.Zeros( firstInds, 3*m, 1 )
for i in xrange(m):
orders.Set( i, 0, m )
orders.Set( i+m, 0, m )
orders.Set( i+2*m, 0, m )
firstInds.Set( i, 0, 0 )
firstInds.Set( i+m, 0, m )
firstInds.Set( i+2*m, 0, 2*m )
return s, z, orders, firstInds
s, z, orders, firstInds = ConstructPrimalDual(m)
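# Note on the cone metadata built above: every entry of 'orders' is m (each of the
# three stacked second-order cones has order m), and 'firstInds' holds the index of
# the first entry of the cone each element belongs to (0, m, or 2*m).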
n = s.Height()
if output:
El.Print( s, "s" )
El.Print( z, "z" )
El.Print( orders, "orders" )
El.Print( firstInds, "firstInds" )
# Compute the (Jordan) determinants and number of non-positive SOC members
# ========================================================================
sDets = El.SOCDets( s, orders, firstInds, cutoff )
zDets = El.SOCDets( z, orders, firstInds, cutoff )
sDetsBcast = El.DistMultiVec()
zDetsBcast = El.DistMultiVec()
El.Copy( sDets, sDetsBcast )
El.Copy( zDets, zDetsBcast )
El.SOCBroadcast( sDetsBcast, orders, firstInds, cutoff )
El.SOCBroadcast( zDetsBcast, orders, firstInds, cutoff )
sNumNonPos = El.NumNonSOC( s, orders, firstInds, cutoff )
zNumNonPos = El.NumNonSOC( z, orders, firstInds, cutoff )
if output:
El.Print( sDets, "det(s)" )
El.Print( zDets, "det(z)" )
El.Print( sDetsBcast, "Broadcasted det(s)" )
El.Print( zDetsBcast, "Broadcasted det(z)" )
if worldRank == 0:
print "# non-SOC in s:", sNumNonPos
print "# non-SOC in z:", zNumNonPos
# Compute the square-roots of s and z
# ===================================
sRoot = El.SOCSquareRoot( s, orders, firstInds, cutoff )
zRoot = El.SOCSquareRoot( z, orders, firstInds, cutoff )
sRootSquared = El.SOCApply( sRoot, sRoot, orders, firstInds, cutoff )
zRootSquared = El.SOCApply( zRoot, zRoot, orders, firstInds, cutoff )
if output:
El.Print( sRoot, "sqrt(s)" )
El.Print( zRoot, "sqrt(z)" )
El.Print( sRootSquared, "(sqrt(s))^2" )
El.Print( zRootSquared, "(sqrt(z))^2" )
# Compute the inverses of s and z
# ===============================
sInv = El.SOCInverse( s, orders, firstInds, cutoff )
zInv = El.SOCInverse( z, orders, firstInds, cutoff )
sInv_s = El.SOCApply( sInv, s, orders, firstInds, cutoff )
zInv_z = El.SOCApply( zInv, z, orders, firstInds, cutoff )
s_sInv = El.SOCApply( s, sInv, orders, firstInds, cutoff )
z_zInv = El.SOCApply( z, zInv, orders, firstInds, cutoff )
if output:
El.Print( sInv, "inv(s)" )
El.Print( zInv, "inv(z)" )
El.Print( sInv_s, "s o inv(s)" )
El.Print( zInv_z, "z o inv(z)" )
El.Print( s_sInv, "inv(s) o s" )
El.Print( z_zInv, "inv(z) o z" )
# Compute the Nesterov-Todd scaling point of (s,z)
# ================================================
w = El.SOCNesterovTodd( s, z, orders, firstInds, cutoff )
wRoot = El.SOCSquareRoot( w, orders, firstInds, cutoff )
wRootInv = El.SOCInverse( wRoot, orders, firstInds, cutoff )
sNT = El.SOCApplyQuadratic( wRootInv, s, orders, firstInds, cutoff )
zNT = El.SOCApplyQuadratic( wRoot, z, orders, firstInds, cutoff )
if output:
El.Print( w, "w" )
El.Print( sNT, "s_NT" )
El.Print( zNT, "z_NT" )
# Compute the minimum non-negative step length, alpha, such that s + alpha y
# touches the boundary of the product cone
y = El.DistMultiVec()
El.Uniform( y, n, 1 )
upperBound = 100.
alpha = El.MaxStepInSOC( s, y, orders, firstInds, upperBound, cutoff )
p = El.DistMultiVec()
El.Copy( s, p )
El.Axpy( alpha, y, p )
pDets = El.SOCDets( p, orders, firstInds, cutoff )
if output:
El.Print( y, "y" )
if worldRank == 0:
print "maximum step in cone is:", alpha
El.Print( p, "s + alpha y" )
El.Print( pDets, "det(s + alpha y)" )
# Require the user to press Enter before the program exits
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
|
py | b408ad6619bdf0b118f08e89edc50d6660c133a9 | class BaseFHIRError(Exception):
pass
class ResourceNotFound(BaseFHIRError):
pass
class InvalidResponse(BaseFHIRError):
pass
class AuthorizationError(BaseFHIRError):
pass
class OperationOutcome(BaseFHIRError):
pass
class MultipleResourcesFound(BaseFHIRError):
pass
|
py | b408adf72e6d4c205b61766ef8b9fb1dee0cffad | from nose.tools import assert_equals, assert_is_none, assert_is_not_none, assert_raises, assert_true
from numpy import allclose, array, array_equal, can_cast, float32
from thunder.rdds.data import Data
from test_utils import PySparkTestCase
class TestImagesGetters(PySparkTestCase):
"""Test `get` and related methods on an Images-like Data object
"""
def setUp(self):
super(TestImagesGetters, self).setUp()
self.ary1 = array([[1, 2], [3, 4]], dtype='int16')
self.ary2 = array([[5, 6], [7, 8]], dtype='int16')
rdd = self.sc.parallelize([(0, self.ary1), (1, self.ary2)])
self.images = Data(rdd, dtype='int16')
def test_getMissing(self):
assert_is_none(self.images.get(-1))
def test_get(self):
assert_true(array_equal(self.ary2, self.images.get(1)))
# keys are integers, ask for sequence
assert_raises(ValueError, self.images.get, (1, 2))
def test_getMany(self):
vals = self.images.getMany([0, -1, 1, 0])
assert_equals(4, len(vals))
assert_true(array_equal(self.ary1, vals[0]))
assert_is_none(vals[1])
assert_true(array_equal(self.ary2, vals[2]))
assert_true(array_equal(self.ary1, vals[3]))
# keys are integers, ask for sequences:
assert_raises(ValueError, self.images.get, [(0, 0)])
assert_raises(ValueError, self.images.get, [0, (0, 0), 1, 0])
def test_getRanges(self):
vals = self.images.getRange(slice(None))
assert_equals(2, len(vals))
assert_equals(0, vals[0][0])
assert_equals(1, vals[1][0])
assert_true(array_equal(self.ary1, vals[0][1]))
assert_true(array_equal(self.ary2, vals[1][1]))
vals = self.images.getRange(slice(0, 1))
assert_equals(1, len(vals))
assert_equals(0, vals[0][0])
assert_true(array_equal(self.ary1, vals[0][1]))
vals = self.images.getRange(slice(1))
assert_equals(1, len(vals))
assert_equals(0, vals[0][0])
assert_true(array_equal(self.ary1, vals[0][1]))
vals = self.images.getRange(slice(1, 2))
assert_equals(1, len(vals))
assert_equals(1, vals[0][0])
assert_true(array_equal(self.ary2, vals[0][1]))
vals = self.images.getRange(slice(2, 3))
assert_equals(0, len(vals))
# keys are integers, ask for sequence
assert_raises(ValueError, self.images.getRange, [slice(1), slice(1)])
# raise exception if 'step' specified:
assert_raises(ValueError, self.images.getRange, slice(1, 2, 2))
def test_brackets(self):
vals = self.images[1]
assert_true(array_equal(self.ary2, vals))
vals = self.images[0:1]
assert_equals(1, len(vals))
assert_true(array_equal(self.ary1, vals[0]))
vals = self.images[:]
assert_equals(2, len(vals))
assert_true(array_equal(self.ary1, vals[0]))
assert_true(array_equal(self.ary2, vals[1]))
vals = self.images[1:4]
assert_equals(1, len(vals))
assert_true(array_equal(self.ary2, vals[0]))
vals = self.images[1:]
assert_equals(1, len(vals))
assert_true(array_equal(self.ary2, vals[0]))
vals = self.images[:1]
assert_equals(1, len(vals))
assert_true(array_equal(self.ary1, vals[0]))
assert_raises(KeyError, self.images.__getitem__, 2)
assert_raises(KeyError, self.images.__getitem__, slice(2, 3))
class TestSeriesGetters(PySparkTestCase):
"""Test `get` and related methods on a Series-like Data object
"""
def setUp(self):
super(TestSeriesGetters, self).setUp()
self.dataLocal = [
((0, 0), array([1.0, 2.0, 3.0], dtype='float32')),
((0, 1), array([2.0, 2.0, 4.0], dtype='float32')),
((1, 0), array([4.0, 2.0, 1.0], dtype='float32')),
((1, 1), array([3.0, 1.0, 1.0], dtype='float32'))
]
self.series = Data(self.sc.parallelize(self.dataLocal), dtype='float32')
def test_getMissing(self):
assert_is_none(self.series.get((-1, -1)))
def test_get(self):
expected = self.dataLocal[1][1]
assert_true(array_equal(expected, self.series.get((0, 1))))
assert_raises(ValueError, self.series.get, 1) # keys are sequences, ask for integer
assert_raises(ValueError, self.series.get, (1, 2, 3)) # key length mismatch
def test_getMany(self):
vals = self.series.getMany([(0, 0), (17, 256), (1, 0), (0, 0)])
assert_equals(4, len(vals))
assert_true(array_equal(self.dataLocal[0][1], vals[0]))
assert_is_none(vals[1])
assert_true(array_equal(self.dataLocal[2][1], vals[2]))
assert_true(array_equal(self.dataLocal[0][1], vals[3]))
assert_raises(ValueError, self.series.getMany, [1]) # keys are sequences, ask for integer
assert_raises(ValueError, self.series.getMany, [(0, 0), 1, (1, 0), (0, 0)]) # asking for integer again
def test_getRanges(self):
vals = self.series.getRange([slice(2), slice(2)])
assert_equals(4, len(vals))
assert_equals(self.dataLocal[0][0], vals[0][0])
assert_equals(self.dataLocal[1][0], vals[1][0])
assert_equals(self.dataLocal[2][0], vals[2][0])
assert_equals(self.dataLocal[3][0], vals[3][0])
assert_true(array_equal(self.dataLocal[0][1], vals[0][1]))
assert_true(array_equal(self.dataLocal[1][1], vals[1][1]))
assert_true(array_equal(self.dataLocal[2][1], vals[2][1]))
assert_true(array_equal(self.dataLocal[3][1], vals[3][1]))
vals = self.series.getRange([slice(2), slice(1)])
assert_equals(2, len(vals))
assert_equals(self.dataLocal[0][0], vals[0][0])
assert_equals(self.dataLocal[2][0], vals[1][0])
assert_true(array_equal(self.dataLocal[0][1], vals[0][1]))
assert_true(array_equal(self.dataLocal[2][1], vals[1][1]))
vals = self.series.getRange([slice(None), slice(1, 2)])
assert_equals(2, len(vals))
assert_equals(self.dataLocal[1][0], vals[0][0])
assert_equals(self.dataLocal[3][0], vals[1][0])
assert_true(array_equal(self.dataLocal[1][1], vals[0][1]))
assert_true(array_equal(self.dataLocal[3][1], vals[1][1]))
vals = self.series.getRange([slice(None), slice(None)])
assert_equals(4, len(vals))
assert_equals(self.dataLocal[0][0], vals[0][0])
assert_equals(self.dataLocal[1][0], vals[1][0])
assert_equals(self.dataLocal[2][0], vals[2][0])
assert_equals(self.dataLocal[3][0], vals[3][0])
assert_true(array_equal(self.dataLocal[0][1], vals[0][1]))
assert_true(array_equal(self.dataLocal[1][1], vals[1][1]))
assert_true(array_equal(self.dataLocal[2][1], vals[2][1]))
assert_true(array_equal(self.dataLocal[3][1], vals[3][1]))
vals = self.series.getRange([0, slice(None)])
assert_equals(2, len(vals))
assert_equals(self.dataLocal[0][0], vals[0][0])
assert_equals(self.dataLocal[1][0], vals[1][0])
assert_true(array_equal(self.dataLocal[0][1], vals[0][1]))
assert_true(array_equal(self.dataLocal[1][1], vals[1][1]))
vals = self.series.getRange([0, 1])
assert_equals(1, len(vals))
assert_equals(self.dataLocal[1][0], vals[0][0])
assert_true(array_equal(self.dataLocal[1][1], vals[0][1]))
vals = self.series.getRange([slice(2, 3), slice(None)])
assert_equals(0, len(vals))
# keys are sequences, ask for single slice
assert_raises(ValueError, self.series.getRange, slice(2, 3))
# ask for wrong number of slices
assert_raises(ValueError, self.series.getRange, [slice(2, 3), slice(2, 3), slice(2, 3)])
# raise exception if 'step' specified:
assert_raises(ValueError, self.series.getRange, [slice(0, 4, 2), slice(2, 3)])
def test_brackets(self):
# returns just value; calls `get`
vals = self.series[(1, 0)]
assert_true(array_equal(self.dataLocal[2][1], vals))
# tuple isn't needed; returns just value, calls `get`
vals = self.series[0, 1]
assert_true(array_equal(self.dataLocal[1][1], vals))
# if slices are passed, calls `getRange`, returns values
vals = self.series[0:1, 1:2]
assert_equals(1, len(vals))
assert_true(array_equal(self.dataLocal[1][1], vals[0]))
# if slice extends out of bounds, return only the elements that are in bounds
vals = self.series[:4, :1]
assert_equals(2, len(vals))
assert_true(array_equal(self.dataLocal[0][1], vals[0]))
assert_true(array_equal(self.dataLocal[2][1], vals[1]))
# empty slice works
vals = self.series[:, 1:2]
assert_equals(2, len(vals))
assert_true(array_equal(self.dataLocal[1][1], vals[0]))
assert_true(array_equal(self.dataLocal[3][1], vals[1]))
# multiple empty slices work
vals = self.series[:, :]
assert_equals(4, len(vals))
assert_true(array_equal(self.dataLocal[0][1], vals[0]))
assert_true(array_equal(self.dataLocal[1][1], vals[1]))
assert_true(array_equal(self.dataLocal[2][1], vals[2]))
assert_true(array_equal(self.dataLocal[3][1], vals[3]))
# mixing slices and individual indicies works:
vals = self.series[0, :]
assert_equals(2, len(vals))
assert_true(array_equal(self.dataLocal[0][1], vals[0]))
assert_true(array_equal(self.dataLocal[1][1], vals[1]))
# trying to getitem a key that doesn't exist throws a KeyError
assert_raises(KeyError, self.series.__getitem__, (25, 17))
# passing a range that is completely out of bounds throws a KeyError
assert_raises(KeyError, self.series.__getitem__, (slice(2, 3), slice(None, None)))
class TestCasting(PySparkTestCase):
def setUp(self):
super(TestCasting, self).setUp()
# float16 max value is 6.55040e+04 (np.finfo(np.float16))
# "*Big*" values are too large to cast safely down to float16s
DATA = [
('float32Array', array([1.1, 2.2], dtype='float32')),
('float32BigArray', array([1.1e+05, 2.2e+05], dtype='float32')),
('float32Scalar', float32(1.1)),
('float32BigScalar', float32(4.4e+05)),
('pythonFloatScalar', 1.1),
('pythonFloatBigScalar', 5.5e+05)
]
for datum in DATA:
k, v = datum
rdd = self.sc.parallelize([(0, v)])
data = Data(rdd, nrecords=1, dtype='float32')
setattr(self, k, v)
setattr(self, k+"RDD", rdd)
setattr(self, k+"Data", data)
self.allCases = [datum[0] for datum in DATA]
def test_casting(self):
"""Tests casting of numpy arrays, numpy scalars, and python scalars with Data.astype
"""
isDowncastable = lambda name: 'Big' not in name
for caseName in self.allCases:
origVal, data = getattr(self, caseName), getattr(self, caseName+"Data")
upcasted = data.astype('float64').first()[1]
downcasted = data.astype('float16', casting="unsafe").first()[1]
# upcasting to float64 shouldn't be a problem
assert_true(allclose(origVal, upcasted, rtol=1e-05))
if isDowncastable(caseName):
# if the value is in range, we expect to get back something reasonably close to our original
# values after downcasting to float16
assert_true(allclose(origVal, downcasted, rtol=1e-03))
else:
# the unsafe cast will return *something*, but we don't have any expectations as to what:
assert_is_not_none(downcasted)
# raises py4j.protocol.Py4JJavaError after a TypeError on workers:
# we're not importing py4j, and it seems like overkill to do so just for this one assertion,
# so just assert an Exception.
assert_raises(Exception, data.astype('float16', casting="safe").first)
class TestDataMethods(PySparkTestCase):
def test_sortbykey(self):
dataLocal = [
((0, 0), array([0])),
((0, 1), array([0])),
((0, 2), array([0])),
((1, 0), array([0])),
((1, 1), array([0])),
((1, 2), array([0]))
]
data = Data(self.sc.parallelize(dataLocal))
out = data.sortByKey().keys().collect()
assert(array_equal(out, [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]))
dataLocal = [
((0,), array([0])),
((1,), array([0])),
((2,), array([0]))
]
data = Data(self.sc.parallelize(dataLocal))
out = data.sortByKey().keys().collect()
assert(array_equal(out, [(0,), (1,), (2,)]))
def test_collect(self):
dataLocal = [
((0, 0), array([0])),
((0, 1), array([1])),
((0, 2), array([2])),
((1, 0), array([3])),
((1, 1), array([4])),
((1, 2), array([5]))
]
data = Data(self.sc.parallelize(dataLocal))
out = data.collectKeysAsArray()
assert(array_equal(out, [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]]))
out = data.collectValuesAsArray()
assert(array_equal(out, [[0], [1], [2], [3], [4], [5]]))
def test_collect_with_sorting(self):
dataLocal = [
((0, 0), array([0])),
((0, 1), array([1])),
((0, 2), array([2])),
((1, 0), array([3])),
((1, 1), array([4])),
((1, 2), array([5]))
]
data = Data(self.sc.parallelize(dataLocal))
out = data.collectKeysAsArray(sorting=True)
assert(array_equal(out, [[0, 0], [1, 0], [0, 1], [1, 1], [0, 2], [1, 2]]))
out = data.collectValuesAsArray(sorting=True)
print(out)
assert(array_equal(out, [[0], [3], [1], [4], [2], [5]])) |
py | b408ae7de1881994f8f874887238ab3dd7a97674 | import torch
from torchvision import models
default_device = "cuda" if torch.cuda.is_available() else "cpu"
models_list = ["vgg16bn", "resnet18", "resnet34", "resnet50", "resnet101", "googlenet", "alexnet"]
def get_model(model_name: str, device: str = default_device):
lowered_model_name = model_name.lower()
if lowered_model_name == "vgg16bn":
model = models.vgg16_bn(pretrained=True).to(device)
model.options = {
"target_layer": model.features
}
elif lowered_model_name == "resnet18":
model = models.resnet18(pretrained=True).to(device)
model.options = {
"target_layer": model.layer4
}
elif lowered_model_name == "resnet34":
model = models.resnet34(pretrained=True).to(device)
model.options = {
"target_layer": model.layer4
}
elif lowered_model_name == "resnet50":
model = models.resnet50(pretrained=True).to(device)
model.options = {
"target_layer": model.layer4
}
elif lowered_model_name == "resnet101":
model = models.resnet101(pretrained=True).to(device)
model.options = {
"target_layer": model.layer4
}
elif lowered_model_name == "googlenet":
model = models.googlenet(pretrained=True).to(device)
model.options = {
"target_layer": model.inception5b
}
elif lowered_model_name == "alexnet":
model = models.alexnet(pretrained=True).to(device)
model.options = {
"target_layer": model.features
}
else:
raise ValueError(f"Invalid model name '{model_name}' (Supported models: {models_list})")
return model.eval()
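# A minimal usage sketch (forcing CPU so it also runs without CUDA; the pretrained
# weights are downloaded on first use):
# model = get_model("resnet18", device="cpu")
# print(model.options["target_layer"])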
|
py | b408b0c1b9613d846a12607c4dc1fa107d0cb8f3 | import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
DEFAULT_GUESTBOOK_NAME = 'default_guestbook'
# We set a parent key on the 'Greetings' to ensure that they are all in the same
# entity group. Queries across the single entity group will be consistent.
# However, the write rate should be limited to ~1/second.
def guestbook_key(guestbook_name=DEFAULT_GUESTBOOK_NAME):
"""Constructs a Datastore key for a Guestbook entity with guestbook_name."""
return ndb.Key('Guestbook', guestbook_name)
class Greeting(ndb.Model):
"""Models an individual Guestbook entry with author, content, and date."""
author = ndb.UserProperty()
content = ndb.StringProperty(indexed=False)
date = ndb.DateTimeProperty(auto_now_add=True)
class MainPage(webapp2.RequestHandler):
def get(self):
guestbook_name = self.request.get('guestbook_name',
DEFAULT_GUESTBOOK_NAME)
greetings_query = Greeting.query(
ancestor=guestbook_key(guestbook_name)).order(-Greeting.date)
greetings = greetings_query.fetch(10)
if users.get_current_user():
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
template_values = {
'greetings': greetings,
'guestbook_name': urllib.quote_plus(guestbook_name),
'url': url,
'url_linktext': url_linktext,
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
class Guestbook(webapp2.RequestHandler):
def post(self):
# We set the same parent key on the 'Greeting' to ensure each Greeting
# is in the same entity group. Queries across the single entity group
# will be consistent. However, the write rate to a single entity group
# should be limited to ~1/second.
guestbook_name = self.request.get('guestbook_name',
DEFAULT_GUESTBOOK_NAME)
greeting = Greeting(parent=guestbook_key(guestbook_name))
if users.get_current_user():
greeting.author = users.get_current_user()
greeting.content = self.request.get('content')
greeting.put()
query_params = {'guestbook_name': guestbook_name}
self.redirect('/?' + urllib.urlencode(query_params))
application = webapp2.WSGIApplication([
('/', MainPage),
('/sign', Guestbook),
], debug=True)
|
py | b408b0e7956382f5307023c4eb862f4ffc47eb33 | # Copyright 2019 Alibaba Cloud Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from alibabacloud.exceptions import ClientException
from alibabacloud.resources.base import ServiceResource
from alibabacloud.resources.collection import _create_resource_collection, \
_create_special_resource_collection
from alibabacloud.resources.collection import _create_sub_resource_with_page_collection
from alibabacloud.resources.collection import _create_sub_resource_without_page_collection
from alibabacloud.utils.utils import _new_get_key_in_response, _transfer_params
class _RDSResource(ServiceResource):
def __init__(self, _client=None):
ServiceResource.__init__(self, 'rds', _client=_client)
self.db_instances = _create_resource_collection(
_RDSDBInstanceResource,
_client,
_client.describe_db_instances,
'Items.DBInstance',
'DBInstanceId',
key_to_total_count="TotalRecordCount",
key_to_page_size="PageRecordCount",
key_to_page_number="PageNumber")
self.migrate_tasks = _create_resource_collection(
_RDSMigrateTaskResource,
_client,
_client.describe_migrate_tasks,
'Items.MigrateTask',
'MigrateTaskId',
key_to_total_count="TotalRecordCount",
key_to_page_size="PageRecordCount",
key_to_page_number="PageNumber")
self.parameters = _create_special_resource_collection(
_RDSParameterResource, _client, _client.describe_parameters,
'ConfigParameters.DBInstanceParameter', 'ParameterName',
)
self.regions = _create_special_resource_collection(
_RDSRegionResource, _client, _client.describe_regions,
'Regions.RDSRegion', 'RegionId',
)
self.slow_logs = _create_resource_collection(
_RDSSlowLogResource,
_client,
_client.describe_slow_logs,
'Items.SQLSlowLog',
'SlowLogId',
key_to_total_count="TotalRecordCount",
key_to_page_size="PageRecordCount",
key_to_page_number="PageNumber")
self.tasks = _create_resource_collection(
_RDSTaskResource, _client, _client.describe_tasks,
'Items.TaskProgressInfo', 'TaskId',
)
def clone_db_instance(self, **params):
_params = _transfer_params(params)
response = self._client.clone_db_instance(**_params)
db_instance_id = _new_get_key_in_response(response, 'DBInstanceId')
return _RDSDBInstanceResource(db_instance_id, _client=self._client)
def create_db_instance(self, **params):
_params = _transfer_params(params)
response = self._client.create_db_instance(**_params)
db_instance_id = _new_get_key_in_response(response, 'DBInstanceId')
return _RDSDBInstanceResource(db_instance_id, _client=self._client)
def recovery_db_instance(self, **params):
_params = _transfer_params(params)
response = self._client.recovery_db_instance(**_params)
db_instance_id = _new_get_key_in_response(response, 'DBInstanceId')
return _RDSDBInstanceResource(db_instance_id, _client=self._client)
def create_diagnostic_report(self, **params):
_params = _transfer_params(params)
response = self._client.create_diagnostic_report(**_params)
report_id = _new_get_key_in_response(response, 'ReportId')
return _RDSDiagnosticReportResource(report_id, _client=self._client)
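# A usage sketch based only on the methods defined above (client construction and the
# keyword parameters are placeholders, not part of this module):
# rds = _RDSResource(_client=some_alibabacloud_rds_client)
# instance = rds.create_db_instance(**create_params)  # returns an _RDSDBInstanceResource
# print(instance.db_instance_id)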
class _RDSDBInstanceResource(ServiceResource):
def __init__(self, db_instance_id, _client=None):
ServiceResource.__init__(self, "rds.db_instance", _client=_client)
self.db_instance_id = db_instance_id
self.auto_upgrade_minor_version = None
self.category = None
self.connection_mode = None
self.create_time = None
self.db_instance_class = None
self.db_instance_description = None
self.db_instance_net_type = None
self.db_instance_status = None
self.db_instance_storage_type = None
self.db_instance_type = None
self.destroy_time = None
self.engine = None
self.engine_version = None
self.expire_time = None
self.guard_db_instance_id = None
self.ins_id = None
self.instance_network_type = None
self.lock_mode = None
self.lock_reason = None
self.master_instance_id = None
self.mutri_orsignle = None
self.pay_type = None
self.read_only_db_instance_ids = None
self.region_id = None
self.replicate_id = None
self.resource_group_id = None
self.temp_db_instance_id = None
self.vswitch_id = None
self.vpc_cloud_instance_id = None
self.vpc_id = None
self.zone_id = None
self.accounts = _create_sub_resource_without_page_collection(
_RDSAccountResource,
_client,
_client.describe_accounts,
'Accounts.DBInstanceAccount',
'AccountName',
parent_identifier="DBInstanceId",
parent_identifier_value=self.db_instance_id)
self.backups = _create_sub_resource_with_page_collection(
_RDSBackupResource,
_client,
_client.describe_backups,
'Items.Backup',
'BackupId',
parent_identifier="DBInstanceId",
parent_identifier_value=self.db_instance_id,
key_to_total_count="TotalRecordCount",
key_to_page_size="PageRecordCount",
key_to_page_number="PageNumber")
self.dbs = _create_sub_resource_without_page_collection(
_RDSDBResource,
_client,
_client.describe_databases,
'Databases.Database',
'DBName',
parent_identifier="DBInstanceId",
parent_identifier_value=self.db_instance_id)
def add_tags_to_resource(self, **params):
_params = _transfer_params(params)
self._client.add_tags_to_resource(db_instance_id=self.db_instance_id, **_params)
def allocate_instance_private_connection(self, **params):
_params = _transfer_params(params)
self._client.allocate_instance_private_connection(
db_instance_id=self.db_instance_id, **_params)
def allocate_instance_public_connection(self, **params):
_params = _transfer_params(params)
self._client.allocate_instance_public_connection(
db_instance_id=self.db_instance_id, **_params)
def allocate_instance_vpc_network_type(self, **params):
_params = _transfer_params(params)
self._client.allocate_instance_vpc_network_type(
db_instance_id=self.db_instance_id, **_params)
def allocate_read_write_splitting_connection(self, **params):
_params = _transfer_params(params)
self._client.allocate_read_write_splitting_connection(
db_instance_id=self.db_instance_id, **_params)
def calculate_db_instance_weight(self, **params):
_params = _transfer_params(params)
self._client.calculate_db_instance_weight(db_instance_id=self.db_instance_id, **_params)
def cancel_import(self, **params):
_params = _transfer_params(params)
self._client.cancel_import(db_instance_id=self.db_instance_id, **_params)
def check_instance_exist(self, **params):
_params = _transfer_params(params)
self._client.check_instance_exist(db_instance_id=self.db_instance_id, **_params)
def check_recovery_conditions(self, **params):
_params = _transfer_params(params)
self._client.check_recovery_conditions(db_instance_id=self.db_instance_id, **_params)
def copy_database_between_instances(self, **params):
_params = _transfer_params(params)
self._client.copy_database_between_instances(db_instance_id=self.db_instance_id, **_params)
def create_read_only(self, **params):
_params = _transfer_params(params)
self._client.create_read_only_db_instance(db_instance_id=self.db_instance_id, **_params)
def delete(self, **params):
_params = _transfer_params(params)
self._client.delete_db_instance(db_instance_id=self.db_instance_id, **_params)
def descibe_imports_from_database(self, **params):
_params = _transfer_params(params)
self._client.descibe_imports_from_database(db_instance_id=self.db_instance_id, **_params)
def describe_backup_database(self, **params):
_params = _transfer_params(params)
self._client.describe_backup_database(db_instance_id=self.db_instance_id, **_params)
def describe_backup_policy(self, **params):
_params = _transfer_params(params)
self._client.describe_backup_policy(db_instance_id=self.db_instance_id, **_params)
def describe_backup_tasks(self, **params):
_params = _transfer_params(params)
self._client.describe_backup_tasks(db_instance_id=self.db_instance_id, **_params)
def describe_binlog_files(self, **params):
_params = _transfer_params(params)
self._client.describe_binlog_files(db_instance_id=self.db_instance_id, **_params)
def describe_cloud_db_expert_service(self, **params):
_params = _transfer_params(params)
self._client.describe_cloud_db_expert_service(db_instance_id=self.db_instance_id, **_params)
def describe_cross_region_backups(self, **params):
_params = _transfer_params(params)
self._client.describe_cross_region_backups(db_instance_id=self.db_instance_id, **_params)
def describe_cross_region_log_backup_files(self, **params):
_params = _transfer_params(params)
self._client.describe_cross_region_log_backup_files(
db_instance_id=self.db_instance_id, **_params)
def describe_db_instance_attribute(self, **params):
_params = _transfer_params(params)
self._client.describe_db_instance_attribute(db_instance_id=self.db_instance_id, **_params)
def describe_db_instance_ha_config(self, **params):
_params = _transfer_params(params)
self._client.describe_db_instance_ha_config(db_instance_id=self.db_instance_id, **_params)
def describe_db_instance_ip_array_list(self, **params):
_params = _transfer_params(params)
self._client.describe_db_instance_ip_array_list(
db_instance_id=self.db_instance_id, **_params)
def describe_db_instance_ip_hostname(self, **params):
_params = _transfer_params(params)
self._client.describe_db_instance_ip_hostname(db_instance_id=self.db_instance_id, **_params)
def describe_db_instance_monitor(self, **params):
_params = _transfer_params(params)
self._client.describe_db_instance_monitor(db_instance_id=self.db_instance_id, **_params)
def describe_db_instance_net_info(self, **params):
_params = _transfer_params(params)
self._client.describe_db_instance_net_info(db_instance_id=self.db_instance_id, **_params)
def describe_db_instance_performance(self, **params):
_params = _transfer_params(params)
self._client.describe_db_instance_performance(db_instance_id=self.db_instance_id, **_params)
def describe_db_instance_proxy_configuration(self, **params):
_params = _transfer_params(params)
self._client.describe_db_instance_proxy_configuration(
db_instance_id=self.db_instance_id, **_params)
def describe_db_instance_ssl(self, **params):
_params = _transfer_params(params)
self._client.describe_db_instance_ssl(db_instance_id=self.db_instance_id, **_params)
def describe_db_instance_tde(self, **params):
_params = _transfer_params(params)
self._client.describe_db_instance_tde(db_instance_id=self.db_instance_id, **_params)
def describe_dtc_security_ip_hosts_for_sql_server(self, **params):
_params = _transfer_params(params)
self._client.describe_dtc_security_ip_hosts_for_sql_server(
db_instance_id=self.db_instance_id, **_params)
def describe_diagnostic_report_list(self, **params):
_params = _transfer_params(params)
self._client.describe_diagnostic_report_list(db_instance_id=self.db_instance_id, **_params)
def describe_error_logs(self, **params):
_params = _transfer_params(params)
self._client.describe_error_logs(db_instance_id=self.db_instance_id, **_params)
def describe_instance_cross_backup_policy(self, **params):
_params = _transfer_params(params)
self._client.describe_instance_cross_backup_policy(
db_instance_id=self.db_instance_id, **_params)
def describe_instance_vpc_migrate_info(self, **params):
_params = _transfer_params(params)
self._client.describe_instance_vpc_migrate_info(
db_instance_id=self.db_instance_id, **_params)
def describe_log_backup_files(self, **params):
_params = _transfer_params(params)
self._client.describe_log_backup_files(db_instance_id=self.db_instance_id, **_params)
def describe_meta_list(self, **params):
_params = _transfer_params(params)
self._client.describe_meta_list(db_instance_id=self.db_instance_id, **_params)
def describe_migrate_tasks_for_sql_server(self, **params):
_params = _transfer_params(params)
self._client.describe_migrate_tasks_for_sql_server(
db_instance_id=self.db_instance_id, **_params)
def describe_modify_parameter_log(self, **params):
_params = _transfer_params(params)
self._client.describe_modify_parameter_log(db_instance_id=self.db_instance_id, **_params)
def describe_oss_downloads(self, **params):
_params = _transfer_params(params)
self._client.describe_oss_downloads(db_instance_id=self.db_instance_id, **_params)
def describe_oss_downloads_for_sql_server(self, **params):
_params = _transfer_params(params)
self._client.describe_oss_downloads_for_sql_server(
db_instance_id=self.db_instance_id, **_params)
def describe_proxy_function_support(self, **params):
_params = _transfer_params(params)
self._client.describe_proxy_function_support(db_instance_id=self.db_instance_id, **_params)
def describe_read_db_instance_delay(self, **params):
_params = _transfer_params(params)
self._client.describe_read_db_instance_delay(db_instance_id=self.db_instance_id, **_params)
def describe_resource_usage(self, **params):
_params = _transfer_params(params)
self._client.describe_resource_usage(db_instance_id=self.db_instance_id, **_params)
def describe_sql_log_files(self, **params):
_params = _transfer_params(params)
self._client.describe_sql_log_files(db_instance_id=self.db_instance_id, **_params)
def describe_sql_log_records(self, **params):
_params = _transfer_params(params)
self._client.describe_sql_log_records(db_instance_id=self.db_instance_id, **_params)
def describe_sql_log_report_list(self, **params):
_params = _transfer_params(params)
self._client.describe_sql_log_report_list(db_instance_id=self.db_instance_id, **_params)
def describe_sql_log_reports(self, **params):
_params = _transfer_params(params)
self._client.describe_sql_log_reports(db_instance_id=self.db_instance_id, **_params)
def describe_sql_reports(self, **params):
_params = _transfer_params(params)
self._client.describe_sql_reports(db_instance_id=self.db_instance_id, **_params)
def describe_security_group_configuration(self, **params):
_params = _transfer_params(params)
self._client.describe_security_group_configuration(
db_instance_id=self.db_instance_id, **_params)
def describe_slow_log_records(self, **params):
_params = _transfer_params(params)
self._client.describe_slow_log_records(db_instance_id=self.db_instance_id, **_params)
def describe_task_info(self, **params):
_params = _transfer_params(params)
self._client.describe_task_info(db_instance_id=self.db_instance_id, **_params)
def describe_templates_list(self, **params):
_params = _transfer_params(params)
self._client.describe_templates_list(db_instance_id=self.db_instance_id, **_params)
def grant_operator_permission(self, **params):
_params = _transfer_params(params)
self._client.grant_operator_permission(db_instance_id=self.db_instance_id, **_params)
def import_data_for_sql_server(self, **params):
_params = _transfer_params(params)
self._client.import_data_for_sql_server(db_instance_id=self.db_instance_id, **_params)
def import_database_between_instances(self, **params):
_params = _transfer_params(params)
self._client.import_database_between_instances(
db_instance_id=self.db_instance_id, **_params)
def migrate_security_ip_mode(self, **params):
_params = _transfer_params(params)
self._client.migrate_security_ip_mode(db_instance_id=self.db_instance_id, **_params)
def migrate_to_other_region(self, **params):
_params = _transfer_params(params)
self._client.migrate_to_other_region(db_instance_id=self.db_instance_id, **_params)
def migrate_to_other_zone(self, **params):
_params = _transfer_params(params)
self._client.migrate_to_other_zone(db_instance_id=self.db_instance_id, **_params)
def modify_auto_upgrade_minor_version(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_auto_upgrade_minor_version(
db_instance_id=self.db_instance_id, **_params)
def modify_backup_policy(self, **params):
_params = _transfer_params(params)
self._client.modify_backup_policy(db_instance_id=self.db_instance_id, **_params)
def modify_collation_time_zone(self, **params):
_params = _transfer_params(params)
self._client.modify_collation_time_zone(db_instance_id=self.db_instance_id, **_params)
def modify_connection_mode(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_connection_mode(
db_instance_id=self.db_instance_id, **_params)
def modify_connection_string(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_connection_string(
db_instance_id=self.db_instance_id, **_params)
def modify_dtc_security_ip_hosts_for_sql_server(self, **params):
_params = _transfer_params(params)
self._client.modify_dtc_security_ip_hosts_for_sql_server(
db_instance_id=self.db_instance_id, **_params)
def modify_description(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_description(db_instance_id=self.db_instance_id, **_params)
def modify_instance_auto_renewal_attribute(self, **params):
_params = _transfer_params(params)
self._client.modify_instance_auto_renewal_attribute(
db_instance_id=self.db_instance_id, **_params)
def modify_instance_cross_backup_policy(self, **params):
_params = _transfer_params(params)
self._client.modify_instance_cross_backup_policy(
db_instance_id=self.db_instance_id, **_params)
def modify_maintain_time(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_maintain_time(db_instance_id=self.db_instance_id, **_params)
def modify_monitor(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_monitor(db_instance_id=self.db_instance_id, **_params)
def modify_my_sqldb_instance_delay(self, **params):
_params = _transfer_params(params)
self._client.modify_my_sqldb_instance_delay(db_instance_id=self.db_instance_id, **_params)
def modify_network_expire_time(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_network_expire_time(
db_instance_id=self.db_instance_id, **_params)
def modify_network_type(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_network_type(db_instance_id=self.db_instance_id, **_params)
def modify_parameter(self, **params):
_params = _transfer_params(params)
self._client.modify_parameter(db_instance_id=self.db_instance_id, **_params)
def modify_pay_type(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_pay_type(db_instance_id=self.db_instance_id, **_params)
def modify_proxy_configuration(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_proxy_configuration(
db_instance_id=self.db_instance_id, **_params)
def modify_read_write_splitting_connection(self, **params):
_params = _transfer_params(params)
self._client.modify_read_write_splitting_connection(
db_instance_id=self.db_instance_id, **_params)
def modify_readonly_instance_delay_replication_time(self, **params):
_params = _transfer_params(params)
self._client.modify_readonly_instance_delay_replication_time(
db_instance_id=self.db_instance_id, **_params)
def modify_resource_group(self, **params):
_params = _transfer_params(params)
self._client.modify_resource_group(db_instance_id=self.db_instance_id, **_params)
def modify_sql_collector_policy(self, **params):
_params = _transfer_params(params)
self._client.modify_sql_collector_policy(db_instance_id=self.db_instance_id, **_params)
def modify_ssl(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_ssl(db_instance_id=self.db_instance_id, **_params)
def modify_security_group_configuration(self, **params):
_params = _transfer_params(params)
self._client.modify_security_group_configuration(
db_instance_id=self.db_instance_id, **_params)
def modify_security_ips(self, **params):
_params = _transfer_params(params)
self._client.modify_security_ips(db_instance_id=self.db_instance_id, **_params)
def modify_spec(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_spec(db_instance_id=self.db_instance_id, **_params)
def modify_tde(self, **params):
_params = _transfer_params(params)
self._client.modify_db_instance_tde(db_instance_id=self.db_instance_id, **_params)
def purge_db_instance_log(self, **params):
_params = _transfer_params(params)
self._client.purge_db_instance_log(db_instance_id=self.db_instance_id, **_params)
def release_instance_public_connection(self, **params):
_params = _transfer_params(params)
self._client.release_instance_public_connection(
db_instance_id=self.db_instance_id, **_params)
def release_read_write_splitting_connection(self, **params):
_params = _transfer_params(params)
self._client.release_read_write_splitting_connection(
db_instance_id=self.db_instance_id, **_params)
def remove_tags_from_resource(self, **params):
_params = _transfer_params(params)
self._client.remove_tags_from_resource(db_instance_id=self.db_instance_id, **_params)
def renew_instance(self, **params):
_params = _transfer_params(params)
self._client.renew_instance(db_instance_id=self.db_instance_id, **_params)
def request_service_of_cloud_db_expert(self, **params):
_params = _transfer_params(params)
self._client.request_service_of_cloud_db_expert(
db_instance_id=self.db_instance_id, **_params)
def restart(self, **params):
_params = _transfer_params(params)
self._client.restart_db_instance(db_instance_id=self.db_instance_id, **_params)
def restore(self, **params):
_params = _transfer_params(params)
self._client.restore_db_instance(db_instance_id=self.db_instance_id, **_params)
def restore_table(self, **params):
_params = _transfer_params(params)
self._client.restore_table(db_instance_id=self.db_instance_id, **_params)
def revoke_operator_permission(self, **params):
_params = _transfer_params(params)
self._client.revoke_operator_permission(db_instance_id=self.db_instance_id, **_params)
def switch_db_instance_ha(self, **params):
_params = _transfer_params(params)
self._client.switch_db_instance_ha(db_instance_id=self.db_instance_id, **_params)
def switch_db_instance_net_type(self, **params):
_params = _transfer_params(params)
self._client.switch_db_instance_net_type(db_instance_id=self.db_instance_id, **_params)
def switch_db_instance_vpc(self, **params):
_params = _transfer_params(params)
self._client.switch_db_instance_vpc(db_instance_id=self.db_instance_id, **_params)
def upgrade_db_instance_engine_version(self, **params):
_params = _transfer_params(params)
self._client.upgrade_db_instance_engine_version(
db_instance_id=self.db_instance_id, **_params)
def upgrade_db_instance_kernel_version(self, **params):
_params = _transfer_params(params)
self._client.upgrade_db_instance_kernel_version(
db_instance_id=self.db_instance_id, **_params)
def create_account(self, **params):
_params = _transfer_params(params)
self._client.create_account(db_instance_id=self.db_instance_id, **_params)
account_name = _params.get("account_name")
return _RDSAccountResource(account_name, self.db_instance_id, _client=self._client)
def create_backup(self, **params):
_params = _transfer_params(params)
response = self._client.create_backup(db_instance_id=self.db_instance_id, **_params)
backup_id = _new_get_key_in_response(response, 'BackupJobId')
return _RDSBackupResource(backup_id, self.db_instance_id, _client=self._client)
def create_database(self, **params):
_params = _transfer_params(params)
self._client.create_database(db_instance_id=self.db_instance_id, **_params)
db_name = _params.get("db_name")
return _RDSDBResource(db_name, self.db_instance_id, _client=self._client)
def create_temp_db_instance(self, **params):
_params = _transfer_params(params)
response = self._client.create_temp_db_instance(
db_instance_id=self.db_instance_id, **_params)
temp_db_instance_id = _new_get_key_in_response(response, 'TempDBInstanceId')
return _RDSTempDBInstanceResource(
temp_db_instance_id,
self.db_instance_id,
_client=self._client)
def refresh(self):
result = self._client.describe_db_instances(db_instance_id=self.db_instance_id)
items = _new_get_key_in_response(result, 'Items.DBInstance')
if not items:
raise ClientException(
msg="Failed to find db_instance data from DescribeDBInstances response. "
"DBInstanceId = {0}".format(
self.db_instance_id))
self._assign_attributes(items[0])
class _RDSAccountResource(ServiceResource):
def __init__(self, account_name, db_instance_id, _client=None):
ServiceResource.__init__(self, "rds.account", _client=_client)
self.account_name = account_name
self.db_instance_id = db_instance_id
self.account_description = None
self.account_status = None
self.account_type = None
self.database_privileges = None
self.priv_exceeded = None
def check_account_name_available(self, **params):
_params = _transfer_params(params)
self._client.check_account_name_available(
account_name=self.account_name,
db_instance_id=self.db_instance_id,
**_params)
def delete(self, **params):
_params = _transfer_params(params)
self._client.delete_account(
account_name=self.account_name,
db_instance_id=self.db_instance_id,
**_params)
def modify_description(self, **params):
_params = _transfer_params(params)
self._client.modify_account_description(
account_name=self.account_name,
db_instance_id=self.db_instance_id,
**_params)
def reset(self, **params):
_params = _transfer_params(params)
self._client.reset_account(
account_name=self.account_name,
db_instance_id=self.db_instance_id,
**_params)
def reset_account_for_pg(self, **params):
_params = _transfer_params(params)
self._client.reset_account_for_pg(
account_name=self.account_name,
db_instance_id=self.db_instance_id,
**_params)
def reset_account_password(self, **params):
_params = _transfer_params(params)
self._client.reset_account_password(
account_name=self.account_name,
db_instance_id=self.db_instance_id,
**_params)
def grant_account_privilege(self, **params):
_params = _transfer_params(params)
self._client.grant_account_privilege(
account_name=self.account_name,
db_instance_id=self.db_instance_id,
**_params)
def revoke_account_privilege(self, **params):
_params = _transfer_params(params)
self._client.revoke_account_privilege(
account_name=self.account_name,
db_instance_id=self.db_instance_id,
**_params)
def refresh(self):
result = self._client.describe_accounts(
account_name=self.account_name,
db_instance_id=self.db_instance_id)
items = _new_get_key_in_response(result, 'Accounts.DBInstanceAccount')
if not items:
raise ClientException(msg="Failed to find account data from DescribeAccounts response. "
"AccountName = {0}".format(self.account_name))
self._assign_attributes(items[0])
def wait_until(self, target_account_status, timeout=120):
start_time = time.time()
while True:
end_time = time.time()
if end_time - start_time >= timeout:
raise Exception("Timed out: no {0} status after {1} seconds.".format(
target_account_status, timeout))
self.refresh()
if self.account_status == target_account_status:
return
time.sleep(1)
class _RDSBackupResource(ServiceResource):
def __init__(self, backup_id, db_instance_id, _client=None):
ServiceResource.__init__(self, "rds.backup", _client=_client)
self.backup_id = backup_id
self.db_instance_id = db_instance_id
self.backup_db_names = None
self.backup_download_url = None
self.backup_end_time = None
self.backup_extraction_status = None
self.backup_intranet_download_url = None
self.backup_location = None
self.backup_method = None
self.backup_mode = None
self.backup_scale = None
self.backup_size = None
self.backup_start_time = None
self.backup_status = None
self.backup_type = None
self.host_instance_id = None
self.meta_status = None
self.slave_status = None
self.store_status = None
self.total_backup_size = None
def delete(self, **params):
_params = _transfer_params(params)
self._client.delete_backup(
backup_id=self.backup_id,
db_instance_id=self.db_instance_id,
**_params)
def refresh(self):
result = self._client.describe_backups(
backup_id=self.backup_id,
db_instance_id=self.db_instance_id)
items = _new_get_key_in_response(result, 'Items.Backup')
if not items:
raise ClientException(msg="Failed to find backup data from DescribeBackups response. "
"BackupId = {0}".format(self.backup_id))
self._assign_attributes(items[0])
def wait_until(self, target_backup_status, timeout=120):
start_time = time.time()
while True:
end_time = time.time()
if end_time - start_time >= timeout:
raise Exception("Timed out: no {0} status after {1} seconds.".format(
target_backup_status, timeout))
self.refresh()
if self.backup_status == target_backup_status:
return
time.sleep(1)
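# Usage sketch (not part of the generated SDK): the wrapper methods above can be
# chained, e.g. creating a backup on a DB instance resource and polling it until
# done. `db_instance` and the target status string 'Success' are illustrative
# assumptions, not values taken from this module.
#
#   backup = db_instance.create_backup()
#   backup.wait_until('Success', timeout=600)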
class _RDSDBResource(ServiceResource):
def __init__(self, db_name, db_instance_id, _client=None):
ServiceResource.__init__(self, "rds.db", _client=_client)
self.db_name = db_name
self.db_instance_id = db_instance_id
self.accounts = None
self.character_set_name = None
self.db_description = None
self.db_status = None
self.engine = None
def create_migrate_task(self, **params):
_params = _transfer_params(params)
self._client.create_migrate_task(
db_name=self.db_name,
db_instance_id=self.db_instance_id,
**_params)
def create_migrate_task_for_sql_server(self, **params):
_params = _transfer_params(params)
self._client.create_migrate_task_for_sql_server(
db_name=self.db_name, db_instance_id=self.db_instance_id, **_params)
def delete(self, **params):
_params = _transfer_params(params)
self._client.delete_database(
db_name=self.db_name,
db_instance_id=self.db_instance_id,
**_params)
def modify_db_description(self, **params):
_params = _transfer_params(params)
self._client.modify_db_description(
db_name=self.db_name,
db_instance_id=self.db_instance_id,
**_params)
def create_online_database_task(self, **params):
_params = _transfer_params(params)
self._client.create_online_database_task(
db_name=self.db_name, db_instance_id=self.db_instance_id, **_params)
def refresh(self):
result = self._client.describe_databases(
db_name=self.db_name, db_instance_id=self.db_instance_id)
items = _new_get_key_in_response(result, 'Databases.Database')
if not items:
raise ClientException(msg="Failed to find db data from DescribeDatabases response. "
"DBName = {0}".format(self.db_name))
self._assign_attributes(items[0])
def wait_until(self, target_db_status, timeout=120):
start_time = time.time()
while True:
end_time = time.time()
if end_time - start_time >= timeout:
raise Exception("Timed out: no {0} status after {1} seconds.".format(
target_db_status, timeout))
self.refresh()
if self.db_status == target_db_status:
return
time.sleep(1)
class _RDSTempDBInstanceResource(ServiceResource):
def __init__(self, temp_db_instance_id, db_instance_id, _client=None):
ServiceResource.__init__(self, "rds.temp_db_instance", _client=_client)
self.temp_db_instance_id = temp_db_instance_id
self.db_instance_id = db_instance_id
class _RDSDiagnosticReportResource(ServiceResource):
def __init__(self, report_id, _client=None):
ServiceResource.__init__(self, "rds.diagnostic_report", _client=_client)
self.report_id = report_id
class _RDSMigrateTaskResource(ServiceResource):
def __init__(self, migrate_task_id, _client=None):
ServiceResource.__init__(self, "rds.migrate_task", _client=_client)
self.migrate_task_id = migrate_task_id
self.backup_mode = None
self.create_time = None
self.db_name = None
self.description = None
self.end_time = None
self.is_db_replaced = None
self.status = None
class _RDSParameterResource(ServiceResource):
def __init__(self, parameter_name, _client=None):
ServiceResource.__init__(self, "rds.parameter", _client=_client)
self.parameter_name = parameter_name
self.parameter_description = None
self.parameter_value = None
class _RDSRegionResource(ServiceResource):
def __init__(self, region_id, _client=None):
ServiceResource.__init__(self, "rds.region", _client=_client)
self.region_id = region_id
self.local_name = None
self.region_endpoint = None
self.status = None
def refresh(self):
result = self._client.describe_regions(region_id=self.region_id)
items = _new_get_key_in_response(result, 'Regions.Region')
if not items:
raise ClientException(msg="Failed to find region data from DescribeRegions response. "
"RegionId = {0}".format(self.region_id))
self._assign_attributes(items[0])
class _RDSSlowLogResource(ServiceResource):
def __init__(self, slow_log_id, _client=None):
ServiceResource.__init__(self, "rds.slow_log", _client=_client)
self.slow_log_id = slow_log_id
self.avg_execution_time = None
self.create_time = None
self.db_name = None
self.max_execution_time = None
self.max_lock_time = None
self.my_sql_total_execution_counts = None
self.my_sql_total_execution_times = None
self.parse_max_row_count = None
self.parse_total_row_counts = None
self.report_time = None
self.return_max_row_count = None
self.return_total_row_counts = None
self.sqlhash = None
self.sql_id_str = None
self.sql_server_total_execution_counts = None
self.sql_server_total_execution_times = None
self.sql_text = None
self.total_lock_times = None
self.total_logical_read_counts = None
self.total_physical_read_counts = None
class _RDSTaskResource(ServiceResource):
def __init__(self, task_id, _client=None):
ServiceResource.__init__(self, "rds.task", _client=_client)
self.task_id = task_id
self.creation_time = None
self.finished_time = None
self.support_cancel = None
self.task_action = None
self.task_status = None
def refresh(self):
result = self._client.describe_tasks(task_ids=self.task_id)
items = _new_get_key_in_response(result, 'TaskSet.Task')
if not items:
raise ClientException(msg="Failed to find task data from DescribeTasks response. "
"TaskId = {0}".format(self.task_id))
self._assign_attributes(items[0])
|
py | b408b1f0b59842fa4c9ac5634528c80424701b1c | from django.contrib import auth
from django.contrib.auth import authenticate, login
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.db import transaction
from django.db.utils import IntegrityError
import json
from django_facebook import exceptions as facebook_exceptions, \
settings as facebook_settings, signals
from django_facebook.api import get_facebook_graph
from django_facebook.utils import get_registration_backend, get_form_class, \
get_profile_model, to_bool, get_user_model, get_instance_for,\
get_user_attribute, try_get_profile, get_model_for_attribute,\
get_instance_for_attribute, update_user_attributes
from random import randint
import logging
import sys
import urllib
try:
import urllib2
except ImportError:
import urllib.error as urllib2
logger = logging.getLogger(__name__)
class CONNECT_ACTIONS:
class LOGIN:
pass
class CONNECT(LOGIN):
pass
class REGISTER:
pass
def connect_user(request, access_token=None, facebook_graph=None, connect_facebook=False):
'''
Given a request either
- (if authenticated) connect the user
- login
- register
'''
user = None
graph = facebook_graph or get_facebook_graph(request, access_token)
converter = get_instance_for('user_conversion', graph)
assert converter.is_authenticated()
facebook_data = converter.facebook_profile_data()
force_registration = request.POST.get('force_registration') or \
request.GET.get('force_registration') or \
request.POST.get('force_registration_hard') or \
request.GET.get('force_registration_hard')
logger.debug('force registration is set to %s', force_registration)
if connect_facebook and request.user.is_authenticated() and not force_registration:
# we should only allow connect if users indicate they really want to connect
# only when the request.CONNECT_FACEBOOK = 1
# if this isn't present we just do a login
action = CONNECT_ACTIONS.CONNECT
# overwrite any stale profile data with the latest values from Facebook
user = _connect_user(request, converter, overwrite=True)
else:
email = facebook_data.get('email', False)
email_verified = facebook_data.get('verified', False)
kwargs = {}
if email and email_verified:
kwargs = {'facebook_email': email}
auth_user = authenticate(facebook_id=facebook_data['id'], **kwargs)
if auth_user and not force_registration:
action = CONNECT_ACTIONS.LOGIN
# Has the user registered without Facebook, using the verified FB
# email address?
# It is after all quite common to use email addresses for usernames
update = getattr(auth_user, 'fb_update_required', False)
profile = try_get_profile(auth_user)
current_facebook_id = get_user_attribute(
auth_user, profile, 'facebook_id')
if not current_facebook_id:
update = True
# login the user
user = _login_user(request, converter, auth_user, update=update)
else:
action = CONNECT_ACTIONS.REGISTER
# when force registration is active we should remove the old
# profile
try:
user = _register_user(request, converter,
remove_old_connections=force_registration)
except facebook_exceptions.AlreadyRegistered as e:
# in multithreaded environments it's possible someone beats us to
# the punch; in that case just log in
logger.info(
'parallel register encountered, slower thread is doing a login')
auth_user = authenticate(
facebook_id=facebook_data['id'], **kwargs)
if not auth_user:
# We don't have a valid user so raise
raise e
action = CONNECT_ACTIONS.LOGIN
user = _login_user(request, converter, auth_user, update=False)
_update_likes_and_friends(request, user, converter)
_update_access_token(user, graph)
logger.info('connect finished with action %s', action)
return action, user
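# Usage sketch (hypothetical view code, not part of django_facebook itself):
# connect_user drives the connect / login / register flow described in its
# docstring and returns which branch was taken.
#
#   def facebook_connect(request):
#       action, user = connect_user(request, connect_facebook=True)
#       if action is CONNECT_ACTIONS.REGISTER:
#           pass  # e.g. redirect to a profile completion page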
def _login_user(request, facebook, authenticated_user, update=False):
login(request, authenticated_user)
if update:
_connect_user(request, facebook)
return authenticated_user
def _connect_user(request, facebook, overwrite=True):
'''
Update the fields on the user model and connects it to the facebook account
'''
if not request.user.is_authenticated():
raise ValueError(
'Connect user can only be used on authenticated users')
if not facebook.is_authenticated():
raise ValueError(
'Facebook needs to be authenticated for connect flows')
data = facebook.facebook_profile_data()
facebook_id = data['id']
# see if we already have profiles connected to this Facebook account
old_connections = _get_old_connections(facebook_id, request.user.id)[:20]
if old_connections and not (request.POST.get('confirm_connect') or
request.GET.get('confirm_connect')):
raise facebook_exceptions.AlreadyConnectedError(list(old_connections))
user = _update_user(request.user, facebook, overwrite=overwrite)
return user
def _update_likes_and_friends(request, user, facebook):
# store likes and friends if configured
sid = transaction.savepoint()
try:
if facebook_settings.FACEBOOK_STORE_LIKES:
facebook.get_and_store_likes(user)
if facebook_settings.FACEBOOK_STORE_FRIENDS:
facebook.get_and_store_friends(user)
transaction.savepoint_commit(sid)
except IntegrityError as e:
logger.warning(u'Integrity error encountered during registration, '
'probably a double submission %s' % e,
exc_info=sys.exc_info(), extra={
'request': request,
'data': {
'body': str(e),  # str() rather than unicode() so the Python 3 code path works too
}
})
transaction.savepoint_rollback(sid)
def _update_access_token(user, graph):
'''
Conditionally updates the access token in the database
'''
profile = try_get_profile(user)
model_or_profile = get_instance_for_attribute(
user, profile, 'access_token')
# store the access token for later usage if the profile model supports it
if model_or_profile:
# update if not equal to the current token
new_token = graph.access_token != model_or_profile.access_token
token_message = 'a new' if new_token else 'the same'
logger.info(
'found %s token %s', token_message, graph.access_token[:10])
if new_token:
logger.info('access token changed, updating now')
model_or_profile.update_access_token(graph.access_token)
model_or_profile.save()
# see if we can extend the access token
# this runs in a task, after extending the token we fire an event
model_or_profile.extend_access_token()
def _register_user(request, facebook, profile_callback=None,
remove_old_connections=False):
'''
Creates a new user and authenticates
The registration form handles the registration and validation
Other data on the user profile is updates afterwards
if remove_old_connections = True we will disconnect old
profiles from their facebook flow
'''
if not facebook.is_authenticated():
raise ValueError(
'Facebook needs to be authenticated for connect flows')
# get the backend on new registration systems, or none
# if we are on an older version
backend = get_registration_backend()
logger.info('running backend %s for registration', backend)
# gets the form class specified in FACEBOOK_REGISTRATION_FORM
form_class = get_form_class(backend, request)
facebook_data = facebook.facebook_registration_data()
data = request.POST.copy()
for k, v in facebook_data.items():
if not data.get(k):
data[k] = v
if remove_old_connections:
_remove_old_connections(facebook_data['facebook_id'])
if request.POST.get('force_registration_hard') or \
request.GET.get('force_registration_hard'):
data['email'] = data['email'].replace(
'@', '+test%s@' % randint(0, 1000000000))
form = form_class(data=data, files=request.FILES,
initial={'ip': request.META['REMOTE_ADDR']})
if not form.is_valid():
# show errors in sentry
form_errors = form.errors
error = facebook_exceptions.IncompleteProfileError(
'Facebook signup incomplete')
error.form = form
raise error
try:
# for new registration systems use the backends methods of saving
new_user = None
if backend:
new_user = backend.register(request,
form=form, **form.cleaned_data)
# fall back to the form approach
if new_user is None:
raise ValueError(
'new_user is None, note that backward compatibility for the older versions of django registration has been dropped.')
except IntegrityError as e:
# this happens when users click multiple times, the first request registers
# the second one raises an error
raise facebook_exceptions.AlreadyRegistered(e)
# update some extra data not yet done by the form
new_user = _update_user(new_user, facebook)
signals.facebook_user_registered.send(sender=get_user_model(),
user=new_user, facebook_data=facebook_data, request=request, converter=facebook)
# IS this the correct way for django 1.3? seems to require the backend
# attribute for some reason
new_user.backend = 'django_facebook.auth_backends.FacebookBackend'
auth.login(request, new_user)
return new_user
def _get_old_connections(facebook_id, current_user_id=None):
'''
Gets other accounts connected to this facebook id, which are not
attached to the current user
'''
user_or_profile_model = get_model_for_attribute('facebook_id')
other_facebook_accounts = user_or_profile_model.objects.filter(
facebook_id=facebook_id)
kwargs = {}
if current_user_id:
# if statement since we need to support both
user_model = get_user_model()
if user_or_profile_model == user_model:
kwargs['id'] = current_user_id
else:
kwargs['user'] = current_user_id
other_facebook_accounts = other_facebook_accounts.exclude(**kwargs)
return other_facebook_accounts
def _remove_old_connections(facebook_id, current_user_id=None):
'''
Removes the facebook id from profiles with the specified facebook id
which aren't the current user
'''
other_facebook_accounts = _get_old_connections(
facebook_id, current_user_id)
other_facebook_accounts.update(facebook_id=None)
def _update_user(user, facebook, overwrite=True):
'''
Updates the user and his/her profile with the data from facebook
'''
# if you want to add fields to your user model instead of the
# profile, that's fine
# partial support (everything except raw_data and facebook_id is included)
facebook_data = facebook.facebook_registration_data(username=False)
facebook_fields = ['facebook_name', 'facebook_profile_url', 'gender',
'date_of_birth', 'about_me', 'website_url', 'first_name', 'last_name']
profile = try_get_profile(user)
# which attributes to update
attributes_dict = {}
# send the signal that we're updating
signals.facebook_pre_update.send(sender=get_user_model(), user=user,
profile=profile, facebook_data=facebook_data)
# set the facebook id and make sure we are the only user with this id
current_facebook_id = get_user_attribute(user, profile, 'facebook_id')
facebook_id_changed = facebook_data['facebook_id'] != current_facebook_id
overwrite_allowed = overwrite or not current_facebook_id
# update the facebook id and access token
facebook_id_overwritten = False
if facebook_id_changed and overwrite_allowed:
# when not overwriting we only update if there is no
# profile.facebook_id
logger.info('profile facebook id changed from %s to %s',
repr(facebook_data['facebook_id']),
repr(current_facebook_id))
attributes_dict['facebook_id'] = facebook_data['facebook_id']
facebook_id_overwritten = True
if facebook_id_overwritten:
_remove_old_connections(facebook_data['facebook_id'], user.id)
# update all fields on both user and profile
for f in facebook_fields:
facebook_value = facebook_data.get(f, False)
current_value = get_user_attribute(user, profile, f, None)
if facebook_value and not current_value:
attributes_dict[f] = facebook_value
# write the raw data in case we missed something
serialized_fb_data = json.dumps(facebook.facebook_profile_data())
current_raw_data = get_user_attribute(user, profile, 'raw_data')
if current_raw_data != serialized_fb_data:
attributes_dict['raw_data'] = serialized_fb_data
image_url = facebook_data['image']
# update the image if we are allowed and have to
if facebook_settings.FACEBOOK_STORE_LOCAL_IMAGE:
image_field = get_user_attribute(user, profile, 'image', True)
if not image_field:
image_name, image_file = _update_image(
facebook_data['facebook_id'], image_url)
image_field.save(image_name, image_file)
# save both models if they changed
update_user_attributes(user, profile, attributes_dict)
if getattr(user, '_fb_is_dirty', False):
user.save()
if getattr(profile, '_fb_is_dirty', False):
profile.save()
signals.facebook_post_update.send(sender=get_user_model(),
user=user, profile=profile, facebook_data=facebook_data)
return user
def _update_image(facebook_id, image_url):
'''
Updates the user profile's image to the given image url
Unfortunately this is quite a pain to get right with Django
Suggestions to improve this are welcome
'''
image_name = 'fb_image_%s.jpg' % facebook_id
image_temp = NamedTemporaryFile()
try:
image_response = urllib2.urlopen(image_url)
except AttributeError:
image_response = urllib.request.urlopen(image_url)
image_content = image_response.read()
image_temp.write(image_content)
http_message = image_response.info()
image_size = len(image_content)
try:
content_type = http_message.type
except AttributeError:
content_type = http_message.get_content_type()
image_file = InMemoryUploadedFile(
file=image_temp, name=image_name, field_name='image',
content_type=content_type, size=image_size, charset=None
)
image_file.seek(0)
image_temp.flush()
return image_name, image_file
def update_connection(request, graph):
'''
A special purpose view for updating the connection with an existing user
- updates the access token (already done in get_graph)
- sets the facebook_id if nothing is specified
- stores friends and likes if possible
'''
converter = get_instance_for('user_conversion', graph)
user = _connect_user(request, converter, overwrite=False)
_update_likes_and_friends(request, user, converter)
_update_access_token(user, graph)
return user
|
py | b408b2b9614c125d42bc959188fcc7e62b2660fa | from datetime import date
import requests
from celery.exceptions import SoftTimeLimitExceeded
from django.conf import settings
from django.core.cache import cache
from django.core.files.base import ContentFile
from django.db import transaction
from requests.exceptions import RequestException
from covid_cases.clients import NICDGISClient, SACoronavirusClient
from covid_cases.models import (
SACoronavirusCaseImage,
SACoronavirusCounter,
Ward,
WardCase,
)
from covid_cases.utils import get_filename_from_url, normalise_text
from healthcheck.celery import app
@app.task(
autoretry_for=(RequestException, SoftTimeLimitExceeded),
max_retries=5,
retry_backoff=True,
soft_time_limit=60,
time_limit=90,
acks_late=True,
)
def scrape_nicd_gis():
if not settings.ENABLE_NICD_GIS_SCRAPING:
return "Skipping task, disabled in config"
client = NICDGISClient()
# Only update if the total number of cases has increased
db_total = WardCase.objects.get_total_cases()
api_total = client.get_total_cases()
if db_total >= api_total:
return f"Skipping, database cases {db_total} >= API cases {api_total}"
created, updated = 0, 0
with transaction.atomic():
for record in client.get_ward_cases_data()["features"]:
record = record["attributes"]
ward = Ward.get_ward(
province=normalise_text(record["Province"] or ""),
district=normalise_text(record["District"] or ""),
sub_district=normalise_text(record["Sub_Distri"] or ""),
sub_district_id=record["Sub_district_ID"],
ward_id=normalise_text(record["WardID"] or ""),
ward_number=normalise_text(record["WardNumber"] or ""),
)
_, c = WardCase.objects.update_or_create(
object_id=record["OBJECTID_1"],
date=date.today(),
defaults={
"ward": ward,
"male": record["Male"],
"female": record["Female"],
"unknown_gender": record["Unknown_Ge"],
"age_1_10": record["Age_1_10_y"],
"age_11_20": record["Age_11_20_"],
"age_21_30": record["Age_21_30_"],
"age_31_40": record["Age_31_40_"],
"age_41_50": record["Age_41_50_"],
"age_51_60": record["Age_51_60_"],
"age_61_70": record["Age_61_70_"],
"age_71_80": record["Age_71_80_"],
"age_81": record["Age_81_yrs"],
"unknown_age": record["Unknown_Ag"],
"latest": record["Latest"],
"total_number_of_cases": record["Tot_No_of_Cases"],
},
)
if c:
created += 1
else:
updated += 1
return f"Created {created} case entries, updated {updated} case entries"
@app.task(
autoretry_for=(RequestException, SoftTimeLimitExceeded),
max_retries=5,
retry_backoff=True,
soft_time_limit=60,
time_limit=90,
acks_late=True,
)
def scrape_sacoronavirus_homepage():
if not settings.ENABLE_SACORONAVIRUS_SCRAPING:
return "Skipping task, disabled in config"
client = SACoronavirusClient()
# Only update if any of the numbers have increased
try:
api_values = client.get_homepage_counters()
db_values = SACoronavirusCounter.objects.latest("date")
if all(
(
db_values.tests >= api_values.tests,
db_values.positive >= api_values.positive,
db_values.recoveries >= api_values.recoveries,
db_values.deaths >= api_values.deaths,
db_values.vaccines >= api_values.vaccines,
)
):
return f"Skipping, no increases in values {api_values}"
except SACoronavirusCounter.DoesNotExist:
pass
_, c = SACoronavirusCounter.objects.update_or_create(
date=date.today(),
defaults={
"tests": api_values.tests,
"positive": api_values.positive,
"recoveries": api_values.recoveries,
"deaths": api_values.deaths,
"vaccines": api_values.vaccines,
},
)
if c:
return f"Created {date.today().isoformat()} {api_values}"
else:
return f"Updated {date.today().isoformat()} {api_values}"
@app.task(
autoretry_for=(RequestException, SoftTimeLimitExceeded),
max_retries=5,
retry_backoff=True,
soft_time_limit=300,
time_limit=400,
acks_late=True,
)
def scrape_sacoronavirus_case_images():
if not settings.ENABLE_SACORONAVIRUS_SCRAPING:
return "Skipping task, disabled in config"
client = SACoronavirusClient()
# Only update if we don't have this file yet
total = 0
for image in client.get_daily_cases_image_urls():
try:
SACoronavirusCaseImage.objects.get(url=image.url)
continue
except SACoronavirusCaseImage.DoesNotExist:
pass
image_data = requests.get(
image.url, headers={"User-Agent": "contactndoh-whatsapp"}, timeout=30
)
image_data.raise_for_status()
file = ContentFile(
image_data.content, name=get_filename_from_url(image_data.url)
)
SACoronavirusCaseImage.objects.create(
url=image.url, image=file, date=image.date
)
total += 1
if total > 0:
cache.delete("latest_image")
return f"Downloaded {total} images"
|
py | b408b3b86b7970ae11437d2f9915b41661bf66ab | # https://leetcode.com/problems/longest-palindromic-substring/
class Solution:
def longestPalindrome(self, s: str):
a = s
b = s[::-1]
la = len(a)
lb = len(b)
if len(s) == 0:
return ""
matrix = [[0 for _ in range(lb + 1)] for _ in range(la + 1)]
matrix_ans = [[[] for _ in range(lb + 1)] for _ in range(la + 1)]
for i in range(la):
for j in range(lb):
# print(i, "i", j, "j")
# print(matrix)
matrix[i][j] = max(matrix[i - 1][j], matrix[i][j - 1])
if matrix[i - 1][j] >= matrix[i][j - 1]:
matrix_ans[i][j].extend(matrix_ans[i - 1][j])
else:
matrix_ans[i][j].extend(matrix_ans[i][j - 1])
if a[i] == b[j]:
if matrix[i][j] > 1 + matrix[i - 1][j - 1]:
matrix_ans[i][j] = matrix_ans[i][j]
else:
matrix_ans[i][j] = matrix_ans[i - 1][j - 1]
matrix_ans[i][j].extend([a[i]])
matrix[i][j] = max(matrix[i][j], 1 + matrix[i - 1][j - 1])
# print(ans)
# print(matrix)
# print(matrix_ans)
ans = "".join(matrix_ans[la - 1][lb - 1])
return ans
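# Note: the DP above is an LCS-style formulation over s and its reverse, and the
# reconstruction shares list objects between cells, so for some inputs the string
# it returns is a (possibly non-contiguous) common subsequence rather than a
# palindromic substring. A minimal alternative sketch using the standard
# expand-around-center approach (O(n^2) time, O(1) extra space):
def longest_palindrome_expand(s: str) -> str:
    if not s:
        return ""
    best = s[0]
    for center in range(len(s)):
        # try both an odd-length window (center, center) and an even one
        for lo, hi in ((center, center), (center, center + 1)):
            # grow outwards while the window is still a palindrome
            while lo >= 0 and hi < len(s) and s[lo] == s[hi]:
                lo -= 1
                hi += 1
            # the last valid window is s[lo + 1:hi]
            if hi - lo - 1 > len(best):
                best = s[lo + 1:hi]
    return best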
|
py | b408b5789ae7e7286c9430a4fd3443a4f597b780 |
# Copyright 2020 Bradbase
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Optional, List
@dataclass
class Auth:
auth_type: str
@dataclass
class PersonalAccessToken(Auth):
account_id: str
access_token: str
put_auth_in_header: bool
def __init__(self, account_id, access_token, put_auth_in_header= True):
super().__init__('Personal Access Token')
self.account_id = account_id
if access_token.find('Bearer') > -1:
self.access_token = access_token
else:
self.access_token = 'Bearer ' + access_token
self.put_auth_in_header = put_auth_in_header
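# Example (hypothetical credentials): the constructor above normalises the token,
# so callers may pass it with or without the 'Bearer ' prefix.
#
#   auth = PersonalAccessToken('1234567', 'abc.def.ghi')
#   auth.access_token   # -> 'Bearer abc.def.ghi'
#   auth.auth_type      # -> 'Personal Access Token'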
# Authorization Code Flow
@dataclass
class OAuth2_ServerSide_Token():
access_token: str
refresh_token: str
expires_in: int
expires_at: float
def __init__(self, access_token, refresh_token, expires_in, expires_at, refresh_url=None):
if access_token.upper().find('BEARER ') > -1:
self.access_token = access_token
else:
self.access_token = 'Bearer ' + access_token
self.refresh_token = refresh_token
self.expires_in = expires_in
self.expires_at = expires_at
self.refresh_url = refresh_url
# Authorization Code Flow
@dataclass
class OAuth2_ServerSide(Auth):
"""Implicit Code Grant Flow for OAuth2"""
refresh_url: Optional[str]
client_id: str
client_secret: str
token: OAuth2_ServerSide_Token
def __init__(self, client_id, client_secret, token, refresh_url):
super().__init__(auth_type = 'Server Side Applications')
self.client_id = client_id
self.client_secret = client_secret
self.token = token
self.refresh_url = refresh_url
# Implicit Code Flow
@dataclass
class OAuth2_ClientSide_Token():
access_token: str
expires_in: int
token_type: str
scope: List[str]
def __init__(self, access_token, expires_in, token_type, scope):
if access_token.upper().find('BEARER') > -1:
self.access_token = access_token
else:
self.access_token = 'Bearer ' + access_token
self.expires_in = expires_in
self.token_type = token_type
self.scope = scope
# # Implicit Code Flow
# @dataclass
# class OAuth2_ClientSide(Auth):
# """Authorizaton Code Grant Flow for OAuth2"""
# token: Optional[OAuth2_ClientSide_Token]
# client_id: str
# auth_url: str
# # scopes: List[str]
#
# def __init__(self, client_id, auth_url):
# super().__init__('Client Side Applications')
# self.client_id = client_id
# self.auth_url = auth_url
@dataclass
class ErrorMessage:
message: str
@dataclass
class Company:
base_uri: str = None
full_domain: str = None
name: str = None
is_active: bool = False
week_start_day: str = None
wants_timestamp_timers: bool = False
time_format: str = None
plan_type: str = None
expense_feature: bool = False
invoice_feature: bool = False
estimate_feature: bool = False
approval_required: bool = False
clock: str = None
decimal_symbol: str = None
thousands_separator: str = None
color_scheme: str = None
@dataclass
class ExpenseCategory:
unit_name: Optional[str]
unit_price: Optional[float]
id: int = None
name: str = None
is_active: bool = False
created_at: str = None
updated_at: str = None
@dataclass
class InvoiceRef:
id: int = None
number: str = None
@dataclass
class Receipt:
url: str = None
file_name: str = None
file_size: int = None
content_type: str = None
@dataclass
class User:
id: int = None
name: str = None
@dataclass
class ClientRef:
id: int = None
name: str = None
@dataclass
class Client:
address: Optional[str]
id: int = None
name: str = None
currency: str = None
is_active: bool = None
created_at: str = None
updated_at: str = None
@dataclass
class UserAssignment:
budget: Optional[float]
id: int = None
is_project_manager: bool = None
is_active: bool = None
created_at: str = None
updated_at: str = None
hourly_rate: float = None
@dataclass
class Project:
code: Optional[str]
id: int = None
name: str = None
@dataclass
class Expense:
locked_reason: Optional[str]
user: Optional[User]
receipt: Optional[Receipt]
invoice: Optional[InvoiceRef]
project: Optional[Project]
notes: Optional[str]
id: int = None
total_cost: float = None
units: float = None
is_closed: bool = False
is_locked: bool = False
is_billed: bool = False
spent_date: str = None
created_at: str = None
updated_at: str = None
billable: bool = True
user_assignment: UserAssignment = None
expense_category: ExpenseCategory = None
client: Client = None
@dataclass
class LineItem:
project: Optional[Project]
id: int = None
kind: str = None
description: str = None
quantity: float = None
unit_price: float = None
amount: float = None
taxed: bool = None
taxed2: bool = None
@dataclass
class ExpenseImport:
summary_type: str
#from: str = None
to: str = None
attach_receipt: str = None
@dataclass
class TimeImport:
summary_type: str
#from: str = None
to: str = None
@dataclass
class LineItemImport:
time: Optional[TimeImport]
expenses: Optional[ExpenseImport]
project_ids: List[Project]
@dataclass
class Creator:
id: int = None
name: str = None
@dataclass
class Estimate:
purchase_order: Optional[str]
tax: Optional[float]
tax_amount: Optional[float]
tax2: Optional[float]
tax2_amount: Optional[float]
discount: Optional[float]
discount_amount: Optional[float]
sent_at: Optional[str]
accepted_at: Optional[str]
declined_at: Optional[str]
issue_date: Optional[str]
line_items: Optional[List[LineItem]]
notes: Optional[str]
id: int = None
client_key: str = None
number: str = None
amount: float = None
subject: str = None
state: str = None
due_date: str = None
created_at: str = None
updated_at: str = None
currency: str = None
creator: Creator = None
@dataclass
class Invoice:
purchase_order: Optional[str]
tax: Optional[float]
tax_amount: Optional[float]
tax2: Optional[float]
tax2_amount: Optional[float]
discount: Optional[float]
discount_amount: Optional[float]
period_start: Optional[str]
period_end: Optional[str]
paid_date: Optional[str]
closed_at: Optional[str]
paid_at: Optional[str]
estimate: Optional[Estimate]
retainer: Optional[str]
sent_at: Optional[str]
line_items: Optional[List[LineItem]]
notes: Optional[str]
id: int = None
client_key: str = None
number: str = None
amount: float = None
due_amount: float = None
subject: str = None
state: str = None
issue_date: str = None
due_date: str = None
payment_term: str = None
created_at: str = None
updated_at: str = None
currency: str = None
creator: Creator = None
client: ClientRef = None
@dataclass
class FreeFormInvoice:
notes: Optional[str]
client_id: int
retainer_id: int = None
estimate_id: int = None
number: str = None
purchase_order: str = None
tax: float = None
tax2: float = None
discount: float = None
subject: str = None
currency: str = None
issue_date: str = None
due_date: str = None
payment_term: str = None
line_items: List[LineItem] = None
@dataclass
class InvoiceImport:
notes: Optional[str]
line_items_import: Optional[LineItemImport]
client_id: int
retainer_id: int = None
estimate_id: int = None
number: str = None
purchase_order: str = None
tax: float = None
tax2: float = None
discount: float = None
subject: str = None
currency: str = None
issue_date: str = None
due_date: str = None
payment_term: str = None
@dataclass
class ClientContact:
title: Optional[str]
last_name: Optional[str]
id: int = None
first_name: str = None
email: str = None
phone_office: str = None
phone_mobile: str = None
fax: str = None
created_at: str = None
updated_at: str = None
client: Client = None
@dataclass
class Recipient:
name: str = None
email: str = None
@dataclass
class InvoiceMessage:
send_reminder_on: Optional[bool]
event_type: Optional[str]
recipients: List[Recipient]
subject: Optional[str]
body: Optional[str]
id: int = None
sent_by: str = None
sent_by_email: str = None
sent_from: str = None
sent_from_email: str = None
include_link_to_client_invoice: bool = None
send_me_a_copy: bool = None
thank_you: bool = None
reminder: bool = None
created_at: str = None
updated_at: str = None
attach_pdf: bool = None
@dataclass
class PaymentGateway:
id: Optional[int]
name: Optional[str]
@dataclass
class InvoicePayment:
transaction_id: Optional[str]
payment_gateway: Optional[PaymentGateway]
id: int = None
amount: float = None
paid_at: str = None
paid_date: str = None
recorded_by: str = None
recorded_by_email: str = None
notes: str = None
created_at: str = None
updated_at: str = None
def __init__(self, id, amount, paid_at, paid_date, recorded_by, recorded_by_email, notes, created_at, updated_at, transaction_id = None, payment_gateway = None):
self.id= int(id)
self.amount= float(amount) # TODO: dacite (or something) isn't casting here when a dict is used in an invoice_payments
self.paid_at= str(paid_at)
self.paid_date= str(paid_date)
self.recorded_by= str(recorded_by)
self.recorded_by_email= str(recorded_by_email)
self.notes= str(notes)
self.created_at= str(created_at)
self.updated_at= str(updated_at)
self.transaction_id= transaction_id
self.payment_gateway= payment_gateway
@dataclass
class InvoiceItemCategory:
id: int = None
name: str = None
use_as_service: bool = None
use_as_expense: bool = None
created_at: str = None
updated_at: str = None
@dataclass
class EstimateMessage:
event_type: Optional[str]
subject: Optional[str]
body: Optional[str]
recipients: List[Recipient]
id: int = None
sent_by: str = None
sent_by_email: str = None
sent_from: str = None
sent_from_email: str = None
send_me_a_copy: bool = None
created_at: str = None
updated_at: str = None
@dataclass
class EstimateItemCategory:
id: int = None
name: str = None
created_at: str = None
updated_at: str = None
@dataclass
class TaskRef:
id: str = None
name: str = None
@dataclass
class Task:
default_hourly_rate: Optional[float]
id: int = None
name: str = None
billable_by_default: bool = None
is_default: bool = None
is_active: bool = None
created_at: str = None
updated_at: str = None
@dataclass
class TaskAssignmentRef:
id: int = None
name: str = None
@dataclass
class TaskAssignment:
budget: Optional[float]
hourly_rate: Optional[float]
id: int = None
is_project_manager: bool = None
is_active: bool = None
created_at: str = None
updated_at: str = None
project: Project = None
task: TaskAssignmentRef = None
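# NOTE: UserAssignment below redefines the dataclass of the same name declared
# earlier in this module (Project and User are likewise redefined further down);
# since later definitions rebind the name at import time, the later, fuller
# versions are the ones other code will actually see.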
@dataclass
class UserAssignment:
budget: Optional[float]
hourly_rate: Optional[float]
id: int = None
is_project_manager: bool = None
is_active: bool = None
created_at: str = None
updated_at: str = None
project: Project = None
user: User = None
@dataclass
class ProjectTaskAssignments:
hourly_rate: Optional[float]
budget: Optional[float]
id: int = None
billable: bool = None
is_active: bool = None
created_at: str = None
updated_at: str = None
task: TaskRef = None
@dataclass
class TimeEntry:
notes: Optional[str]
locked_reason: Optional[str]
timer_started_at: Optional[str]
started_time: Optional[str]
ended_time: Optional[str]
invoice: Optional[InvoiceRef]
external_reference: Optional[str]
billable_rate: Optional[float]
id: int = None
spent_date: str = None
user: User = None
client: Client = None
project: Project = None
task: Task = None
user_assignment: UserAssignment = None
task_assignment: ProjectTaskAssignments = None
hours: float = None
created_at: str = None
updated_at: str = None
is_locked: bool = None
is_closed: bool = None
is_billed: bool = None
is_running: bool = None
billable: bool = None
budgeted: bool = None
cost_rate: float = None
@dataclass
class Project:
over_budget_notification_date: Optional[str]
starts_on: Optional[str]
ends_on: Optional[str]
cost_budget: Optional[float]
hourly_rate: Optional[float]
fee: Optional[float]
budget: Optional[float]
notes: Optional[str]
code: Optional[str]
id: int = None
name: str = None
is_active: bool = None
bill_by: str = None
budget_by: str = None
budget_is_monthly: bool = None
notify_when_over_budget: bool = None
over_budget_notification_percentage: float = None
show_budget_to_all: bool = None
created_at: str = None
updated_at: str = None
is_billable: bool = None
is_fixed_fee: bool = None
client: Client = None
cost_budget_include_expenses: bool = None
@dataclass
class Role:
id: int = None
name: str = None
user_ids: List[int] = None
created_at: str = None
updated_at: str = None
@dataclass
class BillableRate:
start_date: Optional[str]
end_date: Optional[str]
id: int = None
amount: float = None
created_at: str = None
updated_at: str = None
@dataclass
class CostRate:
start_date: Optional[str]
end_date: Optional[str]
id: int = None
amount: float = None
created_at: str = None
updated_at: str = None
@dataclass
class ProjectAssignment:
budget: Optional[float]
hourly_rate: Optional[float]
id: int = None
is_project_manager: bool = None
is_active: bool = None
created_at: str = None
updated_at: str = None
project: Project = None
client: ClientRef = None
task_assignment: List[ProjectTaskAssignments] = None
@dataclass
class User:
default_hourly_rate: Optional[float]
cost_rate: Optional[float]
id: int = None
first_name: str = None
last_name: str = None
email: str = None
telephone: str = None
timezone: str = None
has_access_to_all_future_projects: bool = None
is_contractor: bool = None
is_admin: bool = None
is_project_manager: bool = None
can_see_rates: bool = None
can_create_projects: bool = None
can_create_invoices: bool = None
is_active: bool = None
created_at: str = None
updated_at: str = None
weekly_capacity: int = None
roles: List[str] = None
avatar_url: str = None
@dataclass
class DetailedTimeEntry:
notes: Optional[str]
external_reference_url: Optional[str]
roles: Optional[str]
date: str
client: str
project: str
project_code: str
task: str
hours: float
billable: str
invoiced: str
approved: str
first_name: str
last_name: str
employee: str
billable_rate: float
billable_amount: float
cost_rate: float
cost_amount: float
currency: str
@dataclass
class Links:
next: Optional[str]
previous: Optional[str]
first: str
last: str
@dataclass
class BasePage:
previous_page: Optional[int]
next_page: Optional[int]
per_page: int = None
total_pages: int = None
total_entries: int = None
page: int = 1
links: Links = None
@dataclass
class ClientContacts(BasePage):
contacts: List[ClientContact] = field(init=False)
@dataclass
class Clients(BasePage):
clients: List[Client] = field(init=False)
@dataclass
class InvoiceMessages(BasePage):
invoice_messages: List[InvoiceMessage] = field(init=False)
@dataclass
class InvoicePayments(BasePage):
invoice_payments: List[InvoicePayment] = field(init=False)
@dataclass
class Invoices(BasePage):
invoices: List[Invoice] = field(init=False)
@dataclass
class InvoiceItemCategories(BasePage):
invoice_item_categories: List[InvoiceItemCategory] = field(init=False)
@dataclass
class EstimateMessages(BasePage):
estimate_messages: List[EstimateMessage] = field(init=False)
@dataclass
class Estimates(BasePage):
estimates: List[Estimate] = field(init=False)
@dataclass
class EstimateItemCategories(BasePage):
estimate_item_categories: List[EstimateItemCategory] = field(init=False)
@dataclass
class Expenses(BasePage):
expenses: List[Expense] = field(init=False)
@dataclass
class ExpenseCategories(BasePage):
expense_categories: List[ExpenseCategory] = field(init=False)
@dataclass
class Tasks(BasePage):
tasks: List[Task] = field(init=False)
@dataclass
class TimeEntries(BasePage):
time_entries: List[TimeEntry] = field(init=False)
@dataclass
class UserAssignments(BasePage):
user_assignments: List[UserAssignment] = field(init=False)
@dataclass
class TaskAssignments(BasePage):
task_assignments: List[TaskAssignment] = field(init=False)
@dataclass
class Projects(BasePage):
projects: List[Project] = field(init=False)
@dataclass
class Roles(BasePage):
roles: List[Role] = field(init=False)
@dataclass
class BillableRates(BasePage):
billable_rates: List[BillableRate] = field(init=False)
@dataclass
class UserCostRates(BasePage):
cost_rates: List[CostRate] = field(init=False)
@dataclass
class ProjectAssignments(BasePage):
project_assignments: List[ProjectAssignment] = field(init=False)
@dataclass
class Users(BasePage):
users: List[User] = field(init=False)
@dataclass
class DetailedTimeReport():
detailed_time_entries: List[DetailedTimeEntry]
|
py | b408b6f308802bc5dd6196de2868dd033091d0e2 | import logging
import os
from pathlib import Path
from typing import Union
import exceptions as ex
LEVEL = Union[str, int]
class Logger(logging.Logger):
MSG_FMT = "[{asctime},{msecs:3.0f}] [{levelname}] " \
"[{process}:{module}:{funcName}] {message}"
DATE_FMT = "%d.%m.%Y %H:%M:%S"
LOG_FOLDER = Path('logs')
LOG_FILE_NAME = 'converter.log'
def __init__(self,
name: str,
level: str or int,
*,
fmt: str = None,
date_fmt: str = None,
log_folder: str or Path = None,
log_file_name: str or Path = None) -> None:
super().__init__(name, level)
self.MSG_FMT = fmt or self.MSG_FMT
self.DATE_FMT = date_fmt or self.DATE_FMT
self.LOG_FOLDER = log_folder or self.LOG_FOLDER
self.LOG_FILE_NAME = log_file_name or self.LOG_FILE_NAME
os.makedirs(self.LOG_FOLDER, exist_ok=True)
self.__log_path = self.LOG_FOLDER / self.LOG_FILE_NAME
self.__formatter = logging.Formatter(
fmt=self.MSG_FMT, datefmt=self.DATE_FMT, style='{'
)
# don't forget to add the logger the global loggers storage
logging.Logger.manager.loggerDict[name] = self
@property
def log_file_path(self) -> Path:
return self.__log_path
@property
def formatter(self) -> logging.Formatter:
return self.__formatter
@property
def stream_handler(self) -> logging.StreamHandler:
return self._get_handler(logging.StreamHandler)
@property
def file_handler(self) -> logging.FileHandler:
return self._get_handler(logging.FileHandler)
def add_stream_handler(self,
level: LEVEL) -> None:
try:
self.stream_handler
except ex.HandlerNotFoundError:
pass
else:
self.error(f"Stream handler even exists")
raise ex.HandlerEvenExistsError("Stream handler even exists")
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(self.formatter)
self.addHandler(handler)
def add_file_handler(self,
level: LEVEL) -> None:
try:
self.file_handler
except ex.HandlerNotFoundError:
pass
else:
self.error(f"File handler even exists")
raise ex.HandlerEvenExistsError("File handler even exists")
handler = logging.FileHandler(
self.log_file_path, delay=True, encoding='utf-8'
)
handler.setLevel(level)
handler.setFormatter(self.formatter)
self.addHandler(handler)
def _get_handler(self,
handler_type: type) -> logging.Handler:
for handler in self.handlers:
if isinstance(handler, handler_type):
return handler
raise ex.HandlerNotFoundError(
f"There's no '{handler_type.__class__.__name__}'")
def _set_handler_level(self,
handler_type: type,
level: LEVEL):
try:
handler = self._get_handler(handler_type)
except ex.HandlerNotFoundError:
self.error(f"There's no {handler_type.__name__}")
raise
try:
level = level.upper()
except AttributeError:
pass
handler.setLevel(level)
def set_stream_handler_level(self,
level: LEVEL) -> None:
self._set_handler_level(type(self.stream_handler), level)
def set_file_handler_level(self,
level: LEVEL) -> None:
self._set_handler_level(type(self.file_handler), level)
def __iter__(self) -> iter:
return iter(self.handlers)
def __contains__(self, item: type) -> bool:
try:
self._get_handler(item)
except ex.HandlerNotFoundError:
return False
return True
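# Usage sketch (names are illustrative): each Logger instance manages at most one
# stream handler and one file handler.
#
#   log = Logger('converter', 'DEBUG')
#   log.add_stream_handler('INFO')
#   log.add_file_handler('DEBUG')
#   log.set_stream_handler_level('WARNING')
#   logging.FileHandler in log   # -> True, via __contains__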
|
py | b408b70027df3eab63c5073214151799e981449b | """
Generic utilities.
"""
import functools
import hmac
import os
import sys
import time
from functools import wraps
from hashlib import sha1
from time import sleep as retry_sleep # so that we can patch it for tests.
from typing import Optional
import cachetools.func
import requests
from flask import request, Response
from flask_dance.contrib.jira import jira
from urlobject import URLObject
from openedx_webhooks import logger
from openedx_webhooks.oauth import get_github_session, jira_get
from openedx_webhooks.types import JiraDict
def environ_get(name: str, default=None) -> str:
"""
Get an environment variable, raising an error if it's missing.
"""
val = os.environ.get(name, default)
if val is None:
raise Exception(f"Required environment variable {name!r} is missing")
return val
def _check_auth(username, password):
"""
Checks if a username / password combination is valid.
"""
return (
username == os.environ.get('HTTP_BASIC_AUTH_USERNAME') and
password == os.environ.get('HTTP_BASIC_AUTH_PASSWORD')
)
def _authenticate():
"""
Sends a 401 response that enables basic auth
"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'}
)
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not _check_auth(auth.username, auth.password):
return _authenticate()
return f(*args, **kwargs)
return decorated
def log_check_response(response, raise_for_status=True):
"""
Logs HTTP request and response at debug level and checks if it succeeded.
Arguments:
response (requests.Response)
raise_for_status (bool): if True, call raise_for_status on the response
also.
"""
msg = "Request: {0.method} {0.url}: {0.body!r}".format(response.request)
logger.debug(msg)
msg = "Response: {0.status_code} {0.reason!r} for {0.url}: {0.content!r}".format(response)
logger.debug(msg)
if raise_for_status:
try:
response.raise_for_status()
except Exception as exc:
req = response.request
logger.exception(f"HTTP request failed: {req.method} {req.url}. Response body: {response.content}")
raise
def log_rate_limit():
"""Get stats from GitHub about the current rate limit, and log them."""
rate = get_github_session().get("/rate_limit").json()['rate']
reset = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(rate['reset']))
logger.info(f"Rate limit: {rate['limit']}, used {rate['used']}, remaining {rate['remaining']}. Reset is at {reset}")
def is_valid_payload(secret: str, signature: str, payload: bytes) -> bool:
"""
Ensure payload is valid according to signature.
Make sure the payload hashes to the signature as calculated using
the shared secret.
Arguments:
secret (str): The shared secret
signature (str): Signature as calculated by the server, sent in
the request
payload (bytes): The request payload
Returns:
bool: Is the payload legit?
"""
mac = hmac.new(secret.encode(), msg=payload, digestmod=sha1)
digest = 'sha1=' + mac.hexdigest()
return hmac.compare_digest(digest.encode(), signature.encode())
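# Example (illustrative secret and payload): a signature computed with the same
# shared secret verifies; any other secret does not.
#
#   >>> body = b'{"action": "opened"}'
#   >>> sig = 'sha1=' + hmac.new(b's3cr3t', body, sha1).hexdigest()
#   >>> is_valid_payload('s3cr3t', sig, body)
#   True
#   >>> is_valid_payload('wrong', sig, body)
#   False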
def text_summary(text, length=40):
"""
Make a summary of `text`, at most `length` chars long.
The middle will be elided if needed.
"""
if len(text) <= length:
return text
else:
start = (length - 3) // 2
end = (length - 3 - start)
return text[:start] + "..." + text[-end:]
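# Examples:
#   >>> text_summary("0123456789", length=40)
#   '0123456789'
#   >>> text_summary("a" * 100, length=9)
#   'aaa...aaa'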
def retry_get(session, url, **kwargs):
"""
Get a URL, but retry if it returns a 404.
GitHub has been known to send us a pull request event, and then return a
404 when we ask for the comments on the pull request. This will retry
with a pause to get the real answer.
"""
tries = 10
while True:
resp = session.get(url, **kwargs)
if resp.status_code == 404:
tries -= 1
if tries == 0:
break
retry_sleep(.5)
continue
else:
break
return resp
def paginated_get(url, session=None, limit=None, per_page=100, callback=None, **kwargs):
"""
Retrieve all objects from a paginated API.
Assumes that the pagination is specified in the "link" header, like
Github's v3 API.
The `limit` describes how many results you'd like returned. You might get
more than this, but you won't make more requests to the server once this
limit has been exceeded. For example, paginating by 100, if you set a
limit of 250, three requests will be made, and you'll get 300 objects.
"""
url = URLObject(url).set_query_param('per_page', str(per_page))
limit = limit or 999999999
session = session or requests.Session()
returned = 0
while url:
resp = retry_get(session, url, **kwargs)
log_check_response(resp)
if callable(callback):
callback(resp)
for item in resp.json():
yield item
returned += 1
url = None
if resp.links and returned < limit:
url = resp.links.get("next", {}).get("url", "")
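# Usage sketch (the endpoint is illustrative): iterate lazily over a GitHub-style
# paginated API; as the docstring notes, `limit` caps how many requests are made
# rather than the exact number of items yielded.
#
#   repos = paginated_get(
#       "https://api.github.com/orgs/openedx/repos",
#       session=get_github_session(),
#       limit=250,
#   )
#   for repo in repos:
#       print(repo["full_name"])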

def jira_paginated_get(url, session=None,
                       start=0, start_param="startAt", obj_name=None,
                       retries=3, debug=False, **fields):
    """
    Like ``paginated_get``, but uses JIRA's conventions for a paginated API, which
    are different from Github's conventions.
    """
    session = session or requests.Session()
    url = URLObject(url)
    more_results = True
    while more_results:
        result_url = (
            url.set_query_param(start_param, str(start))
               .set_query_params(**fields)
        )
        for _ in range(retries):
            try:
                if debug:
                    print(result_url, file=sys.stderr)
                result_resp = session.get(result_url)
                result = result_resp.json()
                break
            except ValueError:
                continue
        result_resp.raise_for_status()
        result = result_resp.json()
        if not result:
            break
        if obj_name:
            objs = result[obj_name]
        else:
            objs = result
        for obj in objs:
            yield obj
        # are we done yet?
        if isinstance(result, dict):
            returned = len(objs)
            total = result["total"]
            if start + returned < total:
                start += returned
            else:
                more_results = False
        else:
            # `result` is a list
            start += len(result)
            more_results = True  # just keep going until there are no more results.
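
# Illustrative sketch (not part of the original module): paging through a Jira
# search. Jira's search API wraps results in a dict with an "issues" list and a
# "total" count, which is what obj_name and the dict branch above handle. The
# host, project, and query parameters here are hypothetical.
def _example_jira_paginated_get():
    keys = []
    for issue in jira_paginated_get(
        "https://myjira.example.com/rest/api/2/search",
        obj_name="issues",
        jql='project = ARCH ORDER BY created DESC',
        maxResults=50,
    ):
        keys.append(issue["key"])
    return keys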

# A list of all the memoized functions, so that `clear_memoized_values` can
# clear them all.
_memoized_functions = []

def memoize(func):
    """Cache the value returned by a function call forever."""
    func = functools.lru_cache()(func)
    _memoized_functions.append(func)
    return func

def memoize_timed(minutes):
    """Cache the value of a function for `minutes` minutes."""
    def _timed(func):
        # We use time.time as the timer so that freezegun can test it, and in a
        # new function so that freezegun's patching will work. Freezegun doesn't
        # patch time.monotonic, and we aren't that picky about the time anyway.
        def patchable_timer():
            return time.time()
        func = cachetools.func.ttl_cache(ttl=60 * minutes, timer=patchable_timer)(func)
        _memoized_functions.append(func)
        return func
    return _timed
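
# Illustrative sketch (not part of the original module): how the decorators
# above might be applied. The function names and return values are made up.
@memoize
def _example_expensive_lookup(org):
    # Computed once per distinct `org`, then served from the lru_cache forever.
    return {"org": org, "repos": []}

@memoize_timed(minutes=5)
def _example_recent_stats():
    # Recomputed at most once every five minutes.
    return {"fetched_at": time.time()}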

def clear_memoized_values():
    """Clear all the values saved by @memoize and @memoize_timed, to ensure isolated tests."""
    for func in _memoized_functions:
        func.cache_clear()

def minimal_wsgi_environ():
    values = {
        "HTTP_HOST", "SERVER_NAME", "SERVER_PORT", "REQUEST_METHOD",
        "SCRIPT_NAME", "PATH_INFO", "QUERY_STRING", "wsgi.url_scheme",
    }
    return {key: value for key, value in request.environ.items()
            if key in values}

def sentry_extra_context(data_dict):
    """Apply the keys and values from data_dict to the Sentry extra context."""
    from sentry_sdk import configure_scope
    with configure_scope() as scope:
        for key, value in data_dict.items():
            scope.set_extra(key, value)

@memoize_timed(minutes=30)
def get_jira_custom_fields(session=None):
    """
    Return a name-to-id mapping for the custom fields on JIRA.
    """
    session = session or jira
    field_resp = session.get("/rest/api/2/field")
    field_resp.raise_for_status()
    fields = field_resp.json()
    return {f["name"]: f["id"] for f in fields if f["custom"]}

def get_jira_issue(key: str, missing_ok: bool = False) -> Optional[JiraDict]:
    """
    Get the dictionary for a Jira issue, from its key.

    Args:
        key: the Jira id of the issue to find.
        missing_ok: True if this function should return None for missing issue.

    Returns:
        A dict of Jira information, or None if missing_ok is True, and the issue
        is missing.
    """
    resp = jira_get("/rest/api/2/issue/{key}".format(key=key))
    if resp.status_code == 404 and missing_ok:
        return None
    log_check_response(resp)
    return resp.json()
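
# Illustrative sketch (not part of the original module): with missing_ok=True a
# nonexistent key yields None instead of an exception. The issue key is made up.
def _example_get_jira_issue():
    issue = get_jira_issue("ARCH-1234", missing_ok=True)
    if issue is None:
        return "no such issue"
    return issue["fields"]["summary"]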

def github_pr_repo(issue):
    custom_fields = get_jira_custom_fields()
    pr_repo = issue["fields"].get(custom_fields["Repo"])
    parent_ref = issue["fields"].get("parent")
    if not pr_repo and parent_ref:
        parent = get_jira_issue(parent_ref["key"])
        pr_repo = parent["fields"].get(custom_fields["Repo"])
    return pr_repo

def github_pr_num(issue):
    custom_fields = get_jira_custom_fields()
    pr_num = issue["fields"].get(custom_fields["PR Number"])
    parent_ref = issue["fields"].get("parent")
    if not pr_num and parent_ref:
        parent = get_jira_issue(parent_ref["key"])
        pr_num = parent["fields"].get(custom_fields["PR Number"])
    try:
        return int(pr_num)
    except Exception:  # pylint: disable=broad-except
        return None

def github_pr_url(issue):
    """
    Return the pull request URL for the given JIRA issue,
    or raise an exception if they can't be determined.
    """
    pr_repo = github_pr_repo(issue)
    pr_num = github_pr_num(issue)
    if not pr_repo or not pr_num:
        issue_key = issue["key"]
        fail_msg = '{key} is missing "Repo" or "PR Number" fields'.format(key=issue_key)
        raise Exception(fail_msg)
    return "/repos/{repo}/pulls/{num}".format(repo=pr_repo, num=pr_num)
|