the-stack_0_24504
|
import math
import os
import time
from copy import deepcopy
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
def init_seeds(seed=0):
torch.manual_seed(seed)
# Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
if seed == 0: # slower, more reproducible
cudnn.deterministic = True
cudnn.benchmark = False
else: # faster, less reproducible
cudnn.deterministic = False
cudnn.benchmark = True
def select_device(device='', batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
cpu_request = device.lower() == 'cpu'
if device and not cpu_request: # if device requested other than 'cpu'
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity
cuda = False if cpu_request else torch.cuda.is_available()
if cuda:
c = 1024 ** 2 # bytes to MB
ng = torch.cuda.device_count()
if ng > 1 and batch_size: # check that batch_size is compatible with device_count
assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
x = [torch.cuda.get_device_properties(i) for i in range(ng)]
s = 'Using CUDA '
for i in range(0, ng):
if i == 1:
s = ' ' * len(s)
print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
(s, i, x[i].name, x[i].total_memory / c))
else:
print('Using CPU')
print('') # skip a line
return torch.device('cuda:0' if cuda else 'cpu')
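# Hypothetical usage sketch (not in the original file): init_seeds and
# select_device are typically called once at the start of a run. The helper
# name _demo_setup is an illustrative addition.
def _demo_setup():
    init_seeds(0)                      # seed=0 -> deterministic cudnn settings
    device = select_device('')         # '' -> first CUDA device if available, else CPU
    x = torch.zeros(1, 3, 64, 64).to(device)
    return x.device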
def time_synchronized():
torch.cuda.synchronize() if torch.cuda.is_available() else None
return time.time()
def is_parallel(model):
return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
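# Hypothetical usage sketch (illustrative, not from the original file):
# intersect_dicts is typically used to load only the checkpoint weights that
# still match the current model, e.g. after changing the head. Names below are
# illustrative.
def _demo_partial_load(model, ckpt_state_dict):
    csd = intersect_dicts(ckpt_state_dict, model.state_dict(), exclude=('anchor',))
    model.load_state_dict(csd, strict=False)  # skip missing or mismatched keys
    return len(csd)                            # number of transferred tensors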
def initialize_weights(model):
for m in model.modules():
t = type(m)
if t is nn.Conv2d:
pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
def find_modules(model, mclass=nn.Conv2d):
# Finds layer indices matching module class 'mclass'
return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
def sparsity(model):
# Return global model sparsity
a, b = 0., 0.
for p in model.parameters():
a += p.numel()
b += (p == 0).sum()
return b / a
def prune(model, amount=0.3):
# Prune model to requested global sparsity
import torch.nn.utils.prune as prune
print('Pruning model... ', end='')
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.l1_unstructured(m, name='weight', amount=amount) # prune
prune.remove(m, 'weight') # make permanent
print(' %.3g global sparsity' % sparsity(model))
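# Hypothetical usage sketch (illustrative addition): prune 30% of each Conv2d's
# weights in a small torchvision model, then report the resulting global
# sparsity. resnet18 is used here only as a convenient example.
def _demo_prune():
    m = models.resnet18()
    prune(m, amount=0.3)           # L1-unstructured pruning of every Conv2d weight
    return float(sparsity(m))      # global fraction of zeroed parameters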
def fuse_conv_and_bn(conv, bn):
# https://tehnokv.com/posts/fusing-batchnorm-and-conv/
with torch.no_grad():
# init
fusedconv = nn.Conv2d(conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
bias=True).to(conv.weight.device)
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
# prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
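# Hypothetical sanity check (illustrative addition): in eval mode, the fused
# convolution should reproduce Conv2d -> BatchNorm2d to within numerical noise.
def _demo_fuse_check():
    conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False).eval()
    bn = nn.BatchNorm2d(8).eval()
    x = torch.randn(1, 3, 16, 16)
    fused = fuse_conv_and_bn(conv, bn)
    return torch.allclose(bn(conv(x)), fused(x), atol=1e-5)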
def model_info(model, verbose=False):
# Plots a line-by-line description of a PyTorch model
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
try: # FLOPS
from thop import profile
flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2
fs = ', %.1f GFLOPS' % (flops * 100) # 640x640 FLOPS
except:
fs = ''
print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))
def load_classifier(name='resnet101', n=2):
# Loads a pretrained model reshaped to n-class output
model = models.__dict__[name](pretrained=True)
# Display model properties
input_size = [3, 224, 224]
input_space = 'RGB'
input_range = [0, 1]
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
    for x in ['input_size', 'input_space', 'input_range', 'mean', 'std']:
        print(x + ' =', eval(x))  # iterate over the names so eval() can look them up
# Reshape output to n classes
filters = model.fc.weight.shape[1]
model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
model.fc.out_features = n
return model
def scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio
# scales img(bs,3,y,x) by ratio
if ratio == 1.0:
return img
else:
h, w = img.shape[2:]
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
if not same_shape: # pad/crop img
gs = 128#64#32 # (pixels) grid size
h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
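# Hypothetical usage sketch (illustrative): shrink a batch to 70% and let
# scale_img pad it back up to a multiple of its hard-coded grid size (128), so a
# strided network still divides the input evenly.
def _demo_scale():
    imgs = torch.zeros(2, 3, 256, 320)
    out = scale_img(imgs, ratio=0.7, same_shape=False)
    return out.shape                   # (2, 3, 256, 256) for this input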
def copy_attr(a, b, include=(), exclude=()):
# Copy attributes from b to a, options to only include [...] and to exclude [...]
for k, v in b.__dict__.items():
if (len(include) and k not in include) or k.startswith('_') or k in exclude:
continue
else:
setattr(a, k, v)
class ModelEMA:
""" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
Keep a moving average of everything in the model state_dict (parameters and buffers).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
This class is sensitive where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, updates=0):
# Create EMA
self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA
# if next(model.parameters()).device.type != 'cpu':
# self.ema.half() # FP16 EMA
self.updates = updates # number of EMA updates
self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
for p in self.ema.parameters():
p.requires_grad_(False)
def update(self, model):
# Update EMA parameters
with torch.no_grad():
self.updates += 1
d = self.decay(self.updates)
msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict
for k, v in self.ema.state_dict().items():
if v.dtype.is_floating_point:
v *= d
v += (1. - d) * msd[k].detach()
def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
# Update EMA attributes
copy_attr(self.ema, model, include, exclude)
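# Hypothetical training-loop sketch (illustrative, names are not from the
# original file): the EMA copy is updated after every optimizer step and is the
# model you would normally evaluate or export.
def _demo_ema_loop(model, optimizer, data_loader, loss_fn):
    ema = ModelEMA(model)
    for imgs, targets in data_loader:
        optimizer.zero_grad()
        loss_fn(model(imgs), targets).backward()
        optimizer.step()
        ema.update(model)          # blend current weights into the EMA shadow copy
    ema.update_attr(model)         # copy plain attributes (hyperparameters, names, ...)
    return ema.ema                 # the smoothed model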
|
the-stack_0_24506
|
#!/usr/bin/env python3
import os
import time
import argparse
import logging
import numpy as np
import collections
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as scheduler
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from libcube import cubes
from libcube import model
from libcube import conf
log = logging.getLogger("train")
if __name__ == "__main__":
logging.basicConfig(format="%(asctime)-15s %(levelname)s %(message)s", level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--ini", required=True, help="Ini file to use for this run")
parser.add_argument("-n", "--name", required=True, help="Name of the run")
args = parser.parse_args()
config = conf.Config(args.ini)
device = torch.device("cuda" if config.train_cuda else "cpu")
name = config.train_name(suffix=args.name)
writer = SummaryWriter(comment="-" + name)
save_path = os.path.join("saves", name)
os.makedirs(save_path)
cube_env = cubes.get(config.cube_type)
assert isinstance(cube_env, cubes.CubeEnv)
log.info("Selected cube: %s", cube_env)
value_targets_method = model.ValueTargetsMethod(config.train_value_targets_method)
net = model.Net(cube_env.encoded_shape, len(cube_env.action_enum)).to(device)
print(net)
opt = optim.Adam(net.parameters(), lr=config.train_learning_rate)
sched = scheduler.StepLR(opt, 1, gamma=config.train_lr_decay_gamma) if config.train_lr_decay_enabled else None
step_idx = 0
buf_policy_loss, buf_value_loss, buf_loss = [], [], []
buf_policy_loss_raw, buf_value_loss_raw, buf_loss_raw = [], [], []
buf_mean_values = []
ts = time.time()
best_loss = None
log.info("Generate scramble buffer...")
scramble_buf = collections.deque(maxlen=config.scramble_buffer_batches*config.train_batch_size)
scramble_buf.extend(model.make_scramble_buffer(cube_env, config.train_batch_size*2, config.train_scramble_depth))
log.info("Generated buffer of size %d", len(scramble_buf))
while True:
step_idx += 1
x_t, weights_t, y_policy_t, y_value_t = model.sample_batch(
scramble_buf, net, device, config.train_batch_size, value_targets_method)
opt.zero_grad()
policy_out_t, value_out_t = net(x_t)
value_out_t = value_out_t.squeeze(-1)
value_loss_t = (value_out_t - y_value_t)**2
value_loss_raw_t = value_loss_t.mean()
if config.weight_samples:
value_loss_t *= weights_t
value_loss_t = value_loss_t.mean()
policy_loss_t = F.cross_entropy(policy_out_t, y_policy_t, reduction='none')
policy_loss_raw_t = policy_loss_t.mean()
if config.weight_samples:
policy_loss_t *= weights_t
policy_loss_t = policy_loss_t.mean()
loss_raw_t = policy_loss_raw_t + value_loss_raw_t
loss_t = value_loss_t + policy_loss_t
loss_t.backward()
opt.step()
if config.train_lr_decay_enabled and step_idx % config.train_lr_decay_batches == 0:
sched.step()
log.info("LR decrease to %s", sched.get_last_lr()[0])
writer.add_scalar("lr", sched.get_last_lr()[0], step_idx)
# save data
buf_mean_values.append(value_out_t.mean().item())
buf_policy_loss.append(policy_loss_t.item())
buf_value_loss.append(value_loss_t.item())
buf_loss.append(loss_t.item())
buf_loss_raw.append(loss_raw_t.item())
buf_value_loss_raw.append(value_loss_raw_t.item())
buf_policy_loss_raw.append(policy_loss_raw_t.item())
if config.train_report_batches is not None and step_idx % config.train_report_batches == 0:
m_policy_loss = np.mean(buf_policy_loss)
m_value_loss = np.mean(buf_value_loss)
m_loss = np.mean(buf_loss)
buf_value_loss.clear()
buf_policy_loss.clear()
buf_loss.clear()
m_policy_loss_raw = np.mean(buf_policy_loss_raw)
m_value_loss_raw = np.mean(buf_value_loss_raw)
m_loss_raw = np.mean(buf_loss_raw)
buf_value_loss_raw.clear()
buf_policy_loss_raw.clear()
buf_loss_raw.clear()
m_values = np.mean(buf_mean_values)
buf_mean_values.clear()
dt = time.time() - ts
ts = time.time()
speed = config.train_batch_size * config.train_report_batches / dt
log.info("%d: p_loss=%.3e, v_loss=%.3e, loss=%.3e, speed=%.1f cubes/s",
step_idx, m_policy_loss, m_value_loss, m_loss, speed)
sum_train_data = 0.0
sum_opt = 0.0
writer.add_scalar("loss_policy", m_policy_loss, step_idx)
writer.add_scalar("loss_value", m_value_loss, step_idx)
writer.add_scalar("loss", m_loss, step_idx)
writer.add_scalar("loss_policy_raw", m_policy_loss_raw, step_idx)
writer.add_scalar("loss_value_raw", m_value_loss_raw, step_idx)
writer.add_scalar("loss_raw", m_loss_raw, step_idx)
writer.add_scalar("values", m_values, step_idx)
writer.add_scalar("speed", speed, step_idx)
if best_loss is None:
best_loss = m_loss
elif best_loss > m_loss:
name = os.path.join(save_path, "best_%.4e.dat" % m_loss)
torch.save(net.state_dict(), name)
best_loss = m_loss
if step_idx % config.push_scramble_buffer_iters == 0:
scramble_buf.extend(model.make_scramble_buffer(cube_env, config.train_batch_size,
config.train_scramble_depth))
log.info("Pushed new data in scramble buffer, new size = %d", len(scramble_buf))
if config.train_checkpoint_batches is not None and step_idx % config.train_checkpoint_batches == 0:
name = os.path.join(save_path, "chpt_%06d.dat" % step_idx)
torch.save(net.state_dict(), name)
if config.train_max_batches is not None and config.train_max_batches <= step_idx:
log.info("Limit of train batches reached, exiting")
break
writer.close()
|
the-stack_0_24507
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands related to the \
Information Superhighway (yes, Internet). """
import os
from datetime import datetime
import wget
from speedtest import Speedtest
from telethon import functions
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern="^.speed$")
async def speedtst(spd):
""" For .speed command, use SpeedTest to check server speeds. """
await spd.edit("`Running speed test . . .`")
test = Speedtest()
test.get_best_server()
test.download()
test.upload()
test.results.share()
result = test.results.dict()
path = wget.download(result["share"])
output = f"Started at `{result['timestamp']}`\n\n"
output += "Client:\n\n"
output += f"ISP: `{result['client']['isp']}`\n"
output += f"Country: `{result['client']['country']}`\n\n"
output += "Server:\n"
output += f"Name: `{result['server']['name']}`\n"
output += f"Country: `{result['server']['country']}, {result['server']['cc']}`\n"
output += f"Sponsor: `{result['server']['sponsor']}`\n"
output += f"Latency: `{result['server']['latency']}`\n\n"
output += f"Ping: `{result['ping']}`\n"
output += f"Sent: `{humanbytes(result['bytes_sent'])}`\n"
output += f"Received: `{humanbytes(result['bytes_received'])}`\n"
output += f"Download: `{humanbytes(result['download'] / 8)}/s`\n"
output += f"Upload: `{humanbytes(result['upload'] / 8)}/s`"
await spd.delete()
await spd.client.send_file(spd.chat_id, path, caption=output, force_document=False)
os.remove(path)
def speed_convert(size):
"""
Hi human, you can't read bytes?
"""
power = 2 ** 10
zero = 0
units = {0: "", 1: "Kb/s", 2: "Mb/s", 3: "Gb/s", 4: "Tb/s"}
while size > power:
size /= power
zero += 1
return f"{round(size, 2)} {units[zero]}"
@register(outgoing=True, pattern="^.dc$")
async def neardc(event):
""" For .dc command, get the nearest datacenter information. """
result = await event.client(functions.help.GetNearestDcRequest())
await event.edit(
f"🏳🌈 Negara : `{result.country}`\n"
f"Data Center Terdekat : `{result.nearest_dc}`\n"
f"Data Center Pengguna : `{result.this_dc}`"
)
@register(outgoing=True, pattern="^.ping$")
async def pingme(pong):
""" For .ping command, ping the userbot from any chat. """
start = datetime.now()
await pong.edit("`Pong!`")
end = datetime.now()
duration = (end - start).microseconds / 1000
await pong.edit("`█▀█ █▀█ █▄░█ █▀▀ █ \n█▀▀ █▄█ █░▀█ █▄█ ▄\n%sms`" % (duration))
CMD_HELP.update(
{
"speed": ".speed\
\nUsage: Does a speedtest and shows the results."
}
)
CMD_HELP.update(
{
"dc": ".dc\
\nUsage: Finds the nearest datacenter from your server."
}
)
CMD_HELP.update(
{
"ping": ".ping\
\nUsage: Shows how long it takes to ping your bot."
}
)
|
the-stack_0_24508
|
# This is the solution for Euclidean Algorithm > Chocolate by Numbers
#
# This is marked as PAINLESS difficulty
def find_gcd(a, b):
if b == 0:
return a
else:
return find_gcd(b, a % b)
def solution(N, M):
return N // find_gcd(N, M)
print(solution(10, 4))
print(solution(9, 6))
print(solution(10, 11))
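# Quick self-check (illustrative addition): the answer is N // gcd(N, M) because
# stepping by M around a circle of N pieces returns to the start after
# lcm(N, M) / M = N / gcd(N, M) steps. Python's math.gcd gives the same values.
import math
assert solution(10, 4) == 10 // math.gcd(10, 4) == 5
assert solution(9, 6) == 9 // math.gcd(9, 6) == 3
assert solution(10, 11) == 10 // math.gcd(10, 11) == 10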
|
the-stack_0_24512
|
from core.advbase import *
from slot.d import *
def module():
return Aeleen
class Aeleen(Adv):
a1 = ('bt',0.25)
conf = {}
conf['slots.d'] = AC011_Garland()
conf['acl'] = """
`dragon
`s3, not self.s3_buff
`s1
`s4
`fs, seq=5
"""
coab = ['Blade','Dragonyule_Xainfried','Lin_You']
share = ['Curran']
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
|
the-stack_0_24513
|
#!/usr/bin/env python
# Space Invaders
# Created by Lee Robinson
# Modified by Erin Britz
from pygame import *
import sys
import os
import RPi.GPIO as GPIO
from random import shuffle, randrange, choice
# R G B
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREEN = (78, 255, 87)
YELLOW = (241, 255, 0)
BLUE = (80, 255, 239)
PURPLE = (203, 0, 255)
RED = (237, 28, 36)
ALIEN_ROW = 5
ALIEN_COL = 8
COL_SPACE = 25
ROW_SPACE = 23
WIDTH = 240
HEIGHT = 320
#WIDTH = 800
#HEIGHT = 600
GPIO.setmode(GPIO.BCM)
GPIO.setup(2, GPIO.IN, pull_up_down=GPIO.PUD_UP) #LEFT
GPIO.setup(3, GPIO.IN, pull_up_down=GPIO.PUD_UP) #RIGHT
GPIO.setup(27, GPIO.IN, pull_up_down=GPIO.PUD_UP) #A btn - left red
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP) #B btn - right red
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP) # right black aka p1 start
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP) # left black aka coin1
os.putenv('SDL_FBDEV', '/dev/fb1')
SCREEN = display.set_mode((WIDTH,HEIGHT))
FONT = "fonts/space_invaders.ttf"
IMG_NAMES = ["ship", "ship", "mystery", "enemy1_1", "enemy1_2", "enemy2_1", "enemy2_2",
"enemy3_1", "enemy3_2", "explosionblue", "explosiongreen", "explosionpurple", "laser", "enemylaser", "moose", "intro", "f5", "gameover"]
IMAGES = {name: image.load("images/{}.png".format(name)).convert_alpha()
for name in IMG_NAMES}
class Ship(sprite.Sprite):
def __init__(self):
sprite.Sprite.__init__(self)
self.image = IMAGES["moose"]
self.image = transform.scale(self.image, (25, 24))
self.rect = self.image.get_rect(topleft=(100 , 295))
self.speed = 5
def update(self, keys, *args):
if keys[K_LEFT] and self.rect.x > 0:
self.rect.x -= self.speed
if keys[K_RIGHT] and self.rect.x < 220:
self.rect.x += self.speed
if GPIO.input(2) == False and self.rect.x > 0:
self.rect.x -= self.speed
if GPIO.input(3) == False and self.rect.x > 0:
self.rect.x += self.speed
game.screen.blit(self.image, self.rect)
class Bullet(sprite.Sprite):
def __init__(self, xpos, ypos, direction, speed, filename, side):
sprite.Sprite.__init__(self)
self.image = IMAGES[filename]
self.rect = self.image.get_rect(topleft=(xpos, ypos))
self.speed = speed
self.direction = direction
self.side = side
self.filename = filename
def update(self, keys, *args):
game.screen.blit(self.image, self.rect)
self.rect.y += self.speed * self.direction
if self.rect.y < 15 or self.rect.y > 600:
self.kill()
class Enemy(sprite.Sprite):
def __init__(self, row, column):
sprite.Sprite.__init__(self)
self.row = row
self.column = column
self.images = []
self.load_images()
self.index = 0
self.image = self.images[self.index]
self.rect = self.image.get_rect()
self.direction = 1
self.rightMoves = 10
self.leftMoves = 20
self.moveNumber = 0
self.moveTime = 600
self.firstTime = True
self.movedY = False;
self.columns = [False] * ALIEN_COL
self.aliveColumns = [True] * ALIEN_COL
self.addRightMoves = False
self.addLeftMoves = False
self.numOfRightMoves = 0
self.numOfLeftMoves = 0
self.timer = time.get_ticks()
def update(self, keys, currentTime, killedRow, killedColumn, killedArray):
self.check_column_deletion(killedRow, killedColumn, killedArray)
if currentTime - self.timer > self.moveTime:
self.movedY = False;
if self.moveNumber >= self.rightMoves and self.direction == 1:
self.direction *= -1
self.moveNumber = 0
self.rect.y += 35
self.movedY = True
if self.addRightMoves:
self.rightMoves += self.numOfRightMoves
if self.firstTime:
self.rightMoves = self.leftMoves;
self.firstTime = False;
self.addRightMovesAfterDrop = False
if self.moveNumber >= self.leftMoves and self.direction == -1:
self.direction *= -1
self.moveNumber = 0
self.rect.y += 35
self.movedY = True
if self.addLeftMoves:
self.leftMoves += self.numOfLeftMoves
self.addLeftMovesAfterDrop = False
if self.moveNumber < self.rightMoves and self.direction == 1 and not self.movedY:
self.rect.x += 10
self.moveNumber += 1
if self.moveNumber < self.leftMoves and self.direction == -1 and not self.movedY:
self.rect.x -= 10
self.moveNumber += 1
self.index += 1
if self.index >= len(self.images):
self.index = 0
self.image = self.images[self.index]
self.timer += self.moveTime
game.screen.blit(self.image, self.rect)
def check_column_deletion(self, killedRow, killedColumn, killedArray):
if killedRow != -1 and killedColumn != -1:
killedArray[killedRow][killedColumn] = 1
for column in range(ALIEN_COL):
if all([killedArray[row][column] == 1 for row in range(ALIEN_ROW)]):
self.columns[column] = True
for i in range(ALIEN_ROW):
if all([self.columns[x] for x in range(i + 1)]) and self.aliveColumns[i]:
self.leftMoves += 5
self.aliveColumns[i] = False
if self.direction == -1:
self.rightMoves += 5
else:
self.addRightMoves = True
self.numOfRightMoves += 5
for i in range(ALIEN_ROW):
if all([self.columns[x] for x in range((ALIEN_COL -1), (ALIEN_COL - 2) - i, -1)]) and self.aliveColumns[(ALIEN_COL -1) - i]:
self.aliveColumns[(ALIEN_COL - 1 ) - i] = False
self.rightMoves += 5
if self.direction == 1:
self.leftMoves += 5
else:
self.addLeftMoves = True
self.numOfLeftMoves += 5
def load_images(self):
images = {0: ["1_2", "1_1"],
1: ["2_2", "2_1"],
2: ["2_2", "2_1"],
3: ["3_1", "3_2"],
4: ["3_1", "3_2"],
}
#img1, img2 = (IMAGES["enemy{}".format(img_num)] for img_num in images[self.row])
img1, img2 = (IMAGES["f5"] for img_num in images[self.row])
self.images.append(transform.scale(img1, (24, 24)))
self.images.append(transform.scale(img2, (24, 24)))
class Blocker(sprite.Sprite):
def __init__(self, size, color, row, column):
sprite.Sprite.__init__(self)
self.height = size
self.width = size
self.color = color
self.image = Surface((self.width, self.height))
self.image.fill(self.color)
self.rect = self.image.get_rect()
self.row = row
self.column = column
def update(self, keys, *args):
game.screen.blit(self.image, self.rect)
class Mystery(sprite.Sprite):
def __init__(self):
sprite.Sprite.__init__(self)
self.image = IMAGES["mystery"]
self.image = transform.scale(self.image, (25, 7)) #scaled /5
self.rect = self.image.get_rect(topleft=(-80, 45))
self.row = 5
self.moveTime = 25000
self.direction = 1
self.timer = time.get_ticks()
self.mysteryEntered = mixer.Sound('sounds/mysteryentered.wav')
self.mysteryEntered.set_volume(0.3)
self.playSound = True
def update(self, keys, currentTime, *args):
resetTimer = False
if (currentTime - self.timer > self.moveTime) and (self.rect.x < 0 or self.rect.x > WIDTH) and self.playSound:
self.mysteryEntered.play()
self.playSound = False
if (currentTime - self.timer > self.moveTime) and self.rect.x < 840 and self.direction == 1:
self.mysteryEntered.fadeout(4000)
self.rect.x += 2
game.screen.blit(self.image, self.rect)
if (currentTime - self.timer > self.moveTime) and self.rect.x > -100 and self.direction == -1:
self.mysteryEntered.fadeout(4000)
self.rect.x -= 2
game.screen.blit(self.image, self.rect)
if (self.rect.x > 830):
self.playSound = True
self.direction = -1
resetTimer = True
if (self.rect.x < -90):
self.playSound = True
self.direction = 1
resetTimer = True
if (currentTime - self.timer > self.moveTime) and resetTimer:
self.timer = currentTime
class Explosion(sprite.Sprite):
def __init__(self, xpos, ypos, row, ship, mystery, score):
sprite.Sprite.__init__(self)
self.isMystery = mystery
self.isShip = ship
if mystery:
self.text = Text(FONT, 20, str(score), WHITE, xpos+20, ypos+6)
elif ship:
self.image = IMAGES["moose"]
self.image = transform.scale(self.image, (25, 24))
self.rect = self.image.get_rect(topleft=(xpos, ypos))
else:
self.row = row
self.load_image()
self.image = transform.scale(self.image, (40, 35))
self.rect = self.image.get_rect(topleft=(xpos, ypos))
game.screen.blit(self.image, self.rect)
self.timer = time.get_ticks()
def update(self, keys, currentTime):
if self.isMystery:
if currentTime - self.timer <= 200:
self.text.draw(game.screen)
if currentTime - self.timer > 400 and currentTime - self.timer <= 600:
self.text.draw(game.screen)
if currentTime - self.timer > 600:
self.kill()
elif self.isShip:
if currentTime - self.timer > 300 and currentTime - self.timer <= 600:
game.screen.blit(self.image, self.rect)
if currentTime - self.timer > 900:
self.kill()
else:
if currentTime - self.timer <= 100:
game.screen.blit(self.image, self.rect)
if currentTime - self.timer > 100 and currentTime - self.timer <= 200:
self.image = transform.scale(self.image, (50, 45))
game.screen.blit(self.image, (self.rect.x-6, self.rect.y-6))
if currentTime - self.timer > 400:
self.kill()
def load_image(self):
imgColors = ["purple", "blue", "blue", "green", "green"]
self.image = IMAGES["explosion{}".format(imgColors[self.row])]
class Life(sprite.Sprite):
def __init__(self, xpos, ypos):
sprite.Sprite.__init__(self)
self.image = IMAGES["moose"]
self.image = transform.scale(self.image, (14, 14))
self.rect = self.image.get_rect(topleft=(xpos, ypos))
def update(self, keys, *args):
game.screen.blit(self.image, self.rect)
class Text(object):
def __init__(self, textFont, size, message, color, xpos, ypos):
self.font = font.Font(textFont, size)
self.surface = self.font.render(message, True, color)
self.rect = self.surface.get_rect(topleft=(xpos, ypos))
def draw(self, surface):
surface.blit(self.surface, self.rect)
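# Hypothetical usage sketch (illustrative): Text wraps pygame font rendering so
# HUD labels can be created once and blitted each frame. pygame's font module
# must already be initialised (init() is called in SpaceInvaders.__init__).
def _demo_hud_label(surface):
    label = Text(FONT, 15, "Score", BLACK, 5, 5)
    label.draw(surface)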
class SpaceInvaders(object):
def __init__(self):
mixer.pre_init(44100, -16, 1, 512)
init()
self.caption = display.set_caption('Space Invaders')
self.screen = display.set_mode((WIDTH,HEIGHT))
#self.screen = SCREEN
#self.background = image.load('images/background.jpg').convert()
self.background = image.load('images/barcode_s.png').convert()
self.intro = image.load('images/intro.png').convert()
self.gameover = image.load('images/gameover.png').convert()
self.startGame = False
self.mainScreen = True
self.gameOver = False
self.enemyposition = 65
def reset(self, score, lives):
self.player = Ship()
self.playerGroup = sprite.Group(self.player)
self.explosionsGroup = sprite.Group()
self.bullets = sprite.Group()
self.mysteryShip = Mystery()
self.mysteryGroup = sprite.Group(self.mysteryShip)
self.enemyBullets = sprite.Group()
self.reset_lives()
self.make_enemies()
self.allBlockers = sprite.Group(self.make_blockers(0), self.make_blockers(1), self.make_blockers(2), self.make_blockers(3))
self.keys = key.get_pressed()
self.clock = time.Clock()
self.timer = time.get_ticks()
self.noteTimer = time.get_ticks()
self.shipTimer = time.get_ticks()
self.score = score
self.lives = lives
self.create_audio()
self.create_text()
self.killedRow = -1
self.killedColumn = -1
self.makeNewShip = False
self.shipAlive = True
self.killedArray = [[0] * 10 for x in range(5)]
self.enemyposition = 65
def make_blockers(self, number):
blockerGroup = sprite.Group()
for row in range(3):
for column in range(5):
blocker = Blocker(5, BLACK, row, column)
blocker.rect.x = 25 + (50 * number) + (column * blocker.width)
blocker.rect.y = 270 + (row * blocker.height)
blockerGroup.add(blocker)
return blockerGroup
def reset_lives(self):
self.life1 = Life(185, 3)
self.life2 = Life(200, 3)
self.life3 = Life(215, 3)
self.livesGroup = sprite.Group(self.life1, self.life2, self.life3)
def create_audio(self):
self.sounds = {}
for sound_name in ["shoot", "shoot2", "invaderkilled", "mysterykilled", "shipexplosion"]:
self.sounds[sound_name] = mixer.Sound("sounds/{}.wav".format(sound_name))
self.sounds[sound_name].set_volume(0.2)
self.musicNotes = [mixer.Sound("sounds/{}.wav".format(i)) for i in range(4)]
for sound in self.musicNotes:
sound.set_volume(0.5)
self.noteIndex = 0
def play_main_music(self, currentTime):
moveTime = self.enemies.sprites()[0].moveTime
if currentTime - self.noteTimer > moveTime:
self.note = self.musicNotes[self.noteIndex]
if self.noteIndex < 3:
self.noteIndex += 1
else:
self.noteIndex = 0
self.note.play()
self.noteTimer += moveTime
def create_text(self):
self.nextRoundText = Text(FONT, 20, "BARCODE ATTAINED!", GREEN, 5, 270)
self.nextRoundText2 = Text(FONT, 20, " SCAN NOW!", RED, 10, 290)
self.scoreText = Text(FONT, 15, "Score", BLACK, 5, 5)
self.livesText = Text(FONT, 15, "Lives ", BLACK, 135, 5)
def check_input(self):
self.keys = key.get_pressed()
input_A = GPIO.input(27)
input_B = GPIO.input(22)
input_p1 = GPIO.input(18)
input_coin = GPIO.input(23)
for e in event.get():
if e.type == QUIT:
sys.exit()
if e.type == KEYDOWN:
if e.key == K_ESCAPE:
sys.exit()
if e.key == K_SPACE:
if len(self.bullets) == 0 and self.shipAlive:
if self.score < 1000:
bullet = Bullet(self.player.rect.x+12, self.player.rect.y+2, -1, 15, "laser", "center")
self.bullets.add(bullet)
self.allSprites.add(self.bullets)
self.sounds["shoot"].play()
else:
leftbullet = Bullet(self.player.rect.x+4, self.player.rect.y+2, -1, 15, "laser", "left")
rightbullet = Bullet(self.player.rect.x+19, self.player.rect.y+2, -1, 15, "laser", "right")
self.bullets.add(leftbullet)
self.bullets.add(rightbullet)
self.allSprites.add(self.bullets)
self.sounds["shoot2"].play()
if input_coin == False:
sys.exit()
if input_A == False or input_B == False:
if len(self.bullets) == 0 and self.shipAlive:
if self.score < 1000:
bullet = Bullet(self.player.rect.x+12, self.player.rect.y+2, -1, 15, "laser", "center")
self.bullets.add(bullet)
self.allSprites.add(self.bullets)
self.sounds["shoot"].play()
else:
leftbullet = Bullet(self.player.rect.x+4, self.player.rect.y+2, -1, 15, "laser", "left")
rightbullet = Bullet(self.player.rect.x+19, self.player.rect.y+2, -1, 15, "laser", "right")
self.bullets.add(leftbullet)
self.bullets.add(rightbullet)
self.allSprites.add(self.bullets)
self.sounds["shoot2"].play()
if input_p1 == False and GPIO.input(2) == False:
self.enemies.empty()
def make_enemies(self):
enemies = sprite.Group()
for row in range(ALIEN_ROW):
for column in range(ALIEN_COL):
enemy = Enemy(row, column)
enemy.rect.x = 20 + (column * COL_SPACE)
enemy.rect.y = self.enemyposition + (row * ROW_SPACE)
enemies.add(enemy)
self.enemies = enemies
self.allSprites = sprite.Group(self.player, self.enemies, self.livesGroup, self.mysteryShip)
def make_enemies_shoot(self):
columnList = []
for enemy in self.enemies:
columnList.append(enemy.column)
columnSet = set(columnList)
columnList = list(columnSet)
shuffle(columnList)
column = columnList[0]
enemyList = []
rowList = []
for enemy in self.enemies:
if enemy.column == column:
rowList.append(enemy.row)
row = max(rowList)
for enemy in self.enemies:
if enemy.column == column and enemy.row == row:
if (time.get_ticks() - self.timer) > 700:
self.enemyBullets.add(Bullet(enemy.rect.x + 14, enemy.rect.y + 20, 1, 5, "enemylaser", "center"))
self.allSprites.add(self.enemyBullets)
self.timer = time.get_ticks()
def calculate_score(self, row):
scores = {0: 30,
1: 20,
2: 20,
3: 10,
4: 10,
5: choice([50, 100, 150, 300])
}
score = scores[row]
self.score += score
return score
def create_main_menu(self):
for e in event.get():
if e.type == QUIT:
sys.exit()
if e.type == KEYUP:
self.startGame = True
self.mainScreen = False
if GPIO.input(27) == False or GPIO.input(22) == False:
self.startGame = True
self.mainScreen = False
if GPIO.input(23) == False:
sys.exit()
def update_enemy_speed(self):
if len(self.enemies) <= 10:
for enemy in self.enemies:
enemy.moveTime = 400
if len(self.enemies) == 1:
for enemy in self.enemies:
enemy.moveTime = 200
def check_collisions(self):
collidedict = sprite.groupcollide(self.bullets, self.enemyBullets, True, False)
if collidedict:
for value in collidedict.values():
for currentSprite in value:
self.enemyBullets.remove(currentSprite)
self.allSprites.remove(currentSprite)
enemiesdict = sprite.groupcollide(self.bullets, self.enemies, True, False)
if enemiesdict:
for value in enemiesdict.values():
for currentSprite in value:
self.sounds["invaderkilled"].play()
self.killedRow = currentSprite.row
self.killedColumn = currentSprite.column
score = self.calculate_score(currentSprite.row)
explosion = Explosion(currentSprite.rect.x, currentSprite.rect.y, currentSprite.row, False, False, score)
self.explosionsGroup.add(explosion)
self.allSprites.remove(currentSprite)
self.enemies.remove(currentSprite)
self.gameTimer = time.get_ticks()
break
mysterydict = sprite.groupcollide(self.bullets, self.mysteryGroup, True, True)
if mysterydict:
for value in mysterydict.values():
for currentSprite in value:
currentSprite.mysteryEntered.stop()
self.sounds["mysterykilled"].play()
score = self.calculate_score(currentSprite.row)
explosion = Explosion(currentSprite.rect.x, currentSprite.rect.y, currentSprite.row, False, True, score)
self.explosionsGroup.add(explosion)
self.allSprites.remove(currentSprite)
self.mysteryGroup.remove(currentSprite)
newShip = Mystery()
self.allSprites.add(newShip)
self.mysteryGroup.add(newShip)
break
bulletsdict = sprite.groupcollide(self.enemyBullets, self.playerGroup, True, False)
if bulletsdict:
for value in bulletsdict.values():
for playerShip in value:
if self.lives == 3:
self.lives -= 1
self.livesGroup.remove(self.life3)
self.allSprites.remove(self.life3)
elif self.lives == 2:
self.lives -= 1
self.livesGroup.remove(self.life2)
self.allSprites.remove(self.life2)
elif self.lives == 1:
self.lives -= 1
self.livesGroup.remove(self.life1)
self.allSprites.remove(self.life1)
elif self.lives == 0:
self.gameOver = True
self.startGame = False
self.sounds["shipexplosion"].play()
explosion = Explosion(playerShip.rect.x, playerShip.rect.y, 0, True, False, 0)
self.explosionsGroup.add(explosion)
self.allSprites.remove(playerShip)
self.playerGroup.remove(playerShip)
self.makeNewShip = True
self.shipTimer = time.get_ticks()
self.shipAlive = False
if sprite.groupcollide(self.enemies, self.playerGroup, True, True):
self.gameOver = True
self.startGame = False
sprite.groupcollide(self.bullets, self.allBlockers, True, True)
sprite.groupcollide(self.enemyBullets, self.allBlockers, True, True)
sprite.groupcollide(self.enemies, self.allBlockers, False, True)
def create_new_ship(self, createShip, currentTime):
if createShip and (currentTime - self.shipTimer > 900):
self.player = Ship()
self.allSprites.add(self.player)
self.playerGroup.add(self.player)
self.makeNewShip = False
self.shipAlive = True
def create_game_over(self, currentTime):
self.screen.blit(self.gameover, (0,0))
if currentTime - self.timer > 5000:
self.mainScreen = True
for e in event.get():
if e.type == QUIT:
sys.exit()
def main(self):
while True:
if self.mainScreen:
self.reset(0, 3)
self.screen.blit(self.intro, (0,0))
self.create_main_menu()
elif self.startGame:
if len(self.enemies) == 0:
currentTime = time.get_ticks()
if currentTime - self.gameTimer < 3000:
self.screen.blit(self.background, (0,0))
self.scoreText2 = Text(FONT, 20, str(self.score), RED, 85, 5)
self.scoreText.draw(self.screen)
self.scoreText2.draw(self.screen)
self.nextRoundText.draw(self.screen)
self.nextRoundText2.draw(self.screen)
self.livesText.draw(self.screen)
self.livesGroup.update(self.keys)
self.check_input()
if currentTime - self.gameTimer > 3000:
self.reset(self.score, self.lives)
self.enemyposition += 35
self.make_enemies()
self.gameTimer += 3000
else:
currentTime = time.get_ticks()
self.play_main_music(currentTime)
self.screen.blit(self.background, (0,0))
self.allBlockers.update(self.screen)
self.scoreText2 = Text(FONT, 15, str(self.score), RED, 85, 5)
self.scoreText.draw(self.screen)
self.scoreText2.draw(self.screen)
self.livesText.draw(self.screen)
self.check_input()
self.allSprites.update(self.keys, currentTime, self.killedRow, self.killedColumn, self.killedArray)
self.explosionsGroup.update(self.keys, currentTime)
self.check_collisions()
self.create_new_ship(self.makeNewShip, currentTime)
self.update_enemy_speed()
if len(self.enemies) > 0:
self.make_enemies_shoot()
elif self.gameOver:
currentTime = time.get_ticks()
self.create_game_over(currentTime)
display.update()
self.clock.tick(60)
if __name__ == '__main__':
game = SpaceInvaders()
game.main()
|
the-stack_0_24517
|
# For help with generating HTML report files.
def extract_filenames(antecedents, out_path, rename_outfile_fn=None):
# Return list of (in_file, out_file, in_filename, out_filename)
# where the filenames are the full paths, while the files are just
# the file part.
import os
filenames = []
for i, data_node in enumerate(antecedents):
in_filename = data_node.identifier
in_path, in_file = os.path.split(in_filename)
out_file = in_file
if rename_outfile_fn:
out_file = rename_outfile_fn(i, in_file)
out_filename = os.path.join(out_path, out_file)
x = in_file, out_file, in_filename, out_filename
filenames.append(x)
return filenames
def copy_file_or_path(in_filename, out_filename):
import os
import shutil
if os.path.exists(out_filename):
# Need to clean up the path so that old files aren't left
# over.
if os.path.isdir(out_filename):
shutil.rmtree(out_filename)
else:
os.unlink(out_filename)
if os.path.isdir(in_filename):
shutil.copytree(in_filename, out_filename)
else:
shutil.copyfile(in_filename, out_filename)
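# Hypothetical usage sketch (illustrative, names are not from the original
# file): extract_filenames pairs each antecedent's file with its destination
# path, and copy_file_or_path then mirrors it into the report directory.
def _demo_copy_report_files(antecedents, out_path):
    files = extract_filenames(
        antecedents, out_path, rename_outfile_fn=lambda i, f: "%02d_%s" % (i, f))
    for in_file, out_file, in_filename, out_filename in files:
        copy_file_or_path(in_filename, out_filename)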
|
the-stack_0_24518
|
# -*- coding: utf-8 -*-
__version__ = '19.9.1.dev0'
PROJECT_NAME = "galaxy-job-metrics"
PROJECT_OWNER = PROJECT_USERAME = "galaxyproject"
PROJECT_URL = "https://github.com/galaxyproject/galaxy"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_DESCRIPTION = 'Galaxy Job Metrics'
PROJECT_EMAIL = '[email protected]'
RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
PROJECT_USERAME, PROJECT_NAME
)
|
the-stack_0_24522
|
import os
import numpy as np
# import scipy
from scipy.io import savemat
from dataTest import elem,time,lat,lon,bed,slat,slon,freq,dir,variables,spcgroup,stations,nodes
from netcdfswan import NetCDFSWAN
from datetime import datetime,timedelta
tmpFolder="../s3/swandata"
def create_bot(filePath,array):
np.savetxt(filePath, array, delimiter=' ')
def create_ele(filePath,array):
index=np.arange(1,len(array)+1)
array=array+1
array=np.column_stack((index,array))
np.savetxt(filePath, array,header='{} 3 0'.format(len(array)))
def create_node(filePath,array):
array=np.column_stack((np.arange(1,len(array)+1),array,array,np.ones(len(array))))
np.savetxt(filePath, array,header='{} 2 0 1'.format(len(array)))
def create_mat(filePath,dic):
# dic:{"datetime":np.array(ntime,datetime64),"hs":np.array((ntime,nnode))}
dts=dic.pop('datetime')
mdict={}
for i,dt in enumerate(dts):
dtStr=dt.astype(object).strftime("%Y%m%d_%H%M%S")
for name in dic:
key="{}_{}".format(name,dtStr)
mdict[key]=dic[name][i]
mdict['__header__']="dummy header"
mdict['__version__']="dummy version"
mdict['__globals__']="dummy globals"
savemat(filePath, mdict)
def create_spc(filePath,dic,station):
# dic:{"datetime":datetime64,"datetime":np.array((ntime,nsnode,nfreq,ndir))}
freq=dic['freq']
dir=dic['dir']
dt=dic['datetime']
spectra=dic['spectra']
latlng=station['latlng']
with open(filePath,"w+") as f:
f.write("SWAN 1 Swan standard spectral file, version\n")
f.write("$ Data produced by SWAN version 41.31 \n")
f.write("$ Project: WCWI_V5 ; run number: 01 \n")
f.write("TIME time-dependent data\n")
f.write(" 1 time coding option\n")
f.write("LONLAT locations in spherical coordinates\n")
f.write(" {} number of locations\n".format(len(latlng)))
arrayStr = np.array2string(latlng,separator=' ').replace('[',"").replace(']',"")
f.write("{}\n".format(arrayStr))
f.write("AFREQ absolute frequencies in Hz\n")
f.write("{} number of frequencies\n".format(len(freq)))
arrayStr = np.array2string(freq,separator='\n').replace('[',"").replace(']',"")
f.write("{}\n".format(arrayStr))
f.write("NDIR spectral nautical directions in degr\n")
f.write(" {} number of directions\n".format(len(dir)))
arrayStr = np.array2string(dir,separator='\n').replace('[',"").replace(']',"")
f.write("{}\n".format(arrayStr))
f.write("QUANT\n")
f.write(" 1 number of quantities in table\n")
f.write("VaDens variance densities in m2/Hz/degr\n")
f.write("m2/Hz/degr unit\n")
f.write("-0.9900E+02 exception value\n")
# ------- To test spinup, add extra date --------------
if dt[0].astype('datetime64[M]').astype(int) % 12 + 1==1:
_d = dt[0]-np.timedelta64(10,"D")
dtStr=_d.astype(object).strftime("%Y%m%d.%H%M%S")
f.write("{} date and time\n".format(dtStr))
for inode,_ in enumerate(latlng):
# print(stationId,inode,i)
f.write("FACTOR\n")
factor=1
array=(spectra[inode,0]/factor).astype("i4")
arrayStr = np.array2string(array,separator=',').replace(" ","").replace('[',"").replace(']',"").replace(","," ")
f.write("{}\n".format(factor))
f.write("{}\n".format(arrayStr))
# -------------------------------------------------------
for i,_ in enumerate(dt):
dtStr=_.astype(object).strftime("%Y%m%d.%H%M%S")
f.write("{} date and time\n".format(dtStr))
for inode,_ in enumerate(latlng):
# print(stationId,inode,i)
f.write("FACTOR\n")
factor=1
array=(spectra[inode,i]/factor).astype("i4")
arrayStr = np.array2string(array,separator=',').replace(" ","").replace('[',"").replace(']',"").replace(","," ")
f.write("{}\n".format(factor))
f.write("{}\n".format(arrayStr))
def getDatetimeIndex(_all,dt):
""" Find first and last datetime index.
"""
startDate=dt[0]
endDate=dt[len(dt)-1]
startIndex=np.where(_all==startDate)[0][0]
if(endDate>_all[len(_all)-1]):
endIndex=len(_all)+1
else:
endIndex=np.where(_all==endDate)[0][0]+1
return startIndex,endIndex
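# Hypothetical usage sketch (illustrative): given the full hourly time axis and
# a contiguous slice of it, getDatetimeIndex returns the half-open
# [start, end) indices used above to cut each variable array.
def _demo_datetime_index():
    _all = np.arange(np.datetime64("2000-01-01T00"), np.datetime64("2000-03-01T00"),
                     np.timedelta64(1, "h")).astype("datetime64[s]")
    dt = _all[24:48]                       # one full day inside the range
    start, end = getDatetimeIndex(_all, dt)
    return start, end                      # -> (24, 48)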
def create_folders(folder,year,month):
# print(folder,year)
yearFolder=os.path.join(folder,str(year))
if not os.path.exists(yearFolder):os.mkdir(yearFolder)
monthFolder=os.path.join(yearFolder,str(month))
if not os.path.exists(monthFolder):os.mkdir(monthFolder)
resultsFolder=os.path.join(monthFolder,"results")
if not os.path.exists(resultsFolder):os.mkdir(resultsFolder)
return resultsFolder
def create_data():
folder=tmpFolder
if not os.path.exists(folder):os.mkdir(folder)
meshFolder=os.path.join(folder,"Mesh")
if not os.path.exists(meshFolder):os.mkdir(meshFolder)
# Create .bot,.ele,.bot
create_ele(os.path.join(meshFolder,"dummy.ele"),elem)
create_bot(os.path.join(meshFolder,"dummy.bot"),bed)
create_node(os.path.join(meshFolder,"dummy.node"),nodes)
# Create output
for iday,t in enumerate(time):
date=t.astype(object)
year = date.year
month = date.month
day=date.day
hour=date.hour
if day==1 and hour ==0:
print(date, "...")
if month+1>12:
endDate= datetime(year+1,1,1)
else:
endDate= datetime(year,month+1,1)
if np.datetime64(endDate)>time[len(time)-1]:
endDate=(time[len(time)-1]).astype(object)
dt=np.arange(date,endDate+timedelta(hours=1), timedelta(hours=1)).astype("datetime64[s]")
startIndex,endIndex=getDatetimeIndex(time,dt)
resultsFolder=create_folders(folder,year,month)
for i,s in enumerate(stations):
station=stations[s]
sIndex=station['start']
eIndex=station['end']
nsnodes=station['nsnodes']
dic={
"datetime":dt,
"freq":freq,
"dir":dir,
"spectra":spcgroup['spectra'][sIndex:eIndex,startIndex:endIndex]
}
create_spc(os.path.join(resultsFolder,"{}.spc".format(s)),dic,station)
for name in variables:
variable=variables[name]
filePath=os.path.join(resultsFolder,name+".mat")
dic={}
for _ in variable:
dic[_]=variable[_][startIndex:endIndex]
dic['datetime']=dt
create_mat(filePath,dic)
def check_data():
for mkey in variables.keys():
for mmkey in list(variables[mkey].keys()):
for month in range(1, 13):
NCDFS = np.array(NetCDFSWAN.load(os.path.join(tmpFolder,f'2000/{str(month)}/results/{mkey}.mat'))[mmkey])
start = int(NCDFS[0][0]//10) # actual value is similar to the index
end = start + NCDFS.shape[0]
v = variables[mkey][mmkey][start: end] # 0 - 745
np.testing.assert_array_equal(NCDFS, v)
print(f"{mkey}.mat ok")
for i, station in enumerate(stations):
n = stations[station]["nsnodes"]
sts = 0
for month in range(1, 13):
NCDFS = NetCDFSWAN.load(os.path.join(tmpFolder,f'2000/{str(month)}/results/{station}.spc'))["spectra"]
ts = NCDFS.shape[0]
spg = spcgroup["spectra"][i, :n]
for node in range(n):
spg_n = spg[node][sts:sts+ts]
NCDFS_n = NCDFS[:, node]
np.testing.assert_array_equal(spg_n, NCDFS_n)
#print(f"station {station} node {node}:", NCDFS_n.shape, spg_n.shape, " start index:", sts)
sts += ts - 1
print(f"{station}.spc ok")
if __name__ == "__main__":
create_data()
# check_data()
|
the-stack_0_24523
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.billing.budgets_v1beta1.services.budget_service import pagers
from google.cloud.billing.budgets_v1beta1.types import budget_model
from google.cloud.billing.budgets_v1beta1.types import budget_service
from .transports.base import BudgetServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import BudgetServiceGrpcTransport
from .transports.grpc_asyncio import BudgetServiceGrpcAsyncIOTransport
class BudgetServiceClientMeta(type):
"""Metaclass for the BudgetService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[BudgetServiceTransport]]
_transport_registry["grpc"] = BudgetServiceGrpcTransport
_transport_registry["grpc_asyncio"] = BudgetServiceGrpcAsyncIOTransport
def get_transport_class(cls,
label: str = None,
) -> Type[BudgetServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class BudgetServiceClient(metaclass=BudgetServiceClientMeta):
"""BudgetService stores Cloud Billing budgets, which define a
budget plan and rules to execute as we track spend against that
plan.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "billingbudgets.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BudgetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BudgetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> BudgetServiceTransport:
"""Returns the transport used by the client instance.
Returns:
BudgetServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def budget_path(billing_account: str,budget: str,) -> str:
"""Returns a fully-qualified budget string."""
return "billingAccounts/{billing_account}/budgets/{budget}".format(billing_account=billing_account, budget=budget, )
@staticmethod
def parse_budget_path(path: str) -> Dict[str,str]:
"""Parses a budget path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)/budgets/(?P<budget>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, BudgetServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the budget service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, BudgetServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, BudgetServiceTransport):
# transport is a BudgetServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError("When providing a transport instance, "
"provide its credentials directly.")
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def create_budget(self,
request: Union[budget_service.CreateBudgetRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> budget_model.Budget:
r"""Creates a new budget. See
<a href="https://cloud.google.com/billing/quotas">Quotas
and limits</a> for more information on the limits of the
number of budgets you can create.
Args:
request (Union[google.cloud.billing.budgets_v1beta1.types.CreateBudgetRequest, dict]):
The request object. Request for CreateBudget
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.billing.budgets_v1beta1.types.Budget:
A budget is a plan that describes
what you expect to spend on Cloud
projects, plus the rules to execute as
spend is tracked against that plan, (for
example, send an alert when 90% of the
target spend is met). The budget time
period is configurable, with options
such as month (default), quarter, year,
or custom time period.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a budget_service.CreateBudgetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, budget_service.CreateBudgetRequest):
request = budget_service.CreateBudgetRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_budget]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_budget(self,
request: Union[budget_service.UpdateBudgetRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> budget_model.Budget:
r"""Updates a budget and returns the updated budget.
WARNING: There are some fields exposed on the Google
Cloud Console that aren't available on this API. Budget
fields that are not exposed in this API will not be
changed by this method.
Args:
request (Union[google.cloud.billing.budgets_v1beta1.types.UpdateBudgetRequest, dict]):
The request object. Request for UpdateBudget
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.billing.budgets_v1beta1.types.Budget:
A budget is a plan that describes
what you expect to spend on Cloud
projects, plus the rules to execute as
spend is tracked against that plan, (for
example, send an alert when 90% of the
target spend is met). The budget time
period is configurable, with options
such as month (default), quarter, year,
or custom time period.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a budget_service.UpdateBudgetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, budget_service.UpdateBudgetRequest):
request = budget_service.UpdateBudgetRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_budget]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("budget.name", request.budget.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_budget(self,
request: Union[budget_service.GetBudgetRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> budget_model.Budget:
r"""Returns a budget.
WARNING: There are some fields exposed on the Google
Cloud Console that aren't available on this API. When
reading from the API, you will not see these fields in
the return value, though they may have been set in the
Cloud Console.
Args:
request (Union[google.cloud.billing.budgets_v1beta1.types.GetBudgetRequest, dict]):
The request object. Request for GetBudget
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.billing.budgets_v1beta1.types.Budget:
A budget is a plan that describes
what you expect to spend on Cloud
projects, plus the rules to execute as
spend is tracked against that plan, (for
example, send an alert when 90% of the
target spend is met). The budget time
period is configurable, with options
such as month (default), quarter, year,
or custom time period.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a budget_service.GetBudgetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, budget_service.GetBudgetRequest):
request = budget_service.GetBudgetRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_budget]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_budgets(self,
request: Union[budget_service.ListBudgetsRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListBudgetsPager:
r"""Returns a list of budgets for a billing account.
WARNING: There are some fields exposed on the Google
Cloud Console that aren't available on this API. When
reading from the API, you will not see these fields in
the return value, though they may have been set in the
Cloud Console.
Args:
request (Union[google.cloud.billing.budgets_v1beta1.types.ListBudgetsRequest, dict]):
The request object. Request for ListBudgets
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.billing.budgets_v1beta1.services.budget_service.pagers.ListBudgetsPager:
Response for ListBudgets
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a budget_service.ListBudgetsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, budget_service.ListBudgetsRequest):
request = budget_service.ListBudgetsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_budgets]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListBudgetsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def delete_budget(self,
request: Union[budget_service.DeleteBudgetRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a budget. Returns successfully if already
deleted.
Args:
request (Union[google.cloud.billing.budgets_v1beta1.types.DeleteBudgetRequest, dict]):
The request object. Request for DeleteBudget
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a budget_service.DeleteBudgetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, budget_service.DeleteBudgetRequest):
request = budget_service.DeleteBudgetRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_budget]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-billing-budgets",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"BudgetServiceClient",
)
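# A minimal usage sketch for the client above (not part of the generated
# surface): it assumes application default credentials are configured and the
# billing account ID below is a hypothetical placeholder.
#
#     from google.cloud.billing import budgets_v1beta1
#
#     client = budgets_v1beta1.BudgetServiceClient()
#     request = budgets_v1beta1.ListBudgetsRequest(
#         parent="billingAccounts/000000-000000-000000")
#     for budget in client.list_budgets(request=request):
#         print(budget.name, budget.display_name)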
|
the-stack_0_24525
|
# -*- coding: utf-8 -*-
from unittest.mock import MagicMock, Mock, patch
from chaoslib.types import Configuration
from chaosaws.eks.actions import create_cluster, delete_cluster, \
terminate_random_nodes
@patch('chaosaws.eks.actions.aws_client', autospec=True)
def test_create_cluster(aws_client):
client = MagicMock()
aws_client.return_value = client
cluster = "eks-cluster"
arn = "arn:aws:iam::123456:role/EKSrole"
vpc_config = {
"subnetIds": ["sub1"],
"securityGroupIds": ["sg1"]
}
create_cluster(
name=cluster, role_arn=arn, vpc_config=vpc_config)
client.create_cluster.assert_called_with(
name=cluster, roleArn=arn, version=None, resourcesVpcConfig=vpc_config)
@patch('chaosaws.eks.actions.aws_client', autospec=True)
def test_delete_cluster(aws_client):
client = MagicMock()
aws_client.return_value = client
cluster = "eks-cluster"
delete_cluster(name=cluster)
client.delete_cluster.assert_called_with(name=cluster)
@patch("chaosaws.eks.actions.aws_client", autospec=True)
@patch("chaosaws.eks.actions.terminate_instance", autospec=True)
def test_terminate_random_nodes_should_terminate_correct_count(
terminate_instance,
aws_client):
terminate_calls = 0
def terminate_side_effect(instance_id: str, configuration: Configuration):
nonlocal terminate_calls
terminate_calls = terminate_calls + 1
return [Mock()]
ec2_client = MagicMock()
ec2_client.describe_instances.side_effect = [
{"Reservations": [
{"Instances": [{"InstanceId": "foo"}]}
]},
{"Reservations": [
{"Instances": [{"State": {"Name": "terminated"}}]}
]}
]
aws_client.return_value = ec2_client
terminate_instance.side_effect = terminate_side_effect
terminate_random_nodes("a_cluster", "eu_west_00", 1, 30)
assert terminate_calls == 1
terminate_instance.assert_called_with(
instance_id="foo", configuration={"aws_region": "eu_west_00"})
|
the-stack_0_24528
|
# Copyright (C) 2002, Thomas Hamelryck ([email protected])
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Half-sphere exposure and coordination number calculation."""
from __future__ import print_function
import warnings
from math import pi
from Bio.PDB.AbstractPropertyMap import AbstractPropertyMap
from Bio.PDB.Polypeptide import CaPPBuilder, is_aa
from Bio.PDB.vectors import rotaxis
class _AbstractHSExposure(AbstractPropertyMap):
"""Abstract class to calculate Half-Sphere Exposure (HSE).
The HSE can be calculated based on the CA-CB vector, or the pseudo CB-CA
vector based on three consecutive CA atoms. This is done by two separate
subclasses.
"""
def __init__(self, model, radius, offset, hse_up_key, hse_down_key,
angle_key=None):
"""Initialize.
:param model: model
:type model: L{Model}
:param radius: HSE radius
:type radius: float
:param offset: number of flanking residues that are ignored in the
calculation of the number of neighbors
:type offset: int
:param hse_up_key: key used to store HSEup in the entity.xtra attribute
:type hse_up_key: string
:param hse_down_key: key used to store HSEdown in the entity.xtra attribute
:type hse_down_key: string
:param angle_key: key used to store the angle between CA-CB and CA-pCB in
the entity.xtra attribute
:type angle_key: string
"""
assert(offset >= 0)
# For PyMOL visualization
self.ca_cb_list = []
ppb = CaPPBuilder()
ppl = ppb.build_peptides(model)
hse_map = {}
hse_list = []
hse_keys = []
for pp1 in ppl:
for i in range(0, len(pp1)):
if i == 0:
r1 = None
else:
r1 = pp1[i - 1]
r2 = pp1[i]
if i == len(pp1) - 1:
r3 = None
else:
r3 = pp1[i + 1]
# This method is provided by the subclasses to calculate HSE
result = self._get_cb(r1, r2, r3)
if result is None:
# Missing atoms, or i==0, or i==len(pp1)-1
continue
pcb, angle = result
hse_u = 0
hse_d = 0
ca2 = r2["CA"].get_vector()
for pp2 in ppl:
for j in range(0, len(pp2)):
if pp1 is pp2 and abs(i - j) <= offset:
# neighboring residues in the chain are ignored
continue
ro = pp2[j]
if not is_aa(ro) or not ro.has_id("CA"):
continue
cao = ro["CA"].get_vector()
d = (cao - ca2)
if d.norm() < radius:
if d.angle(pcb) < (pi / 2):
hse_u += 1
else:
hse_d += 1
res_id = r2.get_id()
chain_id = r2.get_parent().get_id()
# Fill the 3 data structures
hse_map[(chain_id, res_id)] = (hse_u, hse_d, angle)
hse_list.append((r2, (hse_u, hse_d, angle)))
hse_keys.append((chain_id, res_id))
# Add to xtra
r2.xtra[hse_up_key] = hse_u
r2.xtra[hse_down_key] = hse_d
if angle_key:
r2.xtra[angle_key] = angle
AbstractPropertyMap.__init__(self, hse_map, hse_keys, hse_list)
def _get_cb(self, r1, r2, r3):
return NotImplemented
def _get_gly_cb_vector(self, residue):
"""Return a pseudo CB vector for a Gly residue (PRIVATE).
The pseudoCB vector is centered at the origin.
CB coord=N coord rotated over -120 degrees
along the CA-C axis.
"""
try:
n_v = residue["N"].get_vector()
c_v = residue["C"].get_vector()
ca_v = residue["CA"].get_vector()
except Exception:
return None
# center at origin
n_v = n_v - ca_v
c_v = c_v - ca_v
# rotation around c-ca over -120 deg
rot = rotaxis(-pi * 120.0 / 180.0, c_v)
cb_at_origin_v = n_v.left_multiply(rot)
# move back to ca position
cb_v = cb_at_origin_v + ca_v
# This is for PyMol visualization
self.ca_cb_list.append((ca_v, cb_v))
return cb_at_origin_v
class HSExposureCA(_AbstractHSExposure):
"""Class to calculate HSE based on the approximate CA-CB vectors.
Uses three consecutive CA positions.
"""
def __init__(self, model, radius=12, offset=0):
"""Initialse class.
:param model: the model that contains the residues
:type model: L{Model}
:param radius: radius of the sphere (centred at the CA atom)
:type radius: float
:param offset: number of flanking residues that are ignored
in the calculation of the number of neighbors
:type offset: int
"""
_AbstractHSExposure.__init__(self, model, radius, offset,
"EXP_HSE_A_U", "EXP_HSE_A_D",
"EXP_CB_PCB_ANGLE")
def _get_cb(self, r1, r2, r3):
"""Calculate approx CA-CB direction (PRIVATE).
Calculate the approximate CA-CB direction for a central
CA atom based on the two flanking CA positions, and the angle
with the real CA-CB vector.
The CA-CB vector is centered at the origin.
:param r1, r2, r3: three consecutive residues
:type r1, r2, r3: L{Residue}
"""
if r1 is None or r3 is None:
return None
try:
ca1 = r1["CA"].get_vector()
ca2 = r2["CA"].get_vector()
ca3 = r3["CA"].get_vector()
except Exception:
return None
# center
d1 = ca2 - ca1
d3 = ca2 - ca3
d1.normalize()
d3.normalize()
# bisection
b = (d1 + d3)
b.normalize()
# Add to ca_cb_list for drawing
self.ca_cb_list.append((ca2, b + ca2))
if r2.has_id("CB"):
cb = r2["CB"].get_vector()
cb_ca = cb - ca2
cb_ca.normalize()
angle = cb_ca.angle(b)
elif r2.get_resname() == "GLY":
cb_ca = self._get_gly_cb_vector(r2)
if cb_ca is None:
angle = None
else:
angle = cb_ca.angle(b)
else:
angle = None
# vector b is centered at the origin!
return b, angle
def pcb_vectors_pymol(self, filename="hs_exp.py"):
"""Write PyMol script for visualization.
Write a PyMol script that visualizes the pseudo CB-CA directions
at the CA coordinates.
:param filename: the name of the pymol script file
:type filename: string
"""
if not self.ca_cb_list:
warnings.warn("Nothing to draw.", RuntimeWarning)
return
with open(filename, "w") as fp:
fp.write("from pymol.cgo import *\n")
fp.write("from pymol import cmd\n")
fp.write("obj=[\n")
fp.write("BEGIN, LINES,\n")
fp.write("COLOR, %.2f, %.2f, %.2f,\n" % (1.0, 1.0, 1.0))
for (ca, cb) in self.ca_cb_list:
x, y, z = ca.get_array()
fp.write("VERTEX, %.2f, %.2f, %.2f,\n" % (x, y, z))
x, y, z = cb.get_array()
fp.write("VERTEX, %.2f, %.2f, %.2f,\n" % (x, y, z))
fp.write("END]\n")
fp.write("cmd.load_cgo(obj, 'HS')\n")
class HSExposureCB(_AbstractHSExposure):
"""Class to calculate HSE based on the real CA-CB vectors."""
def __init__(self, model, radius=12, offset=0):
"""Initialize class.
:param model: the model that contains the residues
:type model: L{Model}
:param radius: radius of the sphere (centred at the CA atom)
:type radius: float
:param offset: number of flanking residues that are ignored
in the calculation of the number of neighbors
:type offset: int
"""
_AbstractHSExposure.__init__(self, model, radius, offset,
"EXP_HSE_B_U", "EXP_HSE_B_D")
def _get_cb(self, r1, r2, r3):
"""Calculate CB-CA vector (PRIVATE).
:param r1, r2, r3: three consecutive residues (only r2 is used)
:type r1, r2, r3: L{Residue}
"""
if r2.get_resname() == "GLY":
return self._get_gly_cb_vector(r2), 0.0
else:
if r2.has_id("CB") and r2.has_id("CA"):
vcb = r2["CB"].get_vector()
vca = r2["CA"].get_vector()
return (vcb - vca), 0.0
return None
class ExposureCN(AbstractPropertyMap):
"""Residue exposure as number of CA atoms around its CA atom."""
def __init__(self, model, radius=12.0, offset=0):
"""Initialize.
A residue's exposure is defined as the number of CA atoms around
        that residue's CA atom. A dictionary is returned that uses a L{Residue}
object as key, and the residue exposure as corresponding value.
:param model: the model that contains the residues
:type model: L{Model}
:param radius: radius of the sphere (centred at the CA atom)
:type radius: float
:param offset: number of flanking residues that are ignored in
the calculation of the number of neighbors
:type offset: int
"""
assert(offset >= 0)
ppb = CaPPBuilder()
ppl = ppb.build_peptides(model)
fs_map = {}
fs_list = []
fs_keys = []
for pp1 in ppl:
for i in range(0, len(pp1)):
fs = 0
r1 = pp1[i]
if not is_aa(r1) or not r1.has_id("CA"):
continue
ca1 = r1["CA"]
for pp2 in ppl:
for j in range(0, len(pp2)):
if pp1 is pp2 and abs(i - j) <= offset:
continue
r2 = pp2[j]
if not is_aa(r2) or not r2.has_id("CA"):
continue
ca2 = r2["CA"]
d = (ca2 - ca1)
if d < radius:
fs += 1
res_id = r1.get_id()
chain_id = r1.get_parent().get_id()
# Fill the 3 data structures
fs_map[(chain_id, res_id)] = fs
fs_list.append((r1, fs))
fs_keys.append((chain_id, res_id))
# Add to xtra
r1.xtra["EXP_CN"] = fs
AbstractPropertyMap.__init__(self, fs_map, fs_keys, fs_list)
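# A minimal usage sketch for the classes above (assumes Bio.PDB is available
# and a local file named "1a8o.pdb" exists; both names are placeholders):
#
#     from Bio.PDB import PDBParser
#
#     model = PDBParser(QUIET=True).get_structure("X", "1a8o.pdb")[0]
#     HSExposureCB(model, radius=12.0)  # fills EXP_HSE_B_U / EXP_HSE_B_D
#     ExposureCN(model, radius=12.0)    # fills EXP_CN
#     for chain in model:
#         for residue in chain:
#             if "EXP_HSE_B_U" in residue.xtra:
#                 print(residue.get_id(), residue.xtra["EXP_HSE_B_U"],
#                       residue.xtra["EXP_HSE_B_D"], residue.xtra.get("EXP_CN"))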
|
the-stack_0_24530
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubscriptionOperations:
"""SubscriptionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.servicebus.management._generated.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
topic_name: str,
subscription_name: str,
enrich: Optional[bool] = False,
api_version: Optional[str] = "2017_04",
**kwargs
) -> object:
"""Get the details about the subscription of a topic.
Get Subscription.
:param topic_name: name of the topic.
:type topic_name: str
:param subscription_name: name of the subscription.
:type subscription_name: str
:param enrich: A query parameter that sets enrich to true or false.
:type enrich: bool
:param api_version: Api Version.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: object, or the result of cls(response)
:rtype: object
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[object]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if enrich is not None:
query_parameters['enrich'] = self._serialize.query("enrich", enrich, 'bool')
if api_version is not None:
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/xml'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ServiceBusManagementError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{topicName}/subscriptions/{subscriptionName}'} # type: ignore
async def put(
self,
topic_name: str,
subscription_name: str,
request_body: object,
api_version: Optional[str] = "2017_04",
if_match: Optional[str] = None,
**kwargs
) -> object:
"""Create or update a subscription.
:param topic_name: name of the topic.
:type topic_name: str
:param subscription_name: name of the subscription.
:type subscription_name: str
:param request_body: Parameters required to make or edit a subscription.
:type request_body: object
:param api_version: Api Version.
:type api_version: str
:param if_match: Match condition for an entity to be updated. If specified and a matching
entity is not found, an error will be raised. To force an unconditional update, set to the
wildcard character (*). If not specified, an insert will be performed when no existing entity
is found to update and a replace will be performed if an existing entity is found.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: object, or the result of cls(response)
:rtype: object
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[object]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/atom+xml")
# Construct URL
url = self.put.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if api_version is not None:
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/xml'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request_body, 'object', is_xml=True)
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ServiceBusManagementError, response)
raise HttpResponseError(response=response, model=error)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('object', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
put.metadata = {'url': '/{topicName}/subscriptions/{subscriptionName}'} # type: ignore
async def delete(
self,
topic_name: str,
subscription_name: str,
api_version: Optional[str] = "2017_04",
**kwargs
) -> object:
"""Delete the subscription with the given topicName and subscriptionName.
Delete Subscription.
:param topic_name: name of the topic.
:type topic_name: str
:param subscription_name: name of the subscription.
:type subscription_name: str
:param api_version: Api Version.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: object, or the result of cls(response)
:rtype: object
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[object]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if api_version is not None:
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/xml'
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ServiceBusManagementError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete.metadata = {'url': '/{topicName}/subscriptions/{subscriptionName}'} # type: ignore
|
the-stack_0_24531
|
# Program to demonstrate how iterators work in Python.
class MyIterator:
    def __iter__(self):
        self.num = 0
        return self
    def __next__(self):
        self.num += 1
        if self.num <= 10:
            return self.num
        else:
            raise StopIteration
if __name__ == '__main__':
    ob = MyIterator()
    for i in ob:
        print(i)
|
the-stack_0_24532
|
from steel_beam_analysis.beam import *
from steel_beam_analysis import units
from steel_beam_analysis.node import *
from steel_beam_analysis.load import *
from steel_beam_analysis.steelBeam import *
node0 = Node(0*units.ft)
node1 = Node(4.5*units.ft, condition = 'pin')
node2 = Node((4.5+26)*units.ft, condition = 'pin')
node3 = Node((4.5+26+4)*units.ft)
nodes = [node0, node1, node2, node3]
iLoc = 0 * units.ft
jLoc = (4.5+26+4) * units.ft
w_D = 20 * units.psf
w_Lr = 20 * units.psf
w_S = 325 * units.psf
trib = 11 * units.ft
distLoad1 = AreaLoad(iLoc = iLoc, jLoc = jLoc, load = w_D, iTrib = trib, jTrib = trib, type = 'D', desc = 'roof dead')
distLoad2 = AreaLoad(iLoc = iLoc, jLoc = jLoc, load = w_Lr, iTrib = trib, jTrib = trib, type = 'Lr', desc = 'roof live')
distLoad3 = AreaLoad(iLoc = iLoc, jLoc = jLoc, load = w_S, iTrib = trib, jTrib = trib, type = 'S', desc = 'roof snow')
distLoads = [distLoad1, distLoad2, distLoad3]
beam = SteelBeam(nodes, rawDistLoads = distLoads, considerSelfWeight = True, depthClass = 10,
weightClass = 112, eleSpacing = 1 * units.inch, outputPDF = True,
name = 'Typical Roof Beam', outputPath = 'output_reports/test5',
project = 'Ski House', level = 'Roof', firm = 'xxx',
engineer = 'xxx', checker = 'None')
print(beam)
|
the-stack_0_24535
|
"""Setup for the pdpipe package."""
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import versioneer
README_RST = ''
with open('README.rst', encoding="utf-8") as f:
README_RST = f.read()
INSTALL_REQUIRES = [
'pandas>=0.18.0', # obviously
'sortedcontainers', # the Bin stage needs a sorted list
'tqdm', # for some pipeline application progress bars
'strct', # ColReorder uses strct.dicts.reverse_dict_partial
'skutil>=0.0.15', # Scale uses skutil.preprocessing.scaler_by_param
'birch>=0.0.34', # for reading config from files / env vars
]
TEST_REQUIRES = [
# testing and coverage
'pytest', 'coverage', 'pytest-cov', 'pytest-ordering',
    # non-testing packages required by tests, not by the package
'scikit-learn', 'pdutil', 'nltk', 'xdg',
# dev scripts
'rich',
# to be able to run `python setup.py checkdocs`
'collective.checkdocs', 'pygments',
]
setup(
name='pdpipe',
description="Easy pipelines for pandas.",
long_description=README_RST,
long_description_content_type='text/x-rst',
author="Shay Palachy",
author_email="[email protected]",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
url='https://pdpipe.readthedocs.io/en/latest/',
license="MIT",
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
extras_require={
'sklearn': ['scikit-learn'],
'nltk': ['nltk'],
'test': TEST_REQUIRES
},
platforms=['any'],
keywords='pandas dataframe pipeline data',
classifiers=[
# Trove classifiers
# (https://pypi.python.org/pypi?%3Aaction=list_classifiers)
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
],
)
|
the-stack_0_24536
|
# Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from enum import Flag
from logging import Logger as builtinLogger, FileHandler, Formatter
from typing import Union
class OutputType(Flag):
NONE = 0
CONSOLE = 1
FILE = 2
CUSTOM = 4
class LogConfig:
default_fmt = "[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s"
def __init__(self,
name: str,
level: str,
file_path: str,
fmt: str,
output_type: 'OutputType'):
self.name: str = name
self.level: str = level
self.file_path: str = file_path
self.fmt: str = fmt
self.output_type: 'OutputType' = output_type
@classmethod
def from_dict(cls, src_config: dict):
config: dict = src_config.get('log')
if config is None:
return
name: str = config.get('name', "Logger")
level: str = config.get('level', 'info').upper()
file_path: str = config.get('filePath', "")
fmt: str = config.get('format', cls.default_fmt)
output_type: 'OutputType' = OutputType.NONE
output_types: str = config.get('outputType')
if output_types:
outputs = output_types.split('|')
for output in outputs:
output_type |= OutputType[output.upper()]
return LogConfig(name, level, file_path, fmt, output_type)
class LoggerUtil(object):
_formatter: 'Formatter' = None
@classmethod
def apply_config(cls, logger: 'builtinLogger', config: dict, handler=None):
log_config: 'LogConfig' = LogConfig.from_dict(config)
logger.handlers.clear()
logger.name = log_config.name
logger.setLevel(log_config.level)
cls._apply_config(logger, log_config, handler)
@classmethod
def print_config(cls, logger: 'builtinLogger', config: dict):
logger.info(f'====================LOG CONFIG START====================')
cls._view_config_info(logger, config, "CONFIG")
logger.info(f'====================LOG CONFIG END======================')
@classmethod
def _view_config_info(cls, logger: 'builtinLogger', conf: dict, prefix: str):
for key, value in conf.items():
if not isinstance(value, dict):
tmp_prefix = '{}.{}'.format(prefix, key)
logger.info(f'[{tmp_prefix}] > {value}')
else:
tmp_prefix = '{}.{}'.format(prefix, key)
cls._view_config_info(logger, value, tmp_prefix)
@classmethod
def _apply_config(cls, logger: 'builtinLogger', log_config: 'LogConfig', custom_handler=None):
cls._formatter = Formatter(log_config.fmt)
if cls._is_flag_on(log_config.output_type, OutputType.CONSOLE):
handler = logging.StreamHandler()
handler.setFormatter(cls._formatter)
logger.addHandler(handler)
if cls._is_flag_on(log_config.output_type, OutputType.FILE):
cls._ensure_dir(log_config.file_path)
handler = FileHandler(log_config.file_path, 'a')
handler.setFormatter(cls._formatter)
logger.addHandler(handler)
if cls._is_flag_on(log_config.output_type, OutputType.CUSTOM):
if custom_handler:
handler = custom_handler
handler.setFormatter(cls._formatter)
logger.addHandler(handler)
@classmethod
def _is_flag_on(cls, src_flag: 'Flag', dest_flag: 'Flag') -> bool:
return src_flag & dest_flag == dest_flag
@classmethod
def _ensure_dir(cls, file_path: str):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
@classmethod
def make_log_msg(cls, tag: str, msg: Union[str, BaseException]):
return f'[{tag}] {msg}'
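# A minimal usage sketch for LoggerUtil (the file path is a hypothetical
# placeholder); it shows the nested dict layout LogConfig.from_dict() expects
# and how apply_config() wires it onto a standard library logger.
#
#     config = {
#         "log": {
#             "name": "icon.service",
#             "level": "debug",
#             "filePath": "./log/icon.log",
#             "outputType": "console|file",
#         }
#     }
#     logger = logging.getLogger("icon.service")
#     LoggerUtil.apply_config(logger, config)
#     LoggerUtil.print_config(logger, config)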
|
the-stack_0_24537
|
import json, time
# Translate avro messages to NGSI
def to_ngsi(ms):
#
msg_time = time.ctime(ms[0].value()["header"]["time"])
id_string = "aeroloop.wizzit.venac.9:"+str(ms[0].value()["header"]["time"])
entity = {
"id": id_string,
"type":"UAV_simple",
}
value = dict()
for msg in ms:
if msg.topic() == "aeroloop_Location":
m = msg.value()
value['location'] = {
"value": {"type": "Point",
"coordinates":[m['latitude'],m['longitude']]
},
"type": "geo:json"
}
value['elevation'] = {
"value": m['height'],
"type": "meter"
}
entity.update(value)
else:
#if msg.topic() == "aeroloop_CpuUsage":
m = msg.value()
value['CPUUsage'] = {
"value": m['value'],
"type": "percent"
}
entity.update(value)
return entity
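# A minimal sketch of the message shape to_ngsi() expects (all field values
# below are hypothetical): each element behaves like a Kafka consumer record
# exposing .topic() and .value(), and the first element must carry the avro
# "header" block used for the entity id.
#
#     class _FakeMsg:
#         def __init__(self, topic, value):
#             self._topic, self._value = topic, value
#         def topic(self):
#             return self._topic
#         def value(self):
#             return self._value
#
#     ms = [_FakeMsg("aeroloop_Location",
#                    {"header": {"time": 1600000000},
#                     "latitude": 45.0, "longitude": 20.0, "height": 120.0})]
#     print(to_ngsi(ms))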
|
the-stack_0_24538
|
from setuptools import setup
from setuptools import find_packages
install_requires = [
'six==1.11.0',
'pyyaml==3.12',
'ipaddress==1.0.19',
'tinydb==3.7.0',
'beautifultable==0.3.0',
'logger==1.4',
'flask==0.12.2',
'jsonschema==2.6.0',
'smallcli==0.1',
]
test_requires = []
setup(
name='KNet',
version='1.0.14',
description="Virtual Network Topology Builder",
author="KNet Solutions",
author_email="[email protected]",
url="https://github.com/knetsolutions/KNet",
packages=find_packages(exclude=['test']),
include_package_data=True,
install_requires=install_requires,
license="Apache",
keywords='sdn',
python_requires='>=2.6, <3',
entry_points={
'console_scripts': ['knet-cli=KNet.app:knetcli'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: User Interfaces',
'Programming Language :: Python :: 2.7'
]
)
|
the-stack_0_24539
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility functions used across Superset"""
import collections
import decimal
import errno
import functools
import json
import logging
import os
import platform
import re
import signal
import smtplib
import tempfile
import threading
import traceback
import uuid
import zlib
from datetime import date, datetime, time, timedelta
from distutils.util import strtobool
from email.mime.application import MIMEApplication
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from enum import Enum, IntEnum
from timeit import default_timer
from types import TracebackType
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from urllib.parse import unquote_plus
import bleach
import markdown as md
import numpy as np
import pandas as pd
import sqlalchemy as sa
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.backends.openssl.x509 import _Certificate
from flask import current_app, flash, g, Markup, render_template, request
from flask_appbuilder import SQLA
from flask_appbuilder.security.sqla.models import Role, User
from flask_babel import gettext as __
from flask_babel.speaklater import LazyString
from pandas.api.types import infer_dtype
from pandas.core.dtypes.common import is_numeric_dtype
from sqlalchemy import event, exc, select, Text
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.engine import Connection, Engine
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.sql.type_api import Variant
from sqlalchemy.types import TEXT, TypeDecorator, TypeEngine
from typing_extensions import TypedDict
import _thread # pylint: disable=C0411
from superset.constants import (
EXTRA_FORM_DATA_APPEND_KEYS,
EXTRA_FORM_DATA_OVERRIDE_EXTRA_KEYS,
EXTRA_FORM_DATA_OVERRIDE_REGULAR_MAPPINGS,
)
from superset.errors import ErrorLevel, SupersetErrorType
from superset.exceptions import (
CertificateException,
SupersetException,
SupersetTimeoutException,
)
from superset.typing import FlaskResponse, FormData, Metric
from superset.utils.dates import datetime_to_epoch, EPOCH
from superset.utils.hashing import md5_sha_from_dict, md5_sha_from_str
try:
from pydruid.utils.having import Having
except ImportError:
pass
if TYPE_CHECKING:
from superset.connectors.base.models import BaseColumn, BaseDatasource
from superset.models.core import Database
logging.getLogger("MARKDOWN").setLevel(logging.INFO)
logger = logging.getLogger(__name__)
DTTM_ALIAS = "__timestamp"
JS_MAX_INTEGER = 9007199254740991  # Largest int JavaScript can handle: 2^53 - 1
InputType = TypeVar("InputType")
class LenientEnum(Enum):
"""Enums with a `get` method that convert a enum value to `Enum` if it is a
valid value."""
@classmethod
def get(cls, value: Any) -> Any:
try:
return super().__new__(cls, value)
except ValueError:
return None
class AdhocMetricExpressionType(str, Enum):
SIMPLE = "SIMPLE"
SQL = "SQL"
class AnnotationType(str, Enum):
FORMULA = "FORMULA"
INTERVAL = "INTERVAL"
EVENT = "EVENT"
TIME_SERIES = "TIME_SERIES"
class GenericDataType(IntEnum):
"""
Generic database column type that fits both frontend and backend.
"""
NUMERIC = 0
STRING = 1
TEMPORAL = 2
BOOLEAN = 3
# ARRAY = 4 # Mapping all the complex data types to STRING for now
# JSON = 5 # and leaving these as a reminder.
# MAP = 6
# ROW = 7
class ChartDataResultFormat(str, Enum):
"""
Chart data response format
"""
CSV = "csv"
JSON = "json"
class ChartDataResultType(str, Enum):
"""
Chart data response type
"""
COLUMNS = "columns"
FULL = "full"
QUERY = "query"
RESULTS = "results"
SAMPLES = "samples"
TIMEGRAINS = "timegrains"
class DatasourceDict(TypedDict):
type: str
id: int
class ExtraFiltersTimeColumnType(str, Enum):
GRANULARITY = "__granularity"
TIME_COL = "__time_col"
TIME_GRAIN = "__time_grain"
TIME_ORIGIN = "__time_origin"
TIME_RANGE = "__time_range"
class FilterOperator(str, Enum):
"""
Operators used filter controls
"""
EQUALS = "=="
NOT_EQUALS = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUALS = ">="
LESS_THAN_OR_EQUALS = "<="
LIKE = "LIKE"
IS_NULL = "IS NULL"
IS_NOT_NULL = "IS NOT NULL"
IN = "IN" # pylint: disable=invalid-name
NOT_IN = "NOT IN"
REGEX = "REGEX"
class PostProcessingBoxplotWhiskerType(str, Enum):
"""
    Whisker calculation method for boxplot post-processing
"""
TUKEY = "tukey"
MINMAX = "min/max"
PERCENTILE = "percentile"
class PostProcessingContributionOrientation(str, Enum):
"""
    Calculate cell contribution to row/column total
"""
ROW = "row"
COLUMN = "column"
class QueryMode(str, LenientEnum):
"""
Whether the query runs on aggregate or returns raw records
"""
RAW = "raw"
AGGREGATE = "aggregate"
class QuerySource(Enum):
"""
The source of a SQL query.
"""
CHART = 0
DASHBOARD = 1
SQL_LAB = 2
class QueryStatus(str, Enum): # pylint: disable=too-few-public-methods
"""Enum-type class for query statuses"""
STOPPED: str = "stopped"
FAILED: str = "failed"
PENDING: str = "pending"
RUNNING: str = "running"
SCHEDULED: str = "scheduled"
SUCCESS: str = "success"
TIMED_OUT: str = "timed_out"
class DashboardStatus(str, Enum):
"""Dashboard status used for frontend filters"""
PUBLISHED = "published"
DRAFT = "draft"
class ReservedUrlParameters(str, Enum):
"""
Reserved URL parameters that are used internally by Superset. These will not be
passed to chart queries, as they control the behavior of the UI.
"""
STANDALONE = "standalone"
EDIT_MODE = "edit"
@staticmethod
def is_standalone_mode() -> Optional[bool]:
standalone_param = request.args.get(ReservedUrlParameters.STANDALONE.value)
standalone: Optional[bool] = (
standalone_param and standalone_param != "false" and standalone_param != "0"
)
return standalone
class RowLevelSecurityFilterType(str, Enum):
REGULAR = "Regular"
BASE = "Base"
class TimeRangeEndpoint(str, Enum):
"""
The time range endpoint types which represent inclusive, exclusive, or unknown.
Unknown represents endpoints which are ill-defined as though the interval may be
[start, end] the filter may behave like (start, end] due to mixed data types and
lexicographical ordering.
:see: https://github.com/apache/superset/issues/6360
"""
EXCLUSIVE = "exclusive"
INCLUSIVE = "inclusive"
UNKNOWN = "unknown"
class TemporalType(str, Enum):
"""
Supported temporal types
"""
DATE = "DATE"
DATETIME = "DATETIME"
SMALLDATETIME = "SMALLDATETIME"
TEXT = "TEXT"
TIME = "TIME"
TIMESTAMP = "TIMESTAMP"
class ColumnTypeSource(Enum):
GET_TABLE = 1
CURSOR_DESCRIPION = 2
class ColumnSpec(NamedTuple):
sqla_type: Union[TypeEngine, str]
generic_type: GenericDataType
is_dttm: bool
python_date_format: Optional[str] = None
try:
# Having might not have been imported.
class DimSelector(Having):
def __init__(self, **args: Any) -> None:
# Just a hack to prevent any exceptions
Having.__init__(self, type="equalTo", aggregation=None, value=None)
self.having = {
"having": {
"type": "dimSelector",
"dimension": args["dimension"],
"value": args["value"],
}
}
except NameError:
pass
def flasher(msg: str, severity: str = "message") -> None:
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == "danger":
logger.error(msg)
else:
logger.info(msg)
class _memoized:
"""Decorator that caches a function's return value each time it is called
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
Define ``watch`` as a tuple of attribute names if this Decorator
should account for instance variable changes.
"""
def __init__(
self, func: Callable[..., Any], watch: Optional[Tuple[str, ...]] = None
) -> None:
self.func = func
self.cache: Dict[Any, Any] = {}
self.is_method = False
self.watch = watch or ()
def __call__(self, *args: Any, **kwargs: Any) -> Any:
key = [args, frozenset(kwargs.items())]
if self.is_method:
key.append(tuple([getattr(args[0], v, None) for v in self.watch]))
key = tuple(key) # type: ignore
if key in self.cache:
return self.cache[key]
try:
value = self.func(*args, **kwargs)
self.cache[key] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args, **kwargs)
def __repr__(self) -> str:
"""Return the function's docstring."""
return self.func.__doc__ or ""
def __get__(
self, obj: Any, objtype: Type[Any]
) -> functools.partial: # type: ignore
if not self.is_method:
self.is_method = True
# Support instance methods.
return functools.partial(self.__call__, obj)
def memoized(
func: Optional[Callable[..., Any]] = None, watch: Optional[Tuple[str, ...]] = None
) -> Callable[..., Any]:
if func:
return _memoized(func)
def wrapper(f: Callable[..., Any]) -> Callable[..., Any]:
return _memoized(f, watch)
return wrapper
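# A minimal usage sketch for the decorator above (illustrative only; the class
# and the expensive call are hypothetical):
#
#     class Datasource:
#         def __init__(self, version: int) -> None:
#             self.version = version
#         @memoized(watch=("version",))
#         def schema(self) -> Dict[str, Any]:
#             return expensive_introspection(self)  # hypothetical helper
#
# The cached value is reused until ``self.version`` changes, because watched
# attributes are folded into the cache key.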
def parse_js_uri_path_item(
item: Optional[str], unquote: bool = True, eval_undefined: bool = False
) -> Optional[str]:
"""Parse a uri path item made with js.
:param item: a uri path component
:param unquote: Perform unquoting of string using urllib.parse.unquote_plus()
:param eval_undefined: When set to True and item is either 'null' or 'undefined',
assume item is undefined and return None.
:return: Either None, the original item or unquoted item
"""
item = None if eval_undefined and item in ("null", "undefined") else item
return unquote_plus(item) if unquote and item else item
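# Examples (illustrative) of the helper above:
#
#     parse_js_uri_path_item("foo%20bar")                       # -> "foo bar"
#     parse_js_uri_path_item("undefined", eval_undefined=True)  # -> None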
def cast_to_num(value: Optional[Union[float, int, str]]) -> Optional[Union[float, int]]:
"""Casts a value to an int/float
>>> cast_to_num('1 ')
1.0
>>> cast_to_num(' 2')
2.0
>>> cast_to_num('5')
5
>>> cast_to_num('5.2')
5.2
>>> cast_to_num(10)
10
>>> cast_to_num(10.1)
10.1
>>> cast_to_num(None) is None
True
>>> cast_to_num('this is not a string') is None
True
:param value: value to be converted to numeric representation
:returns: value cast to `int` if value is all digits, `float` if `value` is
decimal value and `None`` if it can't be converted
"""
if value is None:
return None
if isinstance(value, (int, float)):
return value
if value.isdigit():
return int(value)
try:
return float(value)
except ValueError:
return None
def list_minus(l: List[Any], minus: List[Any]) -> List[Any]:
"""Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
"""
return [o for o in l if o not in minus]
class DashboardEncoder(json.JSONEncoder):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.sort_keys = True
def default(self, o: Any) -> Union[Dict[Any, Any], str]:
if isinstance(o, uuid.UUID):
return str(o)
try:
vals = {k: v for k, v in o.__dict__.items() if k != "_sa_instance_state"}
return {"__{}__".format(o.__class__.__name__): vals}
except Exception: # pylint: disable=broad-except
if isinstance(o, datetime):
return {"__datetime__": o.replace(microsecond=0).isoformat()}
return json.JSONEncoder(sort_keys=True).default(o)
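# A minimal usage sketch (illustrative): the encoder lets json.dumps() handle
# UUIDs, datetimes and SQLAlchemy model instances, e.g.
#
#     json.dumps({"uid": uuid.uuid4(), "ts": datetime.utcnow()},
#                cls=DashboardEncoder)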
class JSONEncodedDict(TypeDecorator): # pylint: disable=abstract-method
"""Represents an immutable structure as a json-encoded string."""
impl = TEXT
def process_bind_param(
self, value: Optional[Dict[Any, Any]], dialect: str
) -> Optional[str]:
return json.dumps(value) if value is not None else None
def process_result_value(
self, value: Optional[str], dialect: str
) -> Optional[Dict[Any, Any]]:
return json.loads(value) if value is not None else None
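# Typical declarative use (a sketch; the column name is hypothetical):
#
#     params = sa.Column(JSONEncodedDict, nullable=True)
#
# Plain dicts assigned to the column are stored as JSON text and decoded back
# on load.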
def format_timedelta(time_delta: timedelta) -> str:
"""
Ensures negative time deltas are easily interpreted by humans
>>> td = timedelta(0) - timedelta(days=1, hours=5,minutes=6)
>>> str(td)
'-2 days, 18:54:00'
>>> format_timedelta(td)
'-1 day, 5:06:00'
"""
if time_delta < timedelta(0):
return "-" + str(abs(time_delta))
# Change this to format positive time deltas the way you want
return str(time_delta)
def base_json_conv( # pylint: disable=inconsistent-return-statements,too-many-return-statements
obj: Any,
) -> Any:
if isinstance(obj, memoryview):
obj = obj.tobytes()
if isinstance(obj, np.int64):
return int(obj)
if isinstance(obj, np.bool_):
return bool(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, set):
return list(obj)
if isinstance(obj, decimal.Decimal):
return float(obj)
if isinstance(obj, uuid.UUID):
return str(obj)
if isinstance(obj, timedelta):
return format_timedelta(obj)
if isinstance(obj, bytes):
try:
return obj.decode("utf-8")
except Exception: # pylint: disable=broad-except
return "[bytes]"
if isinstance(obj, LazyString):
return str(obj)
def json_iso_dttm_ser(obj: Any, pessimistic: bool = False) -> str:
"""
json serializer that deals with dates
>>> dttm = datetime(1970, 1, 1)
>>> json.dumps({'dttm': dttm}, default=json_iso_dttm_ser)
'{"dttm": "1970-01-01T00:00:00"}'
"""
val = base_json_conv(obj)
if val is not None:
return val
if isinstance(obj, (datetime, date, time, pd.Timestamp)):
obj = obj.isoformat()
else:
if pessimistic:
return "Unserializable [{}]".format(type(obj))
raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
return obj
def pessimistic_json_iso_dttm_ser(obj: Any) -> str:
"""Proxy to call json_iso_dttm_ser in a pessimistic way
If one of object is not serializable to json, it will still succeed"""
return json_iso_dttm_ser(obj, pessimistic=True)
def json_int_dttm_ser(obj: Any) -> float:
"""json serializer that deals with dates"""
val = base_json_conv(obj)
if val is not None:
return val
if isinstance(obj, (datetime, pd.Timestamp)):
obj = datetime_to_epoch(obj)
elif isinstance(obj, date):
obj = (obj - EPOCH.date()).total_seconds() * 1000
else:
raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
return obj
def json_dumps_w_dates(payload: Dict[Any, Any]) -> str:
return json.dumps(payload, default=json_int_dttm_ser)
def error_msg_from_exception(ex: Exception) -> str:
"""Translate exception into error message
    Databases have different ways to handle exceptions. This function attempts
    to make sense of the exception object and construct a human-readable
sentence.
TODO(bkyryliuk): parse the Presto error message from the connection
created via create_engine.
engine = create_engine('presto://localhost:3506/silver') -
gives an e.message as the str(dict)
presto.connect('localhost', port=3506, catalog='silver') - as a dict.
The latter version is parsed correctly by this function.
"""
msg = ""
if hasattr(ex, "message"):
if isinstance(ex.message, dict): # type: ignore
msg = ex.message.get("message") # type: ignore
elif ex.message: # type: ignore
msg = ex.message # type: ignore
return msg or str(ex)
def markdown(raw: str, markup_wrap: Optional[bool] = False) -> str:
safe_markdown_tags = [
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"b",
"i",
"strong",
"em",
"tt",
"p",
"br",
"span",
"div",
"blockquote",
"code",
"hr",
"ul",
"ol",
"li",
"dd",
"dt",
"img",
"a",
]
safe_markdown_attrs = {
"img": ["src", "alt", "title"],
"a": ["href", "alt", "title"],
}
safe = md.markdown(
raw or "",
extensions=[
"markdown.extensions.tables",
"markdown.extensions.fenced_code",
"markdown.extensions.codehilite",
],
)
safe = bleach.clean(safe, safe_markdown_tags, safe_markdown_attrs)
if markup_wrap:
safe = Markup(safe)
return safe
def readfile(file_path: str) -> Optional[str]:
with open(file_path) as f:
content = f.read()
return content
def generic_find_constraint_name(
table: str, columns: Set[str], referenced: str, database: SQLA
) -> Optional[str]:
"""Utility to find a constraint name in alembic migrations"""
tbl = sa.Table(
table, database.metadata, autoload=True, autoload_with=database.engine
)
for fk in tbl.foreign_key_constraints:
if fk.referred_table.name == referenced and set(fk.column_keys) == columns:
return fk.name
return None
def generic_find_fk_constraint_name( # pylint: disable=invalid-name
table: str, columns: Set[str], referenced: str, insp: Inspector
) -> Optional[str]:
"""Utility to find a foreign-key constraint name in alembic migrations"""
for fk in insp.get_foreign_keys(table):
if (
fk["referred_table"] == referenced
and set(fk["referred_columns"]) == columns
):
return fk["name"]
return None
def generic_find_fk_constraint_names( # pylint: disable=invalid-name
table: str, columns: Set[str], referenced: str, insp: Inspector
) -> Set[str]:
"""Utility to find foreign-key constraint names in alembic migrations"""
names = set()
for fk in insp.get_foreign_keys(table):
if (
fk["referred_table"] == referenced
and set(fk["referred_columns"]) == columns
):
names.add(fk["name"])
return names
def generic_find_uq_constraint_name(
table: str, columns: Set[str], insp: Inspector
) -> Optional[str]:
"""Utility to find a unique constraint name in alembic migrations"""
for uq in insp.get_unique_constraints(table):
if columns == set(uq["column_names"]):
return uq["name"]
return None
def get_datasource_full_name(
database_name: str, datasource_name: str, schema: Optional[str] = None
) -> str:
if not schema:
return "[{}].[{}]".format(database_name, datasource_name)
return "[{}].[{}].[{}]".format(database_name, schema, datasource_name)
def validate_json(obj: Union[bytes, bytearray, str]) -> None:
if obj:
try:
json.loads(obj)
except Exception as ex:
logger.error("JSON is not valid %s", str(ex))
raise SupersetException("JSON is not valid")
class SigalrmTimeout:
"""
To be used in a ``with`` block and timeout its content.
"""
def __init__(self, seconds: int = 1, error_message: str = "Timeout") -> None:
self.seconds = seconds
self.error_message = error_message
def handle_timeout( # pylint: disable=unused-argument
self, signum: int, frame: Any
) -> None:
logger.error("Process timed out")
raise SupersetTimeoutException(
error_type=SupersetErrorType.BACKEND_TIMEOUT_ERROR,
message=self.error_message,
level=ErrorLevel.ERROR,
extra={"timeout": self.seconds},
)
def __enter__(self) -> None:
try:
if threading.current_thread() == threading.main_thread():
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
except ValueError as ex:
logger.warning("timeout can't be used in the current context")
logger.exception(ex)
def __exit__( # pylint: disable=redefined-outer-name,unused-variable,redefined-builtin
self, type: Any, value: Any, traceback: TracebackType
) -> None:
try:
signal.alarm(0)
except ValueError as ex:
logger.warning("timeout can't be used in the current context")
logger.exception(ex)
class TimerTimeout:
def __init__(self, seconds: int = 1, error_message: str = "Timeout") -> None:
self.seconds = seconds
self.error_message = error_message
self.timer = threading.Timer(seconds, _thread.interrupt_main)
def __enter__(self) -> None:
self.timer.start()
def __exit__( # pylint: disable=redefined-outer-name,unused-variable,redefined-builtin
self, type: Any, value: Any, traceback: TracebackType
) -> None:
self.timer.cancel()
if type is KeyboardInterrupt: # raised by _thread.interrupt_main
raise SupersetTimeoutException(
error_type=SupersetErrorType.BACKEND_TIMEOUT_ERROR,
message=self.error_message,
level=ErrorLevel.ERROR,
extra={"timeout": self.seconds},
)
# Windows has no support for SIGALRM, so we use the timer based timeout
timeout: Union[Type[TimerTimeout], Type[SigalrmTimeout]] = (
TimerTimeout if platform.system() == "Windows" else SigalrmTimeout
)
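# Usage sketch (illustrative, not part of the original module): both timeout
# implementations are plain context managers, so callers wrap long-running work
# like this; `run_long_query` below is a hypothetical callable.
#
#     with timeout(seconds=30, error_message="query timed out"):
#         run_long_query()
#
# When the deadline passes, a SupersetTimeoutException carrying
# BACKEND_TIMEOUT_ERROR and the configured message is raised.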
def pessimistic_connection_handling(some_engine: Engine) -> None:
@event.listens_for(some_engine, "engine_connect")
def ping_connection( # pylint: disable=unused-variable
connection: Connection, branch: bool
) -> None:
if branch:
# 'branch' refers to a sub-connection of a connection,
# we don't want to bother pinging on these.
return
# turn off 'close with result'. This flag is only used with
# 'connectionless' execution, otherwise will be False in any case
save_should_close_with_result = connection.should_close_with_result
connection.should_close_with_result = False
try:
# run a SELECT 1. use a core select() so that
# the SELECT of a scalar value without a table is
# appropriately formatted for the backend
connection.scalar(select([1]))
except exc.DBAPIError as err:
# catch SQLAlchemy's DBAPIError, which is a wrapper
# for the DBAPI's exception. It includes a .connection_invalidated
# attribute which specifies if this connection is a 'disconnect'
# condition, which is based on inspection of the original exception
# by the dialect in use.
if err.connection_invalidated:
# run the same SELECT again - the connection will re-validate
# itself and establish a new connection. The disconnect detection
# here also causes the whole connection pool to be invalidated
# so that all stale connections are discarded.
connection.scalar(select([1]))
else:
raise
finally:
# restore 'close with result'
connection.should_close_with_result = save_should_close_with_result
def notify_user_about_perm_udate( # pylint: disable=too-many-arguments
granter: User,
user: User,
role: Role,
datasource: "BaseDatasource",
tpl_name: str,
config: Dict[str, Any],
) -> None:
msg = render_template(
tpl_name, granter=granter, user=user, role=role, datasource=datasource
)
logger.info(msg)
subject = __(
"[Superset] Access to the datasource %(name)s was granted",
name=datasource.full_name,
)
send_email_smtp(
user.email,
subject,
msg,
config,
bcc=granter.email,
dryrun=not config["EMAIL_NOTIFICATIONS"],
)
def send_email_smtp( # pylint: disable=invalid-name,too-many-arguments,too-many-locals
to: str,
subject: str,
html_content: str,
config: Dict[str, Any],
files: Optional[List[str]] = None,
data: Optional[Dict[str, str]] = None,
images: Optional[Dict[str, bytes]] = None,
dryrun: bool = False,
cc: Optional[str] = None,
bcc: Optional[str] = None,
mime_subtype: str = "mixed",
) -> None:
"""
Send an email with html content, eg:
send_email_smtp(
        '[email protected]', 'foo', '<b>Foo</b> bar', config, ['/dev/null'], dryrun=True)
"""
smtp_mail_from = config["SMTP_MAIL_FROM"]
smtp_mail_to = get_email_address_list(to)
msg = MIMEMultipart(mime_subtype)
msg["Subject"] = subject
msg["From"] = smtp_mail_from
msg["To"] = ", ".join(smtp_mail_to)
msg.preamble = "This is a multi-part message in MIME format."
recipients = smtp_mail_to
if cc:
smtp_mail_cc = get_email_address_list(cc)
msg["CC"] = ", ".join(smtp_mail_cc)
recipients = recipients + smtp_mail_cc
if bcc:
# don't add bcc in header
smtp_mail_bcc = get_email_address_list(bcc)
recipients = recipients + smtp_mail_bcc
msg["Date"] = formatdate(localtime=True)
mime_text = MIMEText(html_content, "html")
msg.attach(mime_text)
# Attach files by reading them from disk
for fname in files or []:
basename = os.path.basename(fname)
with open(fname, "rb") as f:
msg.attach(
MIMEApplication(
f.read(),
Content_Disposition="attachment; filename='%s'" % basename,
Name=basename,
)
)
# Attach any files passed directly
for name, body in (data or {}).items():
msg.attach(
MIMEApplication(
body, Content_Disposition="attachment; filename='%s'" % name, Name=name
)
)
# Attach any inline images, which may be required for display in
# HTML content (inline)
for msgid, imgdata in (images or {}).items():
image = MIMEImage(imgdata)
image.add_header("Content-ID", "<%s>" % msgid)
image.add_header("Content-Disposition", "inline")
msg.attach(image)
send_mime_email(smtp_mail_from, recipients, msg, config, dryrun=dryrun)
def send_mime_email(
e_from: str,
e_to: List[str],
mime_msg: MIMEMultipart,
config: Dict[str, Any],
dryrun: bool = False,
) -> None:
smtp_host = config["SMTP_HOST"]
smtp_port = config["SMTP_PORT"]
smtp_user = config["SMTP_USER"]
smtp_password = config["SMTP_PASSWORD"]
smtp_starttls = config["SMTP_STARTTLS"]
smtp_ssl = config["SMTP_SSL"]
if not dryrun:
smtp = (
smtplib.SMTP_SSL(smtp_host, smtp_port)
if smtp_ssl
else smtplib.SMTP(smtp_host, smtp_port)
)
if smtp_starttls:
smtp.starttls()
if smtp_user and smtp_password:
smtp.login(smtp_user, smtp_password)
logger.debug("Sent an email to %s", str(e_to))
smtp.sendmail(e_from, e_to, mime_msg.as_string())
smtp.quit()
else:
logger.info("Dryrun enabled, email notification content is below:")
logger.info(mime_msg.as_string())
def get_email_address_list(address_string: str) -> List[str]:
address_string_list: List[str] = []
if isinstance(address_string, str):
address_string_list = re.split(r",|\s|;", address_string)
return [x.strip() for x in address_string_list if x.strip()]
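# Illustrative sketch: addresses may be separated by commas, semicolons or
# whitespace; the addresses below are made up.
#
#     >>> get_email_address_list("[email protected], [email protected]; [email protected]")
#     ['[email protected]', '[email protected]', '[email protected]']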
def get_email_address_str(address_string: str) -> str:
address_list = get_email_address_list(address_string)
address_list_str = ", ".join(address_list)
return address_list_str
def choicify(values: Iterable[Any]) -> List[Tuple[Any, Any]]:
"""Takes an iterable and makes an iterable of tuples with it"""
return [(v, v) for v in values]
def zlib_compress(data: Union[bytes, str]) -> bytes:
"""
Compress things in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
"""
if isinstance(data, str):
return zlib.compress(bytes(data, "utf-8"))
return zlib.compress(data)
def zlib_decompress(blob: Union[bytes, str], decode: Optional[bool] = True) -> Union[bytes, str]:
"""
Decompress things to a string in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
>>> got_str = zlib_decompress(blob)
>>> got_str == json_str
True
"""
if isinstance(blob, bytes):
decompressed = zlib.decompress(blob)
else:
decompressed = zlib.decompress(bytes(blob, "utf-8"))
return decompressed.decode("utf-8") if decode else decompressed
def to_adhoc(
filt: Dict[str, Any], expression_type: str = "SIMPLE", clause: str = "where"
) -> Dict[str, Any]:
result = {
"clause": clause.upper(),
"expressionType": expression_type,
"isExtra": bool(filt.get("isExtra")),
}
if expression_type == "SIMPLE":
result.update(
{
"comparator": filt.get("val"),
"operator": filt.get("op"),
"subject": filt.get("col"),
}
)
elif expression_type == "SQL":
result.update({"sqlExpression": filt.get(clause)})
deterministic_name = md5_sha_from_dict(result)
result["filterOptionName"] = deterministic_name
return result
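# Illustrative sketch: converting a legacy "simple" filter into the adhoc shape
# produced above; the column and value are made up, and filterOptionName is a
# deterministic hash shown here as a placeholder.
#
#     >>> to_adhoc({"col": "gender", "op": "==", "val": "girl"})
#     {'clause': 'WHERE', 'expressionType': 'SIMPLE', 'isExtra': False,
#      'comparator': 'girl', 'operator': '==', 'subject': 'gender',
#      'filterOptionName': '<md5 of the payload>'}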
def merge_extra_form_data(form_data: Dict[str, Any]) -> None:
"""
Merge extra form data (appends and overrides) into the main payload
and add applied time extras to the payload.
"""
filter_keys = ["filters", "adhoc_filters"]
extra_form_data = form_data.pop("extra_form_data", {})
append_filters = extra_form_data.get("filters", None)
# merge append extras
for key in [key for key in EXTRA_FORM_DATA_APPEND_KEYS if key not in filter_keys]:
        extra_value = extra_form_data.get(key, {})
        form_value = form_data.get(key, {})
        form_value.update(extra_value)
        if form_value:
            form_data[key] = form_value
# map regular extras that apply to form data properties
for src_key, target_key in EXTRA_FORM_DATA_OVERRIDE_REGULAR_MAPPINGS.items():
value = extra_form_data.get(src_key)
if value is not None:
form_data[target_key] = value
# map extras that apply to form data extra properties
extras = form_data.get("extras", {})
for key in EXTRA_FORM_DATA_OVERRIDE_EXTRA_KEYS:
value = extra_form_data.get(key)
if value is not None:
extras[key] = value
if extras:
form_data["extras"] = extras
adhoc_filters = form_data.get("adhoc_filters", [])
form_data["adhoc_filters"] = adhoc_filters
append_adhoc_filters = extra_form_data.get("adhoc_filters", [])
adhoc_filters.extend({"isExtra": True, **fltr} for fltr in append_adhoc_filters)
if append_filters:
adhoc_filters.extend(
to_adhoc({"isExtra": True, **fltr}) for fltr in append_filters if fltr
)
def merge_extra_filters( # pylint: disable=too-many-branches
form_data: Dict[str, Any],
) -> None:
# extra_filters are temporary/contextual filters (using the legacy constructs)
# that are external to the slice definition. We use those for dynamic
# interactive filters like the ones emitted by the "Filter Box" visualization.
# Note extra_filters only support simple filters.
applied_time_extras: Dict[str, str] = {}
form_data["applied_time_extras"] = applied_time_extras
adhoc_filters = form_data.get("adhoc_filters", [])
form_data["adhoc_filters"] = adhoc_filters
merge_extra_form_data(form_data)
if "extra_filters" in form_data:
        # __from and __to are special extra_filters that target time
# boundaries. The rest of extra_filters are simple
# [column_name in list_of_values]. `__` prefix is there to avoid
# potential conflicts with column that would be named `from` or `to`
date_options = {
"__time_range": "time_range",
"__time_col": "granularity_sqla",
"__time_grain": "time_grain_sqla",
"__time_origin": "druid_time_origin",
"__granularity": "granularity",
}
# Grab list of existing filters 'keyed' on the column and operator
def get_filter_key(f: Dict[str, Any]) -> str:
if "expressionType" in f:
return "{}__{}".format(f["subject"], f["operator"])
return "{}__{}".format(f["col"], f["op"])
existing_filters = {}
for existing in adhoc_filters:
if (
existing["expressionType"] == "SIMPLE"
and existing.get("comparator") is not None
and existing.get("subject") is not None
):
existing_filters[get_filter_key(existing)] = existing["comparator"]
for filtr in form_data[ # pylint: disable=too-many-nested-blocks
"extra_filters"
]:
filtr["isExtra"] = True
# Pull out time filters/options and merge into form data
filter_column = filtr["col"]
time_extra = date_options.get(filter_column)
if time_extra:
time_extra_value = filtr.get("val")
if time_extra_value:
form_data[time_extra] = time_extra_value
applied_time_extras[filter_column] = time_extra_value
elif filtr["val"]:
# Merge column filters
filter_key = get_filter_key(filtr)
if filter_key in existing_filters:
# Check if the filter already exists
if isinstance(filtr["val"], list):
if isinstance(existing_filters[filter_key], list):
# Add filters for unequal lists
# order doesn't matter
if set(existing_filters[filter_key]) != set(filtr["val"]):
adhoc_filters.append(to_adhoc(filtr))
else:
adhoc_filters.append(to_adhoc(filtr))
else:
# Do not add filter if same value already exists
if filtr["val"] != existing_filters[filter_key]:
adhoc_filters.append(to_adhoc(filtr))
else:
# Filter not found, add it
adhoc_filters.append(to_adhoc(filtr))
# Remove extra filters from the form data since no longer needed
del form_data["extra_filters"]
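# Illustrative sketch of the merge above (values are made up): a time-related
# extra filter is hoisted onto form_data, while a plain column filter becomes
# an adhoc filter.
#
#     >>> form_data = {
#     ...     "extra_filters": [
#     ...         {"col": "__time_range", "op": "in", "val": "Last week"},
#     ...         {"col": "country", "op": "in", "val": ["FR"]},
#     ...     ]
#     ... }
#     >>> merge_extra_filters(form_data)
#     >>> form_data["time_range"], form_data["adhoc_filters"][0]["subject"]
#     ('Last week', 'country')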
def merge_request_params(form_data: Dict[str, Any], params: Dict[str, Any]) -> None:
"""
Merge request parameters to the key `url_params` in form_data. Only updates
    or appends parameters to `form_data` that are defined in `params`; pre-existing
parameters not defined in params are left unchanged.
:param form_data: object to be updated
:param params: request parameters received via query string
"""
url_params = form_data.get("url_params", {})
for key, value in params.items():
if key in ("form_data", "r"):
continue
url_params[key] = value
form_data["url_params"] = url_params
def user_label(user: User) -> Optional[str]:
"""Given a user ORM FAB object, returns a label"""
if user:
if user.first_name and user.last_name:
return user.first_name + " " + user.last_name
return user.username
return None
def get_or_create_db(
database_name: str, sqlalchemy_uri: str, always_create: Optional[bool] = True
) -> "Database":
from superset import db
from superset.models import core as models
database = (
db.session.query(models.Database).filter_by(database_name=database_name).first()
)
if not database and always_create:
logger.info("Creating database reference for %s", database_name)
database = models.Database(database_name=database_name)
db.session.add(database)
if database:
database.set_sqlalchemy_uri(sqlalchemy_uri)
db.session.commit()
return database
def get_example_database() -> "Database":
from superset import conf
db_uri = conf.get("SQLALCHEMY_EXAMPLES_URI") or conf.get("SQLALCHEMY_DATABASE_URI")
return get_or_create_db("examples", db_uri)
def get_main_database() -> "Database":
from superset import conf
db_uri = conf.get("SQLALCHEMY_DATABASE_URI")
return get_or_create_db("main", db_uri)
def backend() -> str:
return get_example_database().backend
def is_adhoc_metric(metric: Metric) -> bool:
return isinstance(metric, dict) and "expressionType" in metric
def get_metric_name(metric: Metric) -> str:
return metric["label"] if is_adhoc_metric(metric) else metric # type: ignore
def get_metric_names(metrics: Sequence[Metric]) -> List[str]:
return [get_metric_name(metric) for metric in metrics]
def ensure_path_exists(path: str) -> None:
try:
os.makedirs(path)
except OSError as exc:
if not (os.path.isdir(path) and exc.errno == errno.EEXIST):
raise
def convert_legacy_filters_into_adhoc( # pylint: disable=invalid-name
form_data: FormData,
) -> None:
mapping = {"having": "having_filters", "where": "filters"}
if not form_data.get("adhoc_filters"):
form_data["adhoc_filters"] = []
for clause, filters in mapping.items():
if clause in form_data and form_data[clause] != "":
form_data["adhoc_filters"].append(to_adhoc(form_data, "SQL", clause))
if filters in form_data:
for filt in filter(lambda x: x is not None, form_data[filters]):
form_data["adhoc_filters"].append(to_adhoc(filt, "SIMPLE", clause))
for key in ("filters", "having", "having_filters", "where"):
if key in form_data:
del form_data[key]
def split_adhoc_filters_into_base_filters( # pylint: disable=invalid-name
form_data: FormData,
) -> None:
"""
Mutates form data to restructure the adhoc filters in the form of the four base
filters, `where`, `having`, `filters`, and `having_filters` which represent
free form where sql, free form having sql, structured where clauses and structured
having clauses.
"""
adhoc_filters = form_data.get("adhoc_filters")
if isinstance(adhoc_filters, list):
simple_where_filters = []
simple_having_filters = []
sql_where_filters = []
sql_having_filters = []
for adhoc_filter in adhoc_filters:
expression_type = adhoc_filter.get("expressionType")
clause = adhoc_filter.get("clause")
if expression_type == "SIMPLE":
if clause == "WHERE":
simple_where_filters.append(
{
"col": adhoc_filter.get("subject"),
"op": adhoc_filter.get("operator"),
"val": adhoc_filter.get("comparator"),
}
)
elif clause == "HAVING":
simple_having_filters.append(
{
"col": adhoc_filter.get("subject"),
"op": adhoc_filter.get("operator"),
"val": adhoc_filter.get("comparator"),
}
)
elif expression_type == "SQL":
if clause == "WHERE":
sql_where_filters.append(adhoc_filter.get("sqlExpression"))
elif clause == "HAVING":
sql_having_filters.append(adhoc_filter.get("sqlExpression"))
form_data["where"] = " AND ".join(
["({})".format(sql) for sql in sql_where_filters]
)
form_data["having"] = " AND ".join(
["({})".format(sql) for sql in sql_having_filters]
)
form_data["having_filters"] = simple_having_filters
form_data["filters"] = simple_where_filters
def get_username() -> Optional[str]:
"""Get username if within the flask context, otherwise return noffin'"""
try:
return g.user.username
except Exception: # pylint: disable=broad-except
return None
def parse_ssl_cert(certificate: str) -> _Certificate:
"""
Parses the contents of a certificate and returns a valid certificate object
if valid.
:param certificate: Contents of certificate file
:return: Valid certificate instance
:raises CertificateException: If certificate is not valid/unparseable
"""
try:
return x509.load_pem_x509_certificate(
certificate.encode("utf-8"), default_backend()
)
except ValueError:
raise CertificateException("Invalid certificate")
def create_ssl_cert_file(certificate: str) -> str:
"""
This creates a certificate file that can be used to validate HTTPS
sessions. A certificate is only written to disk once; on subsequent calls,
only the path of the existing certificate is returned.
:param certificate: The contents of the certificate
:return: The path to the certificate file
:raises CertificateException: If certificate is not valid/unparseable
"""
filename = f"{md5_sha_from_str(certificate)}.crt"
cert_dir = current_app.config["SSL_CERT_PATH"]
path = cert_dir if cert_dir else tempfile.gettempdir()
path = os.path.join(path, filename)
if not os.path.exists(path):
        # Validate certificate prior to persisting to temporary directory
        parse_ssl_cert(certificate)
        with open(path, "w") as cert_file:
            cert_file.write(certificate)
return path
def time_function(
func: Callable[..., FlaskResponse], *args: Any, **kwargs: Any
) -> Tuple[float, Any]:
"""
Measures the amount of time a function takes to execute in ms
:param func: The function execution time to measure
:param args: args to be passed to the function
:param kwargs: kwargs to be passed to the function
:return: A tuple with the duration and response from the function
"""
start = default_timer()
response = func(*args, **kwargs)
stop = default_timer()
return (stop - start) * 1000.0, response
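# Usage sketch: any callable can be wrapped to get (duration_ms, result); the
# view function and its argument below are hypothetical.
#
#     duration_ms, response = time_function(render_dashboard, dashboard_id)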
def MediumText() -> Variant: # pylint:disable=invalid-name
return Text().with_variant(MEDIUMTEXT(), "mysql")
def shortid() -> str:
return "{}".format(uuid.uuid4())[-12:]
class DatasourceName(NamedTuple):
table: str
schema: str
def get_stacktrace() -> Optional[str]:
if current_app.config["SHOW_STACKTRACE"]:
return traceback.format_exc()
return None
def split(
string: str, delimiter: str = " ", quote: str = '"', escaped_quote: str = r"\""
) -> Iterator[str]:
"""
A split function that is aware of quotes and parentheses.
:param string: string to split
:param delimiter: string defining where to split, usually a comma or space
:param quote: string, either a single or a double quote
:param escaped_quote: string representing an escaped quote
    :return: iterator over the split substrings
"""
parens = 0
quotes = False
i = 0
for j, character in enumerate(string):
complete = parens == 0 and not quotes
if complete and character == delimiter:
yield string[i:j]
i = j + len(delimiter)
elif character == "(":
parens += 1
elif character == ")":
parens -= 1
elif character == quote:
if quotes and string[j - len(escaped_quote) + 1 : j + 1] != escaped_quote:
quotes = False
elif not quotes:
quotes = True
yield string[i:]
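# Illustrative sketch: the generator honours quotes and parentheses, so
# delimiters inside quoted or parenthesised sections do not split.
#
#     >>> list(split('a b "c d" (e f)'))
#     ['a', 'b', '"c d"', '(e f)']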
def get_iterable(x: Any) -> List[Any]:
"""
Get an iterable (list) representation of the object.
:param x: The object
:returns: An iterable representation
"""
return x if isinstance(x, list) else [x]
def get_form_data_token(form_data: Dict[str, Any]) -> str:
"""
Return the token contained within form data or generate a new one.
:param form_data: chart form data
:return: original token if predefined, otherwise new uuid4 based token
"""
return form_data.get("token") or "token_" + uuid.uuid4().hex[:8]
def get_column_name_from_metric(metric: Metric) -> Optional[str]:
"""
Extract the column that a metric is referencing. If the metric isn't
a simple metric, always returns `None`.
:param metric: Ad-hoc metric
:return: column name if simple metric, otherwise None
"""
if is_adhoc_metric(metric):
metric = cast(Dict[str, Any], metric)
if metric["expressionType"] == AdhocMetricExpressionType.SIMPLE:
return cast(Dict[str, Any], metric["column"])["column_name"]
return None
def get_column_names_from_metrics(metrics: List[Metric]) -> List[str]:
"""
    Extract the columns that a list of metrics are referencing. Excludes all
    SQL metrics.
    :param metrics: list of ad-hoc metrics
    :return: list of column names referenced by simple metrics
"""
columns: List[str] = []
for metric in metrics:
column_name = get_column_name_from_metric(metric)
if column_name:
columns.append(column_name)
return columns
def extract_dataframe_dtypes(df: pd.DataFrame) -> List[GenericDataType]:
"""Serialize pandas/numpy dtypes to generic types"""
# omitting string types as those will be the default type
inferred_type_map: Dict[str, GenericDataType] = {
"floating": GenericDataType.NUMERIC,
"integer": GenericDataType.NUMERIC,
"mixed-integer-float": GenericDataType.NUMERIC,
"decimal": GenericDataType.NUMERIC,
"boolean": GenericDataType.BOOLEAN,
"datetime64": GenericDataType.TEMPORAL,
"datetime": GenericDataType.TEMPORAL,
"date": GenericDataType.TEMPORAL,
}
generic_types: List[GenericDataType] = []
for column in df.columns:
series = df[column]
inferred_type = infer_dtype(series)
generic_type = inferred_type_map.get(inferred_type, GenericDataType.STRING)
generic_types.append(generic_type)
return generic_types
def extract_column_dtype(col: "BaseColumn") -> GenericDataType:
if col.is_temporal:
return GenericDataType.TEMPORAL
if col.is_numeric:
return GenericDataType.NUMERIC
# TODO: add check for boolean data type when proper support is added
return GenericDataType.STRING
def indexed(
items: List[Any], key: Union[str, Callable[[Any], Any]]
) -> Dict[Any, List[Any]]:
"""Build an index for a list of objects"""
idx: Dict[Any, Any] = {}
for item in items:
key_ = getattr(item, key) if isinstance(key, str) else key(item)
idx.setdefault(key_, []).append(item)
return idx
def is_test() -> bool:
    return bool(strtobool(os.environ.get("SUPERSET_TESTENV", "false")))
def get_time_filter_status( # pylint: disable=too-many-branches
datasource: "BaseDatasource", applied_time_extras: Dict[str, str],
) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
temporal_columns = {col.column_name for col in datasource.columns if col.is_dttm}
applied: List[Dict[str, str]] = []
rejected: List[Dict[str, str]] = []
time_column = applied_time_extras.get(ExtraFiltersTimeColumnType.TIME_COL)
if time_column:
if time_column in temporal_columns:
applied.append({"column": ExtraFiltersTimeColumnType.TIME_COL})
else:
rejected.append(
{
"reason": "not_in_datasource",
"column": ExtraFiltersTimeColumnType.TIME_COL,
}
)
if ExtraFiltersTimeColumnType.TIME_GRAIN in applied_time_extras:
# are there any temporal columns to assign the time grain to?
if temporal_columns:
applied.append({"column": ExtraFiltersTimeColumnType.TIME_GRAIN})
else:
rejected.append(
{
"reason": "no_temporal_column",
"column": ExtraFiltersTimeColumnType.TIME_GRAIN,
}
)
if ExtraFiltersTimeColumnType.TIME_RANGE in applied_time_extras:
        # are there any temporal columns to assign the time range to?
if temporal_columns:
applied.append({"column": ExtraFiltersTimeColumnType.TIME_RANGE})
else:
rejected.append(
{
"reason": "no_temporal_column",
"column": ExtraFiltersTimeColumnType.TIME_RANGE,
}
)
if ExtraFiltersTimeColumnType.TIME_ORIGIN in applied_time_extras:
if datasource.type == "druid":
applied.append({"column": ExtraFiltersTimeColumnType.TIME_ORIGIN})
else:
rejected.append(
{
"reason": "not_druid_datasource",
"column": ExtraFiltersTimeColumnType.TIME_ORIGIN,
}
)
if ExtraFiltersTimeColumnType.GRANULARITY in applied_time_extras:
if datasource.type == "druid":
applied.append({"column": ExtraFiltersTimeColumnType.GRANULARITY})
else:
rejected.append(
{
"reason": "not_druid_datasource",
"column": ExtraFiltersTimeColumnType.GRANULARITY,
}
)
return applied, rejected
def format_list(items: Sequence[str], sep: str = ", ", quote: str = '"') -> str:
quote_escaped = "\\" + quote
return sep.join(f"{quote}{x.replace(quote, quote_escaped)}{quote}" for x in items)
def find_duplicates(items: Iterable[InputType]) -> List[InputType]:
"""Find duplicate items in an iterable."""
return [item for item, count in collections.Counter(items).items() if count > 1]
def remove_duplicates(
items: Iterable[InputType], key: Optional[Callable[[InputType], Any]] = None
) -> List[InputType]:
"""Remove duplicate items in an iterable."""
if not key:
return list(dict.fromkeys(items).keys())
seen = set()
result = []
for item in items:
item_key = key(item)
if item_key not in seen:
seen.add(item_key)
result.append(item)
return result
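# Illustrative sketch: order is preserved and, when a key is given, the first
# item per key wins.
#
#     >>> remove_duplicates([1, 2, 1, 3])
#     [1, 2, 3]
#     >>> remove_duplicates(["a", "A", "b"], key=str.lower)
#     ['a', 'b']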
def normalize_dttm_col(
df: pd.DataFrame,
timestamp_format: Optional[str],
offset: int,
time_shift: Optional[timedelta],
) -> None:
if DTTM_ALIAS not in df.columns:
return
if timestamp_format in ("epoch_s", "epoch_ms"):
dttm_col = df[DTTM_ALIAS]
if is_numeric_dtype(dttm_col):
# Column is formatted as a numeric value
unit = timestamp_format.replace("epoch_", "")
df[DTTM_ALIAS] = pd.to_datetime(
dttm_col, utc=False, unit=unit, origin="unix"
)
else:
# Column has already been formatted as a timestamp.
df[DTTM_ALIAS] = dttm_col.apply(pd.Timestamp)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format
)
if offset:
df[DTTM_ALIAS] += timedelta(hours=offset)
if time_shift is not None:
df[DTTM_ALIAS] += time_shift
def parse_boolean_string(bool_str: Optional[str]) -> bool:
"""
Convert a string representation of a true/false value into a boolean
>>> parse_boolean_string(None)
False
>>> parse_boolean_string('false')
False
>>> parse_boolean_string('true')
True
>>> parse_boolean_string('False')
False
>>> parse_boolean_string('True')
True
>>> parse_boolean_string('foo')
False
>>> parse_boolean_string('0')
False
>>> parse_boolean_string('1')
True
:param bool_str: string representation of a value that is assumed to be boolean
:return: parsed boolean value
"""
if bool_str is None:
return False
try:
return bool(strtobool(bool_str.lower()))
except ValueError:
return False
|
the-stack_0_24542
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import os
import numpy as np
import pytest
from cntk.io import *
from cntk.io import _ReaderConfig
import cntk.io.transforms as xforms
abs_path = os.path.dirname(os.path.abspath(__file__))
AA = np.asarray
def test_text_format(tmpdir):
mbdata = r'''0 |x 560:1 |y 1 0 0 0 0
0 |x 0:1
0 |x 0:1
1 |x 560:1 |y 0 1 0 0 0
1 |x 0:1
1 |x 0:1
1 |x 424:1
'''
tmpfile = str(tmpdir/'mbdata.txt')
with open(tmpfile, 'w') as f:
f.write(mbdata)
input_dim = 1000
num_output_classes = 5
mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
features = StreamDef(field='x', shape=input_dim, is_sparse=True),
labels = StreamDef(field='y', shape=num_output_classes, is_sparse=False)
)))
assert isinstance(mb_source, MinibatchSource)
features_si = mb_source.stream_info('features')
labels_si = mb_source.stream_info('labels')
mb = mb_source.next_minibatch(7)
features = mb[features_si]
# 2 samples, max seq len 4, 1000 dim
assert features.shape == (2, 4, input_dim)
assert features.end_of_sweep
assert features.num_sequences == 2
assert features.num_samples == 7
assert features.is_sparse
# TODO features is sparse and cannot be accessed right now:
# *** RuntimeError: DataBuffer/WritableDataBuffer methods can only be called for NDArrayiew objects with dense storage format
# 2 samples, max seq len 4, 1000 dim
#assert features.data().shape().dimensions() == (2, 4, input_dim)
#assert features.data().is_sparse()
labels = mb[labels_si]
# 2 samples, max seq len 1, 5 dim
assert labels.shape == (2, 1, num_output_classes)
assert labels.end_of_sweep
assert labels.num_sequences == 2
assert labels.num_samples == 2
assert not labels.is_sparse
label_data = np.asarray(labels)
assert np.allclose(label_data,
np.asarray([
[[ 1., 0., 0., 0., 0.]],
[[ 0., 1., 0., 0., 0.]]
]))
mb = mb_source.next_minibatch(1)
features = mb[features_si]
labels = mb[labels_si]
assert not features.end_of_sweep
assert not labels.end_of_sweep
assert features.num_samples < 7
assert labels.num_samples == 1
def test_image():
map_file = "input.txt"
mean_file = "mean.txt"
epoch_size = 150
feature_name = "f"
image_width = 100
image_height = 200
num_channels = 3
label_name = "l"
num_classes = 7
transforms = [xforms.crop(crop_type='randomside', side_ratio=0.5, jitter_type='uniratio'),
xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
xforms.mean(mean_file)]
image = ImageDeserializer(map_file, StreamDefs(f = StreamDef(field='image', transforms=transforms), l = StreamDef(field='label', shape=num_classes)))
rc = _ReaderConfig(image, randomize=False, epoch_size=epoch_size)
assert rc['epochSize'].value == epoch_size
assert rc['randomize'] == False
assert rc['sampleBasedRandomizationWindow'] == False
assert len(rc['deserializers']) == 1
d = rc['deserializers'][0]
assert d['type'] == 'ImageDeserializer'
assert d['file'] == map_file
assert set(d['input'].keys()) == {label_name, feature_name}
l = d['input'][label_name]
assert l['labelDim'] == num_classes
f = d['input'][feature_name]
assert set(f.keys()) == { 'transforms' }
t0, t1, t2 = f['transforms']
assert t0['type'] == 'Crop'
assert t1['type'] == 'Scale'
assert t2['type'] == 'Mean'
assert t0['cropType'] == 'randomside'
assert t0['sideRatio'] == 0.5
assert t0['aspectRatio'] == 1.0
assert t0['jitterType'] == 'uniratio'
assert t1['width'] == image_width
assert t1['height'] == image_height
assert t1['channels'] == num_channels
assert t1['interpolations'] == 'linear'
assert t2['meanFile'] == mean_file
rc = _ReaderConfig(image, randomize=False, randomization_window = 100,
sample_based_randomization_window = True, epoch_size=epoch_size)
assert rc['epochSize'].value == epoch_size
assert rc['randomize'] == False
assert rc['sampleBasedRandomizationWindow'] == True
assert len(rc['deserializers']) == 1
d = rc['deserializers'][0]
assert d['type'] == 'ImageDeserializer'
assert d['file'] == map_file
assert set(d['input'].keys()) == {label_name, feature_name}
l = d['input'][label_name]
assert l['labelDim'] == num_classes
rc = _ReaderConfig(image, randomize=True, randomization_window = 100,
sample_based_randomization_window = True, epoch_size=epoch_size)
assert rc['epochSize'].value == epoch_size
assert rc['randomize'] == True
assert rc['sampleBasedRandomizationWindow'] == True
assert len(rc['deserializers']) == 1
d = rc['deserializers'][0]
assert d['type'] == 'ImageDeserializer'
assert d['file'] == map_file
assert set(d['input'].keys()) == {label_name, feature_name}
l = d['input'][label_name]
assert l['labelDim'] == num_classes
# TODO depends on ImageReader.dll
'''
mbs = rc.minibatch_source()
sis = mbs.stream_infos()
assert set(sis.keys()) == { feature_name, label_name }
'''
def test_full_sweep_minibatch(tmpdir):
mbdata = r'''0 |S0 0 |S1 0
0 |S0 1 |S1 1
0 |S0 2
0 |S0 3 |S1 3
1 |S0 4
1 |S0 5 |S1 1
1 |S0 6 |S1 2
'''
tmpfile = str(tmpdir/'mbtest.txt')
with open(tmpfile, 'w') as f:
f.write(mbdata)
mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
features = StreamDef(field='S0', shape=1),
labels = StreamDef(field='S1', shape=1))),
randomize=False, epoch_size=FULL_DATA_SWEEP)
features_si = mb_source.stream_info('features')
labels_si = mb_source.stream_info('labels')
mb = mb_source.next_minibatch(1000)
assert mb[features_si].num_sequences == 2
assert mb[labels_si].num_sequences == 2
features = mb[features_si]
assert features.end_of_sweep
assert len(features.value) == 2
expected_features = \
[
[[0],[1],[2],[3]],
[[4],[5],[6]]
]
for res, exp in zip (features.value, expected_features):
assert np.allclose(res, exp)
assert np.allclose(features.mask,
[[2, 1, 1, 1],
[2, 1, 1, 0]])
labels = mb[labels_si]
assert labels.end_of_sweep
assert len(labels.value) == 2
expected_labels = \
[
[[0],[1],[3]],
[[1],[2]]
]
for res, exp in zip (labels.value, expected_labels):
assert np.allclose(res, exp)
assert np.allclose(labels.mask,
[[2, 1, 1],
[2, 1, 0]])
def test_large_minibatch(tmpdir):
mbdata = r'''0 |S0 0 |S1 0
0 |S0 1 |S1 1
0 |S0 2
0 |S0 3 |S1 3
0 |S0 4
0 |S0 5 |S1 1
0 |S0 6 |S1 2
'''
tmpfile = str(tmpdir/'mbtest.txt')
with open(tmpfile, 'w') as f:
f.write(mbdata)
mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
features = StreamDef(field='S0', shape=1),
labels = StreamDef(field='S1', shape=1))),
randomize=False)
features_si = mb_source.stream_info('features')
labels_si = mb_source.stream_info('labels')
mb = mb_source.next_minibatch(1000)
features = mb[features_si]
labels = mb[labels_si]
# Actually, the minibatch spans over multiple sweeps,
# not sure if this is an artificial situation, but
# maybe instead of a boolean flag we should indicate
# the largest sweep index the data was taken from.
assert features.end_of_sweep
assert labels.end_of_sweep
assert features.num_samples == 1000 - 1000 % 7
assert labels.num_samples == 5 * (1000 // 7)
assert mb[features_si].num_sequences == (1000 // 7)
assert mb[labels_si].num_sequences == (1000 // 7)
@pytest.mark.parametrize("idx, alias_tensor_map, expected", [
(0, {'A': [object()]}, ValueError),
])
def test_sequence_conversion_exceptions(idx, alias_tensor_map, expected):
with pytest.raises(expected):
sequence_to_cntk_text_format(idx, alias_tensor_map)
@pytest.mark.parametrize("idx, alias_tensor_map, expected", [
(0, {'W': AA([])}, ""),
(0, {'W': AA([[[1, 0, 0, 0], [1, 0, 0, 0]]])}, """\
0\t|W 1 0 0 0 1 0 0 0\
"""),
(0, {
'W': AA([[[1, 0, 0, 0], [1, 0, 0, 0]]]),
'L': AA([[[2]]])
},
"""\
0\t|L 2 |W 1 0 0 0 1 0 0 0\
"""),
(0, {
'W': AA([[[1, 0], [1, 0]], [[5, 6], [7, 8]]]),
'L': AA([[[2]]])
},
"""\
0\t|L 2 |W 1 0 1 0
0\t|W 5 6 7 8"""),
])
def test_sequence_conversion_dense(idx, alias_tensor_map, expected):
assert sequence_to_cntk_text_format(idx, alias_tensor_map) == expected
@pytest.mark.parametrize("data, expected", [
([1], True),
([[1, 2]], True),
([[AA([1, 2])]], False),
([AA([1, 2])], False),
([AA([1, 2]), AA([])], False),
])
def test_is_tensor(data, expected):
from cntk.io import _is_tensor
assert _is_tensor(data) == expected
def test_create_two_image_deserializers(tmpdir):
mbdata = r'''filename 0
filename2 0
'''
map_file = str(tmpdir/'mbdata.txt')
with open(map_file, 'w') as f:
f.write(mbdata)
image_width = 100
image_height = 200
num_channels = 3
num_classes = 7
transforms = [xforms.crop(crop_type='randomside', side_ratio=0.5, jitter_type='uniratio'),
xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear')]
image1 = ImageDeserializer(map_file, StreamDefs(f1 = StreamDef(field='image', transforms=transforms)))
image2 = ImageDeserializer(map_file, StreamDefs(f2 = StreamDef(field='image', transforms=transforms)))
mb_source = MinibatchSource([image1, image2])
assert isinstance(mb_source, MinibatchSource)
|
the-stack_0_24543
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from bme280 import bme280, bme280_i2c
# Fetch the sensor data and print it
def print_weather():
data = bme280.read_all()
p = "{0:7.2f} hPa".format(data.pressure)
h = "{0:7.2f} %".format(data.humidity)
t = "{0:7.2f} C".format(data.temperature)
print('{0}:{1}:{2}'.format(p, h, t))
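# Hedged usage sketch: guard the call so importing this module has no side
# effects. This assumes the BME280 has already been configured for I2C
# (address/bus setup via bme280_i2c) before read_all() is called.
if __name__ == '__main__':
    print_weather()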
|
the-stack_0_24545
|
"""
This code was originally published by the following individuals for use with
Scilab:
Copyright (C) 2012 - 2013 - Michael Baudin
Copyright (C) 2012 - Maria Christopoulou
Copyright (C) 2010 - 2011 - INRIA - Michael Baudin
Copyright (C) 2009 - Yann Collette
Copyright (C) 2009 - CEA - Jean-Marc Martinez
website: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
Much thanks goes to these individuals. It has been converted to Python by
Abraham Lee.
"""
import numpy as np
from .doe_factorial import ff2n
from .doe_repeat_center import repeat_center
__all__ = ['bbdesign']
def bbdesign(n, center=None):
"""
Create a Box-Behnken design
Parameters
----------
n : int
The number of factors in the design
Optional
--------
center : int
The number of center points to include (default = 1).
Returns
-------
mat : 2d-array
The design matrix
Example
-------
::
>>> bbdesign(3)
array([[-1., -1., 0.],
[ 1., -1., 0.],
[-1., 1., 0.],
[ 1., 1., 0.],
[-1., 0., -1.],
[ 1., 0., -1.],
[-1., 0., 1.],
[ 1., 0., 1.],
[ 0., -1., -1.],
[ 0., 1., -1.],
[ 0., -1., 1.],
[ 0., 1., 1.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
"""
assert n>=3, 'Number of variables must be at least 3'
# First, compute a factorial DOE with 2 parameters
H_fact = ff2n(2)
    # Now populate the full Box-Behnken design with this 2-factor design:
    # the factorial design is applied to every pair of dimensions, which is
    # what the two nested loops below do.
Index = 0
nb_lines = (n*(n-1)//2)*H_fact.shape[0]
H = repeat_center(n, nb_lines)
for i in range(n - 1):
for j in range(i + 1, n):
Index = Index + 1
H[max([0, (Index - 1)*H_fact.shape[0]]):Index*H_fact.shape[0], i] = H_fact[:, 0]
H[max([0, (Index - 1)*H_fact.shape[0]]):Index*H_fact.shape[0], j] = H_fact[:, 1]
if center is None:
if n<=16:
points= [0, 0, 0, 3, 3, 6, 6, 6, 8, 9, 10, 12, 12, 13, 14, 15, 16]
center = points[n]
else:
center = n
H = np.c_[H.T, repeat_center(n, center).T].T
return H
|
the-stack_0_24546
|
#!/usr/bin/env python3
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import zipfile
import os
import stat
import sys
def _zip_dir(path, zip_file, prefix):
path = path.rstrip('/\\')
for root, dirs, files in os.walk(path):
for file in files:
if os.path.islink(os.path.join(root, file)):
add_symlink(
zip_file,
os.path.join(root, file),
os.path.join(root.replace(path, prefix), file)
)
continue
zip_file.write(os.path.join(root, file), os.path.join(
root.replace(path, prefix), file))
def add_symlink(zip_file, source, target):
"""Adds a symlink to a zip file.
Args:
zip_file: The ZipFile obj where the symlink will be added.
source: The full path to the symlink.
target: The target path for the symlink within the zip file.
"""
zip_info = zipfile.ZipInfo(target)
zip_info.create_system = 3 # Unix like system
unix_st_mode = (stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH |
stat.S_IWOTH | stat.S_IXOTH)
zip_info.external_attr = unix_st_mode << 16
zip_file.writestr(zip_info, source)
def main(args):
zip_file = zipfile.ZipFile(args.output, 'w', zipfile.ZIP_DEFLATED)
if args.source_file:
with open(args.source_file) as source_file:
file_dict_list = json.load(source_file)
for file_dict in file_dict_list:
if os.path.islink(file_dict['source']):
add_symlink(zip_file, file_dict['source'], file_dict['destination'])
continue
if os.path.isdir(file_dict['source']):
_zip_dir(file_dict['source'], zip_file, file_dict['destination'])
else:
zip_file.write(file_dict['source'], file_dict['destination'])
else:
for path, archive_name in args.input_pairs:
if os.path.islink(path):
add_symlink(zip_file, path, archive_name)
continue
if os.path.isdir(path):
_zip_dir(path, zip_file, archive_name)
else:
zip_file.write(path, archive_name)
zip_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='This script creates zip files.')
parser.add_argument('-o', dest='output', action='store',
help='The name of the output zip file.')
parser.add_argument('-i', dest='input_pairs', nargs=2, action='append',
help='The input file and its destination location in the zip archive.')
parser.add_argument('-f', dest='source_file', action='store',
help='The path to the file list to zip.')
sys.exit(main(parser.parse_args()))
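# Usage sketch (the script and file names below are hypothetical):
#   python zip_bundle.py -o engine.zip -i out/libflutter.so lib/libflutter.so
#   python zip_bundle.py -o engine.zip -f file_list.json
# With -f, the JSON file holds a list of {"source": ..., "destination": ...}
# entries, matching what main() reads above.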
|
the-stack_0_24549
|
#!/usr/bin/env python
####! /usr/global/bin/python
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the gradunwarp package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import argparse as arg
import os
import logging
import sys
sys.path.insert(1,'/group_shares/PSYCH/code/development/pipelines/HCP_NHP_generic/src/gradient_unwarping')
from gradunwarp.core import (globals, coeffs, utils)
from gradunwarp.core.unwarp_resample import Unwarper
log = globals.get_logger()
def argument_parse_gradunwarp():
'''Arguments parser from the command line
'''
# initiate
p = arg.ArgumentParser(version=globals.VERSION, usage=globals.usage)
# required arguments
p.add_argument('infile', action='store',
help='The input warped file (nifti or mgh)')
p.add_argument('outfile', action='store',
help='The output unwarped file (extension should be .nii/.nii.gz/.mgh/.mgz)')
p.add_argument('vendor', action='store', choices=['siemens', 'ge'],
help='vendor (either "ge" or "siemens" for now)')
coef_grp = p.add_mutually_exclusive_group(required=True)
coef_grp.add_argument('-g', '--gradfile', dest='gradfile',
help='The .grad coefficient file')
coef_grp.add_argument('-c', '--coeffile', dest='coeffile',
help='The .coef coefficient file')
# optional arguments
p.add_argument('-w', '--warp', action='store_true', default=False,
help='warp a volume (as opposed to unwarping)')
p.add_argument('-n', '--nojacobian', dest='nojac', action='store_true',
default=False, help='Do not perform Jacobian intensity correction')
p.add_argument('--fovmin', dest='fovmin',
help='the minimum extent of harmonics evaluation grid in meters')
p.add_argument('--fovmax', dest='fovmax',
help='the maximum extent of harmonics evaluation grid in meters')
p.add_argument('--numpoints', dest='numpoints',
help='number of grid points in each direction')
p.add_argument('--interp_order', dest='order',
help='the order of interpolation(1..4) where 1 is linear - default')
p.add_argument('--verbose', action='store_true', default=False)
args = p.parse_args()
# do some validation
if not os.path.exists(args.infile):
raise IOError(args.infile + ' not found')
if args.gradfile:
if not os.path.exists(args.gradfile):
raise IOError(args.gradfile + ' not found')
if args.coeffile:
if not os.path.exists(args.coeffile):
raise IOError(args.coeffile + ' not found')
return args
class GradientUnwarpRunner(object):
''' Takes the option datastructure after parsing the commandline.
run() method performs the actual unwarping
write() method performs the writing of the unwarped volume
'''
def __init__(self, args):
''' constructor takes the option datastructure which is the
result of (options, args) = parser.parse_args()
'''
self.args = args
self.unwarper = None
log.setLevel(logging.INFO)
if hasattr(self.args, 'verbose'):
log.setLevel(logging.DEBUG)
def run(self):
''' run the unwarp resample
'''
# get the spherical harmonics coefficients from parsing
# the given .coeff file xor .grad file
if hasattr(self.args, 'gradfile') and self.args.gradfile:
self.coeffs = coeffs.get_coefficients(self.args.vendor,
self.args.gradfile)
else:
self.coeffs = coeffs.get_coefficients(self.args.vendor,
self.args.coeffile)
self.vol, self.m_rcs2ras = utils.get_vol_affine(self.args.infile)
self.unwarper = Unwarper(self.vol, self.m_rcs2ras, self.args.vendor, self.coeffs, self.args.infile )
if hasattr(self.args, 'fovmin') and self.args.fovmin:
self.unwarper.fovmin = float(self.args.fovmin)
if hasattr(self.args, 'fovmax') and self.args.fovmax:
self.unwarper.fovmax = float(self.args.fovmax)
if hasattr(self.args, 'numpoints') and self.args.numpoints:
self.unwarper.numpoints = int(self.args.numpoints)
if hasattr(self.args, 'warp') and self.args.warp:
self.unwarper.warp = True
if hasattr(self.args, 'nojac') and self.args.nojac:
self.unwarper.nojac = True
if hasattr(self.args, 'order') and self.args.order:
self.unwarper.order = int(self.args.order)
self.unwarper.run()
def write(self):
self.unwarper.write(self.args.outfile)
if __name__ == '__main__':
args = argument_parse_gradunwarp()
grad_unwarp = GradientUnwarpRunner(args)
grad_unwarp.run()
grad_unwarp.write()
|
the-stack_0_24550
|
from django.conf.urls.defaults import *
from courses.views import (CourseOverview,
BySemesterList,
CourseDropPage,
CourseAdmin,
ToggleMembership,
NewCourseAssignment,
AssignmentList,
AssignmentOverview,
SubmitAssignment,
TeamSubmitAssignment,
DeleteSubmission,
CourseMembers,
ResourceList,
ResourceDetails,
NewCourseResource,
EditResource,
EditAssignment,
DeleteResource,
DeleteAssignment)
from django.contrib.auth.decorators import login_required
urlpatterns = patterns('courses.views',
url('^(?P<pk>\w+)/overview/$', login_required(CourseOverview.as_view()), name = 'overview'),
url('^(?P<pk>\w+)/members/$', login_required(CourseMembers.as_view()), name = 'members'),
url('^(?P<pk>\w+)/admin/$', login_required(CourseAdmin.as_view()), name = 'admin'),
url('^(?P<pk>\w+)/assignments/new/$', login_required(NewCourseAssignment.as_view()), name = 'new_assignment'),
url('^(?P<pk>\w+)/assignments/$', login_required(AssignmentList.as_view()), name = 'assignments'),
url('^assignment/(?P<pk>\w+)/overview/$', login_required(AssignmentOverview.as_view()), name = 'assignment_overview'),
url('^(?P<pk>\w+)/assignments/submit/$', login_required(SubmitAssignment.as_view()), name = 'submit_assignment'),
url('^assignment/(?P<pk>\w+)/edit/$', login_required(EditAssignment.as_view()), name = 'edit_assignment'),
url('^(?P<pk>\w+)/assignments/team_submit/$', login_required(TeamSubmitAssignment.as_view()), name = 'team_submit_assignment'),
url('^assignment_submission/delete/$', login_required(DeleteSubmission.as_view()), name = 'delete_submission'),
url('^(?P<pk>\w+)/toggle-membership/$', login_required(ToggleMembership.as_view()), name = 'toggle-membership'),
url('^semester/(?P<pk>\w+)/$', login_required(BySemesterList.as_view()), name = 'by_semester'),
url('^$', login_required(CourseDropPage.as_view()), name = 'drop_page'),
url('^resources/(?P<pk>\w+)/details/$', login_required(ResourceDetails.as_view()), name = 'resource_details'),
url('^resources/(?P<pk>\w+)/edit/$', login_required(EditResource.as_view()), name = 'edit_resource'),
url('^(?P<pk>\w+)/resources/create/$', login_required(NewCourseResource.as_view()), name = 'resource_create'),
url('^(?P<pk>\w+)/resources/$', login_required(ResourceList.as_view()), name = 'resources'),
url('^resources/delete/$', login_required(DeleteResource.as_view()), name = 'delete_resource'),
url('^assignment/delete/$', login_required(DeleteAssignment.as_view()), name = 'delete_assignment'),
)
|
the-stack_0_24553
|
# Copyright (c) 2018, Faststream Technologies
# Author: Sudhanva Narayana
import numpy as np
import pandas as pd
import os
# Import to show plots in seperate Windows
# from IPython import get_ipython
# get_ipython().run_line_magic('matplotlib', 'qt5')
# CURR and PARENT directory constants
CURR_DIR = os.path.dirname(os.path.abspath('__file__'))
PARENT_DIR = os.path.abspath(os.path.join(CURR_DIR, os.pardir))
# Import dataset ignoring headers
df = pd.read_csv(PARENT_DIR + '\\assets\\datasets\\eit.csv', index_col=[0], header = [0], skiprows= [1] ,skipinitialspace=True)
df_ranges = pd.read_csv(PARENT_DIR + '\\assets\\datasets\\eit.csv', index_col=[0], header = [0], skiprows= [0], skipinitialspace=True, nrows=0)
df_columns_ranges = list(df_ranges.columns)
df_columns_colors = list(df.columns)
df_means = df.mean()
target_series = []
# Create target_series list of booleans
for i, color in enumerate(df_columns_colors):
target_series.append(df[color] > df_means[i])
target = np.array(target_series)
target = np.transpose(target[-4:])
target_bools = []
# Create target_bools which creates the final Series of target column
for i in range(len(target)):
if np.sum(target[i]) >= 1:
target_bools.append(1)
else:
target_bools.append(0)
target_bools = pd.Series(target_bools)
columns_tuple_list = []
# Tuple for creating columns for DataFrame
for color, intensity_range in zip(df_columns_colors, df_columns_ranges):
columns_tuple_list.append((color, intensity_range))
# Final DataFrame to csv
df.columns = pd.MultiIndex.from_tuples(columns_tuple_list)
df['target'] = target_bools
df.to_csv(PARENT_DIR + '\\assets\\datasets\\' + 'eit_data.csv')
|
the-stack_0_24556
|
import requests
import os
import webbrowser
import time
import yaml
from pathlib import Path
client_id = 'purchase'
client_secret = os.environ['CLIENT_SECRET']
oauth_host = os.getenv('OAUTH2_HOST', 'localhost')
oauth_port = os.getenv('OAUTH2_PORT', '8080')
base_url = 'http://'+oauth_host+':'+oauth_port
device_url = '/realms/purchase/protocol/openid-connect/auth/device'
token_url = '/realms/purchase/protocol/openid-connect/token'
# call the device code api
print("calling device api")
r = requests.post(base_url+device_url, data = {'client_id':client_id}, auth=(client_id, client_secret))
resp = r.json()
print("received response")
print (resp)
# Open the returned URL
print ("opening url", resp['verification_uri_complete'])
webbrowser.open_new(resp['verification_uri_complete']);
access_token = ""
refresh_token = ""
finished = False
reqdata = {
'grant_type': "urn:ietf:params:oauth:grant-type:device_code",
'device_code': resp['device_code'],
'client_id': client_id
}
print ("polling for tokens while user does browser flow")
# Poll for the tokens
while not finished:
r = requests.post(base_url+token_url, data = reqdata, auth=(client_id, client_secret))
print("recieved status code", r.status_code)
if (r.status_code==200):
tokendata = r.json()
access_token = tokendata['access_token']
refresh_token = tokendata['refresh_token']
finished = True
break
print ("waiting")
time.sleep(resp['interval'])
config = {
'access_token': access_token,
'refresh_token': refresh_token
}
print ("saving to ~/.config/purchase/secrets")
print (yaml.dump(config))
# save the tokens to the home directory
home = str(Path.home())
Path(home + "/.config/purchase").mkdir(parents=True, exist_ok=True)
with open(home + '/.config/purchase/secrets', 'w') as file:
documents = yaml.dump(config, file)
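# For reference, the saved secrets file is plain YAML with two keys, e.g.:
#   access_token: <access token from the token endpoint>
#   refresh_token: <refresh token from the token endpoint>
# (token values above are placeholders)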
|
the-stack_0_24557
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Sequence, Set, Tuple, Union
import pendulum
from airflow.exceptions import AirflowException
from airflow.serialization.enums import DagAttributeTypes
if TYPE_CHECKING:
from logging import Logger
from airflow.models.dag import DAG
from airflow.models.mappedoperator import MappedOperator
from airflow.utils.edgemodifier import EdgeModifier
from airflow.utils.task_group import TaskGroup
class DependencyMixin:
"""Mixing implementing common dependency setting methods methods like >> and <<."""
@property
def roots(self) -> Sequence["DependencyMixin"]:
"""
List of root nodes -- ones with no upstream dependencies.
a.k.a. the "start" of this sub-graph
"""
raise NotImplementedError()
@property
def leaves(self) -> Sequence["DependencyMixin"]:
"""
List of leaf nodes -- ones with only upstream dependencies.
a.k.a. the "end" of this sub-graph
"""
raise NotImplementedError()
@abstractmethod
def set_upstream(
self,
other: Union["DependencyMixin", Sequence["DependencyMixin"]],
edge_modifier: Optional["EdgeModifier"] = None,
):
"""Set a task or a task list to be directly upstream from the current task."""
raise NotImplementedError()
@abstractmethod
def set_downstream(
self,
other: Union["DependencyMixin", Sequence["DependencyMixin"]],
edge_modifier: Optional["EdgeModifier"] = None,
):
"""Set a task or a task list to be directly downstream from the current task."""
raise NotImplementedError()
def update_relative(
self,
other: "DependencyMixin",
upstream=True,
edge_modifier: Optional["EdgeModifier"] = None,
) -> None:
"""
Update relationship information about another TaskMixin. Default is no-op.
Override if necessary.
"""
def __lshift__(self, other: Union["DependencyMixin", Sequence["DependencyMixin"]]):
"""Implements Task << Task"""
self.set_upstream(other)
return other
def __rshift__(self, other: Union["DependencyMixin", Sequence["DependencyMixin"]]):
"""Implements Task >> Task"""
self.set_downstream(other)
return other
def __rrshift__(self, other: Union["DependencyMixin", Sequence["DependencyMixin"]]):
"""Called for Task >> [Task] because list don't have __rshift__ operators."""
self.__lshift__(other)
return self
def __rlshift__(self, other: Union["DependencyMixin", Sequence["DependencyMixin"]]):
"""Called for Task << [Task] because list don't have __lshift__ operators."""
self.__rshift__(other)
return self
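    # Illustrative sketch: DAG authors rely on these operators to wire tasks;
    # the task names below are hypothetical operators already attached to a DAG.
    #
    #     extract >> transform >> [load_warehouse, load_lake]
    #     cleanup << load_warehouse
    #
    # Both spellings resolve to set_downstream/set_upstream on the tasks involved.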
class TaskMixin(DependencyMixin):
""":meta private:"""
def __init_subclass__(cls) -> None:
warnings.warn(
f"TaskMixin has been renamed to DependencyMixin, please update {cls.__name__}",
category=DeprecationWarning,
stacklevel=2,
)
return super().__init_subclass__()
class DAGNode(DependencyMixin, metaclass=ABCMeta):
"""
A base class for a node in the graph of a workflow -- an Operator or a Task Group, either mapped or
unmapped.
"""
dag: Optional["DAG"] = None
task_group: Optional["TaskGroup"] = None
"""The task_group that contains this node"""
@property
@abstractmethod
def node_id(self) -> str:
raise NotImplementedError()
@property
def label(self) -> Optional[str]:
tg = self.task_group
if tg and tg.node_id and tg.prefix_group_id:
# "task_group_id.task_id" -> "task_id"
return self.node_id[len(tg.node_id) + 1 :]
return self.node_id
start_date: Optional[pendulum.DateTime]
end_date: Optional[pendulum.DateTime]
upstream_task_ids: Set[str]
downstream_task_ids: Set[str]
def has_dag(self) -> bool:
return self.dag is not None
@property
def dag_id(self) -> str:
"""Returns dag id if it has one or an adhoc/meaningless ID"""
if self.dag:
return self.dag.dag_id
return "_in_memory_dag_"
@property
def log(self) -> "Logger":
raise NotImplementedError()
@property
@abstractmethod
def roots(self) -> Sequence["DAGNode"]:
raise NotImplementedError()
@property
@abstractmethod
def leaves(self) -> Sequence["DAGNode"]:
raise NotImplementedError()
def _set_relatives(
self,
task_or_task_list: Union[DependencyMixin, Sequence[DependencyMixin]],
upstream: bool = False,
edge_modifier: Optional["EdgeModifier"] = None,
) -> None:
"""Sets relatives for the task or task list."""
from airflow.models.baseoperator import BaseOperator
from airflow.models.mappedoperator import MappedOperator
from airflow.models.operator import Operator
if not isinstance(task_or_task_list, Sequence):
task_or_task_list = [task_or_task_list]
task_list: List[Operator] = []
for task_object in task_or_task_list:
task_object.update_relative(self, not upstream, edge_modifier=edge_modifier)
relatives = task_object.leaves if upstream else task_object.roots
for task in relatives:
if not isinstance(task, (BaseOperator, MappedOperator)):
raise AirflowException(
f"Relationships can only be set between Operators; received {task.__class__.__name__}"
)
task_list.append(task)
# relationships can only be set if the tasks share a single DAG. Tasks
# without a DAG are assigned to that DAG.
dags: Set["DAG"] = {task.dag for task in [*self.roots, *task_list] if task.has_dag() and task.dag}
if len(dags) > 1:
raise AirflowException(f'Tried to set relationships between tasks in more than one DAG: {dags}')
elif len(dags) == 1:
dag = dags.pop()
else:
raise AirflowException(
f"Tried to create relationships between tasks that don't have DAGs yet. "
f"Set the DAG for at least one task and try again: {[self, *task_list]}"
)
if not self.has_dag():
# If this task does not yet have a dag, add it to the same dag as the other task and
# put it in the dag's root TaskGroup.
self.dag = dag
self.dag.task_group.add(self)
def add_only_new(obj, item_set: Set[str], item: str) -> None:
"""Adds only new items to item set"""
if item in item_set:
self.log.warning('Dependency %s, %s already registered for DAG: %s', obj, item, dag.dag_id)
else:
item_set.add(item)
for task in task_list:
if dag and not task.has_dag():
# If the other task does not yet have a dag, add it to the same dag as this task and
# put it in the dag's root TaskGroup.
dag.add_task(task)
dag.task_group.add(task)
if upstream:
add_only_new(task, task.downstream_task_ids, self.node_id)
add_only_new(self, self.upstream_task_ids, task.node_id)
if edge_modifier:
edge_modifier.add_edge_info(self.dag, task.node_id, self.node_id)
else:
add_only_new(self, self.downstream_task_ids, task.node_id)
add_only_new(task, task.upstream_task_ids, self.node_id)
if edge_modifier:
edge_modifier.add_edge_info(self.dag, self.node_id, task.node_id)
def set_downstream(
self,
task_or_task_list: Union[DependencyMixin, Sequence[DependencyMixin]],
edge_modifier: Optional["EdgeModifier"] = None,
) -> None:
"""Set a node (or nodes) to be directly downstream from the current node."""
self._set_relatives(task_or_task_list, upstream=False, edge_modifier=edge_modifier)
def set_upstream(
self,
task_or_task_list: Union[DependencyMixin, Sequence[DependencyMixin]],
edge_modifier: Optional["EdgeModifier"] = None,
) -> None:
"""Set a node (or nodes) to be directly upstream of the current node."""
self._set_relatives(task_or_task_list, upstream=True, edge_modifier=edge_modifier)
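# Sketch of equivalent ways to declare the same dependency (hypothetical
# nodes ``extract`` and ``load``):
#
#     extract.set_downstream(load)
#     load.set_upstream(extract)
#     extract >> load     # same effect via the DependencyMixin operators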
@property
def downstream_list(self) -> Iterable["DAGNode"]:
"""List of nodes directly downstream"""
if not self.dag:
raise AirflowException(f'Operator {self} has not been assigned to a DAG yet')
return [self.dag.get_task(tid) for tid in self.downstream_task_ids]
@property
def upstream_list(self) -> Iterable["DAGNode"]:
"""List of nodes directly upstream"""
if not self.dag:
raise AirflowException(f'Operator {self} has not been assigned to a DAG yet')
return [self.dag.get_task(tid) for tid in self.upstream_task_ids]
def get_direct_relative_ids(self, upstream: bool = False) -> Set[str]:
"""
Get set of the direct relative ids to the current task, upstream or
downstream.
"""
if upstream:
return self.upstream_task_ids
else:
return self.downstream_task_ids
def get_direct_relatives(self, upstream: bool = False) -> Iterable["DAGNode"]:
"""
Get list of the direct relatives to the current task, upstream or
downstream.
"""
if upstream:
return self.upstream_list
else:
return self.downstream_list
def serialize_for_task_group(self) -> Tuple[DagAttributeTypes, Any]:
"""This is used by SerializedTaskGroup to serialize a task group's content."""
raise NotImplementedError()
def _iter_all_mapped_downstreams(self) -> Iterator["MappedOperator"]:
"""Return mapped nodes that are direct dependencies of the current task.
For now, this walks the entire DAG to find mapped nodes that have the
current task as an upstream. We cannot use ``downstream_list`` since it
only contains operators, not task groups. In the future, we should
provide a way to record all of a DAG node's downstream nodes instead.
Note that this does not guarantee the returned tasks actually use the
current task for task mapping; it only checks that those tasks are mapped
operators and are downstream of the current task.
To get a list of tasks that use the current task for task mapping, use
:meth:`iter_mapped_dependants` instead.
"""
from airflow.models.mappedoperator import MappedOperator
from airflow.utils.task_group import TaskGroup
def _walk_group(group: TaskGroup) -> Iterable[Tuple[str, DAGNode]]:
"""Recursively walk children in a task group.
This yields all direct children (including both tasks and task
groups), and all children of any task groups.
"""
for key, child in group.children.items():
yield key, child
if isinstance(child, TaskGroup):
yield from _walk_group(child)
tg = self.task_group
if not tg:
raise RuntimeError("Cannot check for mapped dependants when not attached to a DAG")
for key, child in _walk_group(tg):
if key == self.node_id:
continue
if not isinstance(child, MappedOperator):
continue
if self.node_id in child.upstream_task_ids:
yield child
def iter_mapped_dependants(self) -> Iterator["MappedOperator"]:
"""Return mapped nodes that depend on the current task for expansion.
For now, this walks the entire DAG to find mapped nodes that have the
current task as an upstream. We cannot use ``downstream_list`` since it
only contains operators, not task groups. In the future, we should
provide a way to record all of a DAG node's downstream nodes instead.
"""
return (
downstream
for downstream in self._iter_all_mapped_downstreams()
if any(p.node_id == self.node_id for p in downstream.iter_mapped_dependencies())
)
|
the-stack_0_24558
|
from __future__ import print_function
import numpy as np
import itertools
from numpy.testing import (assert_equal,
assert_almost_equal,
assert_array_equal,
assert_array_almost_equal,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
from pytest import warns as assert_warns
from scipy.spatial import SphericalVoronoi, distance
from scipy.spatial import _spherical_voronoi as spherical_voronoi
from scipy.spatial.transform import Rotation
from scipy.optimize import linear_sum_assignment
TOL = 1E-10
class TestSphericalVoronoi(object):
def setup_method(self):
self.points = np.array([
[-0.78928481, -0.16341094, 0.59188373],
[-0.66839141, 0.73309634, 0.12578818],
[0.32535778, -0.92476944, -0.19734181],
[-0.90177102, -0.03785291, -0.43055335],
[0.71781344, 0.68428936, 0.12842096],
[-0.96064876, 0.23492353, -0.14820556],
[0.73181537, -0.22025898, -0.6449281],
[0.79979205, 0.54555747, 0.25039913]]
)
# Issue #9386
self.hemisphere_points = np.array([
[0.88610999, -0.42383021, 0.18755541],
[0.51980039, -0.72622668, 0.4498915],
[0.56540011, -0.81629197, -0.11827989],
[0.69659682, -0.69972598, 0.15854467]])
# Issue #8859
phi = np.linspace(0, 2 * np.pi, 10, endpoint=False) # azimuth angle
theta = np.linspace(0.001, np.pi * 0.4, 5) # polar angle
theta = theta[np.newaxis, :].T
phiv, thetav = np.meshgrid(phi, theta)
phiv = np.reshape(phiv, (50, 1))
thetav = np.reshape(thetav, (50, 1))
x = np.cos(phiv) * np.sin(thetav)
y = np.sin(phiv) * np.sin(thetav)
z = np.cos(thetav)
self.hemisphere_points2 = np.concatenate([x, y, z], axis=1)
def test_constructor(self):
center = np.array([1, 2, 3])
radius = 2
s1 = SphericalVoronoi(self.points)
# user input checks in SphericalVoronoi now require
# the radius / center to match the generators so adjust
# accordingly here
s2 = SphericalVoronoi(self.points * radius, radius)
s3 = SphericalVoronoi(self.points + center, center=center)
s4 = SphericalVoronoi(self.points * radius + center, radius, center)
assert_array_equal(s1.center, np.array([0, 0, 0]))
assert_equal(s1.radius, 1)
assert_array_equal(s2.center, np.array([0, 0, 0]))
assert_equal(s2.radius, 2)
assert_array_equal(s3.center, center)
assert_equal(s3.radius, 1)
assert_array_equal(s4.center, center)
assert_equal(s4.radius, radius)
def test_vertices_regions_translation_invariance(self):
sv_origin = SphericalVoronoi(self.points)
center = np.array([1, 1, 1])
sv_translated = SphericalVoronoi(self.points + center, center=center)
assert_equal(sv_origin.regions, sv_translated.regions)
assert_array_almost_equal(sv_origin.vertices + center,
sv_translated.vertices)
def test_vertices_regions_scaling_invariance(self):
sv_unit = SphericalVoronoi(self.points)
sv_scaled = SphericalVoronoi(self.points * 2, 2)
assert_equal(sv_unit.regions, sv_scaled.regions)
assert_array_almost_equal(sv_unit.vertices * 2,
sv_scaled.vertices)
def test_old_radius_api(self):
sv_unit = SphericalVoronoi(self.points, radius=1)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`radius` is `None`")
sv = SphericalVoronoi(self.points, None)
assert_array_almost_equal(sv_unit.vertices, sv.vertices)
def test_old_radius_api_warning(self):
with assert_warns(DeprecationWarning):
SphericalVoronoi(self.points, None)
def test_sort_vertices_of_regions(self):
sv = SphericalVoronoi(self.points)
unsorted_regions = sv.regions
sv.sort_vertices_of_regions()
assert_equal(sorted(sv.regions), sorted(unsorted_regions))
def test_sort_vertices_of_regions_flattened(self):
expected = sorted([[0, 6, 5, 2, 3], [2, 3, 10, 11, 8, 7], [0, 6, 4, 1],
[4, 8, 7, 5, 6], [9, 11, 10], [2, 7, 5],
[1, 4, 8, 11, 9], [0, 3, 10, 9, 1]])
expected = list(itertools.chain(*sorted(expected)))
sv = SphericalVoronoi(self.points)
sv.sort_vertices_of_regions()
actual = list(itertools.chain(*sorted(sv.regions)))
assert_array_equal(actual, expected)
def test_sort_vertices_of_regions_dimensionality(self):
points = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0.5, 0.5, 0.5, 0.5]])
with pytest.raises(TypeError, match="three-dimensional"):
sv = spherical_voronoi.SphericalVoronoi(points)
sv.sort_vertices_of_regions()
def test_num_vertices(self):
# for any n >= 3, a spherical Voronoi diagram has 2n - 4
# vertices; this is a direct consequence of Euler's formula
# as explained by Dinis and Mamede (2010) Proceedings of the
# 2010 International Symposium on Voronoi Diagrams in Science
# and Engineering
sv = SphericalVoronoi(self.points)
expected = self.points.shape[0] * 2 - 4
actual = sv.vertices.shape[0]
assert_equal(actual, expected)
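# Sketch of the 2n - 4 count used above: in general position every Voronoi
# vertex on the sphere has degree 3, so 2E = 3V; there is one face per
# generator, so F = n. Euler's formula V - E + F = 2 then gives
# V - 3V/2 + n = 2, i.e. V = 2n - 4.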
def test_voronoi_circles(self):
sv = spherical_voronoi.SphericalVoronoi(self.points)
for vertex in sv.vertices:
distances = distance.cdist(sv.points, np.array([vertex]))
closest = np.array(sorted(distances)[0:3])
assert_almost_equal(closest[0], closest[1], 7, str(vertex))
assert_almost_equal(closest[0], closest[2], 7, str(vertex))
def test_duplicate_point_handling(self):
# an exception should be raised for degenerate generators
# related to Issue# 7046
self.degenerate = np.concatenate((self.points, self.points))
with assert_raises(ValueError):
spherical_voronoi.SphericalVoronoi(self.degenerate)
def test_incorrect_radius_handling(self):
# an exception should be raised if the radius provided
# cannot possibly match the input generators
with assert_raises(ValueError):
spherical_voronoi.SphericalVoronoi(self.points,
radius=0.98)
def test_incorrect_center_handling(self):
# an exception should be raised if the center provided
# cannot possibly match the input generators
with assert_raises(ValueError):
spherical_voronoi.SphericalVoronoi(self.points,
center=[0.1, 0, 0])
def test_single_hemisphere_handling(self):
# Test solution of Issues #9386, #8859
for points in [self.hemisphere_points, self.hemisphere_points2]:
sv = SphericalVoronoi(points)
triangles = sv._tri.points[sv._tri.simplices]
dots = np.einsum('ij,ij->i', sv.vertices, triangles[:, 0])
circumradii = np.arccos(np.clip(dots, -1, 1))
assert np.max(circumradii) > np.pi / 2
def test_rank_deficient(self):
# rank-1 input cannot be triangulated
points = np.array([[-1, 0, 0], [1, 0, 0]])
with pytest.raises(ValueError, match="Rank of input points"):
spherical_voronoi.SphericalVoronoi(points)
@pytest.mark.parametrize("n", [8, 15, 21])
@pytest.mark.parametrize("radius", [0.5, 1, 2])
@pytest.mark.parametrize("center", [(0, 0, 0), (1, 2, 3)])
def test_geodesic_input(self, n, radius, center):
U = Rotation.random(random_state=0).as_matrix()
thetas = np.linspace(0, 2 * np.pi, n, endpoint=False)
points = np.vstack([np.sin(thetas), np.cos(thetas), np.zeros(n)]).T
points = radius * points @ U
sv = SphericalVoronoi(points + center, radius=radius, center=center)
# each region must have 4 vertices
region_sizes = np.array([len(region) for region in sv.regions])
assert (region_sizes == 4).all()
regions = np.array(sv.regions)
# vertices are those between each pair of input points + north and
# south poles
vertices = sv.vertices - center
assert len(vertices) == n + 2
# verify that north and south poles are orthogonal to geodesic on which
# input points lie
poles = vertices[n:]
assert np.abs(np.dot(points, poles.T)).max() < 1E-10
for point, region in zip(points, sv.regions):
cosine = np.dot(vertices[region], point)
sine = np.linalg.norm(np.cross(vertices[region], point), axis=1)
arclengths = radius * np.arctan2(sine, cosine)
# test arc lengths to poles
assert_almost_equal(arclengths[[1, 3]], radius * np.pi / 2)
# test arc lengths to forward and backward neighbors
assert_almost_equal(arclengths[[0, 2]], radius * np.pi / n)
regions = sv.regions.copy()
sv.sort_vertices_of_regions()
assert regions == sv.regions
@pytest.mark.parametrize("dim", range(2, 7))
def test_higher_dimensions(self, dim):
n = 100
rng = np.random.RandomState(seed=0)
points = rng.randn(n, dim)
points /= np.linalg.norm(points, axis=1)[:, np.newaxis]
sv = SphericalVoronoi(points)
assert sv.vertices.shape[1] == dim
assert len(sv.regions) == n
# verify Euler characteristic
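# (sketch of why this holds: the simplices of sv._tri triangulate the boundary
# of the convex hull of the generators, which for points in general position on
# the sphere is homeomorphic to S^(dim-1); the alternating sum of face counts
# computed below is its Euler characteristic, and chi(S^m) = 1 + (-1)^m, so with
# m = dim - 1 the expected value is 1 + (-1)^(dim-1): 0 for even dim, 2 for odd)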
cell_counts = []
simplices = np.sort(sv._tri.simplices)
for i in range(1, dim + 1):
cells = []
for indices in itertools.combinations(range(dim), i):
cells.append(simplices[:, list(indices)])
cells = np.unique(np.concatenate(cells), axis=0)
cell_counts.append(len(cells))
expected_euler = 1 + (-1)**(dim-1)
actual_euler = sum([(-1)**i * e for i, e in enumerate(cell_counts)])
assert expected_euler == actual_euler
@pytest.mark.parametrize("dim", range(2, 7))
def test_cross_polytope_regions(self, dim):
# The hypercube is the dual of the cross-polytope, so the voronoi
# vertices of the cross-polytope lie on the points of the hypercube.
# generate points of the cross-polytope
points = np.concatenate((-np.eye(dim), np.eye(dim)))
sv = SphericalVoronoi(points)
assert all([len(e) == 2**(dim - 1) for e in sv.regions])
# generate points of the hypercube
expected = np.vstack(list(itertools.product([-1, 1], repeat=dim)))
expected = expected.astype(float) / np.sqrt(dim)
# test that Voronoi vertices are correctly placed
dist = distance.cdist(sv.vertices, expected)
res = linear_sum_assignment(dist)
assert dist[res].sum() < TOL
@pytest.mark.parametrize("dim", range(2, 4))
def test_hypercube_regions(self, dim):
# The cross-polytope is the dual of the hypercube, so the voronoi
# vertices of the hypercube lie on the points of the cross-polytope.
# generate points of the hypercube
points = np.vstack(list(itertools.product([-1, 1], repeat=dim)))
points = points.astype(float) / np.sqrt(dim)
sv = SphericalVoronoi(points)
# generate points of the cross-polytope
expected = np.concatenate((-np.eye(dim), np.eye(dim)))
# test that Voronoi vertices are correctly placed
dist = distance.cdist(sv.vertices, expected)
res = linear_sum_assignment(dist)
assert dist[res].sum() < TOL
|
the-stack_0_24561
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import hashlib
import defusedxml.ElementTree as ET
from defusedxml import defuse_stdlib
from defusedxml.minidom import parseString
from mo.front.common.partial_infer.utils import unmask_shape, is_fully_defined
from mo.graph.graph import *
from mo.middle.passes.convert_data_type import np_data_type_to_precision
from mo.utils.unsupported_ops import UnsupportedOps
from mo.utils.utils import refer_to_faq_msg
from mo.utils.version import get_version
# defuse_stdlib provides a patched version of xml.etree.ElementTree which allows using objects from
# xml.etree.ElementTree in a safe manner without importing the unsafe xml.etree.ElementTree directly
ET_defused = defuse_stdlib()[ET]
Element = ET_defused.Element
SubElement = ET_defused.SubElement
tostring = ET_defused.tostring
def serialize_constants(graph: Graph, bin_file_name: str, data_type=np.float32):
"""
Find all data constants that have output edges with a 'bin' attribute.
Serialize the content of such constants to a binary file named bin_file_name in
raw format. Save the offset and length of the serialized area in the file as the 'offset'
and 'size' attributes of the data node.
Args:
@graph: input graph with op and data nodes
@bin_file_name: path to file to write blobs to
@data_type: numpy data type to convert all blob elements to
"""
bin_hashes = {}
with open(bin_file_name, 'wb') as bin_file:
serialize_constants_recursively(graph, bin_file, data_type, bin_hashes)
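# Illustrative sketch (not part of the Model Optimizer API): once constants have
# been serialized, a blob can be read back from the .bin file using the recorded
# 'offset' and 'size' attributes, assuming its dtype is known:
#
#     import numpy as np
#     with open(bin_file_name, 'rb') as f:
#         f.seek(offset)
#         blob = np.frombuffer(f.read(size), dtype=np.float32)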
def update_offset_size_in_const_node(node: Node):
assert node.kind == 'data'
for consumer in node.out_nodes():
if consumer.type != 'Const':
continue
assert not consumer.has_valid('offset')
assert not consumer.has_valid('size')
consumer['offset'] = node.offset
consumer['size'] = node.size
def serialize_constants_recursively(graph: Graph, bin_file, data_type, bin_hashes):
nodes = sorted(graph.nodes())
for node in nodes:
node = Node(graph, node)
if node.kind == 'data' and node.value is not None and \
any('bin' in d for u, v, d in graph.out_edges(node.node, data=True)):
# avoid array copying while taking hash
blob = node.value if node.value.ndim > 0 else node.value.reshape((1))
assert is_fully_defined(blob), 'The constant value cannot contain dynamic values'
if isinstance(blob, np.ma.masked_array):
blob = np.ma.getdata(blob)
blob_hash = hashlib.sha512(np.ascontiguousarray(blob).view(np.uint8)).hexdigest()
if blob_hash in bin_hashes and np.array_equal(blob, bin_hashes[blob_hash]['blob']):
graph.node[node.node]['offset'] = bin_hashes[blob_hash]['offset']
graph.node[node.node]['size'] = bin_hashes[blob_hash]['size']
graph.node[node.node]['blob_precision'] = np_data_type_to_precision(blob.dtype)
update_offset_size_in_const_node(node)
else:
start = bin_file.tell()
blob.tofile(bin_file)
end = bin_file.tell()
graph.node[node.node]['offset'] = start
graph.node[node.node]['size'] = end - start
graph.node[node.node]['blob_precision'] = np_data_type_to_precision(blob.dtype)
bin_hashes[blob_hash] = {'offset': graph.node[node.node]['offset'],
'size': graph.node[node.node]['size'], 'blob': blob}
update_offset_size_in_const_node(node)
assert (blob.dtype.itemsize * np.prod(node.shape) == end - start) or \
node.has_valid('force_shape'), node.attrs()
log.debug(
"Detected binary for graph: '{}', node: '{}', id: {}, shape: '{}', offset: '{}', size: '{}'".format(
graph, node.soft_get('name'), node.id, node.shape, node.offset, node.size))
# separate loop for sub-graph to dump them after all blobs for more natural blob offset ordering
# TODO: implement strict order for all blobs in entire IR
for node in nodes:
node = Node(graph, node)
# Dump blobs recursively if sub-graphs are present in the node
if node.has_valid('sub_graphs'):
for sub_graph_attr_name in node.sub_graphs:
sub_graph = node[sub_graph_attr_name]
serialize_constants_recursively(sub_graph, bin_file, data_type, bin_hashes)
def serialize_mean_image(bin_file_name: str, mean_data=[]):
with open(bin_file_name, 'ab') as bin_file:
mean_offset = []
mean_size = []
for x in range(len(mean_data)):
start = bin_file.tell()
bin_file.write(mean_data[x][:])
end = bin_file.tell()
mean_offset.append(start)
mean_size.append(end - start)
return mean_offset, mean_size
def xml_shape(shape: np.ndarray, element: Element):
for d in unmask_shape(shape):
if d < -1:
raise Error('The value "{}" for shape is not a valid value.'.format(d))
dim = SubElement(element, 'dim')
if int(d) != d:
raise Error('The value "{}" for shape is not an integer.'.format(d))
if not isinstance(d, np.int64):
log.warning('The shape element is not an np.int64 value. Converting the value "{}" to an integer'.format(d))
d = int(d)
dim.text = str(d)
def xml_ports(node: Node, element: Element, edges: Element):
# input ports
inputs = None # will create input section only if at least one input is available
for u, d in node.get_sorted_inputs():
if 'bin' not in d and ('xml_skip' not in d or not d['xml_skip']):
if inputs is None:
inputs = SubElement(element, 'input')
port = SubElement(inputs, 'port')
port.set('id', str(d['in']))
assert node.graph.node[u]['shape'] is not None, 'Input shape is not calculated properly for node {}'.format(
node.id)
xml_shape(node.graph.node[u]['shape'], port)
# u is a data node that has a single producer, let's find it
assert (node.graph.node[u]['kind'] == 'data')
in_nodes = list(node.graph.in_edges(u, data=True))
assert (len(in_nodes) <= 1)
if len(in_nodes) == 1:
src, _, out_attrs = in_nodes[0]
edge = SubElement(edges, 'edge')
edge.set('from-layer', str(src))
edge.set('from-port', str(out_attrs['out']))
edge.set('to-layer', str(node.node))
edge.set('to-port', str(d['in']))
# port.set('precision', np_data_type_to_precision(node['_in_port_precision'][d['in']]))
# output ports
outputs = None
for v, d in node.get_sorted_outputs():
if 'xml_skip' not in d or not d['xml_skip']:
if outputs is None:
outputs = SubElement(element, 'output')
port = SubElement(outputs, 'port')
port.set('id', str(d['out']))
# we need to check the operation type: if it is a Const op, we don't renumber its out ports
# because they are already counted from zero
port_id = d['out'] - len(node.in_nodes()) if node.type != 'Const' else d['out']
data_type = node.out_port(port_id).get_data_type()
assert data_type is not None, 'The precision is not defined for the output port {} of node {}' \
''.format(port_id, node.soft_get('name'))
port.set('precision', node.soft_get('force_type', np_data_type_to_precision(data_type)))
assert node.graph.node[v]['shape'] is not None, 'Output shape is not calculated properly for node {}' \
''.format(node.id)
tensor_names = node.out_port(port_id).get_tensor_names(port_renumber=True)
if tensor_names:
port.set('names', ','.join(tensor_names))
xml_shape(node.graph.node[v]['shape'], port)
def xml_consts(graph: Graph, node: Node, element: Element):
blobs = None # sub-element that will be created on-demand
for u, d in node.get_sorted_inputs():
if 'bin' in d and (node.type != 'Const'):
if not blobs:
blobs = SubElement(element, 'blobs')
const = SubElement(blobs, d['bin'])
try:
const.set('offset', str(graph.node[u]['offset']))
const.set('size', str(graph.node[u]['size']))
const.set('precision', graph.node[u]['blob_precision'])
except Exception as e:
raise Error('Unable to access binary attributes ("offset" and/or "size") for blobs for node {}. '
'Details: {}'.format(node.soft_get('name'), e))
def soft_get(node, attr):
""" If node has soft_get callable member, returns node.soft_get(attr), else return <SUB-ELEMENT> """
return node.soft_get(attr) if hasattr(node, 'soft_get') and callable(node.soft_get) else '<SUB-ELEMENT>'
def serialize_element(
graph: Graph,
node,
schema: list,
parent_element: Element,
edges: Element,
unsupported):
name, attrs, subelements = schema
element = SubElement(parent_element, name)
for attr in attrs:
if isinstance(attr, tuple):
key = attr[0]
try:
if callable(attr[1]):
value = attr[1](node)
else:
value = node[attr[1]] if attr[1] in node else None
except TypeError as e:
raise Error('Unable to extract {} from layer {}', key, soft_get(node, 'name')) from e
except Exception as e:
raise Error(
'Cannot emit value for attribute {} for layer {}. '
'Internal attribute template: {}.',
key,
soft_get(node, 'name'),
attr
) from e
elif isinstance(attr, dict):
node_attrs = node.graph.node[node.id] if isinstance(node, Node) else node
for key in attr.keys():
if key in node_attrs:
for k, v in node_attrs[key].items():
element.set(k, str(v))
continue
else:
key = attr
value = node[attr] if attr in node else None
if value is not None:
element.set(key, str(value))
serialize_node_attributes(graph, node, subelements, element, edges, unsupported)
if len(element.attrib) == 0 and len(list(element)) == 0:
parent_element.remove(element)
def serialize_meta_list(graph, node, schema, element, edges, unsupported):
_, list_accessor, sub_schema = schema
items = list_accessor(node) # this is a list of dictionary-like objects
for item in items:
serialize_node_attributes(graph, item, [sub_schema], element, edges, unsupported)
def serialize_runtime_info(node, parent_element: Element):
if 'rt_info' not in node:
return
rt_info = SubElement(parent_element, 'rt_info')
for (name, version), info_elem in node.rt_info.info.items():
attribute = SubElement(rt_info, 'attribute')
attribute.set('name', name)
attribute.set('version', str(version))
params = info_elem.serialize(node)
if len(params) == 0:
rt_info.remove(attribute)
continue
for key, value in params.items():
attribute.set(key, value)
if len(rt_info.attrib) == 0 and len(list(rt_info)) == 0:
parent_element.remove(rt_info)
def serialize_node_attributes(
graph: Graph, # the current network graph
node, # dictionary-like object that should be serialized
schema: list,
parent_element: Element,
edges: Element,
unsupported):
# the Result op may be marked so it should not appear in the IR. For example, refer to transformation
# model-optimizer/extensions/back/TopKNormalizer.py
if isinstance(node, Node) and node.soft_get('type') == 'Result' and node.has_and_set('keep_output_port'):
return
try:
for s in schema:
if not isinstance(s, tuple):
if s == '@ports':
try:
# TODO make sure that edges are generated regardless of the existence of @ports
xml_ports(node, parent_element, edges)
except Exception as e:
raise Error(('Unable to create ports for node with id {}. ' +
refer_to_faq_msg(3)).format(node.id)) from e
elif s == '@consts':
xml_consts(graph, node, parent_element)
elif s == '@runtime_info':
serialize_runtime_info(node, parent_element)
else:
log.warning('Unknown xml schema tag: {}'.format(s))
else:
name = s[0]
if name == '@list':
serialize_meta_list(graph, node, s, parent_element, edges, unsupported)
elif name == '@network':
serialize_network(node[s[1]], parent_element, unsupported)
else:
serialize_element(graph, node, s, parent_element, edges, unsupported)
except Exception as e:
raise Error(
'Error while emitting attributes for layer {} (id = {}). It usually means that there is unsupported '
'pattern around this node or unsupported combination of attributes.',
soft_get(node, 'name'),
node.id
) from e
def create_pre_process_block_for_image(net: Element, ref_layer_names: list, mean_offset: tuple,
mean_size: tuple):
pre_process = SubElement(net, 'pre-process')
pre_process.set('mean-precision', 'FP32')  # TODO: consider whether FP16 mean values should be emitted
# TODO: extend it for several inputs
pre_process.set('reference-layer-name', ref_layer_names[0])
for idx in range(len(mean_size)):
channel_xml = SubElement(pre_process, 'channel')
channel_xml.set('id', str(idx))
mean_xml = SubElement(channel_xml, 'mean')
mean_xml.set('offset', str(mean_offset[idx]))
mean_xml.set('size', str(mean_size[idx]))
def create_pre_process_block(net, ref_layer_name, means, scales=None):
"""
Generates the pre-process block for the IR XML
Args:
net: root XML element
ref_layer_name: name of the layer where it is referenced to
means: tuple of values
scales: tuple of values
Returns:
pre-process XML element
"""
pre_process = SubElement(net, 'pre-process')
pre_process.set('reference-layer-name', ref_layer_name)
for idx in range(len(means)):
channel_xml = SubElement(pre_process, 'channel')
channel_xml.set('id', str(idx))
mean_xml = SubElement(channel_xml, 'mean')
mean_xml.set('value', str(means[idx]))
if scales:
scale_xml = SubElement(channel_xml, 'scale')
scale_xml.set('value', str(scales[idx]))
return pre_process
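# Illustrative sketch of the XML produced for means=(104.0, 117.0) and no scales
# (pretty-printed here; attribute values are examples only):
#
#     <pre-process reference-layer-name="data">
#         <channel id="0"><mean value="104.0"/></channel>
#         <channel id="1"><mean value="117.0"/></channel>
#     </pre-process>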
def add_quantization_statistics(graph, net_element):
if 'statistics' in graph.graph:
stats = SubElement(net_element, 'statistics')
for tensor, interval in graph.graph['statistics'].items():
layer = SubElement(stats, 'layer')
name = SubElement(layer, 'name')
name.text = tensor
min = SubElement(layer, 'min')
min.text = interval['min']
max = SubElement(layer, 'max')
max.text = interval['max']
log.info('Statistics were inserted to IR')
def add_quantization_info_section(net: Element, meta_info: dict):
if 'quantization_parameters' in meta_info:
parameters = meta_info['quantization_parameters']
quant_params = SubElement(net, 'quantization_parameters')
config = SubElement(quant_params, 'config')
config.text = parameters['config']
version = SubElement(quant_params, 'version')
version.set('value', parameters['version'])
cli_params = SubElement(quant_params, 'cli_params')
cli_params.set('value', parameters['cli_params'])
def add_meta_data(net: Element, meta_info: dict):
if meta_info == {}:
log.warning('`meta_info` is not provided, so the IR will not contain the corresponding section.')
else:
meta = SubElement(net, 'meta_data')
SubElement(meta, 'MO_version').set('value', get_version())
parameters = SubElement(meta, 'cli_parameters')
[SubElement(parameters, str(key)).set('value', str(meta_info[key])) for key in sorted(meta_info.keys()) if
key not in ('unset', 'quantization_parameters')]
if 'unset' in meta_info:
SubElement(parameters, 'unset').set('unset_cli_parameters', ', '.join(sorted(meta_info['unset'])))
def serialize_network(graph, net_element, unsupported):
layers = SubElement(net_element, 'layers')
edges = SubElement(net_element, 'edges')
if graph is None:
return
nodes = sorted(graph.nodes())
for node in nodes:
node = Node(graph, node)
if node.kind == 'op' and (not node.has('type') or node.type is None):
unsupported.add(node)
continue
if not node.has('IE'):
continue
try:
serialize_node_attributes(graph, node, node.IE, layers, edges, unsupported)
except Error as e:
raise Error(str(e).replace('<SUB-ELEMENT>', '{} (id = {})'.format(node.soft_get('name'), node.id))) from e
def generate_ie_ir(graph: Graph, file_name: str, input_names: tuple = (), mean_offset: tuple = (),
mean_size: tuple = (), meta_info: dict = dict()):
"""
Extracts IE/IR attributes from kind='op' nodes in three ways:
(1) the node.IE XML schema, which maps existing node attributes to generated XML elements
(2) input/output edges that don't have a 'bin' attribute are transformed into input/output ports
(3) input edges that have a 'bin' attribute are handled in a special way, e.g. as weights/biases
Args:
graph: nx graph with FW-independent model
file_name: name of the resulting IR
input_names: names of the input layers of the topology, referenced from the pre-processing block if any
mean_offset: offsets in the binary file where the mean file values start
mean_size: sizes of the serialized mean file values
meta_info: dictionary of CLI parameters and other metadata written to the meta_data section
"""
net = Element('net')
net.set('name', graph.name)
net.set('version', str((graph.graph['ir_version'])))
if mean_size or mean_offset:
create_pre_process_block_for_image(net, input_names, mean_offset, mean_size)
if 'mean_values' in graph.graph.keys():
for input_name, values in graph.graph['mean_values'].items():
create_pre_process_block(net, input_name, values)
unsupported = UnsupportedOps(graph)
serialize_network(graph, net, unsupported)
add_quantization_statistics(graph, net)
add_meta_data(net, meta_info)
add_quantization_info_section(net, meta_info)
xml_string = tostring(net)
xml_doc = parseString(xml_string)
pretty_xml_as_string = xml_doc.toprettyxml()
if len(unsupported.unsupported):
log.debug('Partially correct IR XML:\n{}'.format(pretty_xml_as_string))
unsupported.report(log.error, "List of operations that cannot be converted to Inference Engine IR:")
raise Error('Part of the nodes was not converted to IR. Stopped. ' +
refer_to_faq_msg(24))
with open(file_name, 'wb') as file:
file.write(bytes(pretty_xml_as_string, "UTF-8"))
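# Minimal usage sketch (hypothetical graph object; values are illustrative only).
# serialize_constants() is expected to have been called beforehand so that the
# 'offset'/'size' attributes consumed by xml_consts() are present:
#
#     generate_ie_ir(graph, file_name='model.xml', meta_info={'cli_note': 'example'})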
def port_renumber(graph: Graph):
for node in graph.get_op_nodes():
base = 0
# we need to check the operation type: if it is a Const op, we don't renumber its ports, so its out ports stay counted from zero
if node.soft_get('type') != 'Const':
for u, d in node.get_sorted_inputs():
d['in'] = base
base += 1
for v, d in node.get_sorted_outputs():
d['out'] = base
base += 1
def append_ir_info(file: str, meta_info: dict = dict(), mean_data: [list, None] = None, input_names: list = None):
path_to_xml = file + ".xml"
path_to_bin = file + ".bin"
et = ET.parse(path_to_xml)
net = et.getroot()
if mean_data:
mean_offset, mean_size = serialize_mean_image(path_to_bin, mean_data=mean_data)
create_pre_process_block_for_image(net, input_names, mean_offset, mean_size)
add_meta_data(net, meta_info)
for elem in et.iter():
if elem.text:
elem.text = elem.text.strip()
if elem.tail:
elem.tail = elem.tail.strip()
pretty_xml_as_string = parseString(tostring(net)).toprettyxml()
with open(path_to_xml, 'wb') as file:
file.write(bytes(pretty_xml_as_string, "UTF-8"))
|
the-stack_0_24562
|
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import (
Any,
Callable,
Dict,
Generator,
Generic,
Literal,
List,
Optional,
Union,
Set,
Tuple,
TypeVar,
Type,
TYPE_CHECKING,
overload,
)
import asyncio
import functools
import inspect
import datetime
import types
import discord
from .errors import *
from ...errors import *
from .cooldowns import Cooldown, BucketType, CooldownMapping, MaxConcurrency, DynamicCooldownMapping
from .converter import run_converters, get_converter, Greedy
from ...commands import _BaseCommand, slash_command, user_command, message_command
from .cog import Cog
from .context import Context
if TYPE_CHECKING:
from typing_extensions import Concatenate, ParamSpec, TypeGuard
from discord.message import Message
from ._types import (
Coro,
CoroFunc,
Check,
Hook,
Error,
)
__all__ = (
'Command',
'Group',
'GroupMixin',
'command',
'group',
'has_role',
'has_permissions',
'has_any_role',
'check',
'check_any',
'before_invoke',
'after_invoke',
'bot_has_role',
'bot_has_permissions',
'bot_has_any_role',
'cooldown',
'dynamic_cooldown',
'max_concurrency',
'dm_only',
'guild_only',
'is_owner',
'is_nsfw',
'has_guild_permissions',
'bot_has_guild_permissions',
'slash_command',
'user_command',
'message_command'
)
MISSING: Any = discord.utils.MISSING
T = TypeVar('T')
CogT = TypeVar('CogT', bound='Cog')
CommandT = TypeVar('CommandT', bound='Command')
ContextT = TypeVar('ContextT', bound='Context')
# CHT = TypeVar('CHT', bound='Check')
GroupT = TypeVar('GroupT', bound='Group')
HookT = TypeVar('HookT', bound='Hook')
ErrorT = TypeVar('ErrorT', bound='Error')
if TYPE_CHECKING:
P = ParamSpec('P')
else:
P = TypeVar('P')
def unwrap_function(function: Callable[..., Any]) -> Callable[..., Any]:
partial = functools.partial
while True:
if hasattr(function, '__wrapped__'):
function = function.__wrapped__
elif isinstance(function, partial):
function = function.func
else:
return function
def get_signature_parameters(function: Callable[..., Any], globalns: Dict[str, Any]) -> Dict[str, inspect.Parameter]:
signature = inspect.signature(function)
params = {}
cache: Dict[str, Any] = {}
eval_annotation = discord.utils.evaluate_annotation
for name, parameter in signature.parameters.items():
annotation = parameter.annotation
if annotation is parameter.empty:
params[name] = parameter
continue
if annotation is None:
params[name] = parameter.replace(annotation=type(None))
continue
annotation = eval_annotation(annotation, globalns, globalns, cache)
if annotation is Greedy:
raise TypeError('Unparameterized Greedy[...] is disallowed in signature.')
params[name] = parameter.replace(annotation=annotation)
return params
def wrap_callback(coro):
@functools.wraps(coro)
async def wrapped(*args, **kwargs):
try:
ret = await coro(*args, **kwargs)
except CommandError:
raise
except asyncio.CancelledError:
return
except Exception as exc:
raise CommandInvokeError(exc) from exc
return ret
return wrapped
def hooked_wrapped_callback(command, ctx, coro):
@functools.wraps(coro)
async def wrapped(*args, **kwargs):
try:
ret = await coro(*args, **kwargs)
except CommandError:
ctx.command_failed = True
raise
except asyncio.CancelledError:
ctx.command_failed = True
return
except Exception as exc:
ctx.command_failed = True
raise CommandInvokeError(exc) from exc
finally:
if command._max_concurrency is not None:
await command._max_concurrency.release(ctx)
await command.call_after_hooks(ctx)
return ret
return wrapped
class _CaseInsensitiveDict(dict):
def __contains__(self, k):
return super().__contains__(k.casefold())
def __delitem__(self, k):
return super().__delitem__(k.casefold())
def __getitem__(self, k):
return super().__getitem__(k.casefold())
def get(self, k, default=None):
return super().get(k.casefold(), default)
def pop(self, k, default=None):
return super().pop(k.casefold(), default)
def __setitem__(self, k, v):
super().__setitem__(k.casefold(), v)
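# Behaviour sketch: keys are casefolded on every access, making lookups
# case-insensitive:
#
#     d = _CaseInsensitiveDict()
#     d['Ping'] = 1
#     d['PING']        # -> 1
#     'ping' in d      # -> True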
class Command(_BaseCommand, Generic[CogT, P, T]):
r"""A class that implements the protocol for a bot text command.
These are not created manually; instead, they are created via the
decorator or functional interface.
Attributes
-----------
name: :class:`str`
The name of the command.
callback: :ref:`coroutine <coroutine>`
The coroutine that is executed when the command is called.
help: Optional[:class:`str`]
The long help text for the command.
brief: Optional[:class:`str`]
The short help text for the command.
usage: Optional[:class:`str`]
A replacement for arguments in the default help text.
aliases: Union[List[:class:`str`], Tuple[:class:`str`]]
The list of aliases the command can be invoked under.
enabled: :class:`bool`
A boolean that indicates if the command is currently enabled.
If the command is invoked while it is disabled, then
:exc:`.DisabledCommand` is raised to the :func:`.on_command_error`
event. Defaults to ``True``.
parent: Optional[:class:`Group`]
The parent group that this command belongs to. ``None`` if there
isn't one.
cog: Optional[:class:`Cog`]
The cog that this command belongs to. ``None`` if there isn't one.
checks: List[Callable[[:class:`.Context`], :class:`bool`]]
A list of predicates that verifies if the command could be executed
with the given :class:`.Context` as the sole parameter. If an exception
is necessary to be thrown to signal failure, then one inherited from
:exc:`.CommandError` should be used. Note that if the checks fail then
:exc:`.CheckFailure` exception is raised to the :func:`.on_command_error`
event.
description: :class:`str`
The message prefixed into the default help command.
hidden: :class:`bool`
If ``True``\, the default help command does not show this in the
help output.
rest_is_raw: :class:`bool`
If ``False`` and a keyword-only argument is provided then the keyword
only argument is stripped and handled as if it were a regular argument
that handles :exc:`.MissingRequiredArgument` and default values in a
regular manner rather than passing the rest completely raw. If ``True``
then the keyword-only argument will pass in the rest of the arguments
in a completely raw manner. Defaults to ``False``.
invoked_subcommand: Optional[:class:`Command`]
The subcommand that was invoked, if any.
require_var_positional: :class:`bool`
If ``True`` and a variadic positional argument is specified, requires
the user to specify at least one argument. Defaults to ``False``.
.. versionadded:: 1.5
ignore_extra: :class:`bool`
If ``True``\, ignores extraneous strings passed to a command if all its
requirements are met (e.g. ``?foo a b c`` when only expecting ``a``
and ``b``). Otherwise :func:`.on_command_error` and local error handlers
are called with :exc:`.TooManyArguments`. Defaults to ``True``.
cooldown_after_parsing: :class:`bool`
If ``True``\, cooldown processing is done after argument parsing,
which calls converters. If ``False`` then cooldown processing is done
first and then the converters are called second. Defaults to ``False``.
extras: :class:`dict`
A dict of user provided extras to attach to the Command.
.. note::
This object may be copied by the library.
.. versionadded:: 2.0
cooldown: Optional[:class:`Cooldown`]
The cooldown applied when the command is invoked. ``None`` if the command
doesn't have a cooldown.
.. versionadded:: 2.0
"""
__original_kwargs__: Dict[str, Any]
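# A typical definition uses the decorator interface exported by this module
# (names below are illustrative; inside a bot this is usually spelled
# ``@bot.command()`` or ``@commands.command()``):
#
#     @command(name='ping', help='Replies with pong.')
#     async def ping(ctx):
#         await ctx.send('pong')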
def __new__(cls: Type[CommandT], *args: Any, **kwargs: Any) -> CommandT:
# if you're wondering why this is done, it's because we need to ensure
# we have a complete original copy of **kwargs even for classes that
# mess with it by popping before delegating to the subclass __init__.
# In order to do this, we need to control the instance creation and
# inject the original kwargs through __new__ rather than doing it
# inside __init__.
self = super().__new__(cls)
# we do a shallow copy because it's probably the most common use case.
# this could potentially break if someone modifies a list or something
# while it's in movement, but for now this is the cheapest and
# fastest way to do what we want.
self.__original_kwargs__ = kwargs.copy()
return self
def __init__(self, func: Union[
Callable[Concatenate[CogT, ContextT, P], Coro[T]],
Callable[Concatenate[ContextT, P], Coro[T]],
], **kwargs: Any):
if not asyncio.iscoroutinefunction(func):
raise TypeError('Callback must be a coroutine.')
name = kwargs.get('name') or func.__name__
if not isinstance(name, str):
raise TypeError('Name of a command must be a string.')
self.name: str = name
self.callback = func
self.enabled: bool = kwargs.get('enabled', True)
help_doc = kwargs.get('help')
if help_doc is not None:
help_doc = inspect.cleandoc(help_doc)
else:
help_doc = inspect.getdoc(func)
if isinstance(help_doc, bytes):
help_doc = help_doc.decode('utf-8')
self.help: Optional[str] = help_doc
self.brief: Optional[str] = kwargs.get('brief')
self.usage: Optional[str] = kwargs.get('usage')
self.rest_is_raw: bool = kwargs.get('rest_is_raw', False)
self.aliases: Union[List[str], Tuple[str]] = kwargs.get('aliases', [])
self.extras: Dict[str, Any] = kwargs.get('extras', {})
if not isinstance(self.aliases, (list, tuple)):
raise TypeError("Aliases of a command must be a list or a tuple of strings.")
self.description: str = inspect.cleandoc(kwargs.get('description', ''))
self.hidden: bool = kwargs.get('hidden', False)
try:
checks = func.__commands_checks__
checks.reverse()
except AttributeError:
checks = kwargs.get('checks', [])
self.checks: List[Check] = checks
try:
cooldown = func.__commands_cooldown__
except AttributeError:
cooldown = kwargs.get('cooldown')
if cooldown is None:
buckets = CooldownMapping(cooldown, BucketType.default)
elif isinstance(cooldown, CooldownMapping):
buckets = cooldown
else:
raise TypeError("Cooldown must be an instance of CooldownMapping or None.")
self._buckets: CooldownMapping = buckets
try:
max_concurrency = func.__commands_max_concurrency__
except AttributeError:
max_concurrency = kwargs.get('max_concurrency')
self._max_concurrency: Optional[MaxConcurrency] = max_concurrency
self.require_var_positional: bool = kwargs.get('require_var_positional', False)
self.ignore_extra: bool = kwargs.get('ignore_extra', True)
self.cooldown_after_parsing: bool = kwargs.get('cooldown_after_parsing', False)
self.cog: Optional[CogT] = None
# bandaid for the fact that sometimes parent can be the bot instance
parent = kwargs.get('parent')
self.parent: Optional[GroupMixin] = parent if isinstance(parent, _BaseCommand) else None # type: ignore
self._before_invoke: Optional[Hook] = None
try:
before_invoke = func.__before_invoke__
except AttributeError:
pass
else:
self.before_invoke(before_invoke)
self._after_invoke: Optional[Hook] = None
try:
after_invoke = func.__after_invoke__
except AttributeError:
pass
else:
self.after_invoke(after_invoke)
@property
def callback(self) -> Union[
Callable[Concatenate[CogT, Context, P], Coro[T]],
Callable[Concatenate[Context, P], Coro[T]],
]:
return self._callback
@callback.setter
def callback(self, function: Union[
Callable[Concatenate[CogT, Context, P], Coro[T]],
Callable[Concatenate[Context, P], Coro[T]],
]) -> None:
self._callback = function
unwrap = unwrap_function(function)
self.module = unwrap.__module__
try:
globalns = unwrap.__globals__
except AttributeError:
globalns = {}
self.params = get_signature_parameters(function, globalns)
def add_check(self, func: Check) -> None:
"""Adds a check to the command.
This is the non-decorator interface to :func:`.check`.
.. versionadded:: 1.3
Parameters
-----------
func
The function that will be used as a check.
"""
self.checks.append(func)
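# Usage sketch (hypothetical check and command; the id is illustrative only):
#
#     def is_owner_check(ctx):
#         return ctx.author.id == 1234567890
#
#     some_command.add_check(is_owner_check)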
def remove_check(self, func: Check) -> None:
"""Removes a check from the command.
This function is idempotent and will not raise an exception
if the function is not in the command's checks.
.. versionadded:: 1.3
Parameters
-----------
func
The function to remove from the checks.
"""
try:
self.checks.remove(func)
except ValueError:
pass
def update(self, **kwargs: Any) -> None:
"""Updates the :class:`Command` instance with the updated attributes.
This works similarly to the :func:`.command` decorator in terms
of parameters in that they are passed to the :class:`Command` or
subclass constructors, sans the name and callback.
"""
self.__init__(self.callback, **dict(self.__original_kwargs__, **kwargs))
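# Usage sketch (hypothetical command instance):
#
#     existing_command.update(hidden=True, aliases=['p'])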
async def __call__(self, context: Context, *args: P.args, **kwargs: P.kwargs) -> T:
"""|coro|
Calls the internal callback that the command holds.
.. note::
This bypasses all mechanisms -- including checks, converters,
invoke hooks, cooldowns, etc. You must take care to pass
the proper arguments and types to this function.
.. versionadded:: 1.3
"""
if self.cog is not None:
return await self.callback(self.cog, context, *args, **kwargs) # type: ignore
else:
return await self.callback(context, *args, **kwargs) # type: ignore
def _ensure_assignment_on_copy(self, other: CommandT) -> CommandT:
other._before_invoke = self._before_invoke
other._after_invoke = self._after_invoke
if self.checks != other.checks:
other.checks = self.checks.copy()
if self._buckets.valid and not other._buckets.valid:
other._buckets = self._buckets.copy()
if self._max_concurrency != other._max_concurrency:
# _max_concurrency won't be None at this point
other._max_concurrency = self._max_concurrency.copy() # type: ignore
try:
other.on_error = self.on_error
except AttributeError:
pass
return other
def copy(self: CommandT) -> CommandT:
"""Creates a copy of this command.
Returns
--------
:class:`Command`
A new instance of this command.
"""
ret = self.__class__(self.callback, **self.__original_kwargs__)
return self._ensure_assignment_on_copy(ret)
def _update_copy(self: CommandT, kwargs: Dict[str, Any]) -> CommandT:
if kwargs:
kw = kwargs.copy()
kw.update(self.__original_kwargs__)
copy = self.__class__(self.callback, **kw)
return self._ensure_assignment_on_copy(copy)
else:
return self.copy()
async def dispatch_error(self, ctx: Context, error: Exception) -> None:
ctx.command_failed = True
cog = self.cog
try:
coro = self.on_error
except AttributeError:
pass
else:
injected = wrap_callback(coro)
if cog is not None:
await injected(cog, ctx, error)
else:
await injected(ctx, error)
try:
if cog is not None:
local = Cog._get_overridden_method(cog.cog_command_error)
if local is not None:
wrapped = wrap_callback(local)
await wrapped(ctx, error)
finally:
ctx.bot.dispatch('command_error', ctx, error)
async def transform(self, ctx: Context, param: inspect.Parameter) -> Any:
required = param.default is param.empty
converter = get_converter(param)
consume_rest_is_special = param.kind == param.KEYWORD_ONLY and not self.rest_is_raw
view = ctx.view
view.skip_ws()
# The greedy converter is simple -- it keeps going until it fails, in which case
# it undoes the view, leaving it ready for the next parameter to use instead
if isinstance(converter, Greedy):
if param.kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY):
return await self._transform_greedy_pos(ctx, param, required, converter.converter)
elif param.kind == param.VAR_POSITIONAL:
return await self._transform_greedy_var_pos(ctx, param, converter.converter)
else:
# if we're here, then it's a KEYWORD_ONLY param type
# since this is mostly useless, we'll helpfully transform Greedy[X]
# into just X and do the parsing that way.
converter = converter.converter
if view.eof:
if param.kind == param.VAR_POSITIONAL:
raise RuntimeError() # break the loop
if required:
if self._is_typing_optional(param.annotation):
return None
if hasattr(converter, '__commands_is_flag__') and converter._can_be_constructible():
return await converter._construct_default(ctx)
raise MissingRequiredArgument(param)
return param.default
previous = view.index
if consume_rest_is_special:
argument = view.read_rest().strip()
else:
try:
argument = view.get_quoted_word()
except ArgumentParsingError as exc:
if self._is_typing_optional(param.annotation):
view.index = previous
return None
else:
raise exc
view.previous = previous
# type-checker fails to narrow argument
return await run_converters(ctx, converter, argument, param) # type: ignore
async def _transform_greedy_pos(self, ctx: Context, param: inspect.Parameter, required: bool, converter: Any) -> Any:
view = ctx.view
result = []
while not view.eof:
# for use with a manual undo
previous = view.index
view.skip_ws()
try:
argument = view.get_quoted_word()
value = await run_converters(ctx, converter, argument, param) # type: ignore
except (CommandError, ArgumentParsingError):
view.index = previous
break
else:
result.append(value)
if not result and not required:
return param.default
return result
async def _transform_greedy_var_pos(self, ctx: Context, param: inspect.Parameter, converter: Any) -> Any:
view = ctx.view
previous = view.index
try:
argument = view.get_quoted_word()
value = await run_converters(ctx, converter, argument, param) # type: ignore
except (CommandError, ArgumentParsingError):
view.index = previous
raise RuntimeError() from None # break loop
else:
return value
@property
def clean_params(self) -> Dict[str, inspect.Parameter]:
"""Dict[:class:`str`, :class:`inspect.Parameter`]:
Retrieves the parameter dictionary without the context or self parameters.
Useful for inspecting signature.
"""
result = self.params.copy()
if self.cog is not None:
# first parameter is self
try:
del result[next(iter(result))]
except StopIteration:
raise ValueError("missing 'self' parameter") from None
try:
# first/second parameter is context
del result[next(iter(result))]
except StopIteration:
raise ValueError("missing 'context' parameter") from None
return result
@property
def full_parent_name(self) -> str:
""":class:`str`: Retrieves the fully qualified parent command name.
This is the base command name required to execute it. For example,
in ``?one two three`` the parent name would be ``one two``.
"""
entries = []
command = self
# command.parent is type-hinted as GroupMixin; some attributes are resolved via MRO
while command.parent is not None: # type: ignore
command = command.parent # type: ignore
entries.append(command.name) # type: ignore
return ' '.join(reversed(entries))
@property
def parents(self) -> List[Group]:
"""List[:class:`Group`]: Retrieves the parents of this command.
If the command has no parents then it returns an empty :class:`list`.
For example in commands ``?a b c test``, the parents are ``[c, b, a]``.
.. versionadded:: 1.1
"""
entries = []
command = self
while command.parent is not None: # type: ignore
command = command.parent # type: ignore
entries.append(command)
return entries
@property
def root_parent(self) -> Optional[Group]:
"""Optional[:class:`Group`]: Retrieves the root parent of this command.
If the command has no parents then it returns ``None``.
For example in commands ``?a b c test``, the root parent is ``a``.
"""
if not self.parent:
return None
return self.parents[-1]
@property
def qualified_name(self) -> str:
""":class:`str`: Retrieves the fully qualified command name.
This is the full parent name with the command name as well.
For example, in ``?one two three`` the qualified name would be
``one two three``.
"""
parent = self.full_parent_name
if parent:
return parent + ' ' + self.name
else:
return self.name
def __str__(self) -> str:
return self.qualified_name
async def _parse_arguments(self, ctx: Context) -> None:
ctx.args = [ctx] if self.cog is None else [self.cog, ctx]
ctx.kwargs = {}
args = ctx.args
kwargs = ctx.kwargs
view = ctx.view
iterator = iter(self.params.items())
if self.cog is not None:
# we have 'self' as the first parameter so just advance
# the iterator and resume parsing
try:
next(iterator)
except StopIteration:
raise discord.ClientException(f'Callback for {self.name} command is missing "self" parameter.')
# next we have the 'ctx' as the next parameter
try:
next(iterator)
except StopIteration:
raise discord.ClientException(f'Callback for {self.name} command is missing "ctx" parameter.')
for name, param in iterator:
ctx.current_parameter = param
if param.kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY):
transformed = await self.transform(ctx, param)
args.append(transformed)
elif param.kind == param.KEYWORD_ONLY:
# kwarg only param denotes "consume rest" semantics
if self.rest_is_raw:
converter = get_converter(param)
argument = view.read_rest()
kwargs[name] = await run_converters(ctx, converter, argument, param)
else:
kwargs[name] = await self.transform(ctx, param)
break
elif param.kind == param.VAR_POSITIONAL:
if view.eof and self.require_var_positional:
raise MissingRequiredArgument(param)
while not view.eof:
try:
transformed = await self.transform(ctx, param)
args.append(transformed)
except RuntimeError:
break
if not self.ignore_extra and not view.eof:
raise TooManyArguments('Too many arguments passed to ' + self.qualified_name)
async def call_before_hooks(self, ctx: Context) -> None:
# now that we're done preparing we can call the pre-command hooks
# first, call the command local hook:
cog = self.cog
if self._before_invoke is not None:
# should be cog if @commands.before_invoke is used
instance = getattr(self._before_invoke, '__self__', cog)
# __self__ only exists for methods, not functions
# however, if @command.before_invoke is used, it will be a function
if instance:
await self._before_invoke(instance, ctx) # type: ignore
else:
await self._before_invoke(ctx) # type: ignore
# call the cog local hook if applicable:
if cog is not None:
hook = Cog._get_overridden_method(cog.cog_before_invoke)
if hook is not None:
await hook(ctx)
# call the bot global hook if necessary
hook = ctx.bot._before_invoke
if hook is not None:
await hook(ctx)
async def call_after_hooks(self, ctx: Context) -> None:
cog = self.cog
if self._after_invoke is not None:
instance = getattr(self._after_invoke, '__self__', cog)
if instance:
await self._after_invoke(instance, ctx) # type: ignore
else:
await self._after_invoke(ctx) # type: ignore
# call the cog local hook if applicable:
if cog is not None:
hook = Cog._get_overridden_method(cog.cog_after_invoke)
if hook is not None:
await hook(ctx)
hook = ctx.bot._after_invoke
if hook is not None:
await hook(ctx)
def _prepare_cooldowns(self, ctx: Context) -> None:
if self._buckets.valid:
dt = ctx.message.edited_at or ctx.message.created_at
current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
bucket = self._buckets.get_bucket(ctx.message, current)
if bucket is not None:
retry_after = bucket.update_rate_limit(current)
if retry_after:
raise CommandOnCooldown(bucket, retry_after, self._buckets.type) # type: ignore
async def prepare(self, ctx: Context) -> None:
ctx.command = self
if not await self.can_run(ctx):
raise CheckFailure(f'The check functions for command {self.qualified_name} failed.')
if self._max_concurrency is not None:
# For this application, context can be duck-typed as a Message
await self._max_concurrency.acquire(ctx) # type: ignore
try:
if self.cooldown_after_parsing:
await self._parse_arguments(ctx)
self._prepare_cooldowns(ctx)
else:
self._prepare_cooldowns(ctx)
await self._parse_arguments(ctx)
await self.call_before_hooks(ctx)
except:
if self._max_concurrency is not None:
await self._max_concurrency.release(ctx) # type: ignore
raise
@property
def cooldown(self) -> Optional[Cooldown]:
return self._buckets._cooldown
def is_on_cooldown(self, ctx: Context) -> bool:
"""Checks whether the command is currently on cooldown.
Parameters
-----------
ctx: :class:`.Context`
The invocation context to use when checking the commands cooldown status.
Returns
--------
:class:`bool`
A boolean indicating if the command is on cooldown.
"""
if not self._buckets.valid:
return False
bucket = self._buckets.get_bucket(ctx.message)
dt = ctx.message.edited_at or ctx.message.created_at
current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
return bucket.get_tokens(current) == 0
def reset_cooldown(self, ctx: Context) -> None:
"""Resets the cooldown on this command.
Parameters
-----------
ctx: :class:`.Context`
The invocation context to reset the cooldown under.
"""
if self._buckets.valid:
bucket = self._buckets.get_bucket(ctx.message)
bucket.reset()
def get_cooldown_retry_after(self, ctx: Context) -> float:
"""Retrieves the amount of seconds before this command can be tried again.
.. versionadded:: 1.4
Parameters
-----------
ctx: :class:`.Context`
The invocation context to retrieve the cooldown from.
Returns
--------
:class:`float`
The amount of time left on this command's cooldown in seconds.
If this is ``0.0`` then the command isn't on cooldown.
"""
if self._buckets.valid:
bucket = self._buckets.get_bucket(ctx.message)
dt = ctx.message.edited_at or ctx.message.created_at
current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
return bucket.get_retry_after(current)
return 0.0
async def invoke(self, ctx: Context) -> None:
await self.prepare(ctx)
# terminate the invoked_subcommand chain.
# since we're in a regular command (and not a group) then
# the invoked subcommand is None.
ctx.invoked_subcommand = None
ctx.subcommand_passed = None
injected = hooked_wrapped_callback(self, ctx, self.callback)
await injected(*ctx.args, **ctx.kwargs)
async def reinvoke(self, ctx: Context, *, call_hooks: bool = False) -> None:
ctx.command = self
await self._parse_arguments(ctx)
if call_hooks:
await self.call_before_hooks(ctx)
ctx.invoked_subcommand = None
try:
await self.callback(*ctx.args, **ctx.kwargs) # type: ignore
except:
ctx.command_failed = True
raise
finally:
if call_hooks:
await self.call_after_hooks(ctx)
def error(self, coro: ErrorT) -> ErrorT:
"""A decorator that registers a coroutine as a local error handler.
A local error handler is an :func:`.on_command_error` event limited to
a single command. However, the :func:`.on_command_error` is still
invoked afterwards as the catch-all.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the local error handler.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The error handler must be a coroutine.')
self.on_error: Error = coro
return coro
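# A hypothetical usage sketch of the decorator above (the ``bot`` object and
# the command are assumptions, not part of this module):
#
#     @bot.command()
#     async def divide(ctx, a: int, b: int):
#         await ctx.send(str(a / b))
#
#     @divide.error
#     async def divide_error(ctx, error):
#         await ctx.send('Could not divide those numbers.')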
def has_error_handler(self) -> bool:
""":class:`bool`: Checks whether the command has an error handler registered.
.. versionadded:: 1.7
"""
return hasattr(self, 'on_error')
def before_invoke(self, coro: HookT) -> HookT:
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of setup required.
This pre-invoke hook takes a sole parameter, a :class:`.Context`.
See :meth:`.Bot.before_invoke` for more info.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the pre-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The pre-invoke hook must be a coroutine.')
self._before_invoke = coro
return coro
def after_invoke(self, coro: HookT) -> HookT:
"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean up database
connections or any type of cleanup required.
This post-invoke hook takes a sole parameter, a :class:`.Context`.
See :meth:`.Bot.after_invoke` for more info.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the post-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The post-invoke hook must be a coroutine.')
self._after_invoke = coro
return coro
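# A hypothetical sketch of the two hook decorators above; ``bot`` and
# ``acquire_connection`` are assumed helpers, not part of this module:
#
#     @bot.command()
#     async def profile(ctx):
#         ...
#
#     @profile.before_invoke
#     async def open_db(ctx):
#         ctx.db = await acquire_connection()  # assumed helper
#
#     @profile.after_invoke
#     async def close_db(ctx):
#         await ctx.db.close()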
@property
def cog_name(self) -> Optional[str]:
"""Optional[:class:`str`]: The name of the cog this command belongs to, if any."""
return type(self.cog).__cog_name__ if self.cog is not None else None
@property
def short_doc(self) -> str:
""":class:`str`: Gets the "short" documentation of a command.
By default, this is the :attr:`.brief` attribute.
If that lookup leads to an empty string then the first line of the
:attr:`.help` attribute is used instead.
"""
if self.brief is not None:
return self.brief
if self.help is not None:
return self.help.split('\n', 1)[0]
return ''
def _is_typing_optional(self, annotation: Union[T, Optional[T]]) -> TypeGuard[Optional[T]]:
return (
(getattr(annotation, '__origin__', None) is Union
or type(annotation) is getattr(types, "UnionType", Union))
and type(None) in annotation.__args__ # type: ignore
)
@property
def signature(self) -> str:
""":class:`str`: Returns a POSIX-like signature useful for help command output."""
if self.usage is not None:
return self.usage
params = self.clean_params
if not params:
return ''
result = []
for name, param in params.items():
greedy = isinstance(param.annotation, Greedy)
optional = False # postpone evaluation of if it's an optional argument
# for typing.Literal[...], typing.Optional[typing.Literal[...]], and Greedy[typing.Literal[...]], the
# parameter signature is a literal list of its values
annotation = param.annotation.converter if greedy else param.annotation
origin = getattr(annotation, '__origin__', None)
if not greedy and origin is Union:
none_cls = type(None)
union_args = annotation.__args__
optional = union_args[-1] is none_cls
if len(union_args) == 2 and optional:
annotation = union_args[0]
origin = getattr(annotation, '__origin__', None)
if origin is Literal:
name = '|'.join(f'"{v}"' if isinstance(v, str) else str(v) for v in annotation.__args__)
if param.default is not param.empty:
# We don't want None or '' to trigger the [name=value] case and instead it should
# do [name] since [name=None] or [name=] are not exactly useful for the user.
should_print = param.default if isinstance(param.default, str) else param.default is not None
if should_print:
result.append(f'[{name}={param.default}]' if not greedy else
f'[{name}={param.default}]...')
continue
else:
result.append(f'[{name}]')
elif param.kind == param.VAR_POSITIONAL:
if self.require_var_positional:
result.append(f'<{name}...>')
else:
result.append(f'[{name}...]')
elif greedy:
result.append(f'[{name}]...')
elif optional:
result.append(f'[{name}]')
else:
result.append(f'<{name}>')
return ' '.join(result)
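# Worked example (illustrative, assuming a typical command definition): for
#
#     @bot.command()
#     async def ban(ctx, member: discord.Member, days: int = 1, *, reason: str = None):
#         ...
#
# the rendered signature is "<member> [days=1] [reason]": required parameters
# get <angle brackets>, defaulted ones get [name=value], and a ``None`` (or
# empty string) default collapses to just [name].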
async def can_run(self, ctx: Context) -> bool:
"""|coro|
Checks if the command can be executed by checking all the predicates
inside the :attr:`~Command.checks` attribute. This also checks whether the
command is disabled.
.. versionchanged:: 1.3
Checks whether the command is disabled or not
Parameters
-----------
ctx: :class:`.Context`
The ctx of the command currently being invoked.
Raises
-------
:class:`CommandError`
Any command error that was raised during a check call will be propagated
by this function.
Returns
--------
:class:`bool`
A boolean indicating if the command can be invoked.
"""
if not self.enabled:
raise DisabledCommand(f'{self.name} command is disabled')
original = ctx.command
ctx.command = self
try:
if not await ctx.bot.can_run(ctx):
raise CheckFailure(f'The global check functions for command {self.qualified_name} failed.')
cog = self.cog
if cog is not None:
local_check = Cog._get_overridden_method(cog.cog_check)
if local_check is not None:
ret = await discord.utils.maybe_coroutine(local_check, ctx)
if not ret:
return False
predicates = self.checks
if not predicates:
# since we have no checks, then we just return True.
return True
return await discord.utils.async_all(predicate(ctx) for predicate in predicates) # type: ignore
finally:
ctx.command = original
class GroupMixin(Generic[CogT]):
"""A mixin that implements common functionality for classes that behave
similar to :class:`.Group` and are allowed to register commands.
Attributes
-----------
all_commands: :class:`dict`
A mapping of command name to :class:`.Command`
objects.
case_insensitive: :class:`bool`
Whether the commands should be case insensitive. Defaults to ``False``.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
case_insensitive = kwargs.get('case_insensitive', False)
self.all_commands: Dict[str, Command[CogT, Any, Any]] = _CaseInsensitiveDict() if case_insensitive else {}
self.case_insensitive: bool = case_insensitive
super().__init__(*args, **kwargs)
@property
def commands(self) -> Set[Command[CogT, Any, Any]]:
"""Set[:class:`.Command`]: A unique set of commands without aliases that are registered."""
return set(self.all_commands.values())
def recursively_remove_all_commands(self) -> None:
for command in self.all_commands.copy().values():
if isinstance(command, GroupMixin):
command.recursively_remove_all_commands()
self.remove_command(command.name)
def add_command(self, command: Command[CogT, Any, Any]) -> None:
"""Adds a :class:`.Command` into the internal list of commands.
This is usually not called; the :meth:`~.GroupMixin.command` or
:meth:`~.GroupMixin.group` shortcut decorators are used instead.
.. versionchanged:: 1.4
Raise :exc:`.CommandRegistrationError` instead of generic :exc:`.ClientException`
Parameters
-----------
command: :class:`Command`
The command to add.
Raises
-------
:exc:`.CommandRegistrationError`
If the command or its alias is already registered by a different command.
TypeError
If the command passed is not a subclass of :class:`.Command`.
"""
if not isinstance(command, Command):
raise TypeError('The command passed must be a subclass of Command')
if isinstance(self, Command):
command.parent = self
if command.name in self.all_commands:
raise CommandRegistrationError(command.name)
self.all_commands[command.name] = command
for alias in command.aliases:
if alias in self.all_commands:
self.remove_command(command.name)
raise CommandRegistrationError(alias, alias_conflict=True)
self.all_commands[alias] = command
def remove_command(self, name: str) -> Optional[Command[CogT, Any, Any]]:
"""Remove a :class:`.Command` from the internal list
of commands.
This could also be used as a way to remove aliases.
Parameters
-----------
name: :class:`str`
The name of the command to remove.
Returns
--------
Optional[:class:`.Command`]
The command that was removed. If the name is not valid then
``None`` is returned instead.
"""
command = self.all_commands.pop(name, None)
# does not exist
if command is None:
return None
if name in command.aliases:
# we're removing an alias so we don't want to remove the rest
return command
# we're not removing the alias so let's delete the rest of them.
for alias in command.aliases:
cmd = self.all_commands.pop(alias, None)
# in the case of a CommandRegistrationError, an alias might conflict
# with an already existing command. If this is the case, we want to
# make sure the pre-existing command is not removed.
if cmd is not None and cmd != command:
self.all_commands[alias] = cmd
return command
def walk_commands(self) -> Generator[Command[CogT, Any, Any], None, None]:
"""An iterator that recursively walks through all commands and subcommands.
.. versionchanged:: 1.4
Duplicates due to aliases are no longer returned
Yields
------
Union[:class:`.Command`, :class:`.Group`]
A command or group from the internal list of commands.
"""
for command in self.commands:
yield command
if isinstance(command, GroupMixin):
yield from command.walk_commands()
def get_command(self, name: str) -> Optional[Command[CogT, Any, Any]]:
"""Get a :class:`.Command` from the internal list
of commands.
This could also be used as a way to get aliases.
The name could be fully qualified (e.g. ``'foo bar'``) to get
the subcommand ``bar`` of the group command ``foo``. If a
subcommand is not found then ``None`` is returned just as usual.
Parameters
-----------
name: :class:`str`
The name of the command to get.
Returns
--------
Optional[:class:`Command`]
The command that was requested. If not found, returns ``None``.
"""
# fast path, no space in name.
if ' ' not in name:
return self.all_commands.get(name)
names = name.split()
if not names:
return None
obj = self.all_commands.get(names[0])
if not isinstance(obj, GroupMixin):
return obj
for name in names[1:]:
try:
obj = obj.all_commands[name] # type: ignore
except (AttributeError, KeyError):
return None
return obj
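# A hypothetical lookup sketch (the group and subcommand names are made up):
#
#     cmd = bot.get_command('tag create')   # subcommand 'create' of group 'tag'
#     if cmd is not None:
#         print(cmd.qualified_name)         # -> 'tag create'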
@overload
def command(
self,
name: str = ...,
cls: Type[Command[CogT, P, T]] = ...,
*args: Any,
**kwargs: Any,
) -> Callable[
[
Union[
Callable[Concatenate[CogT, ContextT, P], Coro[T]],
Callable[Concatenate[ContextT, P], Coro[T]],
]
], Command[CogT, P, T]]:
...
@overload
def command(
self,
name: str = ...,
cls: Type[CommandT] = ...,
*args: Any,
**kwargs: Any,
) -> Callable[[Callable[Concatenate[ContextT, P], Coro[Any]]], CommandT]:
...
def command(
self,
name: str = MISSING,
cls: Type[CommandT] = MISSING,
*args: Any,
**kwargs: Any,
) -> Callable[[Callable[Concatenate[ContextT, P], Coro[Any]]], CommandT]:
"""A shortcut decorator that invokes :func:`.command` and adds it to
the internal command list via :meth:`~.GroupMixin.add_command`.
Returns
--------
Callable[..., :class:`Command`]
A decorator that converts the provided method into a Command, adds it to the bot, then returns it.
"""
def decorator(func: Callable[Concatenate[ContextT, P], Coro[Any]]) -> CommandT:
kwargs.setdefault('parent', self)
result = command(name=name, cls=cls, *args, **kwargs)(func)
self.add_command(result)
return result
return decorator
@overload
def group(
self,
name: str = ...,
cls: Type[Group[CogT, P, T]] = ...,
*args: Any,
**kwargs: Any,
) -> Callable[[
Union[
Callable[Concatenate[CogT, ContextT, P], Coro[T]],
Callable[Concatenate[ContextT, P], Coro[T]]
]
], Group[CogT, P, T]]:
...
@overload
def group(
self,
name: str = ...,
cls: Type[GroupT] = ...,
*args: Any,
**kwargs: Any,
) -> Callable[[Callable[Concatenate[ContextT, P], Coro[Any]]], GroupT]:
...
def group(
self,
name: str = MISSING,
cls: Type[GroupT] = MISSING,
*args: Any,
**kwargs: Any,
) -> Callable[[Callable[Concatenate[ContextT, P], Coro[Any]]], GroupT]:
"""A shortcut decorator that invokes :func:`.group` and adds it to
the internal command list via :meth:`~.GroupMixin.add_command`.
Returns
--------
Callable[..., :class:`Group`]
A decorator that converts the provided method into a Group, adds it to the bot, then returns it.
"""
def decorator(func: Callable[Concatenate[ContextT, P], Coro[Any]]) -> GroupT:
kwargs.setdefault('parent', self)
result = group(name=name, cls=cls, *args, **kwargs)(func)
self.add_command(result)
return result
return decorator
class Group(GroupMixin[CogT], Command[CogT, P, T]):
"""A class that implements a grouping protocol for commands to be
executed as subcommands.
This class is a subclass of :class:`.Command` and thus all options
valid in :class:`.Command` are valid in here as well.
Attributes
-----------
invoke_without_command: :class:`bool`
Indicates if the group callback should begin parsing and
invocation only if no subcommand was found. Useful for
making it an error handling function to tell the user that
no subcommand was found or to have different functionality
in case no subcommand was found. If this is ``False``, then
the group callback will always be invoked first. This means
that the checks and the parsing dictated by its parameters
will be executed. Defaults to ``False``.
case_insensitive: :class:`bool`
Indicates if the group's commands should be case insensitive.
Defaults to ``False``.
"""
def __init__(self, *args: Any, **attrs: Any) -> None:
self.invoke_without_command: bool = attrs.pop('invoke_without_command', False)
super().__init__(*args, **attrs)
def copy(self: GroupT) -> GroupT:
"""Creates a copy of this :class:`Group`.
Returns
--------
:class:`Group`
A new instance of this group.
"""
ret = super().copy()
for cmd in self.commands:
ret.add_command(cmd.copy())
return ret # type: ignore
async def invoke(self, ctx: Context) -> None:
ctx.invoked_subcommand = None
ctx.subcommand_passed = None
early_invoke = not self.invoke_without_command
if early_invoke:
await self.prepare(ctx)
view = ctx.view
previous = view.index
view.skip_ws()
trigger = view.get_word()
if trigger:
ctx.subcommand_passed = trigger
ctx.invoked_subcommand = self.all_commands.get(trigger, None)
if early_invoke:
injected = hooked_wrapped_callback(self, ctx, self.callback)
await injected(*ctx.args, **ctx.kwargs)
ctx.invoked_parents.append(ctx.invoked_with) # type: ignore
if trigger and ctx.invoked_subcommand:
ctx.invoked_with = trigger
await ctx.invoked_subcommand.invoke(ctx)
elif not early_invoke:
# undo the trigger parsing
view.index = previous
view.previous = previous
await super().invoke(ctx)
async def reinvoke(self, ctx: Context, *, call_hooks: bool = False) -> None:
ctx.invoked_subcommand = None
early_invoke = not self.invoke_without_command
if early_invoke:
ctx.command = self
await self._parse_arguments(ctx)
if call_hooks:
await self.call_before_hooks(ctx)
view = ctx.view
previous = view.index
view.skip_ws()
trigger = view.get_word()
if trigger:
ctx.subcommand_passed = trigger
ctx.invoked_subcommand = self.all_commands.get(trigger, None)
if early_invoke:
try:
await self.callback(*ctx.args, **ctx.kwargs) # type: ignore
except:
ctx.command_failed = True
raise
finally:
if call_hooks:
await self.call_after_hooks(ctx)
ctx.invoked_parents.append(ctx.invoked_with) # type: ignore
if trigger and ctx.invoked_subcommand:
ctx.invoked_with = trigger
await ctx.invoked_subcommand.reinvoke(ctx, call_hooks=call_hooks)
elif not early_invoke:
# undo the trigger parsing
view.index = previous
view.previous = previous
await super().reinvoke(ctx, call_hooks=call_hooks)
# Decorators
@overload
def command(
name: str = ...,
cls: Type[Command[CogT, P, T]] = ...,
**attrs: Any,
) -> Callable[
[
Union[
Callable[Concatenate[CogT, ContextT, P], Coro[T]],
Callable[Concatenate[ContextT, P], Coro[T]],
]
]
, Command[CogT, P, T]]:
...
@overload
def command(
name: str = ...,
cls: Type[CommandT] = ...,
**attrs: Any,
) -> Callable[
[
Union[
Callable[Concatenate[CogT, ContextT, P], Coro[Any]],
Callable[Concatenate[ContextT, P], Coro[Any]],
]
]
, CommandT]:
...
def command(
name: str = MISSING,
cls: Type[CommandT] = MISSING,
**attrs: Any
) -> Callable[
[
Union[
Callable[Concatenate[ContextT, P], Coro[Any]],
Callable[Concatenate[CogT, ContextT, P], Coro[T]],
]
]
, Union[Command[CogT, P, T], CommandT]]:
"""A decorator that transforms a function into a :class:`.Command`
or if called with :func:`.group`, :class:`.Group`.
By default the ``help`` attribute is received automatically from the
docstring of the function and is cleaned up with the use of
``inspect.cleandoc``. If the docstring is ``bytes``, then it is decoded
into :class:`str` using utf-8 encoding.
All checks added using the :func:`.check` & co. decorators are added into
the function. There is no way to supply your own checks through this
decorator.
Parameters
-----------
name: :class:`str`
The name to create the command with. By default this uses the
function name unchanged.
cls
The class to construct with. By default this is :class:`.Command`.
You usually do not change this.
attrs
Keyword arguments to pass into the construction of the class denoted
by ``cls``.
Raises
-------
TypeError
If the function is not a coroutine or is already a command.
"""
if cls is MISSING:
cls = Command # type: ignore
def decorator(func: Union[
Callable[Concatenate[ContextT, P], Coro[Any]],
Callable[Concatenate[CogT, ContextT, P], Coro[Any]],
]) -> CommandT:
if isinstance(func, Command):
raise TypeError('Callback is already a command.')
return cls(func, name=name, **attrs)
return decorator
@overload
def group(
name: str = ...,
cls: Type[Group[CogT, P, T]] = ...,
**attrs: Any,
) -> Callable[
[
Union[
Callable[Concatenate[CogT, ContextT, P], Coro[T]],
Callable[Concatenate[ContextT, P], Coro[T]],
]
]
, Group[CogT, P, T]]:
...
@overload
def group(
name: str = ...,
cls: Type[GroupT] = ...,
**attrs: Any,
) -> Callable[
[
Union[
Callable[Concatenate[CogT, ContextT, P], Coro[Any]],
Callable[Concatenate[ContextT, P], Coro[Any]],
]
]
, GroupT]:
...
def group(
name: str = MISSING,
cls: Type[GroupT] = MISSING,
**attrs: Any,
) -> Callable[
[
Union[
Callable[Concatenate[ContextT, P], Coro[Any]],
Callable[Concatenate[CogT, ContextT, P], Coro[T]],
]
]
, Union[Group[CogT, P, T], GroupT]]:
"""A decorator that transforms a function into a :class:`.Group`.
This is similar to the :func:`.command` decorator but the ``cls``
parameter is set to :class:`Group` by default.
.. versionchanged:: 1.1
The ``cls`` parameter can now be passed.
"""
if cls is MISSING:
cls = Group # type: ignore
return command(name=name, cls=cls, **attrs) # type: ignore
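# A hypothetical sketch of the decorator above, using ``invoke_without_command``
# so the group callback only runs when no subcommand matched (``bot`` assumed):
#
#     @bot.group(invoke_without_command=True)
#     async def tag(ctx):
#         await ctx.send('No subcommand given.')
#
#     @tag.command()
#     async def create(ctx, name: str):
#         await ctx.send(f'Created tag {name}.')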
def check(predicate: Check) -> Callable[[T], T]:
r"""A decorator that adds a check to the :class:`.Command` or its
subclasses. These checks could be accessed via :attr:`.Command.checks`.
These checks should be predicates that take in a single parameter,
a :class:`.Context`. If the check returns a ``False``\-like value then
during invocation a :exc:`.CheckFailure` exception is raised and sent to
the :func:`.on_command_error` event.
If an exception should be thrown in the predicate then it should be a
subclass of :exc:`.CommandError`. Any exception not subclassed from it
will be propagated while those subclassed will be sent to
:func:`.on_command_error`.
A special attribute named ``predicate`` is bound to the value
returned by this decorator so that the predicate passed in can be
retrieved later. This allows the following introspection and chaining to be done:
.. code-block:: python3
def owner_or_permissions(**perms):
original = commands.has_permissions(**perms).predicate
async def extended_check(ctx):
if ctx.guild is None:
return False
return ctx.guild.owner_id == ctx.author.id or await original(ctx)
return commands.check(extended_check)
.. note::
The function bound to the ``predicate`` attribute is **always** a coroutine,
even if the original function was not a coroutine.
.. versionchanged:: 1.3
The ``predicate`` attribute was added.
Examples
---------
Creating a basic check to see if the command invoker is you.
.. code-block:: python3
def check_if_it_is_me(ctx):
return ctx.message.author.id == 85309593344815104
@bot.command()
@commands.check(check_if_it_is_me)
async def only_for_me(ctx):
await ctx.send('I know you!')
Transforming common checks into its own decorator:
.. code-block:: python3
def is_me():
def predicate(ctx):
return ctx.message.author.id == 85309593344815104
return commands.check(predicate)
@bot.command()
@is_me()
async def only_me(ctx):
await ctx.send('Only you!')
Parameters
-----------
predicate: Callable[[:class:`Context`], :class:`bool`]
The predicate to check if the command should be invoked.
"""
def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
if isinstance(func, Command):
func.checks.append(predicate)
else:
if not hasattr(func, '__commands_checks__'):
func.__commands_checks__ = []
func.__commands_checks__.append(predicate)
return func
if inspect.iscoroutinefunction(predicate):
decorator.predicate = predicate
else:
@functools.wraps(predicate)
async def wrapper(ctx):
return predicate(ctx) # type: ignore
decorator.predicate = wrapper
return decorator # type: ignore
def check_any(*checks: Check) -> Callable[[T], T]:
r"""A :func:`check` that is added that checks if any of the checks passed
will pass, i.e. using logical OR.
If all checks fail then :exc:`.CheckAnyFailure` is raised to signal the failure.
It inherits from :exc:`.CheckFailure`.
.. note::
The ``predicate`` attribute for this function **is** a coroutine.
.. versionadded:: 1.3
Parameters
------------
\*checks: Callable[[:class:`Context`], :class:`bool`]
An argument list of checks that have been decorated with
the :func:`check` decorator.
Raises
-------
TypeError
A check passed has not been decorated with the :func:`check`
decorator.
Examples
---------
Creating a basic check to see if it's the bot owner or
the server owner:
.. code-block:: python3
def is_guild_owner():
def predicate(ctx):
return ctx.guild is not None and ctx.guild.owner_id == ctx.author.id
return commands.check(predicate)
@bot.command()
@commands.check_any(commands.is_owner(), is_guild_owner())
async def only_for_owners(ctx):
await ctx.send('Hello mister owner!')
"""
unwrapped = []
for wrapped in checks:
try:
pred = wrapped.predicate
except AttributeError:
raise TypeError(f'{wrapped!r} must be wrapped by commands.check decorator') from None
else:
unwrapped.append(pred)
async def predicate(ctx: Context) -> bool:
errors = []
for func in unwrapped:
try:
value = await func(ctx)
except CheckFailure as e:
errors.append(e)
else:
if value:
return True
# if we're here, all checks failed
raise CheckAnyFailure(unwrapped, errors)
return check(predicate)
def has_role(item: Union[int, str]) -> Callable[[T], T]:
"""A :func:`.check` that is added that checks if the member invoking the
command has the role specified via the given name or ID.
If a string is specified, you must give the exact name of the role, including
caps and spelling.
If an integer is specified, you must give the exact snowflake ID of the role.
If the message is invoked in a private message context then the check will
return ``False``.
This check raises one of two special exceptions, :exc:`.MissingRole` if the user
is missing a role, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1
Raise :exc:`.MissingRole` or :exc:`.NoPrivateMessage`
instead of generic :exc:`.CheckFailure`
Parameters
-----------
item: Union[:class:`int`, :class:`str`]
The name or ID of the role to check.
"""
def predicate(ctx: Context) -> bool:
if ctx.guild is None:
raise NoPrivateMessage()
# ctx.guild is None doesn't narrow ctx.author to Member
if isinstance(item, int):
role = discord.utils.get(ctx.author.roles, id=item) # type: ignore
else:
role = discord.utils.get(ctx.author.roles, name=item) # type: ignore
if role is None:
raise MissingRole(item)
return True
return check(predicate)
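# A hypothetical usage sketch (role name, bot, and command are made up):
#
#     @bot.command()
#     @commands.has_role('Moderators')
#     async def purge(ctx, amount: int):
#         await ctx.channel.purge(limit=amount)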
def has_any_role(*items: Union[int, str]) -> Callable[[T], T]:
r"""A :func:`.check` that is added that checks if the member invoking the
command has **any** of the roles specified. This means that if they have
any one of the roles specified, then this check will return `True`.
Similar to :func:`.has_role`\, the names or IDs passed in must be exact.
This check raises one of two special exceptions, :exc:`.MissingAnyRole` if the user
is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1
Raise :exc:`.MissingAnyRole` or :exc:`.NoPrivateMessage`
instead of generic :exc:`.CheckFailure`
Parameters
-----------
items: List[Union[:class:`str`, :class:`int`]]
An argument list of role names or IDs to check the member for.
Example
--------
.. code-block:: python3
@bot.command()
@commands.has_any_role('Library Devs', 'Moderators', 492212595072434186)
async def cool(ctx):
await ctx.send('You are cool indeed')
"""
def predicate(ctx):
if ctx.guild is None:
raise NoPrivateMessage()
# ctx.guild is None doesn't narrow ctx.author to Member
getter = functools.partial(discord.utils.get, ctx.author.roles) # type: ignore
if any(getter(id=item) is not None if isinstance(item, int) else getter(name=item) is not None for item in items):
return True
raise MissingAnyRole(list(items))
return check(predicate)
def bot_has_role(item: int) -> Callable[[T], T]:
"""Similar to :func:`.has_role` except checks if the bot itself has the
role.
This check raises one of two special exceptions, :exc:`.BotMissingRole` if the bot
is missing the role, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1
Raise :exc:`.BotMissingRole` or :exc:`.NoPrivateMessage`
instead of generic :exc:`.CheckFailure`
"""
def predicate(ctx):
if ctx.guild is None:
raise NoPrivateMessage()
me = ctx.me
if isinstance(item, int):
role = discord.utils.get(me.roles, id=item)
else:
role = discord.utils.get(me.roles, name=item)
if role is None:
raise BotMissingRole(item)
return True
return check(predicate)
def bot_has_any_role(*items: int) -> Callable[[T], T]:
"""Similar to :func:`.has_any_role` except checks if the bot itself has
any of the roles listed.
This check raises one of two special exceptions, :exc:`.BotMissingAnyRole` if the bot
is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1
Raise :exc:`.BotMissingAnyRole` or :exc:`.NoPrivateMessage`
instead of generic :exc:`.CheckFailure`
"""
def predicate(ctx):
if ctx.guild is None:
raise NoPrivateMessage()
me = ctx.me
getter = functools.partial(discord.utils.get, me.roles)
if any(getter(id=item) is not None if isinstance(item, int) else getter(name=item) is not None for item in items):
return True
raise BotMissingAnyRole(list(items))
return check(predicate)
def has_permissions(**perms: bool) -> Callable[[T], T]:
"""A :func:`.check` that is added that checks if the member has all of
the permissions necessary.
Note that this check operates on the current channel permissions, not the
guild wide permissions.
The permissions passed in must be exactly like the properties shown under
:class:`.discord.Permissions`.
This check raises a special exception, :exc:`.MissingPermissions`
that is inherited from :exc:`.CheckFailure`.
Parameters
------------
perms
An argument list of permissions to check for.
Example
---------
.. code-block:: python3
@bot.command()
@commands.has_permissions(manage_messages=True)
async def test(ctx):
await ctx.send('You can manage messages.')
"""
invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
if invalid:
raise TypeError(f"Invalid permission(s): {', '.join(invalid)}")
def predicate(ctx: Context) -> bool:
ch = ctx.channel
permissions = ch.permissions_for(ctx.author) # type: ignore
missing = [perm for perm, value in perms.items() if getattr(permissions, perm) != value]
if not missing:
return True
raise MissingPermissions(missing)
return check(predicate)
def bot_has_permissions(**perms: bool) -> Callable[[T], T]:
"""Similar to :func:`.has_permissions` except checks if the bot itself has
the permissions listed.
This check raises a special exception, :exc:`.BotMissingPermissions`
that is inherited from :exc:`.CheckFailure`.
"""
invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
if invalid:
raise TypeError(f"Invalid permission(s): {', '.join(invalid)}")
def predicate(ctx: Context) -> bool:
guild = ctx.guild
me = guild.me if guild is not None else ctx.bot.user
permissions = ctx.channel.permissions_for(me) # type: ignore
missing = [perm for perm, value in perms.items() if getattr(permissions, perm) != value]
if not missing:
return True
raise BotMissingPermissions(missing)
return check(predicate)
def has_guild_permissions(**perms: bool) -> Callable[[T], T]:
"""Similar to :func:`.has_permissions`, but operates on guild wide
permissions instead of the current channel permissions.
If this check is called in a DM context, it will raise an
exception, :exc:`.NoPrivateMessage`.
.. versionadded:: 1.3
"""
invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
if invalid:
raise TypeError(f"Invalid permission(s): {', '.join(invalid)}")
def predicate(ctx: Context) -> bool:
if not ctx.guild:
raise NoPrivateMessage
permissions = ctx.author.guild_permissions # type: ignore
missing = [perm for perm, value in perms.items() if getattr(permissions, perm) != value]
if not missing:
return True
raise MissingPermissions(missing)
return check(predicate)
def bot_has_guild_permissions(**perms: bool) -> Callable[[T], T]:
"""Similar to :func:`.has_guild_permissions`, but checks the bot
members guild permissions.
.. versionadded:: 1.3
"""
invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
if invalid:
raise TypeError(f"Invalid permission(s): {', '.join(invalid)}")
def predicate(ctx: Context) -> bool:
if not ctx.guild:
raise NoPrivateMessage
permissions = ctx.me.guild_permissions # type: ignore
missing = [perm for perm, value in perms.items() if getattr(permissions, perm) != value]
if not missing:
return True
raise BotMissingPermissions(missing)
return check(predicate)
def dm_only() -> Callable[[T], T]:
"""A :func:`.check` that indicates this command must only be used in a
DM context. Only private messages are allowed when
using the command.
This check raises a special exception, :exc:`.PrivateMessageOnly`
that is inherited from :exc:`.CheckFailure`.
.. versionadded:: 1.1
"""
def predicate(ctx: Context) -> bool:
if ctx.guild is not None:
raise PrivateMessageOnly()
return True
return check(predicate)
def guild_only() -> Callable[[T], T]:
"""A :func:`.check` that indicates this command must only be used in a
guild context. Basically, no private messages are allowed when
using the command.
This check raises a special exception, :exc:`.NoPrivateMessage`
that is inherited from :exc:`.CheckFailure`.
"""
def predicate(ctx: Context) -> bool:
if ctx.guild is None:
raise NoPrivateMessage()
return True
return check(predicate)
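# A hypothetical sketch of the two checks above (``bot`` assumed):
#
#     @bot.command()
#     @commands.guild_only()
#     async def serverinfo(ctx):
#         await ctx.send(ctx.guild.name)
#
#     @bot.command()
#     @commands.dm_only()
#     async def secret(ctx):
#         await ctx.send('Only available in DMs.')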
def is_owner() -> Callable[[T], T]:
"""A :func:`.check` that checks if the person invoking this command is the
owner of the bot.
This is powered by :meth:`.Bot.is_owner`.
This check raises a special exception, :exc:`.NotOwner` that is derived
from :exc:`.CheckFailure`.
"""
async def predicate(ctx: Context) -> bool:
if not await ctx.bot.is_owner(ctx.author):
raise NotOwner('You do not own this bot.')
return True
return check(predicate)
def is_nsfw() -> Callable[[T], T]:
"""A :func:`.check` that checks if the channel is a NSFW channel.
This check raises a special exception, :exc:`.NSFWChannelRequired`
that is derived from :exc:`.CheckFailure`.
.. versionchanged:: 1.1
Raise :exc:`.NSFWChannelRequired` instead of generic :exc:`.CheckFailure`.
DM channels will also now pass this check.
"""
def pred(ctx: Context) -> bool:
ch = ctx.channel
if ctx.guild is None or (isinstance(ch, (discord.TextChannel, discord.Thread)) and ch.is_nsfw()):
return True
raise NSFWChannelRequired(ch) # type: ignore
return check(pred)
def cooldown(rate: int, per: float, type: Union[BucketType, Callable[[Message], Any]] = BucketType.default) -> Callable[[T], T]:
"""A decorator that adds a cooldown to a :class:`.Command`
A cooldown allows a command to only be used a specific number
of times in a specific time frame. These cooldowns can be based
either on a per-guild, per-channel, per-user, per-role or global basis.
Denoted by the third argument of ``type`` which must be of enum
type :class:`.BucketType`.
If a cooldown is triggered, then :exc:`.CommandOnCooldown` is triggered in
:func:`.on_command_error` and the local error handler.
A command can only have a single cooldown.
Parameters
------------
rate: :class:`int`
The number of times a command can be used before triggering a cooldown.
per: :class:`float`
The amount of seconds to wait for a cooldown when it's been triggered.
type: Union[:class:`.BucketType`, Callable[[:class:`.Message`], Any]]
The type of cooldown to have. If callable, should return a key for the mapping.
.. versionchanged:: 1.7
Callables are now supported for custom bucket types.
"""
def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
if isinstance(func, Command):
func._buckets = CooldownMapping(Cooldown(rate, per), type)
else:
func.__commands_cooldown__ = CooldownMapping(Cooldown(rate, per), type)
return func
return decorator # type: ignore
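# A hypothetical usage sketch of the decorator above: one use per user every
# 30 seconds (``bot`` assumed):
#
#     @bot.command()
#     @commands.cooldown(rate=1, per=30.0, type=commands.BucketType.user)
#     async def daily(ctx):
#         await ctx.send('Claimed!')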
def dynamic_cooldown(cooldown: Union[BucketType, Callable[[Message], Any]], type: BucketType = BucketType.default) -> Callable[[T], T]:
"""A decorator that adds a dynamic cooldown to a :class:`.Command`
This differs from :func:`.cooldown` in that it takes a function that
accepts a single parameter of type :class:`.discord.Message` and must
return a :class:`.Cooldown` or ``None``. If ``None`` is returned then
that cooldown is effectively bypassed.
A cooldown allows a command to only be used a specific number
of times in a specific time frame. These cooldowns can be based
either on a per-guild, per-channel, per-user, per-role or global basis.
Denoted by the third argument of ``type`` which must be of enum
type :class:`.BucketType`.
If a cooldown is triggered, then :exc:`.CommandOnCooldown` is triggered in
:func:`.on_command_error` and the local error handler.
A command can only have a single cooldown.
.. versionadded:: 2.0
Parameters
------------
cooldown: Callable[[:class:`.discord.Message`], Optional[:class:`.Cooldown`]]
A function that takes a message and returns a cooldown that will
apply to this invocation or ``None`` if the cooldown should be bypassed.
type: :class:`.BucketType`
The type of cooldown to have.
"""
if not callable(cooldown):
raise TypeError("A callable must be provided")
def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
if isinstance(func, Command):
func._buckets = DynamicCooldownMapping(cooldown, type)
else:
func.__commands_cooldown__ = DynamicCooldownMapping(cooldown, type)
return func
return decorator # type: ignore
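# A hypothetical sketch of the decorator above: bypass the cooldown for one
# made-up user ID, otherwise one use per 20 seconds per user (``bot`` assumed):
#
#     def owner_bypass(message):
#         if message.author.id == 123456789:       # made-up ID
#             return None                          # no cooldown applied
#         return commands.Cooldown(1, 20.0)
#
#     @bot.command()
#     @commands.dynamic_cooldown(owner_bypass, commands.BucketType.user)
#     async def ask(ctx, *, question: str):
#         await ctx.send('Thinking...')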
def max_concurrency(number: int, per: BucketType = BucketType.default, *, wait: bool = False) -> Callable[[T], T]:
"""A decorator that adds a maximum concurrency to a :class:`.Command` or its subclasses.
This enables you to only allow a certain number of command invocations at the same time,
for example if a command takes too long or if only one user can use it at a time. This
differs from a cooldown in that there is no set waiting period or token bucket -- only
a set number of people can run the command.
.. versionadded:: 1.3
Parameters
-------------
number: :class:`int`
The maximum number of invocations of this command that can be running at the same time.
per: :class:`.BucketType`
The bucket that this concurrency is based on, e.g. ``BucketType.guild`` would allow
it to be used up to ``number`` times per guild.
wait: :class:`bool`
Whether the command should wait for the queue to be over. If this is set to ``False``
then instead of waiting until the command can run again, the command raises
:exc:`.MaxConcurrencyReached` to its error handler. If this is set to ``True``
then the command waits until it can be executed.
"""
def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
value = MaxConcurrency(number, per=per, wait=wait)
if isinstance(func, Command):
func._max_concurrency = value
else:
func.__commands_max_concurrency__ = value
return func
return decorator # type: ignore
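# A hypothetical usage sketch of the decorator above: at most one running
# invocation per guild, raising MaxConcurrencyReached instead of queueing
# (``bot`` and ``run_backup`` are assumed):
#
#     @bot.command()
#     @commands.max_concurrency(1, per=commands.BucketType.guild, wait=False)
#     async def backup(ctx):
#         await run_backup(ctx.guild)   # assumed long-running helper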
def before_invoke(coro) -> Callable[[T], T]:
"""A decorator that registers a coroutine as a pre-invoke hook.
This allows you to refer to one before invoke hook for several commands that
do not have to be within the same cog.
.. versionadded:: 1.4
Example
---------
.. code-block:: python3
async def record_usage(ctx):
print(ctx.author, 'used', ctx.command, 'at', ctx.message.created_at)
@bot.command()
@commands.before_invoke(record_usage)
async def who(ctx): # Output: <User> used who at <Time>
await ctx.send('i am a bot')
class What(commands.Cog):
@commands.before_invoke(record_usage)
@commands.command()
async def when(self, ctx): # Output: <User> used when at <Time>
await ctx.send(f'and i have existed since {ctx.bot.user.created_at}')
@commands.command()
async def where(self, ctx): # Output: <Nothing>
await ctx.send('on Discord')
@commands.command()
async def why(self, ctx): # Output: <Nothing>
await ctx.send('because someone made me')
bot.add_cog(What())
"""
def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
if isinstance(func, Command):
func.before_invoke(coro)
else:
func.__before_invoke__ = coro
return func
return decorator # type: ignore
def after_invoke(coro) -> Callable[[T], T]:
"""A decorator that registers a coroutine as a post-invoke hook.
This allows you to refer to one after invoke hook for several commands that
do not have to be within the same cog.
.. versionadded:: 1.4
"""
def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
if isinstance(func, Command):
func.after_invoke(coro)
else:
func.__after_invoke__ = coro
return func
return decorator # type: ignore
|
the-stack_0_24565
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system on which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.scene.annotation.hoverlabel"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system on which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets the hover label text font. By default uses the global
hover font and size, with color from `hoverlabel.bordercolor`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.layout.scene.annotatio
n.hoverlabel.Font
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system on which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.annotation.hoverlabel.Font
constructor must be a dict or
an instance of plotly.graph_objs.layout.scene.annotation.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.layout.scene.annotation.hoverlabel import font as v_font
# Initialize validators
# ---------------------
self._validators["color"] = v_font.ColorValidator()
self._validators["family"] = v_font.FamilyValidator()
self._validators["size"] = v_font.SizeValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Font"]
|
the-stack_0_24567
|
import re
from typing import Dict, Any, List, Tuple
from aioupnp.fault import UPnPError
from aioupnp.constants import XML_VERSION_PREFIX
from aioupnp.serialization.xml import xml_to_dict
from aioupnp.util import flatten_keys
CONTENT_PATTERN = re.compile(
"(\<\?xml version=\"1\.0\"[^>]*\?\>(\s*.)*|\>)"
)
XML_ROOT_SANITY_PATTERN = re.compile(
"(?i)(\{|(urn:schemas-[\w|\d]*-(com|org|net))[:|-](device|service)[:|-]([\w|\d|\:|\-|\_]*)|\}([\w|\d|\:|\-|\_]*))"
)
XML_OTHER_KEYS = re.compile(
"{[\w|\:\/\.]*}|(\w*)"
)
def serialize_scpd_get(path: str, address: str) -> bytes:
if "http://" in address:
host = address.split("http://")[1]
else:
host = address
if ":" in host:
host = host.split(":")[0]
if not path.startswith("/"):
path = "/" + path
return (
f'GET {path} HTTP/1.1\r\n'
f'Accept-Encoding: gzip\r\n'
f'Host: {host}\r\n'
f'Connection: Close\r\n'
f'\r\n'
).encode()
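# Illustrative call (address and path are made up): the helper above strips the
# scheme and port to build the Host header, so
# serialize_scpd_get("/rootDesc.xml", "http://192.168.1.1:49152") returns
#
#     b'GET /rootDesc.xml HTTP/1.1\r\n'
#     b'Accept-Encoding: gzip\r\n'
#     b'Host: 192.168.1.1\r\n'
#     b'Connection: Close\r\n'
#     b'\r\n'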
def deserialize_scpd_get_response(content: bytes) -> Dict[str, Any]:
if XML_VERSION_PREFIX.encode() in content:
parsed: List[Tuple[str, str]] = CONTENT_PATTERN.findall(content.decode())
xml_dict = xml_to_dict('' if not parsed else parsed[0][0])
return parse_device_dict(xml_dict)
return {}
def parse_device_dict(xml_dict: Dict[str, Any]) -> Dict[str, Any]:
keys = list(xml_dict.keys())
found = False
for k in keys:
m: List[Tuple[str, str, str, str, str, str]] = XML_ROOT_SANITY_PATTERN.findall(k)
if len(m) == 3 and m[1][0] and m[2][5]:
schema_key: str = m[1][0]
root: str = m[2][5]
flattened = flatten_keys(xml_dict, "{%s}" % schema_key)
if root not in flattened:
raise UPnPError("root device not found")
xml_dict = flattened[root]
found = True
break
if not found:
raise UPnPError("device not found")
result = {}
for k, v in xml_dict.items():
if isinstance(xml_dict[k], dict):
inner_d = {}
for inner_k, inner_v in xml_dict[k].items():
parsed_k = XML_OTHER_KEYS.findall(inner_k)
if len(parsed_k) == 2:
inner_d[parsed_k[0]] = inner_v
else:
assert len(parsed_k) == 3, f"expected len=3, got {len(parsed_k)}"
inner_d[parsed_k[1]] = inner_v
result[k] = inner_d
else:
result[k] = v
return result
|
the-stack_0_24568
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import time
from keystoneauth1 import loading as ks_loading
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import uuidutils
import six
from nova.compute import utils as compute_utils
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import base_api
from nova.network import model as network_model
from nova.network.neutronv2 import constants
from nova import objects
from nova.objects import fields as obj_fields
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
from nova.policies import servers as servers_policies
from nova import profiler
from nova import service_auth
from nova import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
_SESSION = None
_ADMIN_AUTH = None
DEFAULT_SECGROUP = 'default'
BINDING_PROFILE = 'binding:profile'
BINDING_HOST_ID = 'binding:host_id'
MIGRATING_ATTR = 'migrating_to'
L3_NETWORK_TYPES = ['vxlan', 'gre', 'geneve']
def reset_state():
global _ADMIN_AUTH
global _SESSION
_ADMIN_AUTH = None
_SESSION = None
def _load_auth_plugin(conf):
auth_plugin = ks_loading.load_auth_from_conf_options(conf,
nova.conf.neutron.NEUTRON_GROUP)
if auth_plugin:
return auth_plugin
if conf.neutron.auth_type is None:
# If we're coming in through a REST API call for something like
# creating a server, the end user is going to get a 500 response
# which is accurate since the system is mis-configured, but we should
# leave a breadcrumb for the operator that is checking the logs.
LOG.error('The [neutron] section of your nova configuration file '
'must be configured for authentication with the networking '
'service endpoint. See the networking service install guide '
'for details: '
'https://docs.openstack.org/neutron/latest/install/')
err_msg = _('Unknown auth type: %s') % conf.neutron.auth_type
raise neutron_client_exc.Unauthorized(message=err_msg)
def _get_binding_profile(port):
"""Convenience method to get the binding:profile from the port
The binding:profile in the port is undefined in the networking service
API and is dependent on backend configuration. This means it could be
an empty dict, None, or have some values.
:param port: dict port response body from the networking service API
:returns: The port binding:profile dict; empty if not set on the port
"""
return port.get(BINDING_PROFILE, {}) or {}
@profiler.trace_cls("neutron_api")
class ClientWrapper(clientv20.Client):
"""A Neutron client wrapper class.
Wraps the callable methods, catches Unauthorized and Forbidden from Neutron
and converts them to a 401 or 403 for Nova clients.
"""
def __init__(self, base_client, admin):
# Expose all attributes from the base_client instance
self.__dict__ = base_client.__dict__
self.base_client = base_client
self.admin = admin
def __getattribute__(self, name):
obj = object.__getattribute__(self, name)
if callable(obj):
obj = object.__getattribute__(self, 'proxy')(obj)
return obj
def proxy(self, obj):
def wrapper(*args, **kwargs):
try:
ret = obj(*args, **kwargs)
except neutron_client_exc.Unauthorized:
if not self.admin:
# Token is expired so Neutron is raising a
# unauthorized exception, we should convert it to
# raise a 401 to make client to handle a retry by
# regenerating a valid token and trying a new
# attempt.
raise exception.Unauthorized()
# In admin context if token is invalid Neutron client
# should be able to regenerate a valid by using the
# Neutron admin credential configuration located in
# nova.conf.
LOG.error("Neutron client was not able to generate a "
"valid admin token, please verify Neutron "
"admin credential located in nova.conf")
raise exception.NeutronAdminCredentialConfigurationInvalid()
except neutron_client_exc.Forbidden as e:
raise exception.Forbidden(six.text_type(e))
return ret
return wrapper
def _get_auth_plugin(context, admin=False):
# NOTE(dprince): In the case where no auth_token is present we allow use of
# neutron admin tenant credentials if it is an admin context. This is to
# support some services (metadata API) where an admin context is used
# without an auth token.
global _ADMIN_AUTH
if admin or (context.is_admin and not context.auth_token):
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
return _ADMIN_AUTH
if context.auth_token:
return service_auth.get_auth_plugin(context)
# We did not get a user token and we should not be using
# an admin token so log an error
raise exception.Unauthorized()
def _get_session():
global _SESSION
if not _SESSION:
_SESSION = ks_loading.load_session_from_conf_options(
CONF, nova.conf.neutron.NEUTRON_GROUP)
return _SESSION
def get_client(context, admin=False):
auth_plugin = _get_auth_plugin(context, admin=admin)
session = _get_session()
client_args = dict(session=session,
auth=auth_plugin,
global_request_id=context.global_request_id)
if CONF.neutron.url:
# TODO(efried): Remove in Rocky
client_args = dict(client_args,
endpoint_override=CONF.neutron.url,
# NOTE(efried): The legacy behavior was to default
# region_name in the conf.
region_name=CONF.neutron.region_name or 'RegionOne')
else:
# The new way
# NOTE(efried): We build an adapter to pull conf options to pass to
# neutronclient, which uses them to build an Adapter. This should be
# unwound at some point.
adap = utils.get_ksa_adapter(
'network', ksa_auth=auth_plugin, ksa_session=session)
client_args = dict(client_args,
service_type=adap.service_type,
service_name=adap.service_name,
interface=adap.interface,
region_name=adap.region_name,
endpoint_override=adap.endpoint_override)
return ClientWrapper(clientv20.Client(**client_args),
admin=admin or context.is_admin)
def _get_ksa_client(context, admin=False):
"""Returns a keystoneauth Adapter
This method should only be used if python-neutronclient does not yet
provide the necessary API bindings.
:param context: User request context
:param admin: If True, uses the configured credentials, else uses the
existing auth_token in the context (the user token).
:returns: keystoneauth1 Adapter object
"""
auth_plugin = _get_auth_plugin(context, admin=admin)
session = _get_session()
client = utils.get_ksa_adapter(
'network', ksa_auth=auth_plugin, ksa_session=session)
client.additional_headers = {'accept': 'application/json'}
return client
def _is_not_duplicate(item, items, items_list_name, instance):
present = item in items
# The expectation from this function's perspective is that the
# item is not part of the items list so if it is part of it
# we should at least log it as a warning
if present:
LOG.warning("%(item)s already exists in list: %(list_name)s "
"containing: %(items)s. ignoring it",
{'item': item,
'list_name': items_list_name,
'items': items},
instance=instance)
return not present
def _ensure_no_port_binding_failure(port):
binding_vif_type = port.get('binding:vif_type')
if binding_vif_type == network_model.VIF_TYPE_BINDING_FAILED:
raise exception.PortBindingFailed(port_id=port['id'])
def _filter_hypervisor_macs(instance, requested_ports_dict, hypervisor_macs):
"""Removes macs from set if used by existing ports
:param instance: The server instance.
:type instance: nova.objects.instance.Instance
:param requested_ports_dict: dict, keyed by port ID, of ports requested by
the user
:type requested_ports_dict: dict
:param hypervisor_macs: None or a set of MAC addresses that the
instance should use. hypervisor_macs are supplied by the hypervisor
driver (contrast with requested_networks which is user supplied).
NB: NeutronV2 currently assigns hypervisor supplied MAC addresses
to arbitrary networks, which requires openflow switches to
function correctly if more than one network is being used with
the bare metal hypervisor (which is the only one known to limit
MAC addresses).
:type hypervisor_macs: set
:returns: a set of available MAC addresses to use if
creating a port later; this is the set of hypervisor_macs
after removing any MAC addresses from explicitly
requested ports.
"""
if not hypervisor_macs:
return None
# Make a copy we can mutate: records macs that have not been used
# to create a port on a network. If we find a mac with a
# pre-allocated port we also remove it from this set.
available_macs = set(hypervisor_macs)
if not requested_ports_dict:
return available_macs
for port in requested_ports_dict.values():
mac = port['mac_address']
if mac not in hypervisor_macs:
LOG.debug("Port %(port)s mac address %(mac)s is "
"not in the set of hypervisor macs: "
"%(hyper_macs)s. Nova will overwrite "
"this with a new mac address.",
{'port': port['id'],
'mac': mac,
'hyper_macs': hypervisor_macs},
instance=instance)
else:
# Don't try to use this MAC if we need to create a
# port on the fly later. Identical MACs may be
# configured by users into multiple ports so we
# discard rather than popping.
available_macs.discard(mac)
return available_macs
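# Example (illustrative sketch with hypothetical values): a MAC already
# used by a requested port is discarded from the returned set.
#
#     requested = {'port-1': {'id': 'port-1',
#                             'mac_address': 'fa:16:3e:00:00:01'}}
#     macs = {'fa:16:3e:00:00:01', 'fa:16:3e:00:00:02'}
#     _filter_hypervisor_macs(instance, requested, macs)
#     # -> {'fa:16:3e:00:00:02'}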
class API(base_api.NetworkAPI):
"""API for interacting with the neutron 2.x API."""
def __init__(self):
super(API, self).__init__()
self.last_neutron_extension_sync = None
self.extensions = {}
self.pci_whitelist = pci_whitelist.Whitelist(
CONF.pci.passthrough_whitelist)
def _update_port_with_migration_profile(
self, instance, port_id, port_profile, admin_client):
try:
updated_port = admin_client.update_port(
port_id, {'port': {BINDING_PROFILE: port_profile}})
return updated_port
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.error("Unable to update binding profile "
"for port: %(port)s due to failure: %(error)s",
{'port': port_id, 'error': ex},
instance=instance)
def _clear_migration_port_profile(
self, context, instance, admin_client, ports):
for p in ports:
# If the port already has a migration profile and if
# it is to be torn down, then we need to clean up
# the migration profile.
port_profile = _get_binding_profile(p)
if not port_profile:
continue
if MIGRATING_ATTR in port_profile:
del port_profile[MIGRATING_ATTR]
LOG.debug("Removing port %s migration profile", p['id'],
instance=instance)
self._update_port_with_migration_profile(
instance, p['id'], port_profile, admin_client)
def _setup_migration_port_profile(
self, context, instance, host, admin_client, ports):
# Migrating to a new host
for p in ports:
# If the host hasn't changed, there is nothing to do.
# But if the destination host is different from the
# current one, update the port_profile with the
# 'migrating_to' (MIGRATING_ATTR) key pointing to
# the given 'host'.
host_id = p.get(BINDING_HOST_ID)
if host_id != host:
port_profile = _get_binding_profile(p)
# If the "migrating_to" attribute already points at the given
# host, then skip the port update call since we're not changing
# anything.
if host != port_profile.get(MIGRATING_ATTR):
port_profile[MIGRATING_ATTR] = host
self._update_port_with_migration_profile(
instance, p['id'], port_profile, admin_client)
LOG.debug("Port %(port_id)s updated with migration "
"profile %(profile_data)s successfully",
{'port_id': p['id'],
'profile_data': port_profile},
instance=instance)
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures.
:param context: The user request context.
:param instance: The instance with attached ports.
:param host: Optional host used to control the setup. If provided and
is not the same as the current instance.host, this method assumes
the instance is being migrated and sets the "migrating_to"
attribute in the binding profile for the attached ports.
:param teardown: Whether or not network information for the ports
should be cleaned up. If True, at a minimum the "migrating_to"
attribute is cleared in the binding profile for the ports. If a
host is also provided, then port bindings for that host are
deleted when teardown is True as long as the host does not match
the current instance.host.
:raises: nova.exception.PortBindingDeletionFailed if host is not None,
teardown is True, and port binding deletion fails.
"""
# Check if the instance is migrating to a new host.
port_migrating = host and (instance.host != host)
# If the port is migrating to a new host or if it is a
# teardown on the original host, then proceed.
if port_migrating or teardown:
search_opts = {'device_id': instance.uuid,
'tenant_id': instance.project_id,
BINDING_HOST_ID: instance.host}
# Now get the port details to process the ports
# binding profile info.
data = self.list_ports(context, **search_opts)
ports = data['ports']
admin_client = get_client(context, admin=True)
if teardown:
# Reset the port profile
self._clear_migration_port_profile(
context, instance, admin_client, ports)
# If a host was provided, delete any bindings between that
# host and the ports as long as the host isn't the same as
# the current instance.host.
has_binding_ext = self.supports_port_binding_extension(context)
if port_migrating and has_binding_ext:
# Attempt to delete all port bindings on the host and raise
# any errors at the end.
failed_port_ids = []
for port in ports:
# This call is safe in that 404s for non-existing
# bindings are ignored.
try:
self.delete_port_binding(
context, port['id'], host)
except exception.PortBindingDeletionFailed:
# delete_port_binding will log an error for each
# failure but since we're iterating a list we want
# to keep track of all failures to build a generic
# exception to raise
failed_port_ids.append(port['id'])
if failed_port_ids:
msg = (_("Failed to delete binding for port(s) "
"%(port_ids)s and host %(host)s.") %
{'port_ids': ','.join(failed_port_ids),
'host': host})
raise exception.PortBindingDeletionFailed(msg)
elif port_migrating:
# Setup the port profile
self._setup_migration_port_profile(
context, instance, host, admin_client, ports)
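# Example (illustrative sketch; host names are hypothetical): during a
# migration the calling code would typically first run
#
#     self.setup_networks_on_host(ctxt, instance, host='dest-host')
#
# to mark the ports as migrating to 'dest-host', and later
#
#     self.setup_networks_on_host(ctxt, instance, host='dest-host',
#                                 teardown=True)
#
# to clear the 'migrating_to' attribute and drop the port bindings for
# that host if the instance did not end up there.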
def _get_available_networks(self, context, project_id,
net_ids=None, neutron=None,
auto_allocate=False):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
If net_ids specified, it searches networks with requested IDs only.
"""
if not neutron:
neutron = get_client(context)
if net_ids:
# If user has specified to attach instance only to specific
# networks then only add these to **search_opts. This search will
# also include 'shared' networks.
search_opts = {'id': net_ids}
nets = neutron.list_networks(**search_opts).get('networks', [])
else:
# (1) Retrieve non-public network list owned by the tenant.
search_opts = {'tenant_id': project_id, 'shared': False}
if auto_allocate:
# The auto-allocated-topology extension may create complex
# network topologies and it does so in a non-transactional
# fashion. Therefore API users may be exposed to resources that
# are transient or partially built. A client should use
# resources that are meant to be ready and this can be done by
# checking their admin_state_up flag.
search_opts['admin_state_up'] = True
nets = neutron.list_networks(**search_opts).get('networks', [])
# (2) Retrieve public network list.
search_opts = {'shared': True}
nets += neutron.list_networks(**search_opts).get('networks', [])
_ensure_requested_network_ordering(
lambda x: x['id'],
nets,
net_ids)
return nets
def _create_port_minimal(self, port_client, instance, network_id,
fixed_ip=None, security_group_ids=None):
"""Attempts to create a port for the instance on the given network.
:param port_client: The client to use to create the port.
:param instance: Create the port for the given instance.
:param network_id: Create the port on the given network.
:param fixed_ip: Optional fixed IP to use from the given network.
:param security_group_ids: Optional list of security group IDs to
apply to the port.
:returns: The created port.
:raises PortLimitExceeded: If neutron fails with an OverQuota error.
:raises NoMoreFixedIps: If neutron fails with
IpAddressGenerationFailure error.
:raises: PortBindingFailed: If port binding failed.
"""
# Set the device_id so it's clear who this port was created for,
# and to stop other instances trying to use it
port_req_body = {'port': {'device_id': instance.uuid}}
try:
if fixed_ip:
port_req_body['port']['fixed_ips'] = [
{'ip_address': str(fixed_ip)}]
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance.project_id
if security_group_ids:
port_req_body['port']['security_groups'] = security_group_ids
port_response = port_client.create_port(port_req_body)
port = port_response['port']
port_id = port['id']
try:
_ensure_no_port_binding_failure(port)
except exception.PortBindingFailed:
with excutils.save_and_reraise_exception():
port_client.delete_port(port_id)
LOG.debug('Successfully created port: %s', port_id,
instance=instance)
return port
except neutron_client_exc.InvalidIpForNetworkClient:
LOG.warning('Neutron error: %(ip)s is not a valid IP address '
'for network %(network_id)s.',
{'ip': fixed_ip, 'network_id': network_id},
instance=instance)
msg = (_('Fixed IP %(ip)s is not a valid ip address for '
'network %(network_id)s.') %
{'ip': fixed_ip, 'network_id': network_id})
raise exception.InvalidInput(reason=msg)
except (neutron_client_exc.IpAddressInUseClient,
neutron_client_exc.IpAddressAlreadyAllocatedClient):
LOG.warning('Neutron error: Fixed IP %s is '
'already in use.', fixed_ip, instance=instance)
msg = _("Fixed IP %s is already in use.") % fixed_ip
raise exception.FixedIpAlreadyInUse(message=msg)
except neutron_client_exc.OverQuotaClient:
LOG.warning(
'Neutron error: Port quota exceeded in tenant: %s',
port_req_body['port']['tenant_id'], instance=instance)
raise exception.PortLimitExceeded()
except neutron_client_exc.IpAddressGenerationFailureClient:
LOG.warning('Neutron error: No more fixed IPs in network: %s',
network_id, instance=instance)
raise exception.NoMoreFixedIps(net=network_id)
except neutron_client_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception('Neutron error creating port on network %s',
network_id, instance=instance)
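# Example (illustrative sketch; IDs and the address are hypothetical):
# the request body built above ends up looking like
#
#     {'port': {'device_id': '<instance uuid>',
#               'network_id': '<network id>',
#               'admin_state_up': True,
#               'tenant_id': '<project id>',
#               'fixed_ips': [{'ip_address': '192.0.2.10'}],
#               'security_groups': ['<security group id>']}}
#
# with 'fixed_ips' and 'security_groups' only present when the caller
# supplied them.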
def _update_port(self, port_client, instance, port_id,
port_req_body):
try:
port_response = port_client.update_port(port_id, port_req_body)
port = port_response['port']
_ensure_no_port_binding_failure(port)
LOG.debug('Successfully updated port: %s', port_id,
instance=instance)
return port
except neutron_client_exc.MacAddressInUseClient:
mac_address = port_req_body['port'].get('mac_address')
network_id = port_req_body['port'].get('network_id')
LOG.warning('Neutron error: MAC address %(mac)s is already '
'in use on network %(network)s.',
{'mac': mac_address, 'network': network_id},
instance=instance)
raise exception.PortInUse(port_id=mac_address)
except neutron_client_exc.HostNotCompatibleWithFixedIpsClient:
network_id = port_req_body['port'].get('network_id')
LOG.warning('Neutron error: Tried to bind a port with '
'fixed_ips to a host in the wrong segment on '
'network %(network)s.',
{'network': network_id}, instance=instance)
raise exception.FixedIpInvalidOnHost(port_id=port_id)
@staticmethod
def _populate_mac_address(instance, port_req_body, available_macs):
# NOTE(johngarbutt) On port_update, this will cause us to override
# any previous mac address the port may have had.
if available_macs is not None:
if not available_macs:
raise exception.PortNotFree(
instance=instance.uuid)
mac_address = available_macs.pop()
port_req_body['port']['mac_address'] = mac_address
return mac_address
def _check_external_network_attach(self, context, nets):
"""Check if attaching to external network is permitted."""
if not context.can(servers_policies.NETWORK_ATTACH_EXTERNAL,
fatal=False):
for net in nets:
# Perform this check here rather than in validate_networks to
# ensure the check is performed every time
# allocate_for_instance is invoked
if net.get('router:external') and not net.get('shared'):
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['id'])
def _unbind_ports(self, context, ports,
neutron, port_client=None):
"""Unbind the given ports by clearing their device_id and
device_owner.
:param context: The request context.
:param ports: list of port IDs.
:param neutron: neutron client for the current context.
:param port_client: The client with sufficient (admin) privileges
for updating the ports.
"""
if port_client is None:
# Requires admin creds to set port bindings
port_client = get_client(context, admin=True)
for port_id in ports:
# A port_id is optional in the NetworkRequest object so check here
# in case the caller forgot to filter the list.
if port_id is None:
continue
port_req_body = {'port': {'device_id': '', 'device_owner': ''}}
port_req_body['port'][BINDING_HOST_ID] = None
try:
port = self._show_port(context, port_id,
neutron_client=neutron,
fields=BINDING_PROFILE)
except exception.PortNotFound:
LOG.debug('Unable to show port %s as it no longer '
'exists.', port_id)
# A missing port should not stop processing of the remaining ports.
continue
except Exception:
# NOTE: In case we can't retrieve the binding:profile, assume
# that it is empty.
LOG.exception("Unable to get binding:profile for port '%s'",
port_id)
port_profile = {}
else:
port_profile = port.get(BINDING_PROFILE, {})
# NOTE: We're doing this to remove the binding information
# for the physical device but don't want to overwrite the other
# information in the binding profile.
for profile_key in ('pci_vendor_info', 'pci_slot'):
if profile_key in port_profile:
del port_profile[profile_key]
port_req_body['port'][BINDING_PROFILE] = port_profile
if self._has_dns_extension():
port_req_body['port']['dns_name'] = ''
try:
port_client.update_port(port_id, port_req_body)
except neutron_client_exc.PortNotFoundClient:
LOG.debug('Unable to unbind port %s as it no longer '
'exists.', port_id)
except Exception:
LOG.exception("Unable to clear device ID for port '%s'",
port_id)
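# Example (illustrative sketch): the per-port update body sent above,
# assuming the DNS extension is loaded and the profile only carried
# PCI keys, looks like
#
#     {'port': {'device_id': '', 'device_owner': '',
#               'binding:host_id': None,
#               'binding:profile': {},
#               'dns_name': ''}}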
def _validate_requested_port_ids(self, context, instance, neutron,
requested_networks):
"""Processes and validates requested networks for allocation.
Iterates over the list of NetworkRequest objects, validating the
request and building sets of ports and networks to
use for allocating ports for the instance.
:param context: The user request context.
:type context: nova.context.RequestContext
:param instance: allocate networks on this instance
:type instance: nova.objects.Instance
:param neutron: neutron client session
:type neutron: neutronclient.v2_0.client.Client
:param requested_networks: List of user-requested networks and/or ports
:type requested_networks: nova.objects.NetworkRequestList
:returns: tuple of:
- ports: dict mapping of port id to port dict
- ordered_networks: list of nova.objects.NetworkRequest objects
for requested networks (either via explicit network request
or the network for an explicit port request)
:raises nova.exception.PortNotFound: If a requested port is not found
in Neutron.
:raises nova.exception.PortNotUsable: If a requested port is not owned
by the same tenant that the instance is created under.
:raises nova.exception.PortInUse: If a requested port is already
attached to another instance.
:raises nova.exception.PortNotUsableDNS: If a requested port has a
value assigned to its dns_name attribute.
"""
ports = {}
ordered_networks = []
# If we're asked to auto-allocate the network then there won't be any
# ports or real neutron networks to look up, so just return empty
# results.
if requested_networks and not requested_networks.auto_allocate:
for request in requested_networks:
# Process a request to use a pre-existing neutron port.
if request.port_id:
# Make sure the port exists.
port = self._show_port(context, request.port_id,
neutron_client=neutron)
# Make sure the instance has access to the port.
if port['tenant_id'] != instance.project_id:
raise exception.PortNotUsable(port_id=request.port_id,
instance=instance.uuid)
# Make sure the port isn't already attached to another
# instance.
if port.get('device_id'):
raise exception.PortInUse(port_id=request.port_id)
# Make sure that if the user assigned a value to the port's
# dns_name attribute, it is equal to the instance's
# hostname
if port.get('dns_name'):
if port['dns_name'] != instance.hostname:
raise exception.PortNotUsableDNS(
port_id=request.port_id,
instance=instance.uuid, value=port['dns_name'],
hostname=instance.hostname)
# Make sure the port is usable
_ensure_no_port_binding_failure(port)
# If requesting a specific port, automatically process
# the network for that port as if it were explicitly
# requested.
request.network_id = port['network_id']
ports[request.port_id] = port
# Process a request to use a specific neutron network.
if request.network_id:
ordered_networks.append(request)
return ports, ordered_networks
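# Example (illustrative sketch; IDs are hypothetical): for a request list
# containing one pre-existing port and one plain network, the result is
#
#     ports == {'port-1': {...}}  # full port dict from neutron
#     ordered_networks == [NetworkRequest(network_id='net-1',
#                                         port_id='port-1'),
#                          NetworkRequest(network_id='net-2')]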
def _clean_security_groups(self, security_groups):
"""Cleans security groups requested from Nova API
Neutron already passes a 'default' security group when
creating ports, so it's not necessary to specify it in the
request.
"""
if not security_groups:
security_groups = []
elif security_groups == [DEFAULT_SECGROUP]:
security_groups = []
return security_groups
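# Example (illustrative): None -> [], ['default'] -> [],
# ['web', 'db'] -> ['web', 'db'] (anything other than the lone
# 'default' group is passed through unchanged).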
def _process_security_groups(self, instance, neutron, security_groups):
"""Processes and validates requested security groups for allocation.
Iterates over the list of requested security groups, validating the
request and filtering out the list of security group IDs to use for
port allocation.
:param instance: allocate networks on this instance
:type instance: nova.objects.Instance
:param neutron: neutron client session
:type neutron: neutronclient.v2_0.client.Client
:param security_groups: list of requested security group name or IDs
to use when allocating new ports for the instance
:return: list of security group IDs to use when allocating new ports
:raises nova.exception.NoUniqueMatch: If multiple security groups
are requested with the same name.
:raises nova.exception.SecurityGroupNotFound: If a requested security
group is not in the tenant-filtered list of available security
groups in Neutron.
"""
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
search_opts = {'tenant_id': instance.project_id}
user_security_groups = neutron.list_security_groups(
**search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
uuid_match = None
for user_security_group in user_security_groups:
if user_security_group['name'] == security_group:
# If there was a name match in a previous iteration
# of the loop, we have a conflict.
if name_match:
raise exception.NoUniqueMatch(
_("Multiple security groups found matching"
" '%s'. Use an ID to be more specific.") %
security_group)
name_match = user_security_group['id']
if user_security_group['id'] == security_group:
uuid_match = user_security_group['id']
# If a user names a security group the same as
# another security group's UUID, the name takes priority.
if name_match:
security_group_ids.append(name_match)
elif uuid_match:
security_group_ids.append(uuid_match)
else:
raise exception.SecurityGroupNotFound(
security_group_id=security_group)
return security_group_ids
def _validate_requested_network_ids(self, context, instance, neutron,
requested_networks, ordered_networks):
"""Check requested networks using the Neutron API.
Check the user has access to the network they requested, and that
it is a suitable network to connect to. This includes getting the
network details for any ports that have been passed in, because the
request will have been updated with the network_id in
_validate_requested_port_ids.
If the user has not requested any ports or any networks, we get back
a full list of networks the user has access to, and if there is only
one network, we update ordered_networks so we will connect the
instance to that network.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param neutron: neutron client
:param requested_networks: nova.objects.NetworkRequestList, list of
user-requested networks and/or ports; may be empty
:param ordered_networks: output from _validate_requested_port_ids
that will be used to create and update ports
:returns: dict, keyed by network ID, of networks to use
:raises InterfaceAttachFailedNoNetwork: If no specific networks were
requested and none are available.
:raises NetworkAmbiguous: If no specific networks were requested but
more than one is available.
:raises ExternalNetworkAttachForbidden: If the policy rules forbid
the request context from using an external non-shared network but
one was requested (or available).
"""
# Get networks from Neutron
# If net_ids is empty, this actually returns all available nets
auto_allocate = requested_networks and requested_networks.auto_allocate
net_ids = [request.network_id for request in ordered_networks]
nets = self._get_available_networks(context, instance.project_id,
net_ids, neutron=neutron,
auto_allocate=auto_allocate)
if not nets:
if requested_networks:
# There are no networks available for the project to use and
# none specifically requested, so check to see if we're asked
# to auto-allocate the network.
if auto_allocate:
# During validate_networks we checked to see if
# auto-allocation is available so we don't need to do that
# again here.
nets = [self._auto_allocate_network(instance, neutron)]
else:
# NOTE(chaochin): If user specifies a network id and the
# network can not be found, raise NetworkNotFound error.
for request in requested_networks:
if not request.port_id and request.network_id:
raise exception.NetworkNotFound(
network_id=request.network_id)
else:
# no requested nets and user has no available nets
return {}
# if this function is directly called without a requested_networks param
# or if it is indirectly called through allocate_port_for_instance()
# with None params=(network_id=None, requested_ip=None, port_id=None,
# pci_request_id=None):
if (not requested_networks
or requested_networks.is_single_unspecified
or requested_networks.auto_allocate):
# If no networks were requested and none are available, consider
# it a bad request.
if not nets:
raise exception.InterfaceAttachFailedNoNetwork(
project_id=instance.project_id)
# bug/1267723 - if no network is requested and more
# than one is available then raise NetworkAmbiguous Exception
if len(nets) > 1:
msg = _("Multiple possible networks found, use a Network "
"ID to be more specific.")
raise exception.NetworkAmbiguous(msg)
ordered_networks.append(
objects.NetworkRequest(network_id=nets[0]['id']))
# NOTE(melwitt): check external net attach permission after the
# check for ambiguity, there could be another
# available net which is permitted bug/1364344
self._check_external_network_attach(context, nets)
return {net['id']: net for net in nets}
def _create_ports_for_instance(self, context, instance, ordered_networks,
nets, neutron, security_group_ids):
"""Create port for network_requests that don't have a port_id
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param ordered_networks: objects.NetworkRequestList in requested order
:param nets: a dict of network_id to networks returned from neutron
:param neutron: neutronclient built from users request context
:param security_group_ids: a list of security group IDs to be applied
to any ports created
:returns: a list of pairs (NetworkRequest, created_port_uuid); note that
created_port_uuid will be None for the pair where a pre-existing
port was part of the user request
"""
created_port_ids = []
requests_and_created_ports = []
for request in ordered_networks:
network = nets.get(request.network_id)
# If the network_id did not pass validate_networks() and is not
# available here, then skip it safely rather than continuing with a
# None network.
if not network:
continue
try:
port_security_enabled = network.get(
'port_security_enabled', True)
if port_security_enabled:
if not network.get('subnets'):
# Neutron can't apply security groups to a port
# for a network without L3 assignments.
LOG.debug('Network with port security enabled does '
'not have subnets so security groups '
'cannot be applied: %s',
network, instance=instance)
raise exception.SecurityGroupCannotBeApplied()
else:
if security_group_ids:
# We don't want to apply security groups on port
# for a network defined with
# 'port_security_enabled=False'.
LOG.debug('Network has port security disabled so '
'security groups cannot be applied: %s',
network, instance=instance)
raise exception.SecurityGroupCannotBeApplied()
created_port_id = None
if not request.port_id:
# create minimal port, if port not already created by user
created_port = self._create_port_minimal(
neutron, instance, request.network_id,
request.address, security_group_ids)
created_port_id = created_port['id']
created_port_ids.append(created_port_id)
requests_and_created_ports.append((
request, created_port_id))
except Exception:
with excutils.save_and_reraise_exception():
if created_port_ids:
self._delete_ports(
neutron, instance, created_port_ids)
return requests_and_created_ports
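# Example (illustrative sketch; IDs are hypothetical): for one
# pre-existing port and one network-only request the return value is
#
#     [(NetworkRequest(network_id='net-1', port_id='port-1'), None),
#      (NetworkRequest(network_id='net-2'), 'created-port-2')]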
def allocate_for_instance(self, context, instance, vpn,
requested_networks, macs=None,
security_groups=None, bind_host_id=None):
"""Allocate network resources for the instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param vpn: A boolean, ignored by this driver.
:param requested_networks: objects.NetworkRequestList object.
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
NB: NeutronV2 currently assigns hypervisor supplied MAC addresses
to arbitrary networks, which requires openflow switches to
function correctly if more than one network is being used with
the bare metal hypervisor (which is the only one known to limit
MAC addresses).
:param security_groups: None or security groups to allocate for
instance.
:param bind_host_id: the host ID to attach to the ports being created.
:returns: network info as from get_instance_nw_info()
"""
LOG.debug('allocate_for_instance()', instance=instance)
if not instance.project_id:
msg = _('empty project id for instance %s')
raise exception.InvalidInput(
reason=msg % instance.uuid)
# We do not want to create a new neutron session for each call
neutron = get_client(context)
#
# Validate ports and networks with neutron. The requested_ports_dict
# variable is a dict, keyed by port ID, of ports that were on the user
# request and may be empty. The ordered_networks variable is a list of
# NetworkRequest objects for any networks or ports specifically
# requested by the user, which again may be empty.
#
requested_ports_dict, ordered_networks = (
self._validate_requested_port_ids(
context, instance, neutron, requested_networks))
nets = self._validate_requested_network_ids(
context, instance, neutron, requested_networks, ordered_networks)
if not nets:
LOG.debug("No network configured", instance=instance)
return network_model.NetworkInfo([])
# Validate requested security groups
security_groups = self._clean_security_groups(security_groups)
security_group_ids = self._process_security_groups(
instance, neutron, security_groups)
# Create ports from the list of ordered_networks. The returned
# requests_and_created_ports variable is a list of 2-item tuples of
# the form (NetworkRequest, created_port_id). Note that a tuple pair
# will have None for the created_port_id if the NetworkRequest already
# contains a port_id, meaning the user requested a specific
# pre-existing port so one wasn't created here. The ports will be
# updated later in _update_ports_for_instance to be bound to the
# instance and compute host.
requests_and_created_ports = self._create_ports_for_instance(
context, instance, ordered_networks, nets, neutron,
security_group_ids)
#
# Update existing and newly created ports
#
available_macs = _filter_hypervisor_macs(
instance, requested_ports_dict, macs)
# We always need admin_client to build nw_info,
# we sometimes need it when updating ports
admin_client = get_client(context, admin=True)
ordered_nets, ordered_port_ids, preexisting_port_ids, \
created_port_ids = self._update_ports_for_instance(
context, instance,
neutron, admin_client, requests_and_created_ports, nets,
bind_host_id, available_macs, requested_ports_dict)
#
# Perform a full update of the network_info_cache,
# including re-fetching lots of the required data from neutron
#
nw_info = self.get_instance_nw_info(
context, instance, networks=ordered_nets,
port_ids=ordered_port_ids,
admin_client=admin_client,
preexisting_port_ids=preexisting_port_ids,
update_cells=True)
# Only return info about ports we processed in this run, which might
# have been pre-existing neutron ports or ones that nova created. In
# the initial allocation case (server create), this will be everything
# we processed, and in later runs will only be what was processed that
# time. For example, if the instance was created with port A and
# then port B was attached in this call, only port B would be returned.
# Thus, this filtering only affects the attach case.
return network_model.NetworkInfo([vif for vif in nw_info
if vif['id'] in created_port_ids +
preexisting_port_ids])
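# Example (illustrative sketch; objects and IDs are hypothetical): a
# typical call during server create looks like
#
#     requested = objects.NetworkRequestList(objects=[
#         objects.NetworkRequest(network_id='net-1')])
#     nw_info = self.allocate_for_instance(
#         ctxt, instance, vpn=False, requested_networks=requested,
#         security_groups=['web'], bind_host_id='compute-1')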
def _update_ports_for_instance(self, context, instance, neutron,
admin_client, requests_and_created_ports, nets,
bind_host_id, available_macs, requested_ports_dict):
"""Update ports from network_requests.
Updates the pre-existing ports and the ones created in
``_create_ports_for_instance`` with ``device_id``, ``device_owner``,
optionally ``mac_address`` and, depending on the
loaded extensions, ``rxtx_factor``, ``binding:host_id``, ``dns_name``.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param neutron: client using user context
:param admin_client: client using admin context
:param requests_and_created_ports: [(NetworkRequest, created_port_id)];
Note that created_port_id will be None for any user-requested
pre-existing port.
:param nets: a dict of network_id to networks returned from neutron
:param bind_host_id: a string for port['binding:host_id']
:param available_macs: a list of available mac addresses
:param requested_ports_dict: dict, keyed by port ID, of ports requested
by the user
:returns: tuple with the following::
* list of network dicts in their requested order
* list of port IDs in their requested order - note that this does
not mean the port was requested by the user; it could be a port
created on a network requested by the user
* list of pre-existing port IDs requested by the user
* list of created port IDs
"""
# We currently require admin creds to set port bindings.
port_client = admin_client
preexisting_port_ids = []
created_port_ids = []
ports_in_requested_order = []
nets_in_requested_order = []
created_vifs = [] # this list is for cleanups if we fail
for request, created_port_id in requests_and_created_ports:
vifobj = objects.VirtualInterface(context)
vifobj.instance_uuid = instance.uuid
vifobj.tag = request.tag if 'tag' in request else None
network = nets.get(request.network_id)
# If the network_id did not pass validate_networks() and is not
# available here, then skip it safely rather than continuing with a
# None network.
if not network:
continue
nets_in_requested_order.append(network)
zone = 'compute:%s' % instance.availability_zone
port_req_body = {'port': {'device_id': instance.uuid,
'device_owner': zone}}
if (requested_ports_dict and
request.port_id in requested_ports_dict and
requested_ports_dict[request.port_id].get(BINDING_PROFILE)):
port_req_body['port'][BINDING_PROFILE] = (
requested_ports_dict[request.port_id][BINDING_PROFILE])
try:
self._populate_neutron_extension_values(
context, instance, request.pci_request_id, port_req_body,
network=network, neutron=neutron,
bind_host_id=bind_host_id)
self._populate_pci_mac_address(instance,
request.pci_request_id, port_req_body)
self._populate_mac_address(
instance, port_req_body, available_macs)
if created_port_id:
port_id = created_port_id
created_port_ids.append(port_id)
else:
port_id = request.port_id
ports_in_requested_order.append(port_id)
# After port is created, update other bits
updated_port = self._update_port(
port_client, instance, port_id, port_req_body)
# NOTE(danms): The virtual_interfaces table enforces global
# uniqueness on MAC addresses, which clearly does not match
# with neutron's view of the world. Since address is a 255-char
# string we can namespace it with our port id. Using '/' should
# be safely excluded from MAC address notations as well as
# UUIDs. We could stop doing this when we remove
# nova-network, but we'd need to leave the read translation in
# for longer than that of course.
vifobj.address = '%s/%s' % (updated_port['mac_address'],
updated_port['id'])
vifobj.uuid = port_id
vifobj.create()
created_vifs.append(vifobj)
if not created_port_id:
# only add if update worked and port create not called
preexisting_port_ids.append(port_id)
self._update_port_dns_name(context, instance, network,
ports_in_requested_order[-1],
neutron)
except Exception:
with excutils.save_and_reraise_exception():
self._unbind_ports(context,
preexisting_port_ids,
neutron, port_client)
self._delete_ports(neutron, instance, created_port_ids)
for vif in created_vifs:
vif.destroy()
return (nets_in_requested_order, ports_in_requested_order,
preexisting_port_ids, created_port_ids)
def _refresh_neutron_extensions_cache(self, context, neutron=None):
"""Refresh the neutron extensions cache when necessary."""
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync)
>= CONF.neutron.extension_sync_interval)):
if neutron is None:
neutron = get_client(context)
extensions_list = neutron.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
self.extensions = {ext['name']: ext for ext in extensions_list}
def _has_multi_provider_extension(self, context, neutron=None):
self._refresh_neutron_extensions_cache(context, neutron=neutron)
return constants.MULTI_NET_EXT in self.extensions
def _has_dns_extension(self):
return constants.DNS_INTEGRATION in self.extensions
def _has_qos_queue_extension(self, context, neutron=None):
self._refresh_neutron_extensions_cache(context, neutron=neutron)
return constants.QOS_QUEUE in self.extensions
def has_substr_port_filtering_extension(self, context):
self._refresh_neutron_extensions_cache(context)
return constants.SUBSTR_PORT_FILTERING in self.extensions
def supports_port_binding_extension(self, context):
"""This is a simple check to see if the neutron "binding-extended"
extension exists and is enabled.
The "binding-extended" extension allows nova to bind a port to multiple
hosts at the same time, like during live migration.
:param context: the user request context
:returns: True if the binding-extended API extension is available,
False otherwise
"""
self._refresh_neutron_extensions_cache(context)
return constants.PORT_BINDING_EXTENDED in self.extensions
def bind_ports_to_host(self, context, instance, host,
vnic_type=None, profile=None):
"""Attempts to bind the ports from the instance on the given host
If the ports are already actively bound to another host, like the
source host during live migration, then the new port bindings will
be inactive, assuming $host is the destination host for the live
migration.
In the event of an error, any ports which were successfully bound to
the host should have those host bindings removed from the ports.
This method should not be used if "supports_port_binding_extension"
returns False.
:param context: the user request context
:type context: nova.context.RequestContext
:param instance: the instance with a set of ports
:type instance: nova.objects.Instance
:param host: the host on which to bind the ports which
are attached to the instance
:type host: str
:param vnic_type: optional vnic type string for the host
port binding
:type vnic_type: str
:param profile: optional vif profile dict for the host port
binding; note that the port binding profile is mutable
via the networking "Port Binding" API so callers that
pass in a profile should ensure they have the latest
version from neutron with their changes merged,
which can be determined using the "revision_number"
attribute of the port.
:type profile: dict
:raises: PortBindingFailed if any of the ports failed to be bound to
the destination host
:returns: dict, keyed by port ID, of a new host port
binding dict per port that was bound
"""
# Get the current ports off the instance. This assumes the cache is
# current.
network_info = instance.get_network_info()
port_ids = [vif['id'] for vif in network_info]
if not port_ids:
# The instance doesn't have any ports so there is nothing to do.
LOG.debug('Instance does not have any ports.', instance=instance)
return {}
client = _get_ksa_client(context, admin=True)
# Now bind each port to the destination host and keep track of each
# port that is bound to the resulting binding so we can rollback in
# the event of a failure, or return the results if everything is OK.
binding = dict(host=host)
if vnic_type:
binding['vnic_type'] = vnic_type
if profile:
binding['profile'] = profile
data = dict(binding=binding)
bindings_by_port_id = {}
for port_id in port_ids:
resp = client.post('/v2.0/ports/%s/bindings' % port_id,
json=data, raise_exc=False)
if resp:
bindings_by_port_id[port_id] = resp.json()['binding']
else:
# Something failed, so log the error and rollback any
# successful bindings.
LOG.error('Binding failed for port %s and host %s. '
'Error: (%s %s)',
port_id, host, resp.status_code, resp.text,
instance=instance)
for rollback_port_id in bindings_by_port_id:
try:
self.delete_port_binding(
context, rollback_port_id, host)
except exception.PortBindingDeletionFailed:
LOG.warning('Failed to remove binding for port %s on '
'host %s.', rollback_port_id, host,
instance=instance)
raise exception.PortBindingFailed(port_id=port_id)
return bindings_by_port_id
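# Example (illustrative sketch; values are hypothetical): the body POSTed
# per port above looks like
#
#     {'binding': {'host': 'dest-host',
#                  'vnic_type': 'normal',
#                  'profile': {}}}
#
# with 'vnic_type' and 'profile' only included when passed by the caller.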
def delete_port_binding(self, context, port_id, host):
"""Delete the port binding for the given port ID and host
This method should not be used if "supports_port_binding_extension"
returns False.
:param context: The request context for the operation.
:param port_id: The ID of the port with a binding to the host.
:param host: The host from which port bindings should be deleted.
:raises: nova.exception.PortBindingDeletionFailed if a non-404 error
response is received from neutron.
"""
client = _get_ksa_client(context, admin=True)
resp = client.delete(
'/v2.0/ports/%s/bindings/%s' % (port_id, host),
raise_exc=False)
if resp:
LOG.debug('Deleted binding for port %s and host %s.',
port_id, host)
else:
# We can safely ignore 404s since we're trying to delete
# the thing that wasn't found anyway.
if resp.status_code != 404:
# Log the details, raise an exception.
LOG.error('Unexpected error trying to delete binding '
'for port %s and host %s. Code: %s. '
'Error: %s', port_id, host,
resp.status_code, resp.text)
raise exception.PortBindingDeletionFailed(
port_id=port_id, host=host)
def activate_port_binding(self, context, port_id, host):
"""Activates an inactive port binding.
If there are two port bindings to different hosts, activating the
inactive binding atomically changes the other binding to inactive.
:param context: The request context for the operation.
:param port_id: The ID of the port with an inactive binding on the
host.
:param host: The host on which the inactive port binding should be
activated.
:raises: nova.exception.PortBindingActivationFailed if a non-409 error
response is received from neutron.
"""
client = _get_ksa_client(context, admin=True)
# This is a bit weird in that we don't PUT and update the status
# to ACTIVE, it's more like a POST action method in the compute API.
resp = client.put(
'/v2.0/ports/%s/bindings/%s/activate' % (port_id, host),
raise_exc=False)
if resp:
LOG.debug('Activated binding for port %s and host %s.',
port_id, host)
# A 409 means the port binding is already active, which shouldn't
# happen if the caller is doing things in the correct order.
elif resp.status_code == 409:
LOG.warning('Binding for port %s and host %s is already '
'active.', port_id, host)
else:
# Log the details, raise an exception.
LOG.error('Unexpected error trying to activate binding '
'for port %s and host %s. Code: %s. '
'Error: %s', port_id, host, resp.status_code,
resp.text)
raise exception.PortBindingActivationFailed(
port_id=port_id, host=host)
def _get_pci_device_profile(self, pci_dev):
dev_spec = self.pci_whitelist.get_devspec(pci_dev)
if dev_spec:
return {'pci_vendor_info': "%s:%s" %
(pci_dev.vendor_id, pci_dev.product_id),
'pci_slot': pci_dev.address,
'physical_network':
dev_spec.get_tags().get('physical_network')}
raise exception.PciDeviceNotFound(node_id=pci_dev.compute_node_id,
address=pci_dev.address)
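# Example (illustrative; the device values are hypothetical): for an
# SR-IOV VF the returned profile looks like
#
#     {'pci_vendor_info': '8086:10ed',
#      'pci_slot': '0000:81:00.2',
#      'physical_network': 'physnet1'}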
def _populate_neutron_binding_profile(self, instance, pci_request_id,
port_req_body):
"""Populate neutron binding:profile.
Populate it with SR-IOV related information
"""
if pci_request_id:
pci_dev = pci_manager.get_instance_pci_devs(
instance, pci_request_id).pop()
if port_req_body['port'].get(BINDING_PROFILE) is None:
port_req_body['port'][BINDING_PROFILE] = {}
profile = copy.deepcopy(port_req_body['port'][BINDING_PROFILE])
profile.update(self._get_pci_device_profile(pci_dev))
port_req_body['port'][BINDING_PROFILE] = profile
@staticmethod
def _populate_pci_mac_address(instance, pci_request_id, port_req_body):
"""Add the updated MAC address value to the update_port request body.
Currently this is done only for PF passthrough.
"""
if pci_request_id is not None:
pci_devs = pci_manager.get_instance_pci_devs(
instance, pci_request_id)
if len(pci_devs) != 1:
# NOTE(ndipanov): We shouldn't ever get here since
# InstancePCIRequest instances built from network requests
# only ever index a single device, which needs to be
# successfully claimed for this to be called as part of
# allocate_networks method
LOG.error("PCI request %s does not have a "
"unique device associated with it. Unable to "
"determine MAC address",
pci_request_id, instance=instance)
return
pci_dev = pci_devs[0]
if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF:
try:
mac = pci_utils.get_mac_by_pci_address(pci_dev.address)
except exception.PciDeviceNotFoundById as e:
LOG.error(
"Could not determine MAC address for %(addr)s, "
"error: %(e)s",
{"addr": pci_dev.address, "e": e}, instance=instance)
else:
port_req_body['port']['mac_address'] = mac
def _populate_neutron_extension_values(self, context, instance,
pci_request_id, port_req_body,
network=None, neutron=None,
bind_host_id=None):
"""Populate neutron extension values for the instance.
If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor.
"""
if self._has_qos_queue_extension(context, neutron=neutron):
flavor = instance.get_flavor()
rxtx_factor = flavor.get('rxtx_factor')
port_req_body['port']['rxtx_factor'] = rxtx_factor
port_req_body['port'][BINDING_HOST_ID] = bind_host_id
self._populate_neutron_binding_profile(instance,
pci_request_id,
port_req_body)
if self._has_dns_extension():
# If the DNS integration extension is enabled in Neutron, most
# ports will get their dns_name attribute set in the port create or
# update requests in allocate_for_instance. So we just add the
# dns_name attribute to the payload of those requests. The
# exception is when the port binding extension is enabled in
# Neutron and the port is on a network that has a non-blank
# dns_domain attribute. This case requires to be processed by
# method _update_port_dns_name
if (not network.get('dns_domain')):
port_req_body['port']['dns_name'] = instance.hostname
def _update_port_dns_name(self, context, instance, network, port_id,
neutron):
"""Update an instance port dns_name attribute with instance.hostname.
The dns_name attribute of a port on a network with a non-blank
dns_domain attribute will be sent to the external DNS service
(Designate) if DNS integration is enabled in Neutron. This requires the
assignment of the dns_name to the port to be done with a Neutron client
using the user's context. allocate_for_instance uses a port with admin
context if the port binding extension is enabled in Neutron. In this
case, we assign in this method the dns_name attribute to the port with
an additional update request. Only a very small fraction of ports will
require this additional update request.
"""
if self._has_dns_extension() and network.get('dns_domain'):
try:
port_req_body = {'port': {'dns_name': instance.hostname}}
neutron.update_port(port_id, port_req_body)
except neutron_client_exc.BadRequest:
LOG.warning('Neutron error: Instance hostname '
'%(hostname)s is not a valid DNS name',
{'hostname': instance.hostname}, instance=instance)
msg = (_('Instance hostname %(hostname)s is not a valid DNS '
'name') % {'hostname': instance.hostname})
raise exception.InvalidInput(reason=msg)
def _delete_ports(self, neutron, instance, ports, raise_if_fail=False):
exceptions = []
for port in ports:
try:
neutron.delete_port(port)
except neutron_client_exc.NeutronClientException as e:
if e.status_code == 404:
LOG.warning("Port %s does not exist", port,
instance=instance)
else:
exceptions.append(e)
LOG.warning("Failed to delete port %s for instance.",
port, instance=instance, exc_info=True)
if len(exceptions) > 0 and raise_if_fail:
raise exceptions[0]
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug('deallocate_for_instance()', instance=instance)
search_opts = {'device_id': instance.uuid}
neutron = get_client(context)
data = neutron.list_ports(**search_opts)
ports = [port['id'] for port in data.get('ports', [])]
requested_networks = kwargs.get('requested_networks') or []
# NOTE(danms): Temporary and transitional
if isinstance(requested_networks, objects.NetworkRequestList):
requested_networks = requested_networks.as_tuples()
ports_to_skip = set([port_id for nets, fips, port_id, pci_request_id
in requested_networks])
# NOTE(boden): requested_networks only passed in when deallocating
# from a failed build / spawn call. Therefore we need to include
# preexisting ports when deallocating from a standard delete op
# in which case requested_networks is not provided.
ports_to_skip |= set(self._get_preexisting_port_ids(instance))
ports = set(ports) - ports_to_skip
# Reset device_id and device_owner for the ports that are skipped
self._unbind_ports(context, ports_to_skip, neutron)
# Delete the rest of the ports
self._delete_ports(neutron, instance, ports, raise_if_fail=True)
# deallocate vifs (mac addresses)
objects.VirtualInterface.delete_by_instance_uuid(
context, instance.uuid)
# NOTE(arosen): This clears out the network_cache only if the instance
# hasn't already been deleted. This is needed when an instance fails to
# launch and is rescheduled onto another compute node. If the instance
# has already been deleted this call does nothing.
base_api.update_instance_cache_with_nw_info(self, context, instance,
network_model.NetworkInfo([]))
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None,
bind_host_id=None, tag=None):
"""Allocate a port for the instance."""
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=network_id,
address=requested_ip,
port_id=port_id,
pci_request_id=None,
tag=tag)])
return self.allocate_for_instance(context, instance, vpn=False,
requested_networks=requested_networks,
bind_host_id=bind_host_id)
def deallocate_port_for_instance(self, context, instance, port_id):
"""Remove a specified port from the instance.
Return network information for the instance
"""
neutron = get_client(context)
preexisting_ports = self._get_preexisting_port_ids(instance)
if port_id in preexisting_ports:
self._unbind_ports(context, [port_id], neutron)
else:
self._delete_ports(neutron, instance, [port_id],
raise_if_fail=True)
# Delete the VirtualInterface for the given port_id.
vif = objects.VirtualInterface.get_by_uuid(context, port_id)
if vif:
if 'tag' in vif and vif.tag:
self._delete_nic_metadata(instance, vif)
vif.destroy()
else:
LOG.debug('VirtualInterface not found for port: %s',
port_id, instance=instance)
return self.get_instance_nw_info(context, instance)
def _delete_nic_metadata(self, instance, vif):
for device in instance.device_metadata.devices:
if (isinstance(device, objects.NetworkInterfaceMetadata)
and device.mac == vif.address):
instance.device_metadata.devices.remove(device)
instance.save()
break
def list_ports(self, context, **search_opts):
"""List ports for the client based on search options."""
return get_client(context).list_ports(**search_opts)
def show_port(self, context, port_id):
"""Return the port for the client given the port id.
:param context: Request context.
:param port_id: The id of port to be queried.
:returns: A dict containing port data keyed by 'port', e.g.
::
{'port': {'port_id': 'abcd',
'fixed_ip_address': '1.2.3.4'}}
"""
return dict(port=self._show_port(context, port_id))
def _show_port(self, context, port_id, neutron_client=None, fields=None):
"""Return the port for the client given the port id.
:param context: Request context.
:param port_id: The id of port to be queried.
:param neutron_client: A neutron client.
:param fields: The condition fields to query port data.
:returns: A dict of port data.
e.g. {'port_id': 'abcd', 'fixed_ip_address': '1.2.3.4'}
"""
if not neutron_client:
neutron_client = get_client(context)
try:
if fields:
result = neutron_client.show_port(port_id, fields=fields)
else:
result = neutron_client.show_port(port_id)
return result.get('port')
except neutron_client_exc.PortNotFoundClient:
raise exception.PortNotFound(port_id=port_id)
except neutron_client_exc.Unauthorized:
raise exception.Forbidden()
except neutron_client_exc.NeutronClientException as exc:
msg = (_("Failed to access port %(port_id)s: %(reason)s") %
{'port_id': port_id, 'reason': exc})
raise exception.NovaException(message=msg)
def _get_instance_nw_info(self, context, instance, networks=None,
port_ids=None, admin_client=None,
preexisting_port_ids=None,
refresh_vif_id=None, **kwargs):
# NOTE(danms): This is an inner method intended to be called
# by other code that updates instance nwinfo. It *must* be
# called with the refresh_cache-%(instance_uuid) lock held!
LOG.debug('_get_instance_nw_info()', instance=instance)
# Ensure that we have an up to date copy of the instance info cache.
# Otherwise multiple requests could collide and cause cache
# corruption.
compute_utils.refresh_info_cache_for_instance(context, instance)
nw_info = self._build_network_info_model(context, instance, networks,
port_ids, admin_client,
preexisting_port_ids,
refresh_vif_id)
return network_model.NetworkInfo.hydrate(nw_info)
def _gather_port_ids_and_networks(self, context, instance, networks=None,
port_ids=None, neutron=None):
"""Return an instance's complete list of port_ids and networks."""
if ((networks is None and port_ids is not None) or
(port_ids is None and networks is not None)):
message = _("This method needs to be called with either "
"networks=None and port_ids=None or port_ids and "
"networks as not none.")
raise exception.NovaException(message=message)
ifaces = instance.get_network_info()
# This code path is only done when refreshing the network_cache
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
if networks is None:
networks = self._get_available_networks(context,
instance.project_id,
net_ids, neutron)
# an interface was added/removed from instance.
else:
# Prepare the network ids list for validation purposes
networks_ids = [network['id'] for network in networks]
# Validate that the interface networks don't already exist in networks.
# Though this issue can and should be solved in methods
# that prepare the networks list, this method should have this
# ignore-duplicate-networks/port-ids mechanism to reduce the
# probability of failing to boot the VM.
networks = networks + [
{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces
if _is_not_duplicate(iface['network']['id'],
networks_ids,
"networks",
instance)]
# Include existing interfaces so they are not removed from the db.
# Validate that the interface id is not in the port_ids
port_ids = [iface['id'] for iface in ifaces
if _is_not_duplicate(iface['id'],
port_ids,
"port_ids",
instance)] + port_ids
return networks, port_ids
@base_api.refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Add a fixed IP to the instance from specified network."""
neutron = get_client(context)
search_opts = {'network_id': network_id}
data = neutron.list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
if not ipam_subnets:
raise exception.NetworkNotFoundForInstance(
instance_id=instance.uuid)
zone = 'compute:%s' % instance.availability_zone
search_opts = {'device_id': instance.uuid,
'device_owner': zone,
'network_id': network_id}
data = neutron.list_ports(**search_opts)
ports = data['ports']
for p in ports:
for subnet in ipam_subnets:
fixed_ips = p['fixed_ips']
fixed_ips.append({'subnet_id': subnet['id']})
port_req_body = {'port': {'fixed_ips': fixed_ips}}
try:
neutron.update_port(p['id'], port_req_body)
return self._get_instance_nw_info(context, instance)
except Exception as ex:
msg = ("Unable to update port %(portid)s on subnet "
"%(subnet_id)s with failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'],
'subnet_id': subnet['id'],
'exception': ex}, instance=instance)
raise exception.NetworkNotFoundForInstance(
instance_id=instance.uuid)
@base_api.refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Remove a fixed IP from the instance."""
neutron = get_client(context)
zone = 'compute:%s' % instance.availability_zone
search_opts = {'device_id': instance.uuid,
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
data = neutron.list_ports(**search_opts)
ports = data['ports']
for p in ports:
fixed_ips = p['fixed_ips']
new_fixed_ips = []
for fixed_ip in fixed_ips:
if fixed_ip['ip_address'] != address:
new_fixed_ips.append(fixed_ip)
port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
try:
neutron.update_port(p['id'], port_req_body)
except Exception as ex:
msg = ("Unable to update port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex},
instance=instance)
return self._get_instance_nw_info(context, instance)
raise exception.FixedIpNotFoundForSpecificInstance(
instance_uuid=instance.uuid, ip=address)
def _get_physnet_tunneled_info(self, context, neutron, net_id):
"""Retrieve detailed network info.
:param context: The request context.
:param neutron: The neutron client object.
:param net_id: The ID of the network to retrieve information for.
:return: A tuple containing the physnet name, if defined, and the
tunneled status of the network. If the network uses multiple
segments, the first segment that defines a physnet value will be
used for the physnet name.
"""
if self._has_multi_provider_extension(context, neutron=neutron):
network = neutron.show_network(net_id,
fields='segments').get('network')
segments = network.get('segments', {})
for net in segments:
# NOTE(vladikr): In general, a "multi-segments" network is a
# combination of L2 segments. The current implementation
# contains vxlan and vlan(s) segments, where only a vlan
# network will have a physical_network specified, but this may
# change in the future. The purpose of this method
# is to find the first segment that provides a physical network.
# TODO(vladikr): Additional work will be required to handle the
# case of multiple vlan segments associated with different
# physical networks.
physnet_name = net.get('provider:physical_network')
if physnet_name:
return physnet_name, False
# Raising here as at least one segment should
# have a physical network provided.
if segments:
msg = (_("None of the segments of network %s provides a "
"physical_network") % net_id)
raise exception.NovaException(message=msg)
net = neutron.show_network(
net_id, fields=['provider:physical_network',
'provider:network_type']).get('network')
return (net.get('provider:physical_network'),
net.get('provider:network_type') in L3_NETWORK_TYPES)
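# Example (illustrative; network types are hypothetical): assuming
# L3_NETWORK_TYPES covers tunnel types such as 'vxlan', typical results
# are
#
#     ('physnet1', False)  # a 'vlan' network on physnet1
#     (None, True)         # a 'vxlan' (tunneled) network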
@staticmethod
def _get_trusted_mode_from_port(port):
"""Returns whether trusted mode is requested
If port binding does not provide any information about trusted
status this function is returning None
"""
value = _get_binding_profile(port).get('trusted')
if value is not None:
# This allows the user to specify things like '1' and 'yes' in
# the port binding profile and we can handle it as a boolean.
return strutils.bool_from_string(value)
def _get_port_vnic_info(self, context, neutron, port_id):
"""Retrieve port vNIC info
:param context: The request context
:param neutron: The Neutron client
:param port_id: The id of port to be queried
:return: A tuple of vNIC type, trusted status and network ID. Trusted
status only affects SR-IOV ports and will always be None for other
port types.
"""
port = self._show_port(context, port_id, neutron_client=neutron,
fields=['binding:vnic_type', BINDING_PROFILE,
'network_id'])
network_id = port.get('network_id')
trusted = None
vnic_type = port.get('binding:vnic_type',
network_model.VNIC_TYPE_NORMAL)
if vnic_type in network_model.VNIC_TYPES_SRIOV:
trusted = self._get_trusted_mode_from_port(port)
return vnic_type, trusted, network_id
def create_resource_requests(self, context, requested_networks,
pci_requests=None):
"""Retrieve all information for the networks passed at the time of
creating the server.
:param context: The request context.
:param requested_networks: The networks requested for the server.
:type requested_networks: nova.objects.RequestedNetworkList
:param pci_requests: The list of PCI requests to which additional PCI
requests created here will be added.
:type pci_requests: nova.objects.InstancePCIRequests
:returns: An instance of ``objects.NetworkMetadata`` for use by the
scheduler or None.
"""
if not requested_networks or requested_networks.no_allocate:
return None
physnets = set()
tunneled = False
neutron = get_client(context, admin=True)
for request_net in requested_networks:
physnet = None
trusted = None
tunneled_ = False
vnic_type = network_model.VNIC_TYPE_NORMAL
pci_request_id = None
if request_net.port_id:
vnic_type, trusted, network_id = self._get_port_vnic_info(
context, neutron, request_net.port_id)
physnet, tunneled_ = self._get_physnet_tunneled_info(
context, neutron, network_id)
elif request_net.network_id and not request_net.auto_allocate:
network_id = request_net.network_id
physnet, tunneled_ = self._get_physnet_tunneled_info(
context, neutron, network_id)
# All tunneled traffic must use the same logical NIC so we just
# need to know if there is one or more tunneled networks present.
tunneled = tunneled or tunneled_
# ...conversely, there can be multiple physnets, which will
# generally be mapped to different NICs, and some requested
# networks may use the same physnet. As a result, we need to know
# the *set* of physnets from every network requested
if physnet:
physnets.add(physnet)
if vnic_type in network_model.VNIC_TYPES_SRIOV:
# TODO(moshele): To differentiate between the SR-IOV legacy
# and SR-IOV ovs hardware offload we will leverage the nic
feature based scheduling in nova. This means we will need
# libvirt to expose the nic feature. At the moment
# there is a limitation that deployers cannot use both
# SR-IOV modes (legacy and ovs) in the same deployment.
spec = {pci_request.PCI_NET_TAG: physnet}
dev_type = pci_request.DEVICE_TYPE_FOR_VNIC_TYPE.get(vnic_type)
if dev_type:
spec[pci_request.PCI_DEVICE_TYPE_TAG] = dev_type
if trusted is not None:
# We specifically have requested device on a pool
# with a tag trusted set to true or false. We
# convert the value to string since tags are
# compared in that way.
spec[pci_request.PCI_TRUSTED_TAG] = str(trusted)
request = objects.InstancePCIRequest(
count=1,
spec=[spec],
request_id=uuidutils.generate_uuid())
pci_requests.requests.append(request)
pci_request_id = request.request_id
# Add pci_request_id into the requested network
request_net.pci_request_id = pci_request_id
return objects.NetworkMetadata(physnets=physnets, tunneled=tunneled)
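# Illustrative sketch (assumption, not from the original source): for an SR-IOV
# port on a hypothetical physnet "physnet2" with trusted=True, the PCI request
# spec built above would look roughly like:
#
#     spec = {'physical_network': 'physnet2',
#             'dev_type': 'type-VF',   # only if a device type is mapped
#             'trusted': 'True'}       # stored as a string for tag matching
#
# The exact tag keys come from the nova.pci.request constants used above
# (PCI_NET_TAG, PCI_DEVICE_TYPE_TAG, PCI_TRUSTED_TAG).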
def _can_auto_allocate_network(self, context, neutron):
"""Helper method to determine if we can auto-allocate networks
:param context: nova request context
:param neutron: neutron client
:returns: True if it's possible to auto-allocate networks, False
otherwise.
"""
# run the dry-run validation, which will raise a 409 if not ready
try:
neutron.validate_auto_allocated_topology_requirements(
context.project_id)
LOG.debug('Network auto-allocation is available for project '
'%s', context.project_id)
return True
except neutron_client_exc.Conflict as ex:
LOG.debug('Unable to auto-allocate networks. %s',
six.text_type(ex))
return False
def _auto_allocate_network(self, instance, neutron):
"""Automatically allocates a network for the given project.
:param instance: create the network for the project that owns this
instance
:param neutron: neutron client
:returns: Details of the network that was created.
:raises: nova.exception.UnableToAutoAllocateNetwork
:raises: nova.exception.NetworkNotFound
"""
project_id = instance.project_id
LOG.debug('Automatically allocating a network for project %s.',
project_id, instance=instance)
try:
topology = neutron.get_auto_allocated_topology(
project_id)['auto_allocated_topology']
except neutron_client_exc.Conflict:
raise exception.UnableToAutoAllocateNetwork(project_id=project_id)
try:
network = neutron.show_network(topology['id'])['network']
except neutron_client_exc.NetworkNotFoundClient:
# This shouldn't happen since we just created the network, but
# handle it anyway.
LOG.error('Automatically allocated network %(network_id)s '
'was not found.', {'network_id': topology['id']},
instance=instance)
raise exception.UnableToAutoAllocateNetwork(project_id=project_id)
LOG.debug('Automatically allocated network: %s', network,
instance=instance)
return network
def _ports_needed_per_instance(self, context, neutron, requested_networks):
# TODO(danms): Remove me when all callers pass an object
if requested_networks and isinstance(requested_networks[0], tuple):
requested_networks = objects.NetworkRequestList.from_tuples(
requested_networks)
ports_needed_per_instance = 0
if (requested_networks is None or len(requested_networks) == 0 or
requested_networks.auto_allocate):
nets = self._get_available_networks(context, context.project_id,
neutron=neutron)
if len(nets) > 1:
# Attaching to more than one network by default doesn't
# make sense, as the order will be arbitrary and the guest OS
# won't know which to configure
msg = _("Multiple possible networks found, use a Network "
"ID to be more specific.")
raise exception.NetworkAmbiguous(msg)
if not nets and (
requested_networks and requested_networks.auto_allocate):
# If there are no networks available to this project and we
# were asked to auto-allocate a network, check to see that we
# can do that first.
LOG.debug('No networks are available for project %s; checking '
'to see if we can automatically allocate a network.',
context.project_id)
if not self._can_auto_allocate_network(context, neutron):
raise exception.UnableToAutoAllocateNetwork(
project_id=context.project_id)
ports_needed_per_instance = 1
else:
net_ids_requested = []
for request in requested_networks:
if request.port_id:
port = self._show_port(context, request.port_id,
neutron_client=neutron)
if port.get('device_id', None):
raise exception.PortInUse(port_id=request.port_id)
deferred_ip = port.get('ip_allocation') == 'deferred'
# NOTE(carl_baldwin) A deferred IP port doesn't have an
# address here. If it fails to get one later when nova
# updates it with host info, Neutron will error which
# raises an exception.
if not deferred_ip and not port.get('fixed_ips'):
raise exception.PortRequiresFixedIP(
port_id=request.port_id)
request.network_id = port['network_id']
else:
ports_needed_per_instance += 1
net_ids_requested.append(request.network_id)
# NOTE(jecarey) There is currently a race condition.
# That is, if you have more than one request for a specific
# fixed IP at the same time then only one will be allocated
# the ip. The fixed IP will be allocated to only one of the
# instances that will run. The second instance will fail on
# spawn. That instance will go into error state.
# TODO(jecarey) Need to address this race condition once we
# have the ability to update mac addresses in Neutron.
if request.address:
# TODO(jecarey) Need to look at consolidating list_port
# calls once able to OR filters.
search_opts = {'network_id': request.network_id,
'fixed_ips': 'ip_address=%s' % (
request.address),
'fields': 'device_id'}
existing_ports = neutron.list_ports(
**search_opts)['ports']
if existing_ports:
i_uuid = existing_ports[0]['device_id']
raise exception.FixedIpAlreadyInUse(
address=request.address,
instance_uuid=i_uuid)
# Now check to see if all requested networks exist
if net_ids_requested:
nets = self._get_available_networks(
context, context.project_id, net_ids_requested,
neutron=neutron)
for net in nets:
if not net.get('subnets'):
raise exception.NetworkRequiresSubnet(
network_uuid=net['id'])
if len(nets) != len(net_ids_requested):
requested_netid_set = set(net_ids_requested)
returned_netid_set = set([net['id'] for net in nets])
lostid_set = requested_netid_set - returned_netid_set
if lostid_set:
id_str = ''
for _id in lostid_set:
id_str = id_str and id_str + ', ' + _id or _id
raise exception.NetworkNotFound(network_id=id_str)
return ports_needed_per_instance
def validate_networks(self, context, requested_networks, num_instances):
"""Validate that the tenant can use the requested networks.
Return the number of instances that can be successfully allocated
with the requested network configuration.
"""
LOG.debug('validate_networks() for %s', requested_networks)
neutron = get_client(context)
ports_needed_per_instance = self._ports_needed_per_instance(
context, neutron, requested_networks)
# Note(PhilD): Ideally Nova would create all required ports as part of
# network validation, but port creation requires some details
# from the hypervisor. So we just check the quota and return
# how many of the requested number of instances can be created
if ports_needed_per_instance:
quotas = neutron.show_quota(context.project_id)['quota']
if quotas.get('port', -1) == -1:
# Unlimited Port Quota
return num_instances
# We only need the port count so only ask for ids back.
params = dict(tenant_id=context.project_id, fields=['id'])
ports = neutron.list_ports(**params)['ports']
free_ports = quotas.get('port') - len(ports)
if free_ports < 0:
msg = (_("The number of defined ports: %(ports)d "
"is over the limit: %(quota)d") %
{'ports': len(ports),
'quota': quotas.get('port')})
raise exception.PortLimitExceeded(msg)
ports_needed = ports_needed_per_instance * num_instances
if free_ports >= ports_needed:
return num_instances
else:
return free_ports // ports_needed_per_instance
return num_instances
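# Worked example (illustrative only): with a Neutron port quota of 10, 6 ports
# already defined, and 2 ports needed per instance, the arithmetic above works
# out as:
#
#     free_ports = 10 - 6                 # 4
#     ports_needed = 2 * num_instances
#     # num_instances = 3 -> ports_needed = 6 > 4 -> return 4 // 2 == 2
#     # num_instances = 2 -> ports_needed = 4 <= 4 -> return 2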
def _get_instance_uuids_by_ip(self, context, address):
"""Retrieve instance uuids associated with the given IP address.
:returns: A list of dicts containing the uuids keyed by 'instance_uuid'
e.g. [{'instance_uuid': uuid}, ...]
"""
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def _get_port_id_by_fixed_address(self, client,
instance, address):
"""Return port_id from a fixed address."""
zone = 'compute:%s' % instance.availability_zone
search_opts = {'device_id': instance.uuid,
'device_owner': zone}
data = client.list_ports(**search_opts)
ports = data['ports']
port_id = None
for p in ports:
for ip in p['fixed_ips']:
if ip['ip_address'] == address:
port_id = p['id']
break
if not port_id:
raise exception.FixedIpNotFoundForAddress(address=address)
return port_id
@base_api.refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associate a floating IP with a fixed IP."""
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in the nova code and I could not
# find why this parameter exists.
client = get_client(context)
port_id = self._get_port_id_by_fixed_address(client, instance,
fixed_address)
fip = self._get_floating_ip_by_address(client, floating_address)
param = {'port_id': port_id,
'fixed_ip_address': fixed_address}
try:
client.update_floatingip(fip['id'], {'floatingip': param})
except neutron_client_exc.Conflict as e:
raise exception.FloatingIpAssociateFailed(six.text_type(e))
if fip['port_id']:
port = self._show_port(context, fip['port_id'],
neutron_client=client)
orig_instance_uuid = port['device_id']
msg_dict = dict(address=floating_address,
instance_id=orig_instance_uuid)
LOG.info('re-assign floating IP %(address)s from '
'instance %(instance_id)s', msg_dict,
instance=instance)
orig_instance = objects.Instance.get_by_uuid(context,
orig_instance_uuid)
# purge cached nw info for the original instance
base_api.update_instance_cache_with_nw_info(self, context,
orig_instance)
def get_all(self, context):
"""Get all networks for client."""
client = get_client(context)
networks = client.list_networks().get('networks')
network_objs = []
for network in networks:
network_objs.append(objects.Network(context=context,
name=network['name'],
label=network['name'],
uuid=network['id']))
return objects.NetworkList(context=context,
objects=network_objs)
def get(self, context, network_uuid):
"""Get specific network for client."""
client = get_client(context)
try:
network = client.show_network(network_uuid).get('network') or {}
except neutron_client_exc.NetworkNotFoundClient:
raise exception.NetworkNotFound(network_id=network_uuid)
net_obj = objects.Network(context=context,
name=network['name'],
label=network['name'],
uuid=network['id'])
return net_obj
def delete(self, context, network_uuid):
"""Delete a network for client."""
raise NotImplementedError()
def disassociate(self, context, network_uuid):
"""Disassociate a network for client."""
raise NotImplementedError()
def associate(self, context, network_uuid, host=base_api.SENTINEL,
project=base_api.SENTINEL):
"""Associate a network for client."""
raise NotImplementedError()
def get_fixed_ip(self, context, id):
"""Get a fixed IP from the id."""
raise NotImplementedError()
def get_fixed_ip_by_address(self, context, address):
"""Return instance uuids given an address."""
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def _setup_net_dict(self, client, network_id):
if not network_id:
return {}
pool = client.show_network(network_id)['network']
return {pool['id']: pool}
def _setup_port_dict(self, context, client, port_id):
if not port_id:
return {}
port = self._show_port(context, port_id, neutron_client=client)
return {port['id']: port}
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return {i['id']: i for i in pools}
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return {p['id']: p for p in ports}
def get_floating_ip(self, context, id):
"""Return floating IP object given the floating IP id."""
client = get_client(context)
try:
fip = client.show_floatingip(id)['floatingip']
except neutron_client_exc.NeutronClientException as e:
if e.status_code == 404:
raise exception.FloatingIpNotFound(id=id)
else:
with excutils.save_and_reraise_exception():
LOG.exception('Unable to access floating IP %s', id)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(context, client, fip['port_id'])
return self._make_floating_ip_obj(context, fip, pool_dict, port_dict)
def _get_floating_ip_pools(self, client, project_id=None):
search_opts = {constants.NET_EXTERNAL: True}
if project_id:
search_opts.update({'tenant_id': project_id})
data = client.list_networks(**search_opts)
return data['networks']
def get_floating_ip_pools(self, context):
"""Return floating IP pool names."""
client = get_client(context)
pools = self._get_floating_ip_pools(client)
# Note(salv-orlando): Return a list of names to be consistent with
# nova.network.api.get_floating_ip_pools
return [n['name'] or n['id'] for n in pools]
def _make_floating_ip_obj(self, context, fip, pool_dict, port_dict):
pool = pool_dict[fip['floating_network_id']]
# NOTE(danms): Don't give these objects a context, since they're
# not lazy-loadable anyway
floating = objects.floating_ip.NeutronFloatingIP(
id=fip['id'], address=fip['floating_ip_address'],
pool=(pool['name'] or pool['id']), project_id=fip['tenant_id'],
fixed_ip_id=fip['port_id'])
# In Neutron v2 API fixed_ip_address and instance uuid
# (= device_id) are known here, so pass it as a result.
if fip['fixed_ip_address']:
floating.fixed_ip = objects.FixedIP(
address=fip['fixed_ip_address'])
else:
floating.fixed_ip = None
if fip['port_id']:
instance_uuid = port_dict[fip['port_id']]['device_id']
# NOTE(danms): This could be .refresh()d, so give it context
floating.instance = objects.Instance(context=context,
uuid=instance_uuid)
if floating.fixed_ip:
floating.fixed_ip.instance_uuid = instance_uuid
else:
floating.instance = None
return floating
def get_floating_ip_by_address(self, context, address):
"""Return a floating IP given an address."""
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(context, client, fip['port_id'])
return self._make_floating_ip_obj(context, fip, pool_dict, port_dict)
def get_floating_ips_by_project(self, context):
client = get_client(context)
project_id = context.project_id
fips = self._safe_get_floating_ips(client, tenant_id=project_id)
if not fips:
return []
pool_dict = self._setup_pools_dict(client)
port_dict = self._setup_ports_dict(client, project_id)
return [self._make_floating_ip_obj(context, fip, pool_dict, port_dict)
for fip in fips]
def get_instance_id_by_floating_address(self, context, address):
"""Return the instance id a floating IP's fixed IP is allocated to."""
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
try:
port = self._show_port(context, fip['port_id'],
neutron_client=client)
except exception.PortNotFound:
# NOTE: Here is a potential race condition between _show_port() and
# _get_floating_ip_by_address(). fip['port_id'] shows a port which
# is the server instance's. At _get_floating_ip_by_address(),
# Neutron returns the list which includes the instance. Just after
# that, the deletion of the instance happens and Neutron returns
# 404 on _show_port().
LOG.debug('The port(%s) is not found', fip['port_id'])
return None
return port['device_id']
def get_vifs_by_instance(self, context, instance):
raise NotImplementedError()
def get_vif_by_mac_address(self, context, mac_address):
raise NotImplementedError()
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
def _get_default_floating_ip_pool_name(self):
"""Get default pool name from config.
TODO(stephenfin): Remove this helper function in Queens, opting to
use the [neutron] option only.
"""
if CONF.default_floating_pool != 'nova':
LOG.warning("Config option 'default_floating_pool' is set to "
"a non-default value. Falling back to this value "
"for now but this behavior will change in a "
"future release. You should unset this value "
"and set the '[neutron] default_floating_pool' "
"option instead.")
return CONF.default_floating_pool
return CONF.neutron.default_floating_pool
def allocate_floating_ip(self, context, pool=None):
"""Add a floating IP to a project from a pool."""
client = get_client(context)
pool = pool or self._get_default_floating_ip_pool_name()
pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)
param = {'floatingip': {'floating_network_id': pool_id}}
try:
fip = client.create_floatingip(param)
except (neutron_client_exc.IpAddressGenerationFailureClient,
neutron_client_exc.ExternalIpAddressExhaustedClient) as e:
raise exception.NoMoreFloatingIps(six.text_type(e))
except neutron_client_exc.OverQuotaClient as e:
raise exception.FloatingIpLimitExceeded(six.text_type(e))
except neutron_client_exc.BadRequest as e:
raise exception.FloatingIpBadRequest(six.text_type(e))
return fip['floatingip']['floating_ip_address']
def _safe_get_floating_ips(self, client, **kwargs):
"""Get floating IP gracefully handling 404 from Neutron."""
try:
return client.list_floatingips(**kwargs)['floatingips']
# If a neutron plugin does not implement the L3 API a 404 from
# list_floatingips will be raised.
except neutron_client_exc.NotFound:
return []
except neutron_client_exc.NeutronClientException as e:
# bug/1513879 neutron client is currently using
# NeutronClientException when there is no L3 API
if e.status_code == 404:
return []
with excutils.save_and_reraise_exception():
LOG.exception('Unable to access floating IP for %s',
', '.join(['%s %s' % (k, v)
for k, v in kwargs.items()]))
def _get_floating_ip_by_address(self, client, address):
"""Get floating IP from floating IP address."""
if not address:
raise exception.FloatingIpNotFoundForAddress(address=address)
fips = self._safe_get_floating_ips(client, floating_ip_address=address)
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
"""Get floating IPs from fixed IP and port."""
return self._safe_get_floating_ips(client, fixed_ip_address=fixed_ip,
port_id=port)
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Remove a floating IP with the given address from a project."""
# Note(amotoki): We cannot handle a case where multiple pools
# have overlapping IP address range. In this case we cannot use
# 'address' as a unique key.
# This is a limitation of the current nova.
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in the nova code and I could not
# find why this parameter exists.
self._release_floating_ip(context, address)
def disassociate_and_release_floating_ip(self, context, instance,
floating_ip):
"""Removes (deallocates) and deletes the floating IP.
This api call was added to allow this to be done in one operation
if using neutron.
"""
@base_api.refresh_cache
def _release_floating_ip_and_refresh_cache(self, context, instance,
floating_ip):
self._release_floating_ip(context, floating_ip['address'],
raise_if_associated=False)
if instance:
_release_floating_ip_and_refresh_cache(self, context, instance,
floating_ip)
else:
self._release_floating_ip(context, floating_ip['address'],
raise_if_associated=False)
def _release_floating_ip(self, context, address,
raise_if_associated=True):
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if raise_if_associated and fip['port_id']:
raise exception.FloatingIpAssociated(address=address)
try:
client.delete_floatingip(fip['id'])
except neutron_client_exc.NotFound:
raise exception.FloatingIpNotFoundForAddress(
address=address
)
@base_api.refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociate a floating IP from the instance."""
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in the nova code and I could not
# find why this parameter exists.
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance.
If the instance has port bindings on the destination compute host,
they are activated in this method which will atomically change the
source compute host port binding to inactive and also change the port
"binding:host_id" attribute to the destination host.
If there are no binding resources for the attached ports on the given
destination host, this method is a no-op.
:param context: The user request context.
:param instance: The instance being migrated.
:param migration: dict with required keys::
"source_compute": The name of the source compute host.
"dest_compute": The name of the destination compute host.
:raises: nova.exception.PortBindingActivationFailed if any port binding
activation fails
"""
if not self.supports_port_binding_extension(context):
# If neutron isn't new enough yet for the port "binding-extended"
# API extension, we just no-op. The port binding host will be
# updated in migrate_instance_finish, which is functionally OK,
# it's just not optimal.
LOG.debug('Neutron is not new enough to perform early destination '
'host port binding activation. Port bindings will be '
'updated later.', instance=instance)
return
client = _get_ksa_client(context, admin=True)
dest_host = migration['dest_compute']
for vif in instance.get_network_info():
# Not all compute migration flows use the port binding-extended
# API yet, so first check to see if there is a binding for the
# port and destination host.
resp = client.get('/v2.0/ports/%s/bindings/%s' %
(vif['id'], dest_host), raise_exc=False)
if resp:
if resp.json()['binding']['status'] != 'ACTIVE':
self.activate_port_binding(context, vif['id'], dest_host)
# TODO(mriedem): Do we need to call
# _clear_migration_port_profile? migrate_instance_finish
# would normally take care of clearing the "migrating_to"
# attribute on each port when updating the port's
# binding:host_id to point to the destination host.
else:
# We might be racing with another thread that's handling
# post-migrate operations and already activated the port
# binding for the destination host.
LOG.debug('Port %s binding to destination host %s is '
'already ACTIVE.', vif['id'], dest_host,
instance=instance)
elif resp.status_code == 404:
# If there is no port binding record for the destination host,
# we can safely assume none of the ports attached to the
# instance are using the binding-extended API in this flow and
# exit early.
return
else:
# We don't raise an exception here because we assume that
# port bindings will be updated correctly when
# migrate_instance_finish runs.
LOG.error('Unexpected error trying to get binding info '
'for port %s and destination host %s. Code: %i. '
'Error: %s', vif['id'], dest_host, resp.status_code,
resp.text)
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
self._update_port_binding_for_instance(context, instance,
migration['dest_compute'],
migration=migration)
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force add a network to the project."""
raise NotImplementedError()
def _nw_info_get_ips(self, client, port):
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
return network_IPs
def _nw_info_get_subnets(self, context, port, network_IPs, client=None):
subnets = self._get_subnets_from_port(context, port, client)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
return subnets
def _nw_info_build_network(self, context, port, networks, subnets):
# TODO(stephenfin): Pass in an existing admin client if available.
neutron = get_client(context, admin=True)
network_name = None
network_mtu = None
for net in networks:
if port['network_id'] == net['id']:
network_name = net['name']
tenant_id = net['tenant_id']
network_mtu = net.get('mtu')
break
else:
tenant_id = port['tenant_id']
LOG.warning("Network %(id)s not matched with the tenants "
"network! The ports tenant %(tenant_id)s will be "
"used.",
{'id': port['network_id'], 'tenant_id': tenant_id})
bridge = None
ovs_interfaceid = None
# Network model metadata
should_create_bridge = None
vif_type = port.get('binding:vif_type')
port_details = port.get('binding:vif_details', {})
if vif_type in [network_model.VIF_TYPE_OVS,
network_model.VIF_TYPE_AGILIO_OVS]:
bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
CONF.neutron.ovs_bridge)
ovs_interfaceid = port['id']
elif vif_type == network_model.VIF_TYPE_BRIDGE:
bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
"brq" + port['network_id'])
should_create_bridge = True
elif vif_type == network_model.VIF_TYPE_DVS:
# The name of the DVS port group will contain the neutron
# network id
bridge = port['network_id']
elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and
port_details.get(network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)):
bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
CONF.neutron.ovs_bridge)
ovs_interfaceid = port['id']
elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and
port_details.get(network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG,
False)):
bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
"brq" + port['network_id'])
# Prune the bridge name if necessary. For the DVS this is not done
# as the bridge is a '<network-name>-<network-UUID>'.
if bridge is not None and vif_type != network_model.VIF_TYPE_DVS:
bridge = bridge[:network_model.NIC_NAME_LEN]
physnet, tunneled = self._get_physnet_tunneled_info(
context, neutron, port['network_id'])
network = network_model.Network(
id=port['network_id'],
bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=tenant_id,
mtu=network_mtu,
physical_network=physnet,
tunneled=tunneled
)
network['subnets'] = subnets
if should_create_bridge is not None:
network['should_create_bridge'] = should_create_bridge
return network, ovs_interfaceid
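# Illustrative note (not in the original source): for a Linux bridge port the
# bridge name is derived from the network UUID and then truncated, e.g. with a
# hypothetical network id 'a1b2c3d4-...':
#
#     bridge = 'brq' + 'a1b2c3d4-...'               # "brqa1b2c3d4-..."
#     bridge = bridge[:network_model.NIC_NAME_LEN]  # truncated to the NIC name length
#
# DVS networks are exempt from the truncation, as noted above.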
def _get_preexisting_port_ids(self, instance):
"""Retrieve the preexisting ports associated with the given instance.
These ports were not created by nova and hence should not be
deallocated upon instance deletion.
"""
net_info = instance.get_network_info()
if not net_info:
LOG.debug('Instance cache missing network info.',
instance=instance)
return [vif['id'] for vif in net_info
if vif.get('preserve_on_delete')]
def _build_vif_model(self, context, client, current_neutron_port,
networks, preexisting_port_ids):
"""Builds a ``nova.network.model.VIF`` object based on the parameters
and current state of the port in Neutron.
:param context: Request context.
:param client: Neutron client.
:param current_neutron_port: The current state of a Neutron port
from which to build the VIF object model.
:param networks: List of dicts which represent Neutron networks
associated with the ports currently attached to a given server
instance.
:param preexisting_port_ids: List of IDs of ports attached to a
given server instance which Nova did not create and therefore
should not delete when the port is detached from the server.
:return: nova.network.model.VIF object which represents a port in the
instance network info cache.
"""
vif_active = False
if (current_neutron_port['admin_state_up'] is False
or current_neutron_port['status'] == 'ACTIVE'):
vif_active = True
network_IPs = self._nw_info_get_ips(client,
current_neutron_port)
subnets = self._nw_info_get_subnets(context,
current_neutron_port,
network_IPs, client)
devname = "tap" + current_neutron_port['id']
devname = devname[:network_model.NIC_NAME_LEN]
network, ovs_interfaceid = (
self._nw_info_build_network(context, current_neutron_port,
networks, subnets))
preserve_on_delete = (current_neutron_port['id'] in
preexisting_port_ids)
return network_model.VIF(
id=current_neutron_port['id'],
address=current_neutron_port['mac_address'],
network=network,
vnic_type=current_neutron_port.get('binding:vnic_type',
network_model.VNIC_TYPE_NORMAL),
type=current_neutron_port.get('binding:vif_type'),
profile=_get_binding_profile(current_neutron_port),
details=current_neutron_port.get('binding:vif_details'),
ovs_interfaceid=ovs_interfaceid,
devname=devname,
active=vif_active,
preserve_on_delete=preserve_on_delete)
def _build_network_info_model(self, context, instance, networks=None,
port_ids=None, admin_client=None,
preexisting_port_ids=None,
refresh_vif_id=None):
"""Return list of ordered VIFs attached to instance.
:param context: Request context.
:param instance: Instance we are returning network info for.
:param networks: List of networks being attached to an instance.
If value is None this value will be populated
from the existing cached value.
:param port_ids: List of port_ids that are being attached to an
instance in order of attachment. If value is None
this value will be populated from the existing
cached value.
:param admin_client: A neutron client for the admin context.
:param preexisting_port_ids: List of port_ids that nova didn't
allocate and therefore shouldn't be deleted when
an instance is de-allocated. Supplied list will
be added to the cached list of preexisting port
IDs for this instance.
:param refresh_vif_id: Optional port ID to refresh within the existing
cache rather than the entire cache. This can be
triggered via a "network-changed" server external event
from Neutron.
"""
search_opts = {'tenant_id': instance.project_id,
'device_id': instance.uuid, }
if admin_client is None:
client = get_client(context, admin=True)
else:
client = admin_client
data = client.list_ports(**search_opts)
current_neutron_ports = data.get('ports', [])
if preexisting_port_ids is None:
preexisting_port_ids = []
preexisting_port_ids = set(
preexisting_port_ids + self._get_preexisting_port_ids(instance))
current_neutron_port_map = {}
for current_neutron_port in current_neutron_ports:
current_neutron_port_map[current_neutron_port['id']] = (
current_neutron_port)
# Figure out what kind of operation we're processing. If we're given
# a single port to refresh then we try to optimize and update just the
# information for that VIF in the existing cache rather than try to
# rebuild the entire thing.
if refresh_vif_id is not None:
# TODO(mriedem): Consider pulling this out into its own method.
nw_info = instance.get_network_info()
if nw_info:
current_neutron_port = current_neutron_port_map.get(
refresh_vif_id)
if current_neutron_port:
# Get the network for the port.
networks = self._get_available_networks(
context, instance.project_id,
[current_neutron_port['network_id']], client)
# Build the VIF model given the latest port information.
refreshed_vif = self._build_vif_model(
context, client, current_neutron_port, networks,
preexisting_port_ids)
for index, vif in enumerate(nw_info):
if vif['id'] == refresh_vif_id:
# Update the existing entry.
nw_info[index] = refreshed_vif
LOG.debug('Updated VIF entry in instance network '
'info cache for port %s.',
refresh_vif_id, instance=instance)
break
else:
# If it wasn't in the existing cache, add it.
nw_info.append(refreshed_vif)
LOG.debug('Added VIF to instance network info cache '
'for port %s.', refresh_vif_id,
instance=instance)
else:
# This port is no longer associated with the instance, so
# simply remove it from the nw_info cache.
for index, vif in enumerate(nw_info):
if vif['id'] == refresh_vif_id:
LOG.info('Port %s from network info_cache is no '
'longer associated with instance in '
'Neutron. Removing from network '
'info_cache.', refresh_vif_id,
instance=instance)
del nw_info[index]
break
return nw_info
# else there is no existing cache and we need to build it
nw_info_refresh = networks is None and port_ids is None
networks, port_ids = self._gather_port_ids_and_networks(
context, instance, networks, port_ids, client)
nw_info = network_model.NetworkInfo()
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
if current_neutron_port:
vif = self._build_vif_model(
context, client, current_neutron_port, networks,
preexisting_port_ids)
nw_info.append(vif)
elif nw_info_refresh:
LOG.info('Port %s from network info_cache is no '
'longer associated with instance in Neutron. '
'Removing from network info_cache.', port_id,
instance=instance)
return nw_info
def _get_subnets_from_port(self, context, port, client=None):
"""Return the subnets for a given port."""
fixed_ips = port['fixed_ips']
# No fixed_ips for the port means there is no subnet associated
# with the network the port is created on.
# Since list_subnets(id=[]) returns all subnets visible for the
# current tenant, the returned subnets may contain subnets that are not
# related to the port. To avoid this, the method returns early here.
if not fixed_ips:
return []
if not client:
client = get_client(context)
search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
data = client.list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
subnets = []
for subnet in ipam_subnets:
subnet_dict = {'cidr': subnet['cidr'],
'gateway': network_model.IP(
address=subnet['gateway_ip'],
type='gateway'),
}
if subnet.get('ipv6_address_mode'):
subnet_dict['ipv6_address_mode'] = subnet['ipv6_address_mode']
# attempt to populate DHCP server field
search_opts = {'network_id': subnet['network_id'],
'device_owner': 'network:dhcp'}
data = client.list_ports(**search_opts)
dhcp_ports = data.get('ports', [])
for p in dhcp_ports:
for ip_pair in p['fixed_ips']:
if ip_pair['subnet_id'] == subnet['id']:
subnet_dict['dhcp_server'] = ip_pair['ip_address']
break
subnet_object = network_model.Subnet(**subnet_dict)
for dns in subnet.get('dns_nameservers', []):
subnet_object.add_dns(
network_model.IP(address=dns, type='dns'))
for route in subnet.get('host_routes', []):
subnet_object.add_route(
network_model.Route(cidr=route['destination'],
gateway=network_model.IP(
address=route['nexthop'],
type='gateway')))
subnets.append(subnet_object)
return subnets
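# Illustrative sketch (assumed values): each returned Subnet object is built
# from a dict roughly of the form:
#
#     {'cidr': '192.0.2.0/24',
#      'gateway': network_model.IP(address='192.0.2.1', type='gateway'),
#      'dhcp_server': '192.0.2.2'}   # only present if a network:dhcp port matched
#
# plus any dns_nameservers and host_routes added to the Subnet object afterwards.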
def get_dns_domains(self, context):
"""Return a list of available dns domains.
These can be used to create DNS entries for floating IPs.
"""
raise NotImplementedError()
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
raise NotImplementedError()
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
raise NotImplementedError()
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
raise NotImplementedError()
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
raise NotImplementedError()
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
raise NotImplementedError()
def create_public_dns_domain(self, context, domain, project=None):
"""Create a private DNS domain with optional nova project."""
raise NotImplementedError()
def setup_instance_network_on_host(self, context, instance, host,
migration=None):
"""Setup network for specified instance on host."""
self._update_port_binding_for_instance(context, instance, host,
migration)
def cleanup_instance_network_on_host(self, context, instance, host):
"""Cleanup network for specified instance on host."""
pass
def _get_pci_devices_from_migration_context(self, migration_context,
migration):
if migration and migration.get('status') == 'reverted':
# In case of revert, swap old and new devices to
# update the ports back to the original devices.
return (migration_context.new_pci_devices,
migration_context.old_pci_devices)
return (migration_context.old_pci_devices,
migration_context.new_pci_devices)
def _get_pci_mapping_for_migration(self, context, instance, migration):
"""Get the mapping between the old PCI devices and the new PCI
devices that have been allocated during this migration. The
correlation is based on the PCI request ID, which is unique per PCI
device for SR-IOV ports.
:param context: The request context.
:param instance: Get PCI mapping for this instance.
:param migration: The migration for this instance.
:Returns: dictionary of mapping {'<old pci address>': <New PciDevice>}
"""
migration_context = instance.migration_context
if not migration_context:
return {}
old_pci_devices, new_pci_devices = \
self._get_pci_devices_from_migration_context(migration_context,
migration)
if old_pci_devices and new_pci_devices:
LOG.debug("Determining PCI devices mapping using migration"
"context: old_pci_devices: %(old)s, "
"new_pci_devices: %(new)s",
{'old': [dev for dev in old_pci_devices],
'new': [dev for dev in new_pci_devices]})
return {old.address: new
for old in old_pci_devices
for new in new_pci_devices
if old.request_id == new.request_id}
return {}
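# Illustrative example (hypothetical addresses): with one old and one new PCI
# device sharing the same request_id, the returned mapping is keyed by the old
# device address:
#
#     {'0000:04:10.1': <PciDevice 0000:82:00.3>}
#
# _update_port_binding_for_instance() below uses this mapping to rewrite the
# 'pci_slot' entry in each SR-IOV port's binding profile.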
def _update_port_binding_for_instance(self, context, instance, host,
migration=None):
neutron = get_client(context, admin=True)
search_opts = {'device_id': instance.uuid,
'tenant_id': instance.project_id}
data = neutron.list_ports(**search_opts)
pci_mapping = None
port_updates = []
ports = data['ports']
for p in ports:
updates = {}
binding_profile = _get_binding_profile(p)
# If the host hasn't changed, like in the case of resizing to the
# same host, there is nothing to do.
if p.get(BINDING_HOST_ID) != host:
updates[BINDING_HOST_ID] = host
# NOTE: Before updating the port binding make sure we
# remove the pre-migration status from the binding profile
if binding_profile.get(MIGRATING_ATTR):
del binding_profile[MIGRATING_ATTR]
updates[BINDING_PROFILE] = binding_profile
# Update port with newly allocated PCI devices. Even if the
# resize is happening on the same host, a new PCI device can be
# allocated. Note that this only needs to happen if a migration
# is in progress such as in a resize / migrate. It is possible
# that this function is called without a migration object, such
# as in an unshelve operation.
vnic_type = p.get('binding:vnic_type')
if (vnic_type in network_model.VNIC_TYPES_SRIOV
and migration is not None):
if not pci_mapping:
pci_mapping = self._get_pci_mapping_for_migration(context,
instance, migration)
pci_slot = binding_profile.get('pci_slot')
new_dev = pci_mapping.get(pci_slot)
if new_dev:
binding_profile.update(
self._get_pci_device_profile(new_dev))
updates[BINDING_PROFILE] = binding_profile
else:
raise exception.PortUpdateFailed(port_id=p['id'],
reason=_("Unable to correlate PCI slot %s") %
pci_slot)
port_updates.append((p['id'], updates))
# Avoid rolling back updates if we catch an error above.
# TODO(lbeliveau): Batch up the port updates in one neutron call.
for port_id, updates in port_updates:
if updates:
LOG.info("Updating port %(port)s with "
"attributes %(attributes)s",
{"port": port_id, "attributes": updates},
instance=instance)
try:
neutron.update_port(port_id, {'port': updates})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Unable to update binding details "
"for port %s",
port_id, instance=instance)
def update_instance_vnic_index(self, context, instance, vif, index):
"""Update instance vnic index.
When the 'VNIC index' extension is supported this method will update
the vnic index of the instance on the port.
"""
self._refresh_neutron_extensions_cache(context)
if constants.VNIC_INDEX_EXT in self.extensions:
neutron = get_client(context)
port_req_body = {'port': {'vnic_index': index}}
try:
neutron.update_port(vif['id'], port_req_body)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Unable to update instance VNIC index '
'for port %s.',
vif['id'], instance=instance)
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
|
the-stack_0_24569
|
# Copyright 2017, 2018 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common stuff used with LexNET."""
# pylint: disable=bad-whitespace
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from sklearn import metrics
# Part of speech tags used in the paths.
POSTAGS = [
'PAD', 'VERB', 'CONJ', 'NOUN', 'PUNCT',
'ADP', 'ADJ', 'DET', 'ADV', 'PART',
'NUM', 'X', 'INTJ', 'SYM',
]
POSTAG_TO_ID = {tag: tid for tid, tag in enumerate(POSTAGS)}
# Dependency labels used in the paths.
DEPLABELS = [
'PAD', 'UNK', 'ROOT', 'abbrev', 'acomp', 'advcl',
'advmod', 'agent', 'amod', 'appos', 'attr', 'aux',
'auxpass', 'cc', 'ccomp', 'complm', 'conj', 'cop',
'csubj', 'csubjpass', 'dep', 'det', 'dobj', 'expl',
'infmod', 'iobj', 'mark', 'mwe', 'nc', 'neg',
'nn', 'npadvmod', 'nsubj', 'nsubjpass', 'num', 'number',
'p', 'parataxis', 'partmod', 'pcomp', 'pobj', 'poss',
'preconj', 'predet', 'prep', 'prepc', 'prt', 'ps',
'purpcl', 'quantmod', 'rcmod', 'ref', 'rel', 'suffix',
'title', 'tmod', 'xcomp', 'xsubj',
]
DEPLABEL_TO_ID = {label: lid for lid, label in enumerate(DEPLABELS)}
# Direction codes used in the paths.
DIRS = '_^V<>'
DIR_TO_ID = {dir: did for did, dir in enumerate(DIRS)}
def load_word_embeddings(word_embeddings_dir, word_embeddings_file):
"""Loads pretrained word embeddings from a binary file and returns the matrix.
Args:
word_embeddings_dir: The directory for the word embeddings.
word_embeddings_file: The pretrained word embeddings text file.
Returns:
The word embeddings matrix
"""
embedding_file = os.path.join(word_embeddings_dir, word_embeddings_file)
vocab_file = os.path.join(
word_embeddings_dir, os.path.dirname(word_embeddings_file), 'vocab.txt')
with open(vocab_file) as f_in:
vocab = [line.strip() for line in f_in]
vocab_size = len(vocab)
print('Embedding file "%s" has %d tokens' % (embedding_file, vocab_size))
with open(embedding_file, 'rb') as f_in:  # np.load requires a binary-mode handle
embeddings = np.load(f_in)
dim = embeddings.shape[1]
# Four initially random vectors for the special tokens: <PAD>, <UNK>, <X>, <Y>
special_embeddings = np.random.normal(0, 0.1, (4, dim))
embeddings = np.vstack((special_embeddings, embeddings))
embeddings = embeddings.astype(np.float32)
return embeddings
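# Example usage (illustrative; the path and dimensions are assumptions):
#
#     embeddings = load_word_embeddings('embeddings', 'glove/glove.npy')
#     # embeddings.shape == (len(vocab) + 4, dim) -- rows 0-3 are the random
#     # vectors for the <PAD>, <UNK>, <X> and <Y> special tokens.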
def full_evaluation(model, session, instances, labels, set_name, classes):
"""Prints a full evaluation on the current set.
Performance (recall, precision and F1), classification report (per
class performance), and confusion matrix.
Args:
model: The currently trained path-based model.
session: The current TensorFlow session.
instances: The current set instances.
labels: The current set labels.
set_name: The current set name (train/validation/test).
classes: The class label names.
Returns:
The model's prediction for the given instances.
"""
# Predict the labels
pred = model.predict(session, instances)
# Print the performance
precision, recall, f1, _ = metrics.precision_recall_fscore_support(
labels, pred, average='weighted')
print('%s set: Precision: %.3f, Recall: %.3f, F1: %.3f' % (
set_name, precision, recall, f1))
# Print a classification report
print('%s classification report:' % set_name)
print(metrics.classification_report(labels, pred, target_names=classes))
# Print the confusion matrix
print('%s confusion matrix:' % set_name)
cm = metrics.confusion_matrix(labels, pred, labels=range(len(classes)))
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] * 100
print_cm(cm, labels=classes)
return pred
def print_cm(cm, labels):
"""Pretty print for confusion matrices.
From: https://gist.github.com/zachguo/10296432.
Args:
cm: The confusion matrix.
labels: The class names.
"""
columnwidth = 10
empty_cell = ' ' * columnwidth
short_labels = [label[:12].rjust(10, ' ') for label in labels]
# Print header
header = empty_cell + ' '
header += ''.join([' %{0}s '.format(columnwidth) % label
for label in short_labels])
print(header)
# Print rows
for i, label1 in enumerate(short_labels):
row = '%{0}s '.format(columnwidth) % label1[:10]
for j in range(len(short_labels)):
value = int(cm[i, j]) if not np.isnan(cm[i, j]) else 0
cell = ' %{0}d '.format(10) % value
row += cell + ' '
print(row)
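# Example usage (illustrative values):
#
#     cm = metrics.confusion_matrix([0, 1, 1], [0, 1, 0], labels=[0, 1])
#     cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] * 100
#     print_cm(cm, labels=['hypernym', 'meronym'])
#
# which prints one row per gold label with the percentages rounded to integers.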
def load_all_labels(records):
"""Reads TensorFlow examples from a RecordReader and returns only the labels.
Args:
records: a record list with TensorFlow examples.
Returns:
The labels
"""
curr_features = tf.parse_example(records, {
'rel_id': tf.FixedLenFeature([1], dtype=tf.int64),
})
labels = tf.squeeze(curr_features['rel_id'], [-1])
return labels
def load_all_pairs(records):
"""Reads TensorFlow examples from a RecordReader and returns the word pairs.
Args:
records: a record list with TensorFlow examples.
Returns:
The word pairs
"""
curr_features = tf.parse_example(records, {
'pair': tf.FixedLenFeature([1], dtype=tf.string)
})
word_pairs = curr_features['pair']
return word_pairs
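# Example usage (illustrative; assumes `records` is a batch of serialized
# tf.train.Example protos read from a TFRecord file):
#
#     labels_op = load_all_labels(records)
#     pairs_op = load_all_pairs(records)
#     labels, pairs = session.run([labels_op, pairs_op])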
def write_predictions(pairs, labels, predictions, classes, predictions_file):
"""Write the predictions to a file.
Args:
pairs: the word pairs (list of tuple of two strings).
labels: the gold-standard labels for these pairs (array of rel ID).
predictions: the predicted labels for these pairs (array of rel ID).
classes: a list of relation names.
predictions_file: where to save the predictions.
"""
with open(predictions_file, 'w') as f_out:
for pair, label, pred in zip(pairs, labels, predictions):
w1, w2 = pair
f_out.write('\t'.join([w1, w2, classes[label], classes[pred]]) + '\n')
|
the-stack_0_24570
|
#!/bin/sh
""":" .
exec python "$0" "$@"
"""
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017 beyond-blockchain.org.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import gevent
from gevent import monkey
monkey.patch_all()
import argparse
from brownie import *
import json
import os
import subprocess
import time
import sys
sys.path.extend(["../"])
from bbc1.core import bbclib, bbc_config, subsystem_tool_lib
from bbc1.core.ethereum import bbc_ethereum
import bbc1
class EthereumSubsystemTool(subsystem_tool_lib.SubsystemTool):
def __init__(self):
super().__init__(
name='Ethereum',
tool='eth_subsystem_tool.py',
version='0.13.1'
)
def _add_additional_arguments(self):
self.argparser.add_argument('-n', '--network', type=str,
default='ropsten',
help='network name (ropsten by default)')
# account command
parser = self.subparsers.add_parser('account',
help='Set an Ethereum account')
parser.add_argument('private_key', action='store',
help='Private key of the account')
# auto command
parser = self.subparsers.add_parser('auto',
help='Automatically set up everything')
parser.add_argument('project_id', action='store',
help='INFURA project ID')
parser.add_argument('private_key', action='store',
help='Private key of the account')
# balance command
self.subparsers.add_parser('balance', help='Show ETH balance')
# deploy command
self.subparsers.add_parser('deploy', help='Deploy the anchor contract')
# deployed command
parser = self.subparsers.add_parser('deployed',
help='Use existing anchor contract')
parser.add_argument('contract_address', action='store',
help='Anchor contract address')
# new_account command
parser = self.subparsers.add_parser('new_account',
help='Create a new Ethereum account')
# brownie command
parser = self.subparsers.add_parser('brownie',
help='Initialize brownie and infura environment')
parser.add_argument('project_id', action='store',
help='INFURA project ID')
# test command
self.subparsers.add_parser('test', help='Test the anchor contract')
def _verify_by_subsystem(self, args, digest, spec, subtree):
if spec[b'subsystem'] != b'ethereum':
print("Failed: not stored in an Ethereum subsystem.")
return 0
bbcConfig = bbc_ethereum.setup_config(args.workingdir, args.config,
args.network)
config = bbcConfig.get_config()
prevdir = os.getcwd()
os.chdir(bbc1.__path__[0] + '/core/ethereum')
os.environ['WEB3_INFURA_PROJECT_ID'] = \
config['ethereum']['web3_infura_project_id']
eth = bbc_ethereum.BBcEthereum(
config['ethereum']['network'],
config['ethereum']['private_key'],
contract_address=spec[b'contract_address'].decode('utf-8')
)
os.chdir(prevdir)
return eth.verify(digest, subtree)
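# Example invocations (illustrative; the INFURA project ID and private key are
# placeholders, and additional common options may apply -- see the argparse
# setup above):
#
#     python eth_subsystem_tool.py -n ropsten new_account
#     python eth_subsystem_tool.py -n ropsten auto <PROJECT_ID> <PRIVATE_KEY>
#     python eth_subsystem_tool.py balance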
if __name__ == '__main__':
subsystem_tool = EthereumSubsystemTool()
args = subsystem_tool.parse_arguments()
bbcConfig = bbc_ethereum.setup_config(args.workingdir, args.config,
args.network)
if args.command_type == 'auto':
print("Setting up brownie.")
bbc_ethereum.setup_brownie(bbcConfig, args.project_id)
print("Setting up an Ethereum account.")
bbc_ethereum.setup_account(bbcConfig, args.private_key)
print("Deploying the anchor contract.")
bbc_ethereum.setup_deploy(bbcConfig)
elif args.command_type == 'balance':
print(bbc_ethereum.get_balance(bbcConfig))
elif args.command_type == 'brownie':
bbc_ethereum.setup_brownie(bbcConfig, args.project_id)
elif args.command_type == 'test':
bbc_ethereum.setup_test()
elif args.command_type == 'new_account':
bbc_ethereum.setup_new_account(bbcConfig)
print("private_key (copy and save somewhere):")
print(accounts[0].private_key)
print("address (copy and save somewhere):")
print(accounts[0].address)
elif args.command_type == 'account':
bbc_ethereum.setup_account(bbcConfig, args.private_key)
elif args.command_type == 'deploy':
bbc_ethereum.setup_deploy(bbcConfig)
elif args.command_type == 'deployed':
bbc_ethereum.setup_deployed(bbcConfig, args.contract_address)
sys.exit(0)
# end of utils/eth_subsystem_tool.py
|
the-stack_0_24571
|
import unittest
import astroimages_fits.fits_test_util_functions as ftuf
class TestUtilFunctions(unittest.TestCase):
def setUp(self):
self.folders_per_layer = 3
self.files_per_folder = 10
self.temp_folder = '/home/rsouza/Projects/AstroImages/'
def test_create_empty_fits_files_on_temp_folder(self):
temp_folder, files_list = ftuf.create_empty_fits_files_on_temp_folder(self.files_per_folder)
self.assertEqual(len(files_list), self.files_per_folder)
temp_folder.cleanup()
def test_create_empty_fits_files_on_path(self):
temp_folder, files_list = ftuf.create_empty_fits_files_on_temp_folder(
self.files_per_folder, self.temp_folder)
self.assertEqual(len(files_list), self.files_per_folder)
temp_folder.cleanup()
if __name__ == '__main__':
unittest.main()
|
the-stack_0_24572
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import datetime
import inspect
import os
import pprint
import fixtures
import mock
from oslo_log import log
from oslo_utils import timeutils
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
from oslo_versionedobjects import fixture
import six
from testtools import matchers
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_notifier
from nova import utils
LOG = log.getLogger(__name__)
class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
VERSION = '1.0'
fields = {'baz': fields.IntegerField()}
class MyObj(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
VERSION = '1.6'
fields = {'foo': fields.IntegerField(default=1),
'bar': fields.StringField(),
'missing': fields.StringField(),
'readonly': fields.IntegerField(read_only=True),
'rel_object': fields.ObjectField('MyOwnedObject', nullable=True),
'rel_objects': fields.ListOfObjectsField('MyOwnedObject',
nullable=True),
'mutable_default': fields.ListOfStringsField(default=[]),
}
@staticmethod
def _from_db_object(context, obj, db_obj):
self = MyObj()
self.foo = db_obj['foo']
self.bar = db_obj['bar']
self.missing = db_obj['missing']
self.readonly = 1
self._context = context
return self
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls(context=context, foo=1, bar='bar')
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self):
return 'polo'
@base.remotable
def _update_test(self):
self.bar = 'updated'
@base.remotable
def save(self):
self.obj_reset_changes()
@base.remotable
def refresh(self):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self):
self.bar = 'meow'
self.save()
self.foo = 42
self.rel_object = MyOwnedObject(baz=42)
def obj_make_compatible(self, primitive, target_version):
super(MyObj, self).obj_make_compatible(primitive, target_version)
# NOTE(danms): Simulate an older version that had a different
# format for the 'bar' attribute
if target_version == '1.1' and 'bar' in primitive:
primitive['bar'] = 'old%s' % primitive['bar']
class RandomMixInWithNoFields(object):
"""Used to test object inheritance using a mixin that has no fields."""
pass
@base.NovaObjectRegistry.register_if(False)
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
fields = {'new_field': fields.StringField()}
class TestObjToPrimitive(test.NoDBTestCase):
def test_obj_to_primitive_list(self):
@base.NovaObjectRegistry.register_if(False)
class MyObjElement(base.NovaObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyObjElement, self).__init__()
self.foo = foo
@base.NovaObjectRegistry.register_if(False)
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList()
mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
self.assertEqual([1, 2, 3],
[x['foo'] for x in base.obj_to_primitive(mylist)])
def test_obj_to_primitive_dict(self):
base.NovaObjectRegistry.register(MyObj)
myobj = MyObj(foo=1, bar='foo')
self.assertEqual({'foo': 1, 'bar': 'foo'},
base.obj_to_primitive(myobj))
def test_obj_to_primitive_recursive(self):
base.NovaObjectRegistry.register(MyObj)
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObj')}
mylist = MyList(objects=[MyObj(), MyObj()])
for i, value in enumerate(mylist):
value.foo = i
self.assertEqual([{'foo': 0}, {'foo': 1}],
base.obj_to_primitive(mylist))
def test_obj_to_primitive_with_ip_addr(self):
@base.NovaObjectRegistry.register_if(False)
class TestObject(base.NovaObject):
fields = {'addr': fields.IPAddressField(),
'cidr': fields.IPNetworkField()}
obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
base.obj_to_primitive(obj))
class TestObjMakeList(test.NoDBTestCase):
def test_obj_make_list(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {
'objects': fields.ListOfObjectsField('MyObj'),
}
db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
{'foo': 2, 'bar': 'bat', 'missing': 'apple'},
]
mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
self.assertEqual(2, len(mylist))
self.assertEqual('ctxt', mylist._context)
for index, item in enumerate(mylist):
self.assertEqual(db_objs[index]['foo'], item.foo)
self.assertEqual(db_objs[index]['bar'], item.bar)
self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
"""Compare a NovaObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterates over
the fields of the object.
:param:test: The TestCase doing the comparison
:param:obj: The NovaObject to examine
:param:db_obj: The dict-like database object to use as reference
:param:subs: A dict of objkey=dbkey field substitutions
:param:allow_missing: A list of fields that may not be in db_obj
:param:comparators: Map of comparator functions to use for certain fields
"""
if subs is None:
subs = {}
if allow_missing is None:
allow_missing = []
if comparators is None:
comparators = {}
for key in obj.fields:
if key in allow_missing and not obj.obj_attr_is_set(key):
continue
obj_val = getattr(obj, key)
db_key = subs.get(key, key)
db_val = db_obj[db_key]
if isinstance(obj_val, datetime.datetime):
obj_val = obj_val.replace(tzinfo=None)
if key in comparators:
comparator = comparators[key]
comparator(db_val, obj_val)
else:
test.assertEqual(db_val, obj_val)
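# --- Illustrative sketch (not part of the original module): one way a test
# --- could call compare_obj() against a dict-like row, kept as a comment so it
# --- does not affect the module. The field/key names are assumptions chosen to
# --- match MyObj above; any unset fields must be listed in allow_missing.
#
# def test_compare_obj_example(self):
#     db_row = {'foo': 1, 'bar': 'bar', 'extra': 'banana'}
#     obj = MyObj(foo=1, bar='bar', missing='banana')
#     compare_obj(self, obj, db_row,
#                 subs={'missing': 'extra'},
#                 allow_missing=[f for f in MyObj.fields
#                                if f not in ('foo', 'bar', 'missing')])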
class _BaseTestCase(test.TestCase):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.user_id = 'fake-user'
self.project_id = 'fake-project'
self.context = context.RequestContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
# NOTE(danms): register these here instead of at import time
# so that they're not always present
base.NovaObjectRegistry.register(MyObj)
base.NovaObjectRegistry.register(MyOwnedObject)
def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
comparators=comparators)
def str_comparator(self, expected, obj_val):
"""Compare an object field to a string in the db by performing
a simple coercion on the object field value.
"""
self.assertEqual(expected, str(obj_val))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
try:
f = super(_BaseTestCase, self).assertNotIsInstance
except AttributeError:
self.assertThat(obj,
matchers.Not(matchers.IsInstance(cls)),
message=msg or '')
else:
f(obj, cls, msg=msg)
class _LocalTest(_BaseTestCase):
def setUp(self):
super(_LocalTest, self).setUp()
# Just in case
self.useFixture(nova_fixtures.IndirectionAPIFixture(None))
@contextlib.contextmanager
def things_temporarily_local():
# Temporarily go non-remote so the conductor handles
# this request directly
_api = base.NovaObject.indirection_api
base.NovaObject.indirection_api = None
yield
base.NovaObject.indirection_api = _api
# FIXME(danms): We shouldn't be overriding any of this, but need to
# for the moment because of the mocks in the base fixture that don't
# hit our registry subclass.
class FakeIndirectionHack(fixture.FakeIndirectionAPI):
def object_action(self, context, objinst, objmethod, args, kwargs):
objinst = self._ser.deserialize_entity(
context, self._ser.serialize_entity(
context, objinst))
objmethod = six.text_type(objmethod)
args = self._ser.deserialize_entity(
None, self._ser.serialize_entity(None, args))
kwargs = self._ser.deserialize_entity(
None, self._ser.serialize_entity(None, kwargs))
original = objinst.obj_clone()
with mock.patch('nova.objects.base.NovaObject.'
'indirection_api', new=None):
result = getattr(objinst, objmethod)(*args, **kwargs)
updates = self._get_changes(original, objinst)
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
objname = six.text_type(objname)
objmethod = six.text_type(objmethod)
objver = six.text_type(objver)
args = self._ser.deserialize_entity(
None, self._ser.serialize_entity(None, args))
kwargs = self._ser.deserialize_entity(
None, self._ser.serialize_entity(None, kwargs))
cls = base.NovaObject.obj_class_from_name(objname, objver)
with mock.patch('nova.objects.base.NovaObject.'
'indirection_api', new=None):
result = getattr(cls, objmethod)(context, *args, **kwargs)
manifest = ovo_base.obj_tree_get_versions(objname)
return (base.NovaObject.obj_from_primitive(
result.obj_to_primitive(target_version=objver,
version_manifest=manifest),
context=context)
if isinstance(result, base.NovaObject) else result)
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
objname = six.text_type(objname)
objmethod = six.text_type(objmethod)
object_versions = {six.text_type(o): six.text_type(v)
for o, v in object_versions.items()}
args, kwargs = self._canonicalize_args(context, args, kwargs)
objver = object_versions[objname]
cls = base.NovaObject.obj_class_from_name(objname, objver)
with mock.patch('nova.objects.base.NovaObject.'
'indirection_api', new=None):
result = getattr(cls, objmethod)(context, *args, **kwargs)
return (base.NovaObject.obj_from_primitive(
result.obj_to_primitive(target_version=objver),
context=context)
if isinstance(result, base.NovaObject) else result)
class IndirectionFixture(fixtures.Fixture):
def setUp(self):
super(IndirectionFixture, self).setUp()
ser = base.NovaObjectSerializer()
self.indirection_api = FakeIndirectionHack(serializer=ser)
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.base.NovaObject.indirection_api',
self.indirection_api))
class _RemoteTest(_BaseTestCase):
def setUp(self):
super(_RemoteTest, self).setUp()
self.useFixture(IndirectionFixture())
class _TestObject(object):
def test_object_attrs_in_init(self):
# Spot check a few
objects.Instance
objects.InstanceInfoCache
objects.SecurityGroup
# Now check the test one in this file. Should be newest version
self.assertEqual('1.6', objects.MyObj.VERSION)
def test_hydration_type_error(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
real_method = MyObj._obj_from_primitive
def _obj_from_primitive(*args):
return real_method(*args)
with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
ofp.side_effect = _obj_from_primitive
obj = MyObj.obj_from_primitive(primitive)
ofp.assert_called_once_with(None, '1.5', primitive)
self.assertEqual(obj.foo, 1)
def test_hydration_version_different(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.2',
'nova_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj.foo, 1)
self.assertEqual('1.2', obj.VERSION)
def test_hydration_bad_ns(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'foo',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
self.assertRaises(ovo_exc.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_hydration_additional_unexpected_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): If we call obj_from_primitive() directly
# with a version containing .z, we'll get that version
# in the resulting object. In reality, when using the
# serializer, we'll get that snipped off (tested
# elsewhere)
self.assertEqual('1.5.1', obj.VERSION)
def test_dehydration(self):
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.data': {'foo': 1}}
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.obj_to_primitive(), expected)
def test_object_property(self):
obj = MyObj(foo=1)
self.assertEqual(obj.foo, 1)
def test_object_property_type_error(self):
obj = MyObj()
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_load(self):
obj = MyObj()
self.assertEqual(obj.bar, 'loaded!')
def test_load_in_base(self):
@base.NovaObjectRegistry.register_if(False)
class Foo(base.NovaObject):
fields = {'foobar': fields.IntegerField()}
obj = Foo()
with self.assertRaisesRegex(NotImplementedError, ".*foobar.*"):
obj.foobar
def test_loaded_in_primitive(self):
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.bar, 'loaded!')
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes': ['bar'],
'nova_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_changes_in_primitive(self):
obj = MyObj(foo=123)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
primitive = obj.obj_to_primitive()
self.assertIn('nova_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj2.obj_what_changed(), set(['foo']))
obj2.obj_reset_changes()
self.assertEqual(obj2.obj_what_changed(), set())
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(ovo_exc.OrphanedObjectError,
obj._update_test)
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj._update_test()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
self.assertEqual(obj.foo, 123)
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.save()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 123)
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.refresh()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 321)
self.assertEqual(obj.bar, 'refreshed')
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
self.assertIsInstance(obj.rel_object, MyOwnedObject)
def test_changed_with_sub_object(self):
@base.NovaObjectRegistry.register_if(False)
class ParentObject(base.NovaObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.ObjectField('MyObj'),
}
obj = ParentObject()
self.assertEqual(set(), obj.obj_what_changed())
obj.foo = 1
self.assertEqual(set(['foo']), obj.obj_what_changed())
bar = MyObj()
obj.bar = bar
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(), obj.obj_what_changed())
bar.foo = 1
self.assertEqual(set(['bar']), obj.obj_what_changed())
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.bar, 'bar')
result = obj.marco()
self.assertEqual(result, 'polo')
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.foo, 1)
obj._update_test()
self.assertEqual(obj.bar, 'updated')
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5)
obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
deleted=False)
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes':
['deleted', 'created_at', 'deleted_at', 'updated_at'],
'nova_object.data':
{'created_at': utils.isotime(dt),
'updated_at': utils.isotime(dt),
'deleted_at': None,
'deleted': False,
}
}
actual = obj.obj_to_primitive()
self.assertJsonEqual(actual, expected)
def test_contains(self):
obj = MyObj()
self.assertNotIn('foo', obj)
obj.foo = 1
self.assertIn('foo', obj)
self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_obj_reset_changes_recursive(self):
obj = MyObj(rel_object=MyOwnedObject(baz=123),
rel_objects=[MyOwnedObject(baz=456)])
self.assertEqual(set(['rel_object', 'rel_objects']),
obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(['rel_object']), obj.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
obj.obj_reset_changes(recursive=True, fields=['foo'])
self.assertEqual(set(['rel_object']), obj.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
obj.obj_reset_changes(recursive=True)
self.assertEqual(set([]), obj.rel_object.obj_what_changed())
self.assertEqual(set([]), obj.obj_what_changed())
def test_get(self):
obj = MyObj(foo=1)
# Foo has value, should not get the default
self.assertEqual(obj.get('foo', 2), 1)
# Foo has value, should return the value without error
self.assertEqual(obj.get('foo'), 1)
# Bar is not loaded, so we should get the default
self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
# Bar without a default should lazy-load
self.assertEqual(obj.get('bar'), 'loaded!')
# Bar now has a default, but loaded value should be returned
self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
# Invalid attribute should raise AttributeError
self.assertRaises(AttributeError, obj.get, 'nothing')
# ...even with a default
self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
base_fields = base.NovaPersistentObject.fields.keys()
myobj_fields = (['foo', 'bar', 'missing',
'readonly', 'rel_object',
'rel_objects', 'mutable_default'] +
list(base_fields))
myobj3_fields = ['new_field']
self.assertTrue(issubclass(TestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(TestSubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(TestSubclassedObject.fields.keys()))
def test_obj_as_admin(self):
obj = MyObj(context=self.context)
def fake(*args, **kwargs):
self.assertTrue(obj._context.is_admin)
with mock.patch.object(obj, 'obj_reset_changes') as mock_fn:
mock_fn.side_effect = fake
with obj.obj_as_admin():
obj.save()
self.assertTrue(mock_fn.called)
self.assertFalse(obj._context.is_admin)
def test_obj_as_admin_orphaned(self):
def testme():
obj = MyObj()
with obj.obj_as_admin():
pass
self.assertRaises(exception.OrphanedObjectError, testme)
def test_obj_alternate_context(self):
obj = MyObj(context=self.context)
with obj.obj_alternate_context(mock.sentinel.alt_ctx):
self.assertEqual(mock.sentinel.alt_ctx,
obj._context)
self.assertEqual(self.context, obj._context)
def test_get_changes(self):
obj = MyObj()
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
@base.NovaObjectRegistry.register_if(False)
class TestObj(base.NovaObject):
fields = {'foo': fields.IntegerField()}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj()
self.assertEqual(['foo', 'bar'], obj.obj_fields)
def test_obj_constructor(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
def test_obj_read_only(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.readonly = 1
self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr,
obj, 'readonly', 2)
def test_obj_mutable_default(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.mutable_default = None
obj.mutable_default.append('s1')
self.assertEqual(obj.mutable_default, ['s1'])
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.mutable_default = None
obj1.mutable_default.append('s2')
self.assertEqual(obj1.mutable_default, ['s2'])
def test_obj_mutable_default_set_default(self):
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.obj_set_defaults('mutable_default')
self.assertEqual(obj1.mutable_default, [])
obj1.mutable_default.append('s1')
self.assertEqual(obj1.mutable_default, ['s1'])
obj2 = MyObj(context=self.context, foo=123, bar='abc')
obj2.obj_set_defaults('mutable_default')
self.assertEqual(obj2.mutable_default, [])
obj2.mutable_default.append('s2')
self.assertEqual(obj2.mutable_default, ['s2'])
def test_obj_repr(self):
obj = MyObj(foo=123)
self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
'deleted_at=<?>,foo=123,missing=<?>,'
'mutable_default=<?>,readonly=<?>,rel_object=<?>,'
'rel_objects=<?>,updated_at=<?>)',
repr(obj))
def test_obj_make_obj_compatible(self):
subobj = MyOwnedObject(baz=1)
subobj.VERSION = '1.2'
obj = MyObj(rel_object=subobj)
obj.obj_relationships = {
'rel_object': [('1.5', '1.1'), ('1.7', '1.2')],
}
orig_primitive = obj.obj_to_primitive()['nova_object.data']
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.8', 'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.7', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.2')
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.6', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.5', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.4', 'rel_object')
self.assertFalse(mock_compat.called)
self.assertNotIn('rel_object', primitive)
def test_obj_make_compatible_hits_sub_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10',
'rel_object')
def test_obj_make_compatible_skips_unset_sub_objects(self):
obj = MyObj(foo=123)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
self.assertFalse(mock_compat.called)
def test_obj_make_compatible_doesnt_skip_falsey_sub_objects(self):
@base.NovaObjectRegistry.register_if(False)
class MyList(base.ObjectListBase, base.NovaObject):
VERSION = '1.2'
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
obj_relationships = {
'objects': [('1.1', '1.1'), ('1.2', '1.2')],
}
mylist = MyList(objects=[])
@base.NovaObjectRegistry.register_if(False)
class MyOwner(base.NovaObject):
VERSION = '1.2'
fields = {'mylist': fields.ObjectField('MyList')}
obj_relationships = {
'mylist': [('1.1', '1.1')],
}
myowner = MyOwner(mylist=mylist)
primitive = myowner.obj_to_primitive('1.1')
self.assertIn('mylist', primitive['nova_object.data'])
def test_obj_make_compatible_handles_list_of_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(rel_objects=[subobj])
obj.obj_relationships = {'rel_objects': [('1.0', '1.123')]}
def fake_make_compat(primitive, version):
self.assertEqual('1.123', version)
self.assertIn('baz', primitive)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_mc:
mock_mc.side_effect = fake_make_compat
obj.obj_to_primitive('1.0')
self.assertTrue(mock_mc.called)
def test_delattr(self):
obj = MyObj(bar='foo')
del obj.bar
# Should appear unset now
self.assertFalse(obj.obj_attr_is_set('bar'))
# Make sure post-delete, references trigger lazy loads
self.assertEqual('loaded!', getattr(obj, 'bar'))
def test_delattr_unset(self):
obj = MyObj()
self.assertRaises(AttributeError, delattr, obj, 'bar')
class TestObject(_LocalTest, _TestObject):
def test_set_defaults(self):
obj = MyObj()
obj.obj_set_defaults('foo')
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertEqual(1, obj.foo)
def test_set_defaults_no_default(self):
obj = MyObj()
self.assertRaises(ovo_exc.ObjectActionError,
obj.obj_set_defaults, 'bar')
def test_set_all_defaults(self):
obj = MyObj()
obj.obj_set_defaults()
self.assertEqual(set(['deleted', 'foo', 'mutable_default']),
obj.obj_what_changed())
self.assertEqual(1, obj.foo)
def test_set_defaults_not_overwrite(self):
# NOTE(danms): deleted defaults to False, so verify that it does
# not get reset by obj_set_defaults()
obj = MyObj(deleted=True)
obj.obj_set_defaults()
self.assertEqual(1, obj.foo)
self.assertTrue(obj.deleted)
class TestObjectSerializer(_BaseTestCase):
def test_serialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.serialize_entity(None, thing))
def test_deserialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.deserialize_entity(None, thing))
def test_serialize_set_to_list(self):
ser = base.NovaObjectSerializer()
self.assertEqual([1, 2], ser.serialize_entity(None, set([1, 2])))
def _test_deserialize_entity_newer(self, obj_version, backported_to,
my_version='1.6'):
ser = base.NovaObjectSerializer()
ser._conductor = mock.Mock()
ser._conductor.object_backport_versions.return_value = 'backported'
class MyTestObj(MyObj):
VERSION = my_version
base.NovaObjectRegistry.register(MyTestObj)
obj = MyTestObj()
obj.VERSION = obj_version
primitive = obj.obj_to_primitive()
result = ser.deserialize_entity(self.context, primitive)
if backported_to is None:
self.assertFalse(ser._conductor.object_backport_versions.called)
else:
self.assertEqual('backported', result)
versions = ovo_base.obj_tree_get_versions('MyTestObj')
ser._conductor.object_backport_versions.assert_called_with(
self.context, primitive, versions)
def test_deserialize_entity_newer_version_backports(self):
self._test_deserialize_entity_newer('1.25', '1.6')
def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
self._test_deserialize_entity_newer('1.6.0', None)
def test_deserialize_entity_newer_revision_does_not_backport(self):
self._test_deserialize_entity_newer('1.6.1', None)
def test_deserialize_entity_newer_version_passes_revision(self):
self._test_deserialize_entity_newer('1.7', '1.6.1', '1.6.1')
def test_deserialize_dot_z_with_extra_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
ser = base.NovaObjectSerializer()
obj = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): The serializer is where the logic lives that
# avoids backports for cases where only a .z difference in
# the received object version is detected. As a result, we
# end up with a version of what we expected, effectively the
# .0 of the object.
self.assertEqual('1.6', obj.VERSION)
@mock.patch('oslo_versionedobjects.base.obj_tree_get_versions')
def test_object_tree_backport(self, mock_get_versions):
# Test the full client backport path all the way from the serializer
# to the conductor and back.
self.start_service('conductor',
manager='nova.conductor.manager.ConductorManager')
# NOTE(danms): Actually register a complex set of objects,
# two versions of the same parent object which contain a
# child sub object.
@base.NovaObjectRegistry.register
class Child(base.NovaObject):
VERSION = '1.10'
@base.NovaObjectRegistry.register
class Parent(base.NovaObject):
VERSION = '1.0'
fields = {
'child': fields.ObjectField('Child'),
}
@base.NovaObjectRegistry.register # noqa
class Parent(base.NovaObject):
VERSION = '1.1'
fields = {
'child': fields.ObjectField('Child'),
}
# NOTE(danms): Since we're on the same node as conductor,
# return a fake version manifest so that we confirm that it
# actually honors what the client asked for and not just what
# it sees in the local machine state.
mock_get_versions.return_value = {
'Parent': '1.0',
'Child': '1.5',
}
call_context = {}
real_ofp = base.NovaObject.obj_from_primitive
def fake_obj_from_primitive(*a, **k):
# NOTE(danms): We need the first call to this to report an
# incompatible object version, but subsequent calls must
# succeed. Since we're testing the backport path all the
# way through conductor and RPC, we can't fully break this
# method, we just need it to fail once to trigger the
# backport.
if 'run' in call_context:
return real_ofp(*a, **k)
else:
call_context['run'] = True
raise ovo_exc.IncompatibleObjectVersion('foo')
child = Child()
parent = Parent(child=child)
prim = parent.obj_to_primitive()
ser = base.NovaObjectSerializer()
with mock.patch('nova.objects.base.NovaObject.'
'obj_from_primitive') as mock_ofp:
mock_ofp.side_effect = fake_obj_from_primitive
result = ser.deserialize_entity(self.context, prim)
# Our newest version (and what we passed back) of Parent
# is 1.1, make sure that the manifest version is honored
self.assertEqual('1.0', result.VERSION)
# Our newest version (and what we passed back) of Child
# is 1.10, make sure that the manifest version is honored
self.assertEqual('1.5', result.child.VERSION)
def test_object_serialization(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
primitive = ser.serialize_entity(self.context, obj)
self.assertIn('nova_object.name', primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertIsInstance(obj2, MyObj)
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
# dict case
thing = {'key': obj}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in six.itervalues(primitive):
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in six.itervalues(thing2):
self.assertIsInstance(item, MyObj)
# object-action updates dict case
thing = {'foo': obj.obj_to_primitive()}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(thing, primitive)
thing2 = ser.deserialize_entity(self.context, thing)
self.assertIsInstance(thing2['foo'], base.NovaObject)
class TestArgsSerializer(test.NoDBTestCase):
def setUp(self):
super(TestArgsSerializer, self).setUp()
self.now = timeutils.utcnow()
self.str_now = utils.strtime(self.now)
self.unicode_str = u'\xF0\x9F\x92\xA9'
@base.serialize_args
def _test_serialize_args(self, *args, **kwargs):
expected_args = ('untouched', self.str_now, self.str_now)
for index, val in enumerate(args):
self.assertEqual(expected_args[index], val)
expected_kwargs = {'a': 'untouched', 'b': self.str_now,
'c': self.str_now, 'exc_val': self.unicode_str}
for key, val in six.iteritems(kwargs):
self.assertEqual(expected_kwargs[key], val)
def test_serialize_args(self):
self._test_serialize_args('untouched', self.now, self.now,
a='untouched', b=self.now, c=self.now,
exc_val=self.unicode_str)
class TestRegistry(test.NoDBTestCase):
@mock.patch('nova.objects.base.objects')
def test_hook_chooses_newer_properly(self, mock_objects):
reg = base.NovaObjectRegistry()
reg.registration_hook(MyObj, 0)
class MyNewerObj(object):
VERSION = '1.123'
@classmethod
def obj_name(cls):
return 'MyObj'
self.assertEqual(MyObj, mock_objects.MyObj)
reg.registration_hook(MyNewerObj, 0)
self.assertEqual(MyNewerObj, mock_objects.MyObj)
@mock.patch('nova.objects.base.objects')
def test_hook_keeps_newer_properly(self, mock_objects):
reg = base.NovaObjectRegistry()
reg.registration_hook(MyObj, 0)
class MyOlderObj(object):
VERSION = '1.1'
@classmethod
def obj_name(cls):
return 'MyObj'
self.assertEqual(MyObj, mock_objects.MyObj)
reg.registration_hook(MyOlderObj, 0)
self.assertEqual(MyObj, mock_objects.MyObj)
# NOTE(danms): The hashes in this list should only be changed if
# they come with a corresponding version bump in the affected
# objects
object_data = {
'Agent': '1.0-c0c092abaceb6f51efe5d82175f15eba',
'AgentList': '1.0-5a7380d02c3aaf2a32fc8115ae7ca98c',
'Aggregate': '1.1-1ab35c4516f71de0bef7087026ab10d1',
'AggregateList': '1.2-fb6e19f3c3a3186b04eceb98b5dadbfa',
'BandwidthUsage': '1.2-c6e4c779c7f40f2407e3d70022e3cd1c',
'BandwidthUsageList': '1.2-5fe7475ada6fe62413cbfcc06ec70746',
'BlockDeviceMapping': '1.15-d44d8d694619e79c172a99b3c1d6261d',
'BlockDeviceMappingList': '1.17-1e568eecb91d06d4112db9fd656de235',
'CellMapping': '1.0-7f1a7e85a22bbb7559fc730ab658b9bd',
'ComputeNode': '1.14-a396975707b66281c5f404a68fccd395',
'ComputeNodeList': '1.14-3b6f4f5ade621c40e70cb116db237844',
'DNSDomain': '1.0-7b0b2dab778454b6a7b6c66afe163a1a',
'DNSDomainList': '1.0-4ee0d9efdfd681fed822da88376e04d2',
'EC2Ids': '1.0-474ee1094c7ec16f8ce657595d8c49d9',
'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f',
'EC2SnapshotMapping': '1.0-47e7ddabe1af966dce0cfd0ed6cd7cd1',
'EC2VolumeMapping': '1.0-5b713751d6f97bad620f3378a521020d',
'FixedIP': '1.14-53e1c10b539f1a82fe83b1af4720efae',
'FixedIPList': '1.14-87a39361c8f08f059004d6b15103cdfd',
'Flavor': '1.1-b6bb7a730a79d720344accefafacf7ee',
'FlavorList': '1.1-52b5928600e7ca973aa4fc1e46f3934c',
'FloatingIP': '1.10-52a67d52d85eb8b3f324a5b7935a335b',
'FloatingIPList': '1.11-7f2ba670714e1b7bab462ab3290f7159',
'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac',
'HVSpec': '1.2-db672e73304da86139086d003f3977e7',
'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
'ImageMetaProps': '1.8-a07a00bb829668f3bdccf8de03c128bb',
'Instance': '2.0-ff56804dce87d81d9a04834d4bd1e3d2',
'InstanceAction': '1.1-f9f293e526b66fca0d05c3b3a2d13914',
'InstanceActionEvent': '1.1-e56a64fa4710e43ef7af2ad9d6028b33',
'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be',
'InstanceActionList': '1.0-4a53826625cc280e15fae64a575e0879',
'InstanceExternalEvent': '1.1-6e446ceaae5f475ead255946dd443417',
'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
'InstanceFaultList': '1.1-f8ec07cbe3b60f5f07a8b7a06311ac0d',
'InstanceGroup': '1.10-1a0c8c7447dc7ecb9da53849430c4a5f',
'InstanceGroupList': '1.7-be18078220513316abd0ae1b2d916873',
'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e',
'InstanceList': '2.0-6c8ba6147cca3082b1e4643f795068bf',
'InstanceMapping': '1.0-47ef26034dfcbea78427565d9177fe50',
'InstanceMappingList': '1.0-9e982e3de1613b9ada85e35f69b23d47',
'InstanceNUMACell': '1.2-535ef30e0de2d6a0d26a71bd58ecafc4',
'InstanceNUMATopology': '1.2-d944a7d6c21e1c773ffdf09c6d025954',
'InstancePCIRequest': '1.1-b1d75ebc716cb12906d9d513890092bf',
'InstancePCIRequests': '1.1-65e38083177726d806684cb1cc0136d2',
'KeyPair': '1.3-bfaa2a8b148cdf11e0c72435d9dd097a',
'KeyPairList': '1.2-58b94f96e776bedaf1e192ddb2a24c4e',
'Migration': '1.2-8784125bedcea0a9227318511904e853',
'MigrationContext': '1.0-d8c2f10069e410f639c49082b5932c92',
'MigrationList': '1.2-02c0ec0c50b75ca86a2a74c5e8c911cc',
'MonitorMetric': '1.1-53b1db7c4ae2c531db79761e7acc52ba',
'MonitorMetricList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'NUMACell': '1.2-74fc993ac5c83005e76e34e8487f1c05',
'NUMAPagesTopology': '1.0-c71d86317283266dc8364c149155e48e',
'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
'NUMATopologyLimits': '1.0-9463e0edd40f64765ae518a539b9dfd2',
'Network': '1.2-a977ab383aa462a479b2fae8211a5dde',
'NetworkList': '1.2-69eca910d8fa035dfecd8ba10877ee59',
'NetworkRequest': '1.1-7a3e4ca2ce1e7b62d8400488f2f2b756',
'NetworkRequestList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'PciDevice': '1.3-d92e0b17bbed61815b919af6b8d8998e',
'PciDeviceList': '1.2-3757458c45591cbc92c72ee99e757c98',
'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
'PciDevicePoolList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Quotas': '1.2-1fe4cd50593aaf5d36a6dc5ab3f98fb3',
'QuotasNoOp': '1.2-e041ddeb7dc8188ca71706f78aad41c1',
'RequestSpec': '1.5-576a249869c161e17b7cd6d55f9d85f3',
'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e',
'SchedulerLimits': '1.0-249c4bd8e62a9b327b7026b7f19cc641',
'SchedulerRetries': '1.1-3c9c8b16143ebbb6ad7030e999d14cc0',
'SecurityGroup': '1.1-0e1b9ba42fe85c13c1437f8b74bdb976',
'SecurityGroupList': '1.0-dc8bbea01ba09a2edb6e5233eae85cbc',
'SecurityGroupRule': '1.1-ae1da17b79970012e8536f88cb3c6b29',
'SecurityGroupRuleList': '1.1-674b323c9ccea02e93b1b40e7fd2091a',
'Service': '1.19-8914320cbeb4ec29f252d72ce55d07e1',
'ServiceList': '1.17-b767102cba7cbed290e396114c3f86b3',
'TaskLog': '1.0-78b0534366f29aa3eebb01860fbe18fe',
'TaskLogList': '1.0-cc8cce1af8a283b9d28b55fcd682e777',
'Tag': '1.1-8b8d7d5b48887651a0e01241672e2963',
'TagList': '1.1-55231bdb671ecf7641d6a2e9109b5d8e',
'VirtCPUFeature': '1.0-3310718d8c72309259a6e39bdefe83ee',
'VirtCPUModel': '1.0-6a5cc9f322729fc70ddc6733bacd57d3',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
'VirtualInterface': '1.0-19921e38cba320f355d56ecbf8f29587',
'VirtualInterfaceList': '1.0-9750e2074437b3077e46359102779fc6',
'VolumeUsage': '1.0-6c8190c46ce1469bb3286a1f21c2e475',
}
class TestObjectVersions(test.NoDBTestCase):
@staticmethod
def _is_method(thing):
# NOTE(dims): In Python3, The concept of 'unbound methods' has
# been removed from the language. When referencing a method
# as a class attribute, you now get a plain function object.
# so let's check for both
return inspect.isfunction(thing) or inspect.ismethod(thing)
def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
"""Follow a chain of remotable things down to the original function."""
if isinstance(thing, classmethod):
return self._find_remotable_method(cls, thing.__get__(None, cls))
elif self._is_method(thing) and hasattr(thing, 'remotable'):
return self._find_remotable_method(cls, thing.original_fn,
parent_was_remotable=True)
elif parent_was_remotable:
# We must be the first non-remotable thing underneath a stack of
# remotable things (i.e. the actual implementation method)
return thing
else:
# This means the top-level thing never hit a remotable layer
return None
def _un_unicodify_enum_valid_values(self, _fields):
for name, field in _fields:
if not isinstance(field, (fields.BaseEnumField,
fields.EnumField)):
continue
orig_type = type(field._type._valid_values)
field._type._valid_values = orig_type(
[x.encode('utf-8') for x in
field._type._valid_values])
def test_find_remotable_method(self):
class MyObject(object):
@base.remotable
def my_method(self):
return 'Hello World!'
thing = self._find_remotable_method(MyObject,
getattr(MyObject, 'my_method'))
self.assertIsNotNone(thing)
def test_versions(self):
checker = fixture.ObjectVersionChecker(
base.NovaObjectRegistry.obj_classes())
fingerprints = checker.get_hashes()
if os.getenv('GENERATE_HASHES'):
file('object_hashes.txt', 'w').write(
pprint.pformat(fingerprints))
raise test.TestingException(
'Generated hashes in object_hashes.txt')
expected, actual = checker.test_hashes(object_data)
self.assertEqual(expected, actual,
'Some objects have changed; please make sure the '
'versions have been bumped, and then update their '
'hashes here.')
def test_obj_make_compatible(self):
# Iterate all object classes and verify that we can run
# obj_make_compatible with every older version than current.
# This doesn't actually test the data conversions, but it at least
# makes sure the method doesn't blow up on something basic like
# expecting the wrong version format.
obj_classes = base.NovaObjectRegistry.obj_classes()
for obj_name in obj_classes:
versions = ovo_base.obj_tree_get_versions(obj_name)
obj_class = obj_classes[obj_name][0]
version = versionutils.convert_version_to_tuple(obj_class.VERSION)
for n in range(version[1]):
test_version = '%d.%d' % (version[0], n)
LOG.info('testing obj: %s version: %s' %
(obj_name, test_version))
obj_class().obj_to_primitive(target_version=test_version,
version_manifest=versions)
def test_list_obj_make_compatible(self):
@base.NovaObjectRegistry.register_if(False)
class TestObj(base.NovaObject):
VERSION = '1.4'
fields = {'foo': fields.IntegerField()}
@base.NovaObjectRegistry.register_if(False)
class TestListObj(base.ObjectListBase, base.NovaObject):
VERSION = '1.5'
fields = {'objects': fields.ListOfObjectsField('TestObj')}
obj_relationships = {
'objects': [('1.0', '1.1'), ('1.1', '1.2'),
('1.3', '1.3'), ('1.5', '1.4')]
}
my_list = TestListObj()
my_obj = TestObj(foo=1)
my_list.objects = [my_obj]
primitive = my_list.obj_to_primitive(target_version='1.5')
primitive_data = primitive['nova_object.data']
obj_primitive = my_obj.obj_to_primitive(target_version='1.4')
obj_primitive_data = obj_primitive['nova_object.data']
with mock.patch.object(TestObj, 'obj_make_compatible') as comp:
my_list.obj_make_compatible(primitive_data, '1.1')
comp.assert_called_with(obj_primitive_data,
'1.2')
def test_list_obj_make_compatible_when_no_objects(self):
# Test to make sure obj_make_compatible works with no 'objects'
# If a List object ever has a version that did not contain the
# 'objects' key, we need to make sure converting back to that version
# doesn't cause backporting problems.
@base.NovaObjectRegistry.register_if(False)
class TestObj(base.NovaObject):
VERSION = '1.1'
fields = {'foo': fields.IntegerField()}
@base.NovaObjectRegistry.register_if(False)
class TestListObj(base.ObjectListBase, base.NovaObject):
VERSION = '1.1'
fields = {'objects': fields.ListOfObjectsField('TestObj')}
# pretend that version 1.0 didn't have 'objects'
obj_relationships = {
'objects': [('1.1', '1.1')]
}
my_list = TestListObj()
my_list.objects = [TestObj(foo=1)]
primitive = my_list.obj_to_primitive(target_version='1.1')
primitive_data = primitive['nova_object.data']
my_list.obj_make_compatible(primitive_data,
target_version='1.0')
self.assertNotIn('objects', primitive_data,
"List was backported to before 'objects' existed."
" 'objects' should not be in the primitive.")
class TestObjEqualPrims(_BaseTestCase):
def test_object_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='goodbye')
obj2.obj_reset_changes()
obj2.bar = 'goodbye'
        # obj2 will be marked with field 'bar' updated
        self.assertTrue(base.obj_equal_prims(obj1, obj2),
                        "Objects that differ only because one is marked "
                        "as updated should be equal")
def test_object_not_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='hello')
obj2.obj_reset_changes()
self.assertFalse(base.obj_equal_prims(obj1, obj2),
"Objects that differ in any field "
"should not be equal")
def test_object_ignore_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='hello')
obj2.obj_reset_changes()
self.assertTrue(base.obj_equal_prims(obj1, obj2, ['bar']),
"Objects that only differ in an ignored field "
"should be equal")
class TestObjMethodOverrides(test.NoDBTestCase):
def test_obj_reset_changes(self):
args = inspect.getargspec(base.NovaObject.obj_reset_changes)
obj_classes = base.NovaObjectRegistry.obj_classes()
for obj_name in obj_classes:
obj_class = obj_classes[obj_name][0]
self.assertEqual(args,
inspect.getargspec(obj_class.obj_reset_changes))
|
the-stack_0_24573
|
def test_get(player, ball):
player.perform("get ball")
assert player.saw("pick up a red ball")
player.forget()
player.perform("inv")
assert player.saw("a red ball")
def test_get_self(player):
player.perform("get self")
assert player.saw("can't")
def test_get_uncarriable(player, statue):
player.perform("get statue")
assert player.saw("can't be carried")
def test_get_from(player, ball, box):
ball.Spatial.store_in(box)
player.perform("get ball from box")
assert player.saw("a red ball")
player.forget()
player.perform("inv")
assert player.saw("a red ball")
def test_get_from_ambiguous(player, box, iron_box, ball, green_ball):
ball.Spatial.store_in(box)
green_ball.Spatial.store_in(box)
player.perform("get ball from box")
assert player.saw("Which 'box'")
player.forget()
player.perform("1")
assert player.saw("Which 'ball'")
player.forget()
player.perform("1")
assert player.saw("red ball")
def test_get_from_entity_in_inventory(player, box, ball, green_ball):
ball.Spatial.store_in(box)
box.Spatial.store_in(player)
player.perform("get ball from box")
assert player.saw("a red ball")
def test_get_from_too_deep(player, box, iron_box, ball):
ball.Spatial.store_in(box)
box.Spatial.store_in(iron_box)
player.perform("get ball from box")
assert player.saw("You don't see any 'ball' in an iron box")
player.perform("get ball from cardboard box")
assert player.saw("You don't see any 'cardboard box' nearby")
|
the-stack_0_24576
|
"""
Widgets to represent and edit base Python types.
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class NumericTypeWidget(object):
default_minimum = 0
default_maximum = 1
def __init__(self, min=None, max=None):
if min is not None:
self.setMinimum(min)
else:
self.setMinimum(self.default_minimum)
if max is not None:
self.setMaximum(max)
else:
self.setMaximum(self.default_maximum)
def get_value(self):
return self.value()
def set_value(self, value):
self.setValue(value)
widget_value = property(get_value, set_value)
class IntegerTypeWidget(QSpinBox, NumericTypeWidget):
default_minimum = -999
default_maximum = 999
def __init__(self, min=None, max=None, parent=None):
QSpinBox.__init__(self, parent)
NumericTypeWidget.__init__(self, min, max)
class FloatTypeWidget(QDoubleSpinBox, NumericTypeWidget):
default_maximum = 999.0
default_minimum = -999.0
def __init__(self, min=None, max=None, parent=None):
QDoubleSpinBox.__init__(self, parent)
NumericTypeWidget.__init__(self, min, max)
self.setDecimals(2)
class StringTypeWidget(QLineEdit):
def __init__(self, parent=None):
QLineEdit.__init__(self, parent)
def get_value(self):
return unicode(self.text())
def set_value(self, value):
self.setText(value)
widget_value = property(get_value, set_value)
class BooleanTypeWidget(QCheckBox):
def __init__(self, parent=None):
QCheckBox.__init__(self, parent)
def get_value(self):
return self.isChecked()
def set_value(self, value):
self.setChecked(value)
widget_value = property(get_value, set_value)
class ChoiceTypeWidget(QComboBox):
EXTRACT_METHODS = {
int: lambda v: v.toInt()[0],
float: lambda v: v.toDouble()[0],
        str: lambda v: unicode(v.toString()),
        bool: lambda v: v.toBool(),
}
def __init__(self, data_type, choices, parent=None):
QComboBox.__init__(self, parent)
self.data_type = data_type
for value, text in choices:
self.addItem(text, QVariant(value))
def get_value(self):
value = self.itemData(self.currentIndex())
return self.EXTRACT_METHODS[self.data_type](value)
def set_value(self, value):
index = self.findData(QVariant(value))
if index == -1:
raise ValueError("invalid value: '%s'" % value)
self.setCurrentIndex(index)
widget_value = property(get_value, set_value)
TYPES_WIDGETS = {
int: IntegerTypeWidget,
float: FloatTypeWidget,
str: StringTypeWidget,
bool: BooleanTypeWidget,
}
def create_type_widget(data_type, choices=None, **kwargs):
if choices is None:
return TYPES_WIDGETS[data_type](**kwargs)
else:
return ChoiceTypeWidget(data_type, choices, **kwargs)
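# --- Illustrative sketch (not part of the original module): how the factory
# --- might be used from a PyQt4 application; the QApplication and the chosen
# --- values are placeholders for the example only.
#
# app = QApplication([])
# spin = create_type_widget(int, min=0, max=10)
# spin.widget_value = 5
# combo = create_type_widget(str, choices=[('a', 'Option A'), ('b', 'Option B')])
# combo.widget_value = 'b'
# print(spin.widget_value, combo.widget_value)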
|
the-stack_0_24577
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Legendgrouptitle(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "heatmap"
_path_str = "heatmap.legendgrouptitle"
_valid_props = {"font", "text"}
# font
# ----
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.heatmap.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.heatmap.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.heatmap.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super(Legendgrouptitle, self).__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.heatmap.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmap.Legendgrouptitle`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
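# --- Illustrative sketch (not part of the generated module): constructing the
# --- object directly with the two supported properties documented above; the
# --- font dict values are placeholders.
#
# title = Legendgrouptitle(text="Group A", font=dict(family="Arial", size=14))
# title.text = "Group B"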
|
the-stack_0_24579
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Spanish National Research Council.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# E0202: An attribute inherited from %s hide this method
# pylint: disable=E0202
import abc
import argparse
import os
import six
from stevedore import extension
from cinderclient.openstack.common.apiclient import exceptions
_discovered_plugins = {}
def discover_auth_systems():
"""Discover the available auth-systems.
This won't take into account the old style auth-systems.
"""
global _discovered_plugins
_discovered_plugins = {}
def add_plugin(ext):
_discovered_plugins[ext.name] = ext.plugin
ep_namespace = "cinderclient.openstack.common.apiclient.auth"
mgr = extension.ExtensionManager(ep_namespace)
mgr.map(add_plugin)
def load_auth_system_opts(parser):
"""Load options needed by the available auth-systems into a parser.
This function will try to populate the parser with options from the
available plugins.
"""
group = parser.add_argument_group("Common auth options")
BaseAuthPlugin.add_common_opts(group)
for name, auth_plugin in six.iteritems(_discovered_plugins):
group = parser.add_argument_group(
"Auth-system '%s' options" % name,
conflict_handler="resolve")
auth_plugin.add_opts(group)
def load_plugin(auth_system):
try:
plugin_class = _discovered_plugins[auth_system]
except KeyError:
raise exceptions.AuthSystemNotFound(auth_system)
return plugin_class(auth_system=auth_system)
def load_plugin_from_args(args):
"""Load required plugin and populate it with options.
Try to guess auth system if it is not specified. Systems are tried in
alphabetical order.
:type args: argparse.Namespace
:raises: AuthorizationFailure
"""
auth_system = args.os_auth_system
if auth_system:
plugin = load_plugin(auth_system)
plugin.parse_opts(args)
plugin.sufficient_options()
return plugin
for plugin_auth_system in sorted(six.iterkeys(_discovered_plugins)):
plugin_class = _discovered_plugins[plugin_auth_system]
plugin = plugin_class()
plugin.parse_opts(args)
try:
plugin.sufficient_options()
except exceptions.AuthPluginOptionsMissing:
continue
return plugin
raise exceptions.AuthPluginOptionsMissing(["auth_system"])
@six.add_metaclass(abc.ABCMeta)
class BaseAuthPlugin(object):
"""Base class for authentication plugins.
An authentication plugin needs to override at least the authenticate
method to be a valid plugin.
"""
auth_system = None
opt_names = []
common_opt_names = [
"auth_system",
"username",
"password",
"tenant_name",
"token",
"auth_url",
]
def __init__(self, auth_system=None, **kwargs):
self.auth_system = auth_system or self.auth_system
self.opts = dict((name, kwargs.get(name))
for name in self.opt_names)
@staticmethod
def _parser_add_opt(parser, opt):
"""Add an option to parser in two variants.
:param opt: option name (with underscores)
"""
dashed_opt = opt.replace("_", "-")
env_var = "OS_%s" % opt.upper()
arg_default = os.environ.get(env_var, "")
arg_help = "Defaults to env[%s]." % env_var
parser.add_argument(
"--os-%s" % dashed_opt,
metavar="<%s>" % dashed_opt,
default=arg_default,
help=arg_help)
parser.add_argument(
"--os_%s" % opt,
metavar="<%s>" % dashed_opt,
help=argparse.SUPPRESS)
@classmethod
def add_opts(cls, parser):
"""Populate the parser with the options for this plugin.
"""
for opt in cls.opt_names:
# use `BaseAuthPlugin.common_opt_names` since it is never
# changed in child classes
if opt not in BaseAuthPlugin.common_opt_names:
cls._parser_add_opt(parser, opt)
@classmethod
def add_common_opts(cls, parser):
"""Add options that are common for several plugins.
"""
for opt in cls.common_opt_names:
cls._parser_add_opt(parser, opt)
@staticmethod
def get_opt(opt_name, args):
"""Return option name and value.
:param opt_name: name of the option, e.g., "username"
:param args: parsed arguments
"""
return (opt_name, getattr(args, "os_%s" % opt_name, None))
def parse_opts(self, args):
"""Parse the actual auth-system options if any.
This method is expected to populate the attribute `self.opts` with a
dict containing the options and values needed to make authentication.
"""
self.opts.update(dict(self.get_opt(opt_name, args)
for opt_name in self.opt_names))
def authenticate(self, http_client):
"""Authenticate using plugin defined method.
The method usually analyses `self.opts` and performs
a request to authentication server.
:param http_client: client object that needs authentication
:type http_client: HTTPClient
:raises: AuthorizationFailure
"""
self.sufficient_options()
self._do_authenticate(http_client)
@abc.abstractmethod
def _do_authenticate(self, http_client):
"""Protected method for authentication.
"""
def sufficient_options(self):
"""Check if all required options are present.
:raises: AuthPluginOptionsMissing
"""
missing = [opt
for opt in self.opt_names
if not self.opts.get(opt)]
if missing:
raise exceptions.AuthPluginOptionsMissing(missing)
@abc.abstractmethod
def token_and_endpoint(self, endpoint_type, service_type):
"""Return token and endpoint.
:param service_type: Service type of the endpoint
:type service_type: string
:param endpoint_type: Type of endpoint.
Possible values: public or publicURL,
internal or internalURL,
admin or adminURL
:type endpoint_type: string
:returns: tuple of token and endpoint strings
:raises: EndpointException
"""
|
the-stack_0_24580
|
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import shutil
from typing import Dict, List
from polyaxon.constants import UNKNOWN
from polyaxon.logger import logger
from polyaxon.polyboard.artifacts import V1ArtifactKind
from polyaxon.polyboard.events import (
LoggedEventSpec,
V1Event,
V1EventArtifact,
V1EventAudio,
V1EventChart,
V1EventChartKind,
V1EventDataframe,
V1EventHistogram,
V1EventImage,
V1EventModel,
V1EventVideo,
)
from polyaxon.utils.np_utils import calculate_scale_factor, to_np
from polyaxon.utils.path_utils import check_or_create_path, module_type
try:
import numpy as np
except ImportError:
np = None
NUMPY_ERROR_MESSAGE = "numpy is required for this tracking operation!"
PIL_ERROR_MESSAGE = "PIL/Pillow is required for this tracking operation!"
MOVIEPY_ERROR_MESSAGE = "moviepy is required for this tracking operation!"
MATPLOTLIB_ERROR_MESSAGE = "matplotlib is required for this tracking operation!"
PLOTLY_ERROR_MESSAGE = "plotly is required for this tracking operation!"
BOKEH_ERROR_MESSAGE = "bokeh is required for this tracking operation!"
def dataframe_path(
from_path: str, asset_path: str, content_type: str = None
) -> V1EventDataframe:
check_or_create_path(asset_path, is_dir=False)
shutil.copy(from_path, asset_path)
return V1EventDataframe(path=asset_path, content_type=content_type)
def model_path(
from_path: str, asset_path: str, framework: str = None, spec: Dict = None
) -> V1EventModel:
check_or_create_path(asset_path, is_dir=False)
if os.path.isfile(from_path):
shutil.copy(from_path, asset_path)
else:
shutil.copytree(from_path, asset_path)
return V1EventModel(path=asset_path, framework=framework, spec=spec)
def artifact_path(from_path: str, asset_path: str, kind: str) -> V1EventArtifact:
check_or_create_path(asset_path, is_dir=False)
shutil.copy(from_path, asset_path)
return V1EventArtifact(kind=kind, path=asset_path)
def image_path(from_path: str, asset_path: str) -> V1EventImage:
check_or_create_path(asset_path, is_dir=False)
shutil.copy(from_path, asset_path)
return V1EventImage(path=asset_path)
def video_path(from_path: str, asset_path: str, content_type=None) -> V1EventVideo:
check_or_create_path(asset_path, is_dir=False)
shutil.copy(from_path, asset_path)
return V1EventVideo(path=asset_path, content_type=content_type)
def audio_path(from_path: str, asset_path: str, content_type=None) -> V1EventAudio:
check_or_create_path(asset_path, is_dir=False)
shutil.copy(from_path, asset_path)
return V1EventAudio(path=asset_path, content_type=content_type)
def _draw_single_box(
image,
xmin,
ymin,
xmax,
ymax,
display_str,
color="black",
color_text="black",
thickness=2,
):
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
try:
from PIL import ImageDraw, ImageFont
except ImportError:
logger.warning(PIL_ERROR_MESSAGE)
return UNKNOWN
font = ImageFont.load_default()
draw = ImageDraw.Draw(image)
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line(
[(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
width=thickness,
fill=color,
)
if display_str:
text_bottom = bottom
# Reverse list and print from bottom to top.
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[
(left, text_bottom - text_height - 2 * margin),
(left + text_width, text_bottom),
],
fill=color,
)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill=color_text,
font=font,
)
return image
def metric(value):
if isinstance(value, float):
return value
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
value = to_np(value)
assert value.squeeze().ndim == 0, "scalar should be 0D"
return float(value)
def histogram(values, bins, max_bins=None):
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
values = to_np(values).astype(float)
if values.size == 0:
raise ValueError("The input has no element.")
values = values.reshape(-1)
counts, limits = np.histogram(values, bins=bins)
num_bins = len(counts)
if max_bins is not None and num_bins > max_bins:
subsampling = num_bins // max_bins
subsampling_remainder = num_bins % subsampling
if subsampling_remainder != 0:
counts = np.pad(
counts,
pad_width=[[0, subsampling - subsampling_remainder]],
mode="constant",
constant_values=0,
)
counts = counts.reshape(-1, subsampling).sum(axis=-1)
if counts.size == 0:
logger.warning("Tracking an empty histogram")
return UNKNOWN
return V1EventHistogram(values=values, counts=counts)
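# Illustrative usage of `histogram` (assumes numpy is installed); the input
# values and bin count are arbitrary:
#
#     >>> histogram(values=[0.1, 0.4, 0.4, 0.9], bins=2)
#     # -> V1EventHistogram with counts [3, 1] over two equal-width bins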
def np_histogram(values, counts):
return V1EventHistogram(values=values, counts=counts)
def encoded_image(asset_path, data):
try:
from PIL import Image
except ImportError:
logger.warning(PIL_ERROR_MESSAGE)
return UNKNOWN
image_data = Image.open(io.BytesIO(data.encoded_image_string))
return save_image(
asset_path=asset_path,
image_data=image_data,
height=data.height,
width=data.width,
colorspace=data.colorspace,
)
def image(asset_path, data, rescale=1, dataformats="CHW"):
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
tensor = to_np(data)
tensor = convert_to_HWC(tensor, dataformats)
# Do not assume that user passes in values in [0, 255], use data type to detect
scale_factor = calculate_scale_factor(tensor)
tensor = tensor.astype(np.float32)
tensor = (tensor * scale_factor).astype(np.uint8)
return make_image(asset_path, tensor, rescale=rescale)
def image_boxes(asset_path, tensor_image, tensor_boxes, rescale=1, dataformats="CHW"):
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
tensor_image = to_np(tensor_image)
tensor_image = convert_to_HWC(tensor_image, dataformats)
tensor_boxes = to_np(tensor_boxes)
tensor_image = tensor_image.astype(np.float32) * calculate_scale_factor(
tensor_image
)
return make_image(
asset_path, tensor_image.astype(np.uint8), rescale=rescale, rois=tensor_boxes
)
def draw_boxes(disp_image, boxes):
# xyxy format
num_boxes = boxes.shape[0]
list_gt = range(num_boxes)
for i in list_gt:
disp_image = _draw_single_box(
disp_image,
boxes[i, 0],
boxes[i, 1],
boxes[i, 2],
boxes[i, 3],
display_str=None,
color="Red",
)
return disp_image
def make_image(asset_path, tensor, rescale=1, rois=None):
try:
from PIL import Image
except ImportError:
logger.warning(PIL_ERROR_MESSAGE)
return UNKNOWN
height, width, colorspace = tensor.shape
scaled_height = int(height * rescale)
scaled_width = int(width * rescale)
image_data = Image.fromarray(tensor)
if rois is not None:
image_data = draw_boxes(image_data, rois)
image_data = image_data.resize((scaled_width, scaled_height), Image.ANTIALIAS)
return save_image(
asset_path=asset_path,
image_data=image_data,
height=height,
width=width,
colorspace=colorspace,
)
def save_image(asset_path, image_data, height, width, colorspace):
check_or_create_path(asset_path, is_dir=False)
image_data.save(asset_path, format="PNG")
return V1EventImage(
height=height, width=width, colorspace=colorspace, path=asset_path
)
def video(asset_path, tensor, fps=4, content_type="gif"):
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
tensor = to_np(tensor)
tensor = prepare_video(tensor)
# If user passes in uint8, then we don't need to rescale by 255
scale_factor = calculate_scale_factor(tensor)
tensor = tensor.astype(np.float32)
tensor = (tensor * scale_factor).astype(np.uint8)
return make_video(asset_path, tensor, fps, content_type)
def make_video(asset_path, tensor, fps, content_type="gif"):
try:
import moviepy # noqa: F401
except ImportError:
logger.warning(MOVIEPY_ERROR_MESSAGE)
return UNKNOWN
try:
from moviepy import editor as mpy
except ImportError:
        logger.warning(
            "moviepy is installed, but can't import moviepy.editor. "
            "Some packages could be missing [imageio, requests]"
        )
        return UNKNOWN
t, h, w, c = tensor.shape
# encode sequence of images into gif string
clip = mpy.ImageSequenceClip(list(tensor), fps=fps)
check_or_create_path(asset_path, is_dir=False)
try: # older version of moviepy
if content_type == "gif":
clip.write_gif(asset_path, verbose=False, progress_bar=False)
else:
clip.write_videofile(asset_path, verbose=False, progress_bar=False)
except TypeError:
if content_type == "gif":
clip.write_gif(asset_path, verbose=False)
else:
clip.write_videofile(asset_path, verbose=False)
return V1EventVideo(
height=h, width=w, colorspace=c, path=asset_path, content_type=content_type
)
def audio(asset_path, tensor, sample_rate=44100):
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
tensor = to_np(tensor)
tensor = tensor.squeeze()
if abs(tensor).max() > 1:
print("warning: audio amplitude out of range, auto clipped.")
tensor = tensor.clip(-1, 1)
assert tensor.ndim == 1, "input tensor should be 1 dimensional."
tensor_list = [int(32767.0 * x) for x in tensor]
import wave
import struct
check_or_create_path(asset_path, is_dir=False)
wave_write = wave.open(asset_path, "wb")
wave_write.setnchannels(1)
wave_write.setsampwidth(2)
wave_write.setframerate(sample_rate)
tensor_enc = b""
for v in tensor_list:
tensor_enc += struct.pack("<h", v)
wave_write.writeframes(tensor_enc)
wave_write.close()
return V1EventAudio(
sample_rate=sample_rate,
num_channels=1,
length_frames=len(tensor_list),
path=asset_path,
content_type="audio/wav",
)
# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py
def compute_curve(labels, predictions, num_thresholds=None, weights=None):
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
_MINIMUM_COUNT = 1e-7
if weights is None:
weights = 1.0
# Compute bins of true positives and false positives.
bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
    float_labels = labels.astype(float)
histogram_range = (0, num_thresholds - 1)
tp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=float_labels * weights,
)
fp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=(1.0 - float_labels) * weights,
)
# Obtain the reverse cumulative sum.
tp = np.cumsum(tp_buckets[::-1])[::-1]
fp = np.cumsum(fp_buckets[::-1])[::-1]
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)
return np.stack((tp, fp, tn, fn, precision, recall))
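# Illustrative usage of `compute_curve` (assumes numpy is installed); the
# predictions must lie in [0, 1] so the bucket indices stay inside the
# histogram range:
#
#     >>> labels = np.array([0, 0, 1, 1])
#     >>> preds = np.array([0.1, 0.4, 0.35, 0.8])
#     >>> compute_curve(labels, preds, num_thresholds=5).shape
#     (6, 5)   # rows: tp, fp, tn, fn, precision, recall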
def figure_to_image(figure, close=True):
"""Render matplotlib figure to numpy format.
Returns:
numpy.array: image in [CHW] order
"""
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
try:
import matplotlib.pyplot as plt
import matplotlib.backends.backend_agg as plt_backend_agg
except ImportError:
        logger.warning(MATPLOTLIB_ERROR_MESSAGE)
        return UNKNOWN
canvas = plt_backend_agg.FigureCanvasAgg(figure)
canvas.draw()
data = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8)
w, h = figure.canvas.get_width_height()
image_hwc = data.reshape([h, w, 4])[:, :, 0:3]
image_chw = np.moveaxis(image_hwc, source=2, destination=0)
if close:
plt.close(figure)
return image_chw
def figures_to_images(figures, close=True):
"""Render matplotlib figure to numpy format.
Returns:
numpy.array: image in [CHW] order
"""
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
images = [figure_to_image(figure, close=close) for figure in figures]
return np.stack(images)
def ensure_matplotlib_figure(figure):
"""Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted.
"""
import matplotlib
from matplotlib.figure import Figure
if figure == matplotlib.pyplot:
figure = figure.gcf()
elif not isinstance(figure, Figure):
if hasattr(figure, "figure"):
figure = figure.figure
# Some matplotlib objects have a figure function
if not isinstance(figure, Figure):
raise ValueError(
"Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted."
)
if not figure.gca().has_data():
raise ValueError(
"You attempted to log an empty plot, "
"pass a figure directly or ensure the global plot isn't closed."
)
return figure
def prepare_video(data):
"""
Converts a 5D tensor [batchsize, time(frame), channel(color), height, width]
    into a 4D tensor with dimensions [time(frame), new_height, new_width, channel].
    A batch of images is spread across a grid, which forms a frame.
    e.g. a video with batch size 16 will have a 4x4 grid.
"""
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
b, t, c, h, w = data.shape
if data.dtype == np.uint8:
data = np.float32(data) / 255.0
def is_power2(num):
return num != 0 and ((num & (num - 1)) == 0)
# pad to nearest power of 2, all at once
if not is_power2(data.shape[0]):
len_addition = int(2 ** data.shape[0].bit_length() - data.shape[0])
data = np.concatenate(
(data, np.zeros(shape=(len_addition, t, c, h, w))), axis=0
)
n_rows = 2 ** ((b.bit_length() - 1) // 2)
n_cols = data.shape[0] // n_rows
data = np.reshape(data, newshape=(n_rows, n_cols, t, c, h, w))
data = np.transpose(data, axes=(2, 0, 4, 1, 5, 3))
return np.reshape(data, newshape=(t, n_rows * h, n_cols * w, c))
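# Illustrative shape walk-through for `prepare_video` (assumes numpy is
# installed): a batch of 16 clips, 10 RGB frames each at 64x64, is tiled into
# a 4x4 grid per frame:
#
#     >>> data = np.zeros((16, 10, 3, 64, 64), dtype=np.uint8)
#     >>> prepare_video(data).shape
#     (10, 256, 256, 3)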
def make_grid(data, ncols=8):
# I: N1HW or N3HW
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
assert isinstance(data, np.ndarray), "plugin error, should pass numpy array here"
if data.shape[1] == 1:
data = np.concatenate([data, data, data], 1)
assert data.ndim == 4 and data.shape[1] == 3 or data.shape[1] == 4
nimg = data.shape[0]
H = data.shape[2] # noqa
W = data.shape[3] # noqa
ncols = min(nimg, ncols)
nrows = int(np.ceil(float(nimg) / ncols))
canvas = np.zeros((data.shape[1], H * nrows, W * ncols))
i = 0
for y in range(nrows):
for x in range(ncols):
if i >= nimg:
break
canvas[:, y * H : (y + 1) * H, x * W : (x + 1) * W] = data[i] # noqa
i = i + 1
return canvas
def convert_to_HWC(tensor, input_format): # noqa
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
assert len(set(input_format)) == len(
input_format
), "You can not use the same dimension shordhand twice. \
input_format: {}".format(
input_format
)
assert len(tensor.shape) == len(
input_format
), "size of input tensor and input format are different. \
tensor shape: {}, input_format: {}".format(
tensor.shape, input_format
)
input_format = input_format.upper()
if len(input_format) == 4:
index = [input_format.find(c) for c in "NCHW"]
tensor_NCHW = tensor.transpose(index) # noqa
tensor_CHW = make_grid(tensor_NCHW) # noqa
return tensor_CHW.transpose(1, 2, 0)
if len(input_format) == 3:
index = [input_format.find(c) for c in "HWC"]
tensor_HWC = tensor.transpose(index) # noqa
if tensor_HWC.shape[2] == 1:
tensor_HWC = np.concatenate([tensor_HWC, tensor_HWC, tensor_HWC], 2) # noqa
return tensor_HWC
if len(input_format) == 2:
index = [input_format.find(c) for c in "HW"]
tensor = tensor.transpose(index)
tensor = np.stack([tensor, tensor, tensor], 2)
return tensor
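# Illustrative usage of `convert_to_HWC` (assumes numpy is installed):
#
#     >>> convert_to_HWC(np.zeros((32, 48)), "HW").shape          # grayscale -> RGB
#     (32, 48, 3)
#     >>> convert_to_HWC(np.zeros((4, 3, 32, 48)), "NCHW").shape  # batch -> one grid image
#     (32, 192, 3)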
def bokeh_chart(figure) -> V1EventChart:
try:
from bokeh.embed import json_item
except ImportError:
logger.warning(BOKEH_ERROR_MESSAGE)
return UNKNOWN
return V1EventChart(kind=V1EventChartKind.BOKEH, figure=json_item(figure))
def plotly_chart(figure) -> V1EventChart:
try:
import plotly.tools
except ImportError:
logger.warning(PLOTLY_ERROR_MESSAGE)
return UNKNOWN
if module_type(figure, "matplotlib.figure.Figure"):
figure = plotly.tools.mpl_to_plotly(figure)
else:
figure = plotly.tools.return_figure_from_figure_or_data(
figure, validate_figure=True
)
return V1EventChart(kind=V1EventChartKind.PLOTLY, figure=figure)
def mpl_plotly_chart(figure) -> V1EventChart:
try:
import plotly.tools
except ImportError:
logger.warning(PLOTLY_ERROR_MESSAGE)
return UNKNOWN
try:
import matplotlib
from matplotlib.figure import Figure
except ImportError:
        logger.warning(MATPLOTLIB_ERROR_MESSAGE)
        return UNKNOWN
if module_type(figure, "matplotlib.figure.Figure"):
pass
else:
if figure == matplotlib.pyplot:
figure = figure.gcf()
elif not isinstance(figure, Figure):
if hasattr(figure, "figure"):
figure = figure.figure
# Some matplotlib objects have a figure function
if not isinstance(figure, Figure):
raise ValueError(
"Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted."
)
figure = plotly.tools.mpl_to_plotly(figure)
return plotly_chart(figure=figure)
def metrics_dict_to_list(metrics: Dict) -> List:
results = []
for k, v in metrics.items():
results.append(
LoggedEventSpec(
name=k, kind=V1ArtifactKind.METRIC, event=V1Event.make(metric=v),
)
)
return results
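# Illustrative usage of `metrics_dict_to_list` (metric names and values are
# arbitrary): every metric becomes a LoggedEventSpec wrapping a V1Event.
#
#     >>> specs = metrics_dict_to_list({"loss": 0.12, "accuracy": 0.98})
#     >>> [s.name for s in specs]
#     ['loss', 'accuracy']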
|
the-stack_0_24582
|
from ipso_phen.ipapi.base.ip_abstract import BaseImageProcessor
from ipso_phen.ipapi.tools.csv_writer import AbstractCsvWriter
from ipso_phen.ipapi.tools.common_functions import add_header_footer
from ipso_phen.ipapi.tools.common_functions import time_method
class ImageMpsCsvWriter(AbstractCsvWriter):
def __init__(self):
super().__init__()
self.data_list = dict.fromkeys(
[
# Header - text values
"experiment",
"plant",
"plant_id",
"treatment",
"genotype",
"condition",
"date_time",
"hist_bins",
# Morphology
"area",
"hull_area",
"width_data",
"shape_height",
"shape_solidity",
"shape_extend",
"rotated_bounding_rectangle",
"minimum_enclosing_circle",
# Color descriptors
"color_std_dev",
"color_mean",
]
)
_MAIN_ROI_RADIUS = 2056 / 2
_SAFE_ROI_RADIUS = 944 / 2
class Ip008C0604DevelopAbio(BaseImageProcessor):
def init_csv_writer(self):
return ImageMpsCsvWriter()
@staticmethod
def can_process(dict_data: dict) -> bool:
"""
Checks if the class can process the image
:param dict_data: Dictionary containing filter data
:return: True if current class can process data
"""
return dict_data["experiment"] in ["008C0604_DevelopAbio".lower()]
def init_rois(self):
self.add_circle_roi(
int(194 + _MAIN_ROI_RADIUS),
int(0 + _MAIN_ROI_RADIUS),
int(_MAIN_ROI_RADIUS),
"main_roi",
"keep",
)
self.add_circle_roi(
int(782 + _SAFE_ROI_RADIUS),
int(542 + _SAFE_ROI_RADIUS),
int(_SAFE_ROI_RADIUS),
"safe_roi",
"safe",
)
def _process_job_mps(self):
# Init rois
self.init_rois()
img = self.source_image
if not self.good_image:
self.error_holder.add_error(
"Image failed to load", new_error_kind="source_issue"
)
return False, None
# Build preliminary mask
op = "multi_and"
params_dict = [
dict(channel="h", min_t=10, max_t=100, median_filter_size=3),
dict(channel="a", min_t=15, max_t=135, median_filter_size=3),
dict(channel="b", min_t=120, max_t=155, median_filter_size=3),
dict(channel="rd", min_t=20, max_t=160, median_filter_size=3),
]
mask = self.build_mask(
img, **dict(is_store_images=True, merge_action=op, params_list=params_dict)
)
# Clean mask
mask = self.keep_roi(mask, self.get_roi("main_roi"))
self.store_image(mask, "mask_before_open", rois=self.rois_list)
mask = self.multi_or(
(
self.open(self.keep_roi(mask, self.get_roi("safe_roi")), 3),
self.open(mask, 7),
)
)
self.store_image(mask, "mask_from_channels", rois=self.rois_list)
mask = self.keep_linked_contours(
src_image=img,
src_mask=mask,
tolerance_distance=40,
tolerance_area=5000,
root_position="MIDDLE_CENTER",
)
self.store_image(mask, "mask")
self.mask = mask
return True, mask
def _process_job_fluo(self):
self.default_process()
return False, None
@add_header_footer
@time_method
def process_image(self, **kwargs):
"""Executes pipeline instructions to process image
Raises:
NotImplementedError -- Only fluo- is implemented
Returns:
boolean -- is job successful
"""
res = False
threshold_only_ = kwargs.get("threshold_only", 0) == 1
try:
if self.is_corrupted:
self.error_holder.add_error(
"HANDLED FAILURE Image has been tagged as corrupted",
new_error_kind="source_issue",
)
return False
if self.is_color_checker:
self.error_holder.add_error("HANDLED FAILURE Image is color checker")
return False
if self.is_empty_ctrl or self.is_missing_plant:
self.error_holder.add_error("HANDLED FAILURE Image is empty control")
return False
if not self.is_good_batch:
self.error_holder.add_error(
"HANDLED FAILURE some images are missing",
new_error_kind="source_issue",
)
return False
[genotype_, condition_, plant_id_] = self.plant.split("_")
self.csv_data_holder.update_csv_value("plant_id", plant_id_)
self.csv_data_holder.update_csv_value("condition", condition_)
self.csv_data_holder.update_csv_value("genotype", genotype_)
self.csv_data_holder.update_csv_value(
"treatment", f"{genotype_} - {condition_}"
)
if self.is_fluo:
res, mask = self._process_job_fluo()
else:
res, mask = self._process_job_mps()
if not res:
self.error_holder.add_error("Segmentation failed")
# self._mosaic_data = np.array([['source', 'pseudo_on'],
# ['src_img_with_cnt_after_agg_iter_last', 'mask']])
if self.is_msp:
pseudo_color_channel = "l"
else:
pseudo_color_channel = "v"
pseudo_color_channel = kwargs.get(
"pseudo_color_channel", pseudo_color_channel
)
if kwargs.get("threshold_only", 0) != 1:
res = self.extract_image_data(self.mask, pseudo_color_channel)
else:
pseudo_color_img = self.draw_image(
channel=pseudo_color_channel, background="source"
)
self.store_image(pseudo_color_img, "pseudo_on")
self.build_mosaic_data(**kwargs)
except Exception as e:
self.error_holder.add_error(f'Failed to process image because "{repr(e)}"')
res = False
self.print_images()
self.csv_data_holder.clean_data()
return res
@property
def is_empty_ctrl(self):
return "empty" in self.plant
@property
def is_missing_plant(self):
return self.plant in [
"arf4_drought_84",
"arf4_mock_73",
"arf4_salt_76",
"arf4_drought_421",
"arf4_salt_417",
]
@property
def is_good_batch(self):
return self.is_fluo or self.is_after_date(year="2018", month="04", day="10")
|
the-stack_0_24584
|
import sys
from selenium import webdriver
class CoronaStats():
def __init__(self, country, website):
        self.driver = webdriver.Chrome(r'ChromeDriver\chromedriver.exe')
self.country = country
self.website = website
    # Read the table row that contains the requested country's data
def scrapping_data(self, table):
        # Locate the cell containing the country name, then climb to its parent row
country_element = table.find_element_by_xpath("//td[contains(., '{}')]".format(self.country))
row = country_element.find_element_by_xpath("./..")
data = row.text.split(" ")
return data
def get_data(self):
try:
self.driver.get(self.website)
table = self.driver.find_element_by_xpath('//*[@id="main_table_countries_today"]/tbody[1]')
data = self.scrapping_data(table)
if len(data) == 10:
                # For countries where the New Deaths and Serious/Critical columns are missing.
total_cases = data[1]
new_cases = data[2]
total_deaths = data[3]
total_recovered = data[4]
active_cases = data[5]
serious_critical = 0
else:
total_cases = data[1]
new_cases = data[2]
total_deaths = data[3]
total_recovered = data[5]
active_cases = data[6]
serious_critical = data[7]
total_deaths_cal = total_deaths.split(',')
total_deaths_cal = "".join(total_deaths_cal)
total_recovered_cal = total_recovered.split(',')
total_recovered_cal = "".join(total_recovered_cal)
closed_cases = int(total_deaths_cal) + int(total_recovered_cal)
self.driver.close()
return total_cases, new_cases, total_deaths, active_cases, total_recovered, serious_critical, closed_cases
except Exception as e:
print(e)
self.driver.quit()
sys.exit()
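# Illustrative usage (the URL and country below are assumptions, not part of
# this module; the XPath used above targets a worldometers-style table):
#
#     if __name__ == "__main__":
#         stats = CoronaStats(
#             country="India",
#             website="https://www.worldometers.info/coronavirus/",
#         )
#         (total_cases, new_cases, total_deaths, active_cases,
#          total_recovered, serious_critical, closed_cases) = stats.get_data()
#         print(total_cases, total_deaths, total_recovered)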
|
the-stack_0_24586
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExtendedLocation(Model):
"""The extended location.
:param type: The extended location type.
:type type: str
:param name: The extended location name.
:type name: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, *, type: str=None, name: str=None, **kwargs) -> None:
super(ExtendedLocation, self).__init__(**kwargs)
self.type = type
self.name = name
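# Illustrative usage (the values are placeholders); `serialize()` is the
# generic msrest Model helper and should produce a dict keyed by the
# attribute map above:
#
#     loc = ExtendedLocation(type="EdgeZone", name="my-edge-zone")
#     loc.serialize()  # -> {'type': 'EdgeZone', 'name': 'my-edge-zone'}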
|
the-stack_0_24588
|
# -*- coding: utf-8 -*-
#
# @Author: gyj176383
# @Date: 2019/5/11
import face_recognition
import cv2
import numpy as np
import os
from keras.models import load_model
# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)
# 2. Only detect faces in every other frame of video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
scan_train_path = "train_imgs"
# Create arrays of known face encodings and their names
known_face_encodings = []
known_face_names = []
if not os.path.isdir(scan_train_path):
print('wrong scan path...')
exit(1)
for file_name in os.listdir(scan_train_path):
sub_path = os.path.join(scan_train_path, file_name)
if not os.path.isdir(sub_path) and file_name.endswith('.jpg'):
# Load a sample picture and learn how to recognize it.
face_img = face_recognition.load_image_file(sub_path)
face_encoding = face_recognition.face_encodings(face_img)[0]
known_face_encodings.append(face_encoding)
known_face_names.append(file_name.split('.')[0])
# Load the gender recognition model
gender_classifier = load_model(
"trained_models/gender_models/simple_CNN.81-0.96.hdf5")
gender_labels = {0: 'female', 1: 'male'}
# Load the emotion recognition model
emotion_classifier = load_model(
'trained_models/emotion_models/simple_CNN.530-0.65.hdf5')
emotion_labels = {
0: 'angry',
1: 'disgust',
2: 'terrified',
3: 'happy',
4: 'sad',
5: 'surprise',
6: 'calm'
}
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# # If a match was found in known_face_encodings, just use the first one.
# if True in matches:
# first_match_index = matches.index(True)
# name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
# recognize emotion
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray_face = gray[(top):(bottom), (left):(right)]
gray_face = cv2.resize(gray_face, (48, 48))
gray_face = gray_face / 255.0
gray_face = np.expand_dims(gray_face, 0)
gray_face = np.expand_dims(gray_face, -1)
emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
emotion = emotion_labels[emotion_label_arg]
# recognize gender
face = frame[(top):(bottom), (left):(right)]
face = cv2.resize(face, (48, 48))
face = np.expand_dims(face, 0)
face = face / 255.0
gender_label_arg = np.argmax(gender_classifier.predict(face))
gender = gender_labels[gender_label_arg]
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, ' '.join((name, emotion, gender)), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
pass
|
the-stack_0_24590
|
from __future__ import division, absolute_import, print_function
import sys
import subprocess
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
    'fft',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'special',
'stats',
'stats.mstats',
]
def test_importing_submodules():
# Regression test for gh-6793.
for name in PUBLIC_SUBMODULES:
try:
cmd = [sys.executable, '-c', 'import scipy.{0}'.format(name)]
subprocess.check_output(cmd)
except subprocess.CalledProcessError:
raise AssertionError('Importing scipy.{0} failed'.format(name))
|
the-stack_0_24592
|
import sys, os, glob
import json
from astropy.io import fits
from astropy.table import Table, join
import numpy as np
import time, datetime
from collections import OrderedDict
import subprocess
from copy import deepcopy
from desispec.scripts.tile_redshifts import generate_tile_redshift_scripts, get_tile_redshift_script_pathname, \
get_tile_redshift_relpath, get_tile_redshift_script_suffix
from desispec.workflow.queue import get_resubmission_states, update_from_queue
from desispec.workflow.timing import what_night_is_it
from desispec.workflow.desi_proc_funcs import get_desi_proc_batch_file_pathname, create_desi_proc_batch_script, \
get_desi_proc_batch_file_path
from desispec.workflow.utils import pathjoin
from desispec.workflow.tableio import write_table
from desispec.workflow.proctable import table_row_to_dict
from desiutil.log import get_logger
from desispec.io import findfile, specprod_root
from desispec.io.util import decode_camword, create_camword, difference_camwords, camword_to_spectros
#################################################
############## Misc Functions ###################
#################################################
def night_to_starting_iid(night=None):
"""
    Creates an internal ID for a given night. The resulting integer is a 9 digit number.
    The digits are YYMMDDxxx where YY is the years since 2000, MM and DD are the month and day. xxx starts at 000
    and is incremented for up to 1000 unique job IDs for a given night.
Args:
night, str or int. YYYYMMDD of the night to get the starting internal ID for.
Returns:
internal_id, int. 9 digit number consisting of YYMMDD000. YY is years after 2000, MMDD is month and day.
000 being the starting job number (0).
"""
if night is None:
night = what_night_is_it()
night = int(night)
internal_id = (night - 20000000) * 1000
return internal_id
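# Illustrative example of the ID arithmetic described above (not part of the
# original module):
#
#     >>> night_to_starting_iid(20230115)
#     230115000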
#################################################
############ Script Functions ###################
#################################################
def batch_script_name(prow):
"""
Wrapper script that takes a processing table row (or dictionary with NIGHT, EXPID, JOBDESC, PROCCAMWORD defined)
and determines the script file pathname as defined by desi_proc's helper functions.
Args:
prow, Table.Row or dict. Must include keyword accessible definitions for 'NIGHT', 'EXPID', 'JOBDESC', and 'PROCCAMWORD'.
Returns:
scriptfile, str. The complete pathname to the script file, as it is defined within the desi_proc ecosystem.
"""
pathname = get_desi_proc_batch_file_pathname(night = prow['NIGHT'], exp=prow['EXPID'], \
jobdesc=prow['JOBDESC'], cameras=prow['PROCCAMWORD'])
scriptfile = pathname + '.slurm'
return scriptfile
def check_for_outputs_on_disk(prow, resubmit_partial_complete=True):
"""
Args:
prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in
desispect.workflow.proctable.get_processing_table_column_defs()
resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is True,
jobs with some prior data are pruned using PROCCAMWORD to only process the
remaining cameras not found to exist.
Returns:
prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated to reflect
the change in job status after creating and submitting the job for processing.
"""
prow['STATUS'] = 'UNKNOWN'
log = get_logger()
job_to_file_map = {'prestdstar': 'sframe', 'stdstarfit': 'stdstars', 'poststdstar': 'cframe',
'arc': 'psf', 'flat': 'fiberflat', 'psfnight': 'psfnight', 'nightlyflat': 'fiberflatnight',
'spectra': 'spectra_tile', 'coadds': 'coadds_tile', 'redshift': 'redrock_tile'}
night = prow['NIGHT']
if prow['JOBDESC'] in ['cumulative','pernight-v0','pernight','perexp']:
filetype = 'redrock_tile'
else:
filetype = job_to_file_map[prow['JOBDESC']]
orig_camword = prow['PROCCAMWORD']
## if spectro based, look for spectros, else look for cameras
if prow['JOBDESC'] in ['stdstarfit','spectra','coadds','redshift']:
## Spectrograph based
spectros = camword_to_spectros(prow['PROCCAMWORD'])
n_desired = len(spectros)
## Suppress outputs about using tile based files in findfile if only looking for stdstarfits
if prow['JOBDESC'] == 'stdstarfit':
tileid = None
else:
tileid = prow['TILEID']
expid = prow['EXPID'][0]
existing_spectros = []
for spectro in spectros:
if os.path.exists(findfile(filetype=filetype, night=night, expid=expid, spectrograph=spectro, tile=tileid)):
existing_spectros.append(spectro)
completed = (len(existing_spectros) == n_desired)
if not completed and resubmit_partial_complete and len(existing_spectros) > 0:
existing_camword = 'a' + ''.join([str(spec) for spec in sorted(existing_spectros)])
prow['PROCCAMWORD'] = difference_camwords(prow['PROCCAMWORD'],existing_camword)
elif prow['JOBDESC'] in ['cumulative','pernight-v0','pernight','perexp']:
## Spectrograph based
spectros = camword_to_spectros(prow['PROCCAMWORD'])
n_desired = len(spectros)
## Suppress outputs about using tile based files in findfile if only looking for stdstarfits
tileid = prow['TILEID']
expid = prow['EXPID'][0]
redux_dir = specprod_root()
outdir = os.path.join(redux_dir,get_tile_redshift_relpath(tileid,group=prow['JOBDESC'],night=night,expid=expid))
suffix = get_tile_redshift_script_suffix(tileid, group=prow['JOBDESC'], night=night, expid=expid)
existing_spectros = []
for spectro in spectros:
if os.path.exists(os.path.join(outdir, f"redrock-{spectro}-{suffix}.fits")):
existing_spectros.append(spectro)
completed = (len(existing_spectros) == n_desired)
if not completed and resubmit_partial_complete and len(existing_spectros) > 0:
existing_camword = 'a' + ''.join([str(spec) for spec in sorted(existing_spectros)])
prow['PROCCAMWORD'] = difference_camwords(prow['PROCCAMWORD'],existing_camword)
else:
        ## Otherwise camera based
cameras = decode_camword(prow['PROCCAMWORD'])
n_desired = len(cameras)
expid = prow['EXPID'][0]
if len(prow['EXPID']) > 1 and prow['JOBDESC'] not in ['psfnight','nightlyflat']:
log.warning(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']}. This job type only makes " +
f"sense with a single exposure. Proceeding with {expid}.")
missing_cameras = []
for cam in cameras:
if not os.path.exists(findfile(filetype=filetype, night=night, expid=expid, camera=cam)):
missing_cameras.append(cam)
completed = (len(missing_cameras) == 0)
if not completed and resubmit_partial_complete and len(missing_cameras) < n_desired:
prow['PROCCAMWORD'] = create_camword(missing_cameras)
if completed:
prow['STATUS'] = 'COMPLETED'
log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} already has " +
f"the desired {n_desired} {filetype}'s. Not submitting this job.")
elif resubmit_partial_complete and orig_camword != prow['PROCCAMWORD']:
log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} already has " +
f"some {filetype}'s. Submitting smaller camword={prow['PROCCAMWORD']}.")
elif not resubmit_partial_complete:
log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} doesn't have all " +
f"{filetype}'s and resubmit_partial_complete=False. "+
f"Submitting full camword={prow['PROCCAMWORD']}.")
else:
log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} has no " +
f"existing {filetype}'s. Submitting full camword={prow['PROCCAMWORD']}.")
return prow
def create_and_submit(prow, queue='realtime', reservation=None, dry_run=0, joint=False,
strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True,
system_name=None):
"""
Wrapper script that takes a processing table row and three modifier keywords, creates a submission script for the
compute nodes, and then submits that script to the Slurm scheduler with appropriate dependencies.
Args:
prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in
desispect.workflow.proctable.get_processing_table_column_defs()
queue, str. The name of the NERSC Slurm queue to submit to. Default is the realtime queue.
reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation.
        dry_run, int. If nonzero, this is a simulated run. If dry_run=1 the scripts will be written but not submitted.
                      If dry_run=2, the scripts will not be written nor submitted. Logging will remain the same
for testing as though scripts are being submitted. Default is 0 (false).
joint, bool. Whether this is a joint fitting job (the job involves multiple exposures) and therefore needs to be
run with desi_proc_joint_fit. Default is False.
strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is
less desirable because e.g. the sciences can run with SVN default calibrations rather
than failing completely from failed calibrations. Default is False.
check_for_outputs, bool. Default is True. If True, the code checks for the existence of the expected final
data products for the script being submitted. If all files exist and this is True,
then the script will not be submitted. If some files exist and this is True, only the
subset of the cameras without the final data products will be generated and submitted.
resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is True,
jobs with some prior data are pruned using PROCCAMWORD to only process the
remaining cameras not found to exist.
system_name (str): batch system name, e.g. cori-haswell or perlmutter-gpu
Returns:
prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated to reflect
the change in job status after creating and submitting the job for processing.
Note:
        This modifies the input. However, Table.Row objects are generally copied on modification, so the change
        may or may not propagate to the input object in memory. As of writing, a row from a table given to this function will
not change during the execution of this function (but can be overwritten explicitly with the returned row if desired).
"""
orig_prow = prow.copy()
if check_for_outputs:
prow = check_for_outputs_on_disk(prow, resubmit_partial_complete)
if prow['STATUS'].upper() == 'COMPLETED':
return prow
prow = create_batch_script(prow, queue=queue, dry_run=dry_run, joint=joint, system_name=system_name)
prow = submit_batch_script(prow, reservation=reservation, dry_run=dry_run, strictly_successful=strictly_successful)
## If resubmitted partial, the PROCCAMWORD and SCRIPTNAME will correspond to the pruned values. But we want to
## retain the full job's value, so get those from the old job.
if resubmit_partial_complete:
prow['PROCCAMWORD'] = orig_prow['PROCCAMWORD']
prow['SCRIPTNAME'] = orig_prow['SCRIPTNAME']
return prow
def desi_proc_command(prow, queue=None):
"""
Wrapper script that takes a processing table row (or dictionary with NIGHT, EXPID, OBSTYPE, JOBDESC, PROCCAMWORD defined)
and determines the proper command line call to process the data defined by the input row/dict.
Args:
prow, Table.Row or dict. Must include keyword accessible definitions for 'NIGHT', 'EXPID', 'JOBDESC', and 'PROCCAMWORD'.
queue, str. The name of the NERSC Slurm queue to submit to. Default is None (which leaves it to the desi_proc default).
Returns:
cmd, str. The proper command to be submitted to desi_proc to process the job defined by the prow values.
"""
cmd = 'desi_proc'
cmd += ' --batch'
cmd += ' --nosubmit'
cmd += ' --traceshift'
if queue is not None:
cmd += f' -q {queue}'
if prow['OBSTYPE'].lower() == 'science':
if prow['JOBDESC'] == 'prestdstar':
cmd += ' --nostdstarfit --nofluxcalib'
elif prow['JOBDESC'] == 'poststdstar':
cmd += ' --noprestdstarfit --nostdstarfit'
specs = str(prow['PROCCAMWORD'])
cmd += ' --cameras={} -n {} -e {}'.format(specs, prow['NIGHT'], prow['EXPID'][0])
if prow['BADAMPS'] != '':
cmd += ' --badamps={}'.format(prow['BADAMPS'])
return cmd
def desi_proc_joint_fit_command(prow, queue=None):
"""
Wrapper script that takes a processing table row (or dictionary with NIGHT, EXPID, OBSTYPE, PROCCAMWORD defined)
and determines the proper command line call to process the data defined by the input row/dict.
Args:
prow, Table.Row or dict. Must include keyword accessible definitions for 'NIGHT', 'EXPID', 'JOBDESC', and 'PROCCAMWORD'.
queue, str. The name of the NERSC Slurm queue to submit to. Default is None (which leaves it to the desi_proc default).
Returns:
cmd, str. The proper command to be submitted to desi_proc_joint_fit to process the job defined by the prow values.
"""
cmd = 'desi_proc_joint_fit'
cmd += ' --batch'
cmd += ' --nosubmit'
cmd += ' --traceshift'
if queue is not None:
cmd += f' -q {queue}'
descriptor = prow['OBSTYPE'].lower()
night = prow['NIGHT']
specs = str(prow['PROCCAMWORD'])
expids = prow['EXPID']
expid_str = ','.join([str(eid) for eid in expids])
cmd += f' --obstype {descriptor}'
cmd += ' --cameras={} -n {} -e {}'.format(specs, night, expid_str)
return cmd
def create_batch_script(prow, queue='realtime', dry_run=0, joint=False, system_name=None):
"""
Wrapper script that takes a processing table row and three modifier keywords and creates a submission script for the
compute nodes.
Args:
prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in
desispect.workflow.proctable.get_processing_table_column_defs()
queue, str. The name of the NERSC Slurm queue to submit to. Default is the realtime queue.
dry_run, int. If nonzero, this is a simulated run. If dry_run=1 the scripts will be written but not submitted.
If dry_run=2, the scripts will not be written nor submitted. Logging will remain the same
for testing as though scripts are being submitted. Default is 0 (false).
joint, bool. Whether this is a joint fitting job (the job involves multiple exposures) and therefore needs to be
run with desi_proc_joint_fit. Default is False.
system_name (str): batch system name, e.g. cori-haswell or perlmutter-gpu
Returns:
        prow, Table.Row or dict. The same prow type and keywords as input, except with an updated value for
                                 'SCRIPTNAME'.
Note:
        This modifies the input. However, Table.Row objects are generally copied on modification, so the change
        may or may not propagate to the input object in memory. As of writing, a row from a table given to this function will
not change during the execution of this function (but can be overwritten explicitly with the returned row if desired).
"""
log = get_logger()
if prow['JOBDESC'] in ['perexp','pernight','pernight-v0','cumulative']:
if dry_run > 1:
scriptpathname = get_tile_redshift_script_pathname(tileid=prow['TILEID'],group=prow['JOBDESC'],
night=prow['NIGHT'], expid=prow['EXPID'][0])
log.info("Output file would have been: {}".format(scriptpathname))
else:
#- run zmtl for cumulative redshifts but not others
run_zmtl = (prow['JOBDESC'] == 'cumulative')
scripts, failed_scripts = generate_tile_redshift_scripts(tileid=prow['TILEID'], group=prow['JOBDESC'],
night=[prow['NIGHT']], expid=prow['EXPID'],
run_zmtl=run_zmtl,
batch_queue=queue, system_name=system_name,
nosubmit=True)
if len(failed_scripts) > 0:
log.error(f"Redshifts failed for group={prow['JOBDESC']}, night={prow['NIGHT']}, "+
f"tileid={prow['TILEID']}, expid={prow['EXPID']}.")
log.info(f"Returned failed scriptname is {failed_scripts}")
elif len(scripts) > 1:
log.error(f"More than one redshifts returned for group={prow['JOBDESC']}, night={prow['NIGHT']}, "+
f"tileid={prow['TILEID']}, expid={prow['EXPID']}.")
log.info(f"Returned scriptnames were {scripts}")
else:
scriptpathname = scripts[0]
else:
if joint:
cmd = desi_proc_joint_fit_command(prow, queue=queue)
else:
cmd = desi_proc_command(prow, queue=queue)
scriptpathname = batch_script_name(prow)
if dry_run > 1:
log.info("Output file would have been: {}".format(scriptpathname))
log.info("Command to be run: {}".format(cmd.split()))
else:
log.info("Running: {}".format(cmd.split()))
scriptpathname = create_desi_proc_batch_script(night=prow['NIGHT'], exp=prow['EXPID'], \
cameras=prow['PROCCAMWORD'], jobdesc=prow['JOBDESC'], \
queue=queue, cmdline=cmd, system_name=system_name)
log.info("Outfile is: {}".format(scriptpathname))
prow['SCRIPTNAME'] = os.path.basename(scriptpathname)
return prow
def submit_batch_script(prow, dry_run=0, reservation=None, strictly_successful=False):
"""
Wrapper script that takes a processing table row and three modifier keywords and submits the scripts to the Slurm
scheduler.
Args:
prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in
desispect.workflow.proctable.get_processing_table_column_defs()
        dry_run, int. If nonzero, this is a simulated run. If dry_run=1 the scripts will be written but not submitted.
                      If dry_run=2, the scripts will not be written nor submitted. Logging will remain the same
for testing as though scripts are being submitted. Default is 0 (false).
reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation.
strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is
less desirable because e.g. the sciences can run with SVN default calibrations rather
than failing completely from failed calibrations. Default is False.
Returns:
        prow, Table.Row or dict. The same prow type and keywords as input, except with updated values for
                                 'LATEST_QID', 'ALL_QIDS', 'STATUS', and 'SUBMIT_DATE'.
Note:
        This modifies the input. However, Table.Row objects are generally copied on modification, so the change
        may or may not propagate to the input object in memory. As of writing, a row from a table given to this function will
not change during the execution of this function (but can be overwritten explicitly with the returned row if desired).
"""
log = get_logger()
dep_qids = prow['LATEST_DEP_QID']
dep_list, dep_str = '', ''
if len(dep_qids) > 0:
jobtype = prow['JOBDESC']
if strictly_successful:
depcond = 'afterok'
elif jobtype in ['arc', 'psfnight', 'prestdstar', 'stdstarfit']:
## (though psfnight and stdstarfit will require some inputs otherwise they'll go up in flames)
depcond = 'afterany'
else:
## if 'flat','nightlyflat','poststdstar', or any type of redshift, require strict success of inputs
depcond = 'afterok'
dep_str = f'--dependency={depcond}:'
if np.isscalar(dep_qids):
dep_list = str(dep_qids).strip(' \t')
if dep_list == '':
dep_str = ''
else:
dep_str += dep_list
else:
if len(dep_qids)>1:
dep_list = ':'.join(np.array(dep_qids).astype(str))
dep_str += dep_list
elif len(dep_qids) == 1 and dep_qids[0] not in [None, 0]:
dep_str += str(dep_qids[0])
else:
dep_str = ''
# script = f'{jobname}.slurm'
# script_path = pathjoin(batchdir, script)
if prow['JOBDESC'] in ['pernight-v0','pernight','perexp','cumulative']:
script_path = get_tile_redshift_script_pathname(tileid=prow['TILEID'],group=prow['JOBDESC'],
night=prow['NIGHT'], expid=np.min(prow['EXPID']))
jobname = os.path.split(script_path)[-1]
else:
batchdir = get_desi_proc_batch_file_path(night=prow['NIGHT'])
jobname = batch_script_name(prow)
script_path = pathjoin(batchdir, jobname)
batch_params = ['sbatch', '--parsable']
if dep_str != '':
batch_params.append(f'{dep_str}')
if reservation is not None:
batch_params.append(f'--reservation={reservation}')
batch_params.append(f'{script_path}')
if dry_run:
        ## in dry_run, mock Slurm IDs are generated from the current wall-clock time. Wait one second so we have unique IDs
current_qid = int(time.time() - 1.6e9)
time.sleep(1)
else:
current_qid = subprocess.check_output(batch_params, stderr=subprocess.STDOUT, text=True)
current_qid = int(current_qid.strip(' \t\n'))
log.info(batch_params)
log.info(f'Submitted {jobname} with dependencies {dep_str} and reservation={reservation}. Returned qid: {current_qid}')
prow['LATEST_QID'] = current_qid
prow['ALL_QIDS'] = np.append(prow['ALL_QIDS'],current_qid)
prow['STATUS'] = 'SUBMITTED'
prow['SUBMIT_DATE'] = int(time.time())
return prow
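# Illustrative sketch of the sbatch call assembled by submit_batch_script for a
# job with two dependency queue IDs and strictly_successful=True (the queue IDs,
# reservation name, and paths are hypothetical):
#
#     sbatch --parsable --dependency=afterok:123456:123457 \
#         --reservation=my_resv /path/to/batchdir/<jobname>.slurm
#
# With a nonzero dry_run no call is made; a mock queue ID derived from the
# current time is recorded in the processing row instead.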
#############################################
########## Row Manipulations ############
#############################################
def define_and_assign_dependency(prow, arcjob, flatjob):
"""
Given input processing row and possible arcjob (processing row for psfnight) and flatjob (processing row for
nightlyflat), this defines the JOBDESC keyword and assigns the dependency appropriate for the job type of prow.
Args:
prow, Table.Row or dict. Must include keyword accessible definitions for 'OBSTYPE'. A row must have column names for
'JOBDESC', 'INT_DEP_IDS', and 'LATEST_DEP_ID'.
arcjob, Table.Row, dict, or NoneType. Processing row corresponding to psfnight for the night of the data in prow.
This must contain keyword accessible values for 'INTID', and 'LATEST_QID'.
If None, it assumes the dependency doesn't exist and no dependency is assigned.
flatjob, Table.Row, dict, or NoneType. Processing row corresponding to nightlyflat for the night of the data in prow.
This must contain keyword accessible values for 'INTID', and 'LATEST_QID'.
If None, it assumes the dependency doesn't exist and no dependency is assigned.
Returns:
        prow, Table.Row or dict. The same prow type and keywords as input, except with updated values for
                                 'JOBDESC', 'INT_DEP_IDS', and 'LATEST_DEP_QID'.
Note:
        This modifies the input. However, Table.Row objects are generally copied on modification, so the change
        may or may not propagate to the input object in memory. As of writing, a row from a table given to this function will
not change during the execution of this function (but can be overwritten explicitly with the returned row if desired).
"""
if prow['OBSTYPE'] in ['science', 'twiflat']:
if flatjob is None:
dependency = arcjob
else:
dependency = flatjob
prow['JOBDESC'] = 'prestdstar'
elif prow['OBSTYPE'] == 'flat':
dependency = arcjob
else:
dependency = None
prow = assign_dependency(prow, dependency)
return prow
def assign_dependency(prow, dependency):
"""
    Given an input processing row and a dependency (a single processing row/dict or a list/array of them), this
    assigns the dependency's internal ID(s) and latest queue ID(s) to the row's 'INT_DEP_IDS' and 'LATEST_DEP_QID'.
Args:
prow, Table.Row or dict. Must include keyword accessible definitions for 'OBSTYPE'. A row must have column names for
                           'JOBDESC', 'INT_DEP_IDS', and 'LATEST_DEP_QID'.
dependency, NoneType or scalar/list/array of Table.Row, dict. Processing row corresponding to the required input
for the job in prow. This must contain keyword
accessible values for 'INTID', and 'LATEST_QID'.
If None, it assumes the dependency doesn't exist
and no dependency is assigned.
Returns:
        prow, Table.Row or dict. The same prow type and keywords as input, except with updated values for
                                 'INT_DEP_IDS' and 'LATEST_DEP_QID'.
Note:
        This modifies the input. However, Table.Row objects are generally copied on modification, so the change
        may or may not propagate to the input object in memory. As of writing, a row from a table given to this function will
not change during the execution of this function (but can be overwritten explicitly with the returned row if desired).
"""
prow['INT_DEP_IDS'] = np.ndarray(shape=0).astype(int)
prow['LATEST_DEP_QID'] = np.ndarray(shape=0).astype(int)
if dependency is not None:
if type(dependency) in [list, np.array]:
ids, qids = [], []
for curdep in dependency:
if still_a_dependency(curdep):
ids.append(curdep['INTID'])
qids.append(curdep['LATEST_QID'])
prow['INT_DEP_IDS'] = np.array(ids, dtype=int)
prow['LATEST_DEP_QID'] = np.array(qids, dtype=int)
elif type(dependency) in [dict, OrderedDict, Table.Row] and still_a_dependency(dependency):
prow['INT_DEP_IDS'] = np.array([dependency['INTID']], dtype=int)
prow['LATEST_DEP_QID'] = np.array([dependency['LATEST_QID']], dtype=int)
return prow
def still_a_dependency(dependency):
"""
Defines the criteria for which a dependency is deemed complete (and therefore no longer a dependency).
Args:
dependency, Table.Row or dict. Processing row corresponding to the required input for the job in prow.
This must contain keyword accessible values for 'STATUS', and 'LATEST_QID'.
Returns:
bool. False if the criteria indicate that the dependency is completed and no longer a blocking factor (ie no longer
a genuine dependency). Returns True if the dependency is still a blocking factor such that the slurm
scheduler needs to be aware of the pending job.
"""
return dependency['LATEST_QID'] > 0 and dependency['STATUS'] != 'COMPLETED'
def get_type_and_tile(erow):
"""
Trivial function to return the OBSTYPE and the TILEID from an exposure table row
Args:
erow, Table.Row or dict. Must contain 'OBSTYPE' and 'TILEID' as keywords.
Returns:
tuple (str, str), corresponding to the OBSTYPE and TILEID values of the input erow.
"""
return str(erow['OBSTYPE']).lower(), erow['TILEID']
#############################################
######### Table manipulators ############
#############################################
def parse_previous_tables(etable, ptable, night):
"""
This takes in the exposure and processing tables and regenerates all the working memory variables needed for the
daily processing script.
Used by the daily processing to define most of its state-ful variables into working memory.
If the processing table is empty, these are simply declared and returned for use.
    If the code had previously run and exited (or crashed), however, this allows the code to
re-establish itself by redefining these values.
Args:
etable, Table, Exposure table of all exposures that have been dealt with thus far.
ptable, Table, Processing table of all exposures that have been processed.
night, str or int, the night the data was taken.
Returns:
arcs, list of dicts, list of the individual arc jobs used for the psfnight (NOT all
the arcs, if multiple sets existed)
flats, list of dicts, list of the individual flat jobs used for the nightlyflat (NOT
all the flats, if multiple sets existed)
sciences, list of dicts, list of the most recent individual prestdstar science exposures
(if currently processing that tile)
arcjob, dict or None, the psfnight job row if it exists. Otherwise None.
flatjob, dict or None, the nightlyflat job row if it exists. Otherwise None.
curtype, None, the obstype of the current job being run. Always None as first new job will define this.
lasttype, str or None, the obstype of the last individual exposure row to be processed.
curtile, None, the tileid of the current job (if science). Otherwise None. Always None as first
new job will define this.
lasttile, str or None, the tileid of the last job (if science). Otherwise None.
internal_id, int, an internal identifier unique to each job. Increments with each new job. This
is the latest unassigned value.
"""
log = get_logger()
arcs, flats, sciences = [], [], []
arcjob, flatjob = None, None
curtype,lasttype = None,None
curtile,lasttile = None,None
if len(ptable) > 0:
prow = ptable[-1]
internal_id = int(prow['INTID'])+1
lasttype,lasttile = get_type_and_tile(ptable[-1])
jobtypes = ptable['JOBDESC']
if 'psfnight' in jobtypes:
arcjob = table_row_to_dict(ptable[jobtypes=='psfnight'][0])
log.info("Located joint fit arc job in exposure table: {}".format(arcjob))
elif lasttype == 'arc':
seqnum = 10
for row in ptable[::-1]:
erow = etable[etable['EXPID']==row['EXPID'][0]]
if row['OBSTYPE'].lower() == 'arc' and int(erow['SEQNUM'])<seqnum:
arcs.append(table_row_to_dict(row))
seqnum = int(erow['SEQNUM'])
else:
break
## Because we work backward to fill in, we need to reverse them to get chronological order back
arcs = arcs[::-1]
if 'nightlyflat' in jobtypes:
flatjob = table_row_to_dict(ptable[jobtypes=='nightlyflat'][0])
log.info("Located joint fit flat job in exposure table: {}".format(flatjob))
elif lasttype == 'flat':
for row in ptable[::-1]:
erow = etable[etable['EXPID']==row['EXPID'][0]]
if row['OBSTYPE'].lower() == 'flat' and int(erow['SEQTOT'])<5:
flats.append(table_row_to_dict(row))
else:
break
flats = flats[::-1]
if lasttype.lower() == 'science':
for row in ptable[::-1]:
if row['OBSTYPE'].lower() == 'science' and row['TILEID'] == lasttile and \
row['JOBDESC'] == 'prestdstar' and row['LASTSTEP'] != 'skysub':
sciences.append(table_row_to_dict(row))
else:
break
sciences = sciences[::-1]
else:
internal_id = night_to_starting_iid(night)
return arcs,flats,sciences, \
arcjob, flatjob, \
curtype, lasttype, \
curtile, lasttile,\
internal_id
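# Hedged usage sketch for parse_previous_tables (the table-loading helpers below are
# hypothetical placeholders, not part of this module):
#   etable = load_table(exposure_table_path, tabletype='exptable')      # hypothetical loader
#   ptable = load_table(processing_table_path, tabletype='proctable')   # hypothetical loader
#   (arcs, flats, sciences, arcjob, flatjob,
#    curtype, lasttype, curtile, lasttile, internal_id) = parse_previous_tables(etable, ptable, night)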
def update_and_recurvsively_submit(proc_table, submits=0, resubmission_states=None, start_time=None, end_time=None,
ptab_name=None, dry_run=0,reservation=None):
"""
Given a processing table, this loops over job rows and resubmits failed jobs (as defined by resubmission_states).
Before submitting a job, it checks the dependencies for failures. If a dependency needs to be resubmitted, it recursively
follows dependencies until it finds the first job without a failed dependency and resubmits that. Then resubmits the
other jobs with the new Slurm jobID's for proper dependency coordination within Slurm.
Args:
proc_table, Table, the processing table with a row per job.
submits, int, the number of submissions made to the queue. Used for saving files and in not overloading the scheduler.
resubmission_states, list or array of strings, each element should be a capitalized string corresponding to a
possible Slurm scheduler state, where you wish for jobs with that
outcome to be resubmitted
start_time, str, datetime string in the format understood by NERSC Slurm scheduler. This should define the earliest
date and time that you expected to have a job run in the queue. Used to narrow the window of jobs
to request information on.
end_time, str, datetime string in the format understood by NERSC Slurm scheduler. This should define the latest
date and time that you expected to have a job run in the queue. Used to narrow the window of jobs
to request information on.
ptab_name, str, the full pathname where the processing table should be saved.
dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If
dry_run=2, the scripts will not be written or submitted. Logging will remain the same
for testing as though scripts are being submitted. Default is 0 (false).
reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation.
Returns:
proc_table: Table, a table with the same rows as the input except that Slurm and jobid relevant columns have
been updated for those jobs that needed to be resubmitted.
submits: int, the number of submissions made to the queue. This is incremented from the input submits, so it is
the number of submissions made from this function call plus the input submits value.
Note:
This modifies the inputs of both proc_table and submits and returns them.
"""
if resubmission_states is None:
resubmission_states = get_resubmission_states()
proc_table = update_from_queue(proc_table, start_time=start_time, end_time=end_time)
id_to_row_map = {row['INTID']: rown for rown, row in enumerate(proc_table)}
for rown in range(len(proc_table)):
if proc_table['STATUS'][rown] in resubmission_states:
proc_table, submits = recursive_submit_failed(rown, proc_table, submits, id_to_row_map, ptab_name,
resubmission_states, reservation, dry_run)
return proc_table, submits
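# Hedged usage sketch (the loader and file name are assumptions for illustration,
# not part of this module):
#   ptable = load_table(ptab_name, tabletype='proctable')   # hypothetical loader
#   ptable, submits = update_and_recurvsively_submit(ptable, submits=0,
#                                                    ptab_name=ptab_name, dry_run=1)
#   # jobs whose queue state is in get_resubmission_states() are resubmitted, along with
#   # any of their failed dependencies, and the table rows are updated accordingly.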
def recursive_submit_failed(rown, proc_table, submits, id_to_row_map, ptab_name=None,
resubmission_states=None, reservation=None, dry_run=0):
"""
Given a row of a processing table and the full processing table, this resubmits the given job.
Before submitting a job, it checks the dependencies for failures in the processing table. If a dependency needs to
be resubmitted, it recursively follows dependencies until it finds the first job without a failed dependency and
resubmits that. Then resubmits the other jobs with the new Slurm jobID's for proper dependency coordination within Slurm.
Args:
rown, Table.Row, the row of the processing table that you want to resubmit.
proc_table, Table, the processing table with a row per job.
submits, int, the number of submissions made to the queue. Used for saving files and in not overloading the scheduler.
id_to_row_map, dict, lookup dictionary where the keys are internal ids (INTID's) and the values are the row position
in the processing table.
ptab_name, str, the full pathname where the processing table should be saved.
resubmission_states, list or array of strings, each element should be a capitalized string corresponding to a
possible Slurm scheduler state, where you wish for jobs with that
outcome to be resubmitted
reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation.
dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If
dry_run=2, the scripts will not be written or submitted. Logging will remain the same
for testing as though scripts are being submitted. Default is 0 (false).
Returns:
proc_table: Table, a table with the same rows as the input except that Slurm and jobid relevant columns have
been updated for those jobs that needed to be resubmitted.
submits: int, the number of submissions made to the queue. This is incremented from the input submits, so it is
the number of submissions made from this function call plus the input submits value.
Note:
This modifies the inputs of both proc_table and submits and returns them.
"""
log = get_logger()
if resubmission_states is None:
resubmission_states = get_resubmission_states()
ideps = proc_table['INT_DEP_IDS'][rown]
if ideps is None:
proc_table['LATEST_DEP_QID'][rown] = np.ndarray(shape=0).astype(int)
else:
qdeps = []
for idep in np.sort(np.atleast_1d(ideps)):
if proc_table['STATUS'][id_to_row_map[idep]] in resubmission_states:
proc_table, submits = recursive_submit_failed(id_to_row_map[idep], proc_table, submits,
id_to_row_map, reservation=reservation, dry_run=dry_run)
qdeps.append(proc_table['LATEST_QID'][id_to_row_map[idep]])
qdeps = np.atleast_1d(qdeps)
if len(qdeps) > 0:
proc_table['LATEST_DEP_QID'][rown] = qdeps
else:
log.error(f"number of qdeps should be 1 or more: Rown {rown}, ideps {ideps}")
proc_table[rown] = submit_batch_script(proc_table[rown], reservation=reservation, dry_run=dry_run)
submits += 1
if not dry_run:
time.sleep(2)
if submits % 10 == 0:
if ptab_name is None:
write_table(proc_table, tabletype='processing', overwrite=True)
else:
write_table(proc_table, tablename=ptab_name, overwrite=True)
time.sleep(60)
if submits % 100 == 0:
time.sleep(540)
proc_table = update_from_queue(proc_table)
if ptab_name is None:
write_table(proc_table, tabletype='processing', overwrite=True)
else:
write_table(proc_table, tablename=ptab_name, overwrite=True)
return proc_table, submits
#########################################
######## Joint fit ##############
#########################################
def joint_fit(ptable, prows, internal_id, queue, reservation, descriptor, z_submit_types=None,
dry_run=0, strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True,
system_name=None):
"""
Given a set of prows, this generates a processing table row, creates a batch script, and submits the appropriate
joint fitting job given by descriptor. If the joint fitting job is standard star fitting, the post standard star fits
for all the individual exposures are also created and submitted. The returned ptable has all of these rows added to the
table given as input.
Args:
ptable, Table. The processing table where each row is a processed job.
prows, list or array of dicts. The rows corresponding to the individual exposure jobs that are
inputs to the joint fit.
internal_id, int, the next internal id to be used for assignment (already incremented up from the last used id number used).
queue, str. The name of the queue to submit the jobs to. If None is given the current desi_proc default is used.
reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation.
descriptor, str. Description of the joint fitting job. Can either be 'science' or 'stdstarfit', 'arc' or 'psfnight',
or 'flat' or 'nightlyflat'.
z_submit_types: list of str's. The "group" types of redshifts that should be submitted with each
exposure. If not specified or None, then no redshifts are submitted.
dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If
dry_run=2, the scripts will not be written or submitted. Logging will remain the same
for testing as though scripts are being submitted. Default is 0 (false).
strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is
less desirable because e.g. the sciences can run with SVN default calibrations rather
than failing completely from failed calibrations. Default is False.
check_for_outputs, bool. Default is True. If True, the code checks for the existence of the expected final
data products for the script being submitted. If all files exist and this is True,
then the script will not be submitted. If some files exist and this is True, only the
subset of the cameras without the final data products will be generated and submitted.
resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is True,
jobs with some prior data are pruned using PROCCAMWORD to only process the
remaining cameras not found to exist.
system_name (str): batch system name, e.g. cori-haswell or perlmutter-gpu
Returns:
ptable, Table. The same processing table as input except with added rows for the joint fit job and, in the case
of a stdstarfit, the poststdstar science exposure jobs.
joint_prow, dict. Row of a processing table corresponding to the joint fit job.
internal_id, int, the next internal id to be used for assignment (already incremented up from the last used id number used).
"""
log = get_logger()
if len(prows) < 1:
return ptable, None, internal_id
if descriptor is None:
return ptable, None, internal_id
elif descriptor == 'arc':
descriptor = 'psfnight'
elif descriptor == 'flat':
descriptor = 'nightlyflat'
elif descriptor == 'science':
if z_submit_types is None:
descriptor = 'stdstarfit'
elif len(z_submit_types) == 0:
descriptor = 'stdstarfit'
if descriptor not in ['psfnight', 'nightlyflat', 'science','stdstarfit']:
return ptable, None, internal_id
log.info(" ")
log.info(f"Joint fit criteria found. Running {descriptor}.\n")
if descriptor == 'science':
joint_prow = make_joint_prow(prows, descriptor='stdstarfit', internal_id=internal_id)
else:
joint_prow = make_joint_prow(prows, descriptor=descriptor, internal_id=internal_id)
internal_id += 1
joint_prow = create_and_submit(joint_prow, queue=queue, reservation=reservation, joint=True, dry_run=dry_run,
strictly_successful=strictly_successful, check_for_outputs=check_for_outputs,
resubmit_partial_complete=resubmit_partial_complete, system_name=system_name)
ptable.add_row(joint_prow)
if descriptor in ['science','stdstarfit']:
if descriptor == 'science':
zprows = []
log.info(" ")
log.info(f"Submitting individual science exposures now that joint fitting of standard stars is submitted.\n")
for row in prows:
if row['LASTSTEP'] == 'stdstarfit':
continue
row['JOBDESC'] = 'poststdstar'
row['INTID'] = internal_id
internal_id += 1
row['ALL_QIDS'] = np.ndarray(shape=0).astype(int)
row = assign_dependency(row, joint_prow)
row = create_and_submit(row, queue=queue, reservation=reservation, dry_run=dry_run,
strictly_successful=strictly_successful, check_for_outputs=check_for_outputs,
resubmit_partial_complete=resubmit_partial_complete, system_name=system_name)
ptable.add_row(row)
if descriptor == 'science' and row['LASTSTEP'] == 'all':
zprows.append(row)
## Now run redshifts
if descriptor == 'science' and len(zprows) > 0:
log.info(" ")
for zsubtype in z_submit_types:
if zsubtype == 'perexp':
for zprow in zprows:
log.info(f"Submitting redshift fit of type {zsubtype} for TILEID {zprow['TILEID']} and EXPID {zprow['EXPID']}.\n")
joint_prow = make_joint_prow([zprow], descriptor=zsubtype, internal_id=internal_id)
internal_id += 1
joint_prow = create_and_submit(joint_prow, queue=queue, reservation=reservation, joint=True, dry_run=dry_run,
strictly_successful=strictly_successful, check_for_outputs=check_for_outputs,
resubmit_partial_complete=resubmit_partial_complete, system_name=system_name)
ptable.add_row(joint_prow)
else:
log.info(f"Submitting joint redshift fits of type {zsubtype} for TILEID {zprows[0]['TILEID']}.\n")
joint_prow = make_joint_prow(zprows, descriptor=zsubtype, internal_id=internal_id)
internal_id += 1
joint_prow = create_and_submit(joint_prow, queue=queue, reservation=reservation, joint=True, dry_run=dry_run,
strictly_successful=strictly_successful, check_for_outputs=check_for_outputs,
resubmit_partial_complete=resubmit_partial_complete, system_name=system_name)
ptable.add_row(joint_prow)
if descriptor in ['psfnight', 'nightlyflat']:
log.info(f"Setting the calibration exposures as calibrators in the processing table.\n")
ptable = set_calibrator_flag(prows, ptable)
return ptable, joint_prow, internal_id
## wrapper functions for joint fitting
def science_joint_fit(ptable, sciences, internal_id, queue='realtime', reservation=None,
z_submit_types=None, dry_run=0, strictly_successful=False,
check_for_outputs=True, resubmit_partial_complete=True,
system_name=None):
"""
Wrapper function for desiproc.workflow.procfuns.joint_fit specific to the stdstarfit joint fit and redshift fitting.
All variables are the same except:
Arg 'sciences' is mapped to the prows argument of joint_fit.
The joint_fit argument descriptor is pre-defined as 'science'.
"""
return joint_fit(ptable=ptable, prows=sciences, internal_id=internal_id, queue=queue, reservation=reservation,
descriptor='science', z_submit_types=z_submit_types, dry_run=dry_run,
strictly_successful=strictly_successful, check_for_outputs=check_for_outputs,
resubmit_partial_complete=resubmit_partial_complete, system_name=system_name)
def flat_joint_fit(ptable, flats, internal_id, queue='realtime',
reservation=None, dry_run=0, strictly_successful=False,
check_for_outputs=True, resubmit_partial_complete=True,
system_name=None):
"""
Wrapper function for desiproc.workflow.procfuns.joint_fit specific to the nightlyflat joint fit.
All variables are the same except:
Arg 'flats' is mapped to the prows argument of joint_fit.
The joint_fit argument descriptor is pre-defined as 'nightlyflat'.
"""
return joint_fit(ptable=ptable, prows=flats, internal_id=internal_id, queue=queue, reservation=reservation,
descriptor='nightlyflat', dry_run=dry_run, strictly_successful=strictly_successful,
check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete,
system_name=system_name)
def arc_joint_fit(ptable, arcs, internal_id, queue='realtime',
reservation=None, dry_run=0, strictly_successful=False,
check_for_outputs=True, resubmit_partial_complete=True,
system_name=None):
"""
Wrapper function for desiproc.workflow.procfuns.joint_fit specific to the psfnight joint fit.
All variables are the same except:
Arg 'arcs' is mapped to the prows argument of joint_fit.
The joint_fit argument descriptor is pre-defined as 'psfnight'.
"""
return joint_fit(ptable=ptable, prows=arcs, internal_id=internal_id, queue=queue, reservation=reservation,
descriptor='psfnight', dry_run=dry_run, strictly_successful=strictly_successful,
check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete,
system_name=system_name)
def make_joint_prow(prows, descriptor, internal_id):
"""
Given an input list or array of processing table rows and a descriptor, this creates a joint fit processing job row.
It starts by copying the first input row, overwrites relevant columns, and defines the new dependencies (based on the
input prows).
Args:
prows, list or array of dicts. The rows corresponding to the individual exposure jobs that are
inputs to the joint fit.
descriptor, str. Description of the joint fitting job. Can either be 'stdstarfit', 'psfnight', or 'nightlyflat'.
internal_id, int, the next internal id to be used for assignment (already incremented up from the last used id number used).
Returns:
joint_prow, dict. Row of a processing table corresponding to the joint fit job.
"""
first_row = prows[0]
joint_prow = first_row.copy()
joint_prow['INTID'] = internal_id
joint_prow['JOBDESC'] = descriptor
joint_prow['LATEST_QID'] = -99
joint_prow['ALL_QIDS'] = np.ndarray(shape=0).astype(int)
joint_prow['SUBMIT_DATE'] = -99
joint_prow['STATUS'] = 'U'
joint_prow['SCRIPTNAME'] = ''
joint_prow['EXPID'] = np.array([ currow['EXPID'][0] for currow in prows ], dtype=int)
joint_prow = assign_dependency(joint_prow,dependency=prows)
return joint_prow
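# Hedged usage sketch (the prow contents are invented for illustration):
#   arcs = [prow1, prow2, prow3, prow4, prow5]   # individual arc processing rows
#   psfnight_prow = make_joint_prow(arcs, descriptor='psfnight', internal_id=internal_id)
#   # psfnight_prow copies prow1, resets the queue/status bookkeeping columns, collects the
#   # EXPID of every input row, and depends on all rows in `arcs` via assign_dependency().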
def checkfor_and_submit_joint_job(ptable, arcs, flats, sciences, arcjob, flatjob,
lasttype, internal_id, z_submit_types=None, dry_run=0,
queue='realtime', reservation=None, strictly_successful=False,
check_for_outputs=True, resubmit_partial_complete=True,
system_name=None):
"""
Takes all the state-ful data from daily processing and determines whether a joint fit needs to be submitted. Places
the decision criteria into a single function for easier maintainability over time. These are separate from the
new standard manifest*.json method of indicating a calibration sequence is complete. That is checked independently
elsewhere and doesn't interact with this.
Args:
ptable, Table, Processing table of all exposures that have been processed.
arcs, list of dicts, list of the individual arc jobs to be used for the psfnight (NOT all
the arcs, if multiple sets existed). May be empty if none identified yet.
flats, list of dicts, list of the individual flat jobs to be used for the nightlyflat (NOT
all the flats, if multiple sets existed). May be empty if none identified yet.
sciences, list of dicts, list of the most recent individual prestdstar science exposures
(if currently processing that tile). May be empty if none identified yet.
arcjob, dict or None, the psfnight job row if it exists. Otherwise None.
flatjob, dict or None, the nightlyflat job row if it exists. Otherwise None.
lasttype, str or None, the obstype of the last individual exposure row to be processed.
internal_id, int, an internal identifier unique to each job. Increments with each new job. This
is the smallest unassigned value.
z_submit_types: list of str's. The "group" types of redshifts that should be submitted with each
exposure. If not specified or None, then no redshifts are submitted.
dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If
dry_run=2, the scripts will not be written or submitted. Logging will remain the same
for testing as though scripts are being submitted. Default is 0 (false).
queue, str. The name of the queue to submit the jobs to. If None is given the current desi_proc default is used.
reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation.
strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is
less desirable because e.g. the sciences can run with SVN default calibrations rather
than failing completely from failed calibrations. Default is False.
check_for_outputs, bool. Default is True. If True, the code checks for the existence of the expected final
data products for the script being submitted. If all files exist and this is True,
then the script will not be submitted. If some files exist and this is True, only the
subset of the cameras without the final data products will be generated and submitted.
resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is True,
jobs with some prior data are pruned using PROCCAMWORD to only process the
remaining cameras not found to exist.
system_name (str): batch system name, e.g. cori-haswell, cori-knl, perlmutter-gpu
Returns:
ptable, Table, Processing table of all exposures that have been processed.
arcjob, dict or None, the psfnight job row if it exists. Otherwise None.
flatjob, dict or None, the nightlyflat job row if it exists. Otherwise None.
sciences, list of dicts, list of the most recent individual prestdstar science exposures
(if currently processing that tile). May be empty if none identified yet or
we just submitted them for processing.
internal_id, int, if no job is submitted, this is the same as the input, otherwise it is incremented upward from
the input such that it represents the smallest unused ID.
"""
if lasttype == 'science' and len(sciences) > 0:
log = get_logger()
skysubonly = np.array([sci['LASTSTEP'] == 'skysub' for sci in sciences])
if np.all(skysubonly):
log.error("Identified all exposures in joint fitting request as skysub-only. Not submitting")
sciences = []
return ptable, arcjob, flatjob, sciences, internal_id
if np.any(skysubonly):
log.error("Identified skysub-only exposures in joint fitting request")
log.info("Expid's: {}".format([row['EXPID'] for row in sciences]))
log.info("LASTSTEP's: {}".format([row['LASTSTEP'] for row in sciences]))
sciences = (np.array(sciences,dtype=object)[~skysubonly]).tolist()
log.info("Removed skysub only exposures in joint fitting:")
log.info("Expid's: {}".format([row['EXPID'] for row in sciences]))
log.info("LASTSTEP's: {}".format([row['LASTSTEP'] for row in sciences]))
from collections import Counter
tiles = np.array([sci['TILEID'] for sci in sciences])
counts = Counter(tiles)
if len(counts.most_common()) > 1:
log.error("Identified more than one tile in a joint fitting request")
log.info("Expid's: {}".format([row['EXPID'] for row in sciences]))
log.info("Tileid's: {}".format(tiles))
log.info("Returning without joint fitting any of these exposures.")
# most_common, nmost_common = counts.most_common()[0]
# if most_common == -99:
# most_common, nmost_common = counts.most_common()[1]
# log.warning(f"Given multiple tiles to jointly fit: {counts}. "+
# "Only processing the most common non-default " +
# f"tile: {most_common} with {nmost_common} exposures")
# sciences = (np.array(sciences,dtype=object)[tiles == most_common]).tolist()
# log.info("Tiles and exposure id's being submitted for joint fitting:")
# log.info("Expid's: {}".format([row['EXPID'] for row in sciences]))
# log.info("Tileid's: {}".format([row['TILEID'] for row in sciences]))
sciences = []
return ptable, arcjob, flatjob, sciences, internal_id
ptable, tilejob, internal_id = science_joint_fit(ptable, sciences, internal_id, z_submit_types=z_submit_types,
dry_run=dry_run, queue=queue, reservation=reservation,
strictly_successful=strictly_successful,
check_for_outputs=check_for_outputs,
resubmit_partial_complete=resubmit_partial_complete,
system_name=system_name
)
if tilejob is not None:
sciences = []
elif lasttype == 'flat' and flatjob is None and len(flats)>11:
## Note here we have an assumption about the number of expected flats being greater than 11
ptable, flatjob, internal_id = flat_joint_fit(ptable, flats, internal_id, dry_run=dry_run, queue=queue,
reservation=reservation, strictly_successful=strictly_successful,
check_for_outputs=check_for_outputs,
resubmit_partial_complete=resubmit_partial_complete,
system_name=system_name
)
elif lasttype == 'arc' and arcjob is None and len(arcs) > 4:
## Note here we have an assumption about the number of expected arcs being greater than 4
ptable, arcjob, internal_id = arc_joint_fit(ptable, arcs, internal_id, dry_run=dry_run, queue=queue,
reservation=reservation, strictly_successful=strictly_successful,
check_for_outputs=check_for_outputs,
resubmit_partial_complete=resubmit_partial_complete,
system_name=system_name
)
return ptable, arcjob, flatjob, sciences, internal_id
def set_calibrator_flag(prows, ptable):
"""
Sets the "CALIBRATOR" column of a procesing table row to 1 (integer representation of True)
for all input rows. Used within joint fitting code to flag the exposures that were input
to the psfnight or nightlyflat for later reference.
Args:
prows, list or array of Table.Rows or dicts. The rows corresponding to the individual exposure jobs that are
inputs to the joint fit.
ptable, Table. The processing table where each row is a processed job.
Returns:
ptable, Table. The same processing table as input except that the 'CALIBRATOR' column has been set to 1
for every row whose INTID matches one of the input prows.
"""
for prow in prows:
ptable['CALIBRATOR'][ptable['INTID'] == prow['INTID']] = 1
return ptable
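# Hedged usage sketch: after a successful psfnight or nightlyflat submission, flag the
# exposures that fed the joint fit (here `arcs` is the same list passed to joint_fit):
#   ptable = set_calibrator_flag(arcs, ptable)
#   # every ptable row whose INTID matches an input row now has CALIBRATOR == 1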
|
the-stack_0_24593
|
"""Settings that need to be set in order to run the tests."""
import os
DEBUG = True
SITE_ID = 1
APP_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
ROOT_URLCONF = 'multilingual_survey.tests.urls'
LANGUAGE_CODE = 'en'
LANGUAGES = [
('en', 'English'),
('de', 'Deutsch'),
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(APP_ROOT, '../app_static')
MEDIA_ROOT = os.path.join(APP_ROOT, '../app_media')
STATICFILES_DIRS = (
os.path.join(APP_ROOT, 'static'),
)
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(APP_ROOT, 'tests/test_app/templates')],
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.static',
)
}
}]
MIDDLEWARE_CLASSES = (
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
'django_libs',
'generic_positions',
'hvad',
]
INTERNAL_APPS = [
'multilingual_survey',
'multilingual_survey.tests.test_app',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
SECRET_KEY = 'foobar'
LOCALE_PATHS = (
os.path.join(APP_ROOT, 'locale'),
)
|
the-stack_0_24594
|
"""SongDate website development configuration."""
import pathlib
# Root of this application, useful if it doesn't occupy an entire domain
APPLICATION_ROOT = "/"
# Secret key for encrypting cookies
SECRET_KEY = b"\xcc\tKo\xbf\xd6\x06E/\x80~]\xf7<i\x899x\xa0\x8d\xa2e\x15\xca"
SESSION_COOKIE_NAME = "login"
# File Upload to var/uploads/
WEBSITE_ROOT = pathlib.Path(__file__).resolve().parent.parent
UPLOAD_FOLDER = WEBSITE_ROOT / "var" / "uploads"
JS_FOLDER = WEBSITE_ROOT / "website" / "js"
ALLOWED_EXTENSIONS = set(["png", "jpg", "jpeg", "gif"])
MAX_CONTENT_LENGTH = 16 * 1024 * 1024
# Uncomment and fill in if we decide to use a database
# Database file is var/insta485.sqlite3
# DATABASE_FILENAME = INSTA485_ROOT / "var" / "insta485.sqlite3"
|
the-stack_0_24595
|
"""teste para abertua do navegador"""
from subprocess import Popen, PIPE
from nfp.servicos.interface import abrir_popup
from nfp import CHREXEC, CHRPREFS, URLBASE
def abrir_chrome():
chrexec = '"{}" --remote-debugging-port=9222 --user-data-dir="{}" {}'.format(CHREXEC, CHRPREFS, URLBASE)
chrexec = [
CHREXEC,
'--remote-debugging-port=9222',
'--user-data-dir="{}"'.format(CHRPREFS),
URLBASE
]
Popen(chrexec, shell=False, stdout=PIPE).stdout
msg = 'ROBÔ EM ESPERA\n\nFaça o login no sistema e responda ao captcha.\n'
msg += 'Após o login, feche esta janela para iniciar a execução.\n'
abrir_popup(msg)
def test_get_versao():
from nfp.servicos.chrome import conferir_chrome
print(conferir_chrome())
if __name__ == "__main__":
test_get_versao()
|
the-stack_0_24596
|
#!C:\Python34\scrapper\Scripts
# Place url, linking to ad list, with desired search filters here.
url_to_scrape = "http://www.kijiji.ca/b-canot-kayak-paddle-board/quebec/kayak/k0c329l9001"
# Set the delay in (s) that the programs waits before scraping again.
scrape_delay = 600 # 600 = 10 mins
# Set filename to store ads in.
filename = 'ads.txt'
import requests
from bs4 import BeautifulSoup
import datetime
import time
def ParseAd(html): # Parses ad html trees and sorts relevant data into a dictionary
ad_info = {}
try:
ad_info["Title"] = html.find_all('a', {"class": "title"})[0].text.strip()
except:
log('[Error] Unable to parse Title data.')
try:
ad_info["Url"] = 'http://www.kijiji.ca' + html.get("data-vip-url")
except:
log('[Error] Unable to parse URL data.')
try:
ad_info["Description"] = html.find_all('div', {"class": "description"})[0].text.strip()
except:
log('[Error] Unable to parse Description data.')
try:
tempsoup = html.find_all('div', {"class": "location"})[0].text.strip()
if tempsoup.find('-') > 0:
tempsoup = tempsoup[:tempsoup.find('-') - 2]
ad_info["Location"] = tempsoup
except:
log('[Error] Unable to parse Location data.')
try:
ad_info["Date"] = html.find_all('span', {"class": "date-posted"})[0].text.strip()
except:
log('[Error] Unable to parse Date data.')
try:
ad_info["Price"] = html.find_all('div', {"class": "price"})[0].text.strip()
except:
log('[Error] Unable to parse Price data.')
return ad_info
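# Hedged sketch of what ParseAd returns for a single ad (all values invented):
#   {'Title': 'Kayak de mer Boreal Baffin', 'Url': 'http://www.kijiji.ca/...',
#    'Description': '...', 'Location': 'Quebec City', 'Date': '23/05/2016',
#    'Price': '1 200,00 $'}
# Keys can be missing if the corresponding HTML element failed to parse.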
def WriteAds(ad_dict, filename): # Writes ads to given file
try:
file = open(filename, 'a')
for ad_id in ad_dict:
file.write(ad_id)
file.write(str(ad_dict[ad_id]) + "\n")
log('[Okay] Ad ' + ad_id + ' written to database.')
file.close()
except:
log('[Error] Unable to write ad(s) to database.')
def ReadAds(filename): # Reads given file and creates a dict of ads in file
import ast
import os.path
if not os.path.isfile(filename): # If the file doesn't exist, it makes it.
file = open(filename, 'w')
file.close()
ad_dict = {}
file = open(filename, 'r')
for line in file:
if line.strip() != '':
index = line.find('{')
ad_id = line[:index]
dictionary = line[index:]
dictionary = ast.literal_eval(dictionary)
ad_dict[ad_id] = dictionary
file.close()
return ad_dict
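# Hedged sketch of the on-disk format ReadAds expects (one ad per line: the ad id
# followed immediately by a Python dict literal; the values are invented):
#   1166237725{'Title': 'Kayak Scorpio', 'Price': '850,00 $', 'Url': 'http://www.kijiji.ca/...'}
# ast.literal_eval() turns the dict part back into a dictionary keyed by the ad id.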
def log(text): # writes log data to log.txt with datetime.
date_time = datetime.datetime.now()
myfile = open('log.txt', 'a')
date_time = str(date_time) + '\n'
text += '\n\n'
myfile.write(date_time)
myfile.write(text)
myfile.close()
def MailAd(ad_dict): # Sends an email with a link and info of new ads
import smtplib
from email.mime.text import MIMEText
sender = '[email protected]'
passwd = 'Password'
receiver = '[email protected]'
count = len(ad_dict)
if count > 1:
subject = str(count) + ' Nouvelle annonces trouvés!'
if count == 1:
subject = 'Une nouvelle annonce trouvé'
body = ''
try:
for ad_id in ad_dict:
body += ad_dict[ad_id]['Title'] + ' - ' + ad_dict[ad_id]['Price'] + ' - ' + ad_dict[ad_id]['Location']
body += ' - ' + ad_dict[ad_id]['Date'] + '\n'
body += ad_dict[ad_id]['Url'] + '\n\n'
except:
log('[Error] Unable to create body for email message')
body += 'This is an automated message.\nPlease do not reply to this message.'
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = receiver
try:
server = smtplib.SMTP('smtp.live.com', 587)
server.ehlo()
server.starttls()
server.ehlo()
except:
log('[Error] Unable to connect to email server.')
try:
server.login(sender, passwd)
except:
log('[Error] Unable to login to email server.')
try:
server.sendmail(msg['From'], msg['To'], msg.as_string())
server.quit()
log('[Okay] Email message successfully delivered.')
except:
log('[Error] Unable to send message.')
def main(old_ad_dict): # Main function, brings it all together.
try:
page = requests.get(url_to_scrape)
log("[Okay] Retrieved HTML data from: " + url_to_scrape)
except:
log("[Error] Unable to load html data from: " + url_to_scrape)
soup = BeautifulSoup(page.content, "html.parser")
page = None
kijiji_ads = soup.find_all("div", {"class": "regular-ad"}) # Finds all ad trees in page html.
ad_dict = {}
checklist = ['boréal', 'kayak de mer', 'baffin', 'epsilon', 'scorpio']
excludelist = ['wanted', 'recherché']
for ad in kijiji_ads: # Creates a dictionary of all ads keyed by ad id.
title = ad.find_all('a', {"class": "title"})[0].text.strip()
ad_id = ad.find_all('div', {'class': "watch"})[0].get('data-adid')
if not [False for match in excludelist if match in title.lower()]:
if [True for match in checklist if match in title.lower()]:
if ad_id not in old_ad_dict:
log('[Okay] New ad found! Ad id: ' + ad_id)
ad_dict[ad_id] = ParseAd(ad)
if ad_dict != {}: # If dict not empty, write ads to text file and send email.
WriteAds(ad_dict, filename)
MailAd(ad_dict)
try:
old_ad_dict = ReadAds(filename)
log("[Okay] Database succesfully reloaded.")
except:
log("[Error] Unable to reload database.")
time.sleep(scrape_delay)
main(old_ad_dict)
if __name__ == "__main__":
old_ad_dict = ReadAds(filename)
log("[Okay] Ad database succesfully loaded.")
myfile = open('log.txt', 'w') # Create/Empty log file
myfile.close()
main(old_ad_dict)
|
the-stack_0_24597
|
# Explicit imports for modules and names used below; some of these may also be
# re-exported by the wildcard imports from `bots` and `propagate_scores`.
import copy
import itertools
import random
from copy import deepcopy
from typing import List
import networkx as nx
import pandas
from bots import *
from propagate_scores import *
class TreeBot(Bot):
def __init__(self, select_action_method):
super(TreeBot, self).__init__()
self.name = "TreeBotRelative"
self.select_action_method = select_action_method
self.tree = nx.DiGraph()
self.n_sims = 5
def get_score(self, game, player):
return player.get_score()
def sim_action(self, G: nx.DiGraph, game, player, paction, action_i: int, turn, parent_node, level=0):
try:
possible_actions = turn.send(paction)
turn_phase = self.get_turn_phase(possible_actions)
for current_paction, paction in enumerate(possible_actions):
# Duplicate game state
player_copy = deepcopy(player)
game_copy = deepcopy(game)
turn_copy = game_copy.start_turn(player_copy, turn_phase=turn_phase)
# Go to the appropriate `yield` statement
next(turn_copy)
new_node = f"{G.number_of_nodes()}. {str(paction)}"
G.add_node(new_node, action_i=current_paction)
G.add_edge(parent_node, new_node)
self.sim_action(
G, game_copy, player_copy, paction, current_paction,
turn_copy, parent_node=new_node, level=level + 1
)
except StopIteration:
score = self.get_score(game, player)
if score > 0:
score_node = f"{G.number_of_nodes()}. !Get a stone with score {score}!"
else:
score_node = f"{G.number_of_nodes()}. Lose the turn."
G.add_node(score_node, score=score)
G.add_edge(parent_node, score_node)
def get_turn_phase(self, possible_actions: List[PlayerAction]) -> TurnPhase:
if self.in_take_steal_phase(possible_actions):
return TurnPhase.SELECT_A_STONE
if self.in_roll_take_phase(possible_actions):
return TurnPhase.ROLL_OR_TAKE
if self.in_pick_dice_phase(possible_actions):
return TurnPhase.PICK_DICE_SET
@staticmethod
def get_n_possible_actions(possible_actions, n_sims):
return list(itertools.chain(*[copy.deepcopy(possible_actions) for _ in range(n_sims)]))
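# Hedged sketch of the duplication this performs: with n_sims == 2 and
# possible_actions == [a, b], the deep copies are chained into [a, b, a, b],
# so every simulation round works on its own independent copies of each action.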
def game_loop(self, game, player: Player, turn):
self.game = game
possible_actions = next(turn)
# Number of simulations
possible_actions = self.get_n_possible_actions(possible_actions, self.n_sims)
round = 0
while 1:
# print(f"Starting new round with {len(possible_actions)} possible actions.")
G = nx.DiGraph()
G.add_node("0. START")
# If we can take only one action, just do that.
if len(possible_actions) == self.n_sims:
action = possible_actions[0]
try:
possible_actions = turn.send(action)
except StopIteration:
break
possible_actions = self.get_n_possible_actions(possible_actions, self.n_sims)
round += 1
continue
# Otherwise, start simulating
for pa, paction in enumerate(possible_actions):
# print(f"Starting a simulation with start action {pa}, {str(paction)}")
# Duplicate game state
parent_node = f"{G.number_of_nodes()}. {str(paction)}"
G.add_node(parent_node, action_i=pa)
G.add_edge("0. START", parent_node)
player_copy = deepcopy(player)
assert None not in [die.face for die in getattr(player_copy, "rolled_dice")]
game_copy = deepcopy(game)
turn_copy = game_copy.start_turn(player_copy, turn_phase=self.get_turn_phase(possible_actions))
# Go to the appropriate `yield` statement
next(turn_copy)
self.sim_action(G, game_copy, player_copy, paction, pa, turn_copy, parent_node=parent_node)
G = propagate_scores(G)
actions_df = pandas.DataFrame([
(i % (len(possible_actions) / self.n_sims), G.nodes[out_node]["action_i"], G.nodes[out_node]["score"])
for i, out_node in enumerate(G.neighbors("0. START"))
], columns=["action_type_i", "action_i", "score"])
actions_df = actions_df.groupby("action_type_i").agg(self.select_action_method).sort_values("score")
# Select the best action
selected_action_i = int(actions_df.iloc[-1, 0])
# if round > 0:
# nx.write_gexf(G, "./tree.gexf")
# exit()
# Select the action with the highest score
action = possible_actions[selected_action_i]
# print("Final selected action", action)
try:
possible_actions = turn.send(action)
except StopIteration:
break
possible_actions = self.get_n_possible_actions(possible_actions, self.n_sims)
round += 1
def select_action(self, player: Player, possible_actions: List[PlayerAction]) -> PlayerAction:
if Bot.in_roll_take_phase(possible_actions):
# Pick `roll` until we reach a threshold, then pick take a stone if possible.
if len(Utils.count_faces(player.selected_dice)) < self.take_stone_threshold:
return PlayerAction(PlayerActionType.ROLL_DICE, None)
if any([action for action in possible_actions if action.action_type == PlayerActionType.TAKE_STONE]):
return PlayerAction(PlayerActionType.TAKE_STONE, None)
return PlayerAction(PlayerActionType.ROLL_DICE, None)
elif Bot.in_pick_dice_phase(possible_actions):
possible_actions = sorted(possible_actions, key=self.key_dice_set_actions)
return possible_actions[-1]
elif Bot.in_take_steal_phase(possible_actions):
# Try to pick the highest stone, disregarding stealing or taking from the bank
sorted_actions = sorted(possible_actions, key=lambda x: x.argument)
best_stone_action = sorted_actions[-1]
return best_stone_action
else:
# Fall back to a random choice
return random.choice(possible_actions)
|
the-stack_0_24598
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
exp = np.exp
log = np.log
class ReduceTest(test_util.TensorFlowTestCase):
def testReduceAllDims(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with self.test_session():
y_tf = math_ops.reduce_sum(x).eval()
self.assertEqual(y_tf, 21)
def testReduceExplicitDims(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
axis = np.array([[0], [1]])
with self.assertRaisesRegexp(ValueError, "must have rank at most 1"):
math_ops.reduce_sum(x, axis)
class RoundTest(test_util.TensorFlowTestCase):
def testRounding(self):
x = [0.49, 0.7, -0.3, -0.8]
for dtype in [np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.round(x_tf)
y_tf_np = y_tf.eval()
y_np = np.round(x_np)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
class ModTest(test_util.TensorFlowTestCase):
def testFloat(self):
x = [0.5, 0.7, 0.3]
for dtype in [np.float32, np.double]:
# Test scalar and vector versions.
for denom in [x[0], [x[0]] * 3]:
x_np = np.array(x, dtype=dtype)
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.fmod(x_np, denom)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
def testFixed(self):
x = [5, 10, 23]
for dtype in [np.int32, np.int64]:
# Test scalar and vector versions.
for denom in [x[0], x]:
x_np = np.array(x, dtype=dtype)
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.mod(x_np, denom)
self.assertAllClose(y_tf_np, y_np)
class SquaredDifferenceTest(test_util.TensorFlowTestCase):
def testSquaredDifference(self):
for dtype in [np.int32, np.float16]:
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
y = np.array([-3, -2, -1], dtype=dtype)
z = (x - y)*(x - y)
with self.test_session():
z_tf = math_ops.squared_difference(x, y).eval()
self.assertAllClose(z, z_tf)
class ScalarMulTest(test_util.TensorFlowTestCase):
def testAcceptsRefs(self):
var = variables.Variable(10)
result = math_ops.scalar_mul(3, var)
init = variables.initialize_all_variables()
with self.test_session() as sess:
sess.run(init)
self.assertEqual(30, result.eval())
def testAcceptsConstant(self):
const = constant_op.constant(10)
result = math_ops.scalar_mul(3, const)
with self.test_session():
self.assertEqual(30, result.eval())
def testAcceptsTensor(self):
tensor = array_ops.ones([10, 10])
result = math_ops.scalar_mul(3, tensor)
expected = array_ops.ones([10, 10]) * 3
with self.test_session():
self.assertAllEqual(expected.eval(), result.eval())
def testAcceptsIndexedSlices(self):
values = constant_op.constant([2, 3, 5, 7, 0, -1], shape=[3, 2])
indices = constant_op.constant([0, 2, 5])
x = math_ops.scalar_mul(-3, ops.IndexedSlices(values, indices))
with self.test_session():
self.assertAllEqual(x.values.eval(), [[-6, -9], [-15, -21], [0, 3]])
self.assertAllEqual(x.indices.eval(), [0, 2, 5])
class AccumulateNTest(test_util.TensorFlowTestCase):
def testFloat(self):
np.random.seed(12345)
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
tf_x = ops.convert_n_to_tensor(x)
for u in tf_x:
print("shape=%s" % u.get_shape())
with self.test_session():
self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllClose(x[0] * 5, math_ops.accumulate_n([tf_x[0]] * 5).eval())
def testInt(self):
np.random.seed(54321)
x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session():
self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllEqual(x[0] * 6, math_ops.accumulate_n([tf_x[0]] * 6).eval())
if __name__ == "__main__":
googletest.main()
|
the-stack_0_24599
|
import sys
import numpy as np
import intcode
if __name__ == "__main__":
if len(sys.argv) != 2:
print("usage: python3 tractor.py <intcode_file>")
exit(1)
instructions = intcode.load_instructions(sys.argv[1])
output = []
for i in range(50):
for j in range(50):
idx, rboffset = intcode.execute(instructions.copy(), \
inputdata=[i, j])
output.append(intcode.RETVAL)
print(intcode.RETVAL, end='')
print()
output = np.array(output)
print(output)
print(output[output == 1])
print(len(output[output == 1]))
|
the-stack_0_24601
|
from .graphics import *
from .graphics import _root
import pyscreenshot as ImageGrab
class GridDrawer(object):
def __init__(self, grid, maximum_grid_drawer_window_height_in_pixels, top_left_in_pixels = None):
self._grid = grid
width = grid.width()
height = grid.height()
# Make sure that the height of the window is less than the specified maximum
self._cell_size = max(10, maximum_grid_drawer_window_height_in_pixels / height)
# Create the window
pixel_width = width * self._cell_size
pixel_height = height * self._cell_size
self._win = GraphWin(grid.name(), pixel_width, pixel_height, autoflush = False)
# If the x and y coordinates are specified, then set the geometry; this is a bit of a hack
if top_left_in_pixels is not None:
self._win.master.geometry('%dx%d+%d+%d' % (pixel_width, pixel_height, \
top_left_in_pixels[0], top_left_in_pixels[1]))
# Allocate the cells
self._rectangles = [[Rectangle(Point(i * self._cell_size, (height - j - 1) * self._cell_size), \
Point((i+1) * self._cell_size, (height - j) * self._cell_size)) \
for i in range(width)] \
for j in range(height)]
for i in range(width):
for j in range(height):
self._rectangles[j][i].draw(self._win)
def reset(self):
pass
# Save the window
def save_screenshot(self, filename):
# From https://stackoverflow.com/questions/66672786
x=self._win.winfo_rootx()
y=self._win.winfo_rooty()
x1=x+self._win.winfo_width()
y1=y+self._win.winfo_height()
ImageGrab.grab().crop((x,y,x1,y1)).save(filename)
def update(self):
raise NotImplementedError()
def wait_for_key_press(self):
self._win.getKey()
|
the-stack_0_24602
|
"""
Copyright 2017 Jean-Noel Colin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
ETH_HEADER_LENGTH=14
IP_HEADER_LENGTH=20
TCP_HEADER_LENGTH=20
UDP_HEADER_LENGTH=8
ip_protos={0:'HOPOPT',
1:'ICMP',
2:'IGMP',
3:'GGP',
4:'IPv4',
5:'ST',
6:'TCP',
7:'CBT',
8:'EGP',
9:'IGP',
10:'BBN-RCC-MON',
11:'NVP-II',
12:'PUP',
13:'ARGUS (deprecated)',
14:'EMCON',
15:'XNET',
16:'CHAOS',
17:'UDP',
18:'MUX',
19:'DCN-MEAS',
20:'HMP',
21:'PRM',
22:'XNS-IDP',
23:'TRUNK-1',
24:'TRUNK-2',
25:'LEAF-1',
26:'LEAF-2',
27:'RDP',
28:'IRTP',
29:'ISO-TP4',
30:'NETBLT',
31:'MFE-NSP',
32:'MERIT-INP',
33:'DCCP',
34:'3PC',
35:'IDPR',
36:'XTP',
37:'DDP',
38:'IDPR-CMTP',
39:'TP++',
40:'IL',
41:'IPv6',
42:'SDRP',
43:'IPv6-Route',
44:'IPv6-Frag',
45:'IDRP',
46:'RSVP',
47:'GRE',
48:'DSR',
49:'BNA',
50:'ESP',
51:'AH',
52:'I-NLSP',
53:'SWIPE (deprecated)',
54:'NARP',
55:'MOBILE',
56:'TLSP',
57:'SKIP',
58:'IPv6-ICMP',
59:'IPv6-NoNxt',
60:'IPv6-Opts',
61:'',
62:'CFTP',
63:'',
64:'SAT-EXPAK',
65:'KRYPTOLAN',
66:'RVD',
67:'IPPC',
68:'',
69:'SAT-MON',
70:'VISA',
71:'IPCV',
72:'CPNX',
73:'CPHB',
74:'WSN',
75:'PVP',
76:'BR-SAT-MON',
77:'SUN-ND',
78:'WB-MON',
79:'WB-EXPAK',
80:'ISO-IP',
81:'VMTP',
82:'SECURE-VMTP',
83:'VINES',
84:'TTP/IPTM',  # protocol number 84 is assigned to both TTP and IPTM; a dict cannot hold duplicate keys
85:'NSFNET-IGP',
86:'DGP',
87:'TCF',
88:'EIGRP',
89:'OSPFIGP',
90:'Sprite-RPC',
91:'LARP',
92:'MTP',
93:'AX.25',
94:'IPIP',
95:'MICP (deprecated)',
96:'SCC-SP',
97:'ETHERIP',
98:'ENCAP',
99:'',
100:'GMTP',
101:'IFMP',
102:'PNNI',
103:'PIM',
104:'ARIS',
105:'SCPS',
106:'QNX',
107:'A/N',
108:'IPComp',
109:'SNP',
110:'Compaq-Peer',
111:'IPX-in-IP',
112:'VRRP',
113:'PGM',
114:'',
115:'L2TP',
116:'DDX',
117:'IATP',
118:'STP',
119:'SRP',
120:'UTI',
121:'SMP',
122:'SM (deprecated)',
123:'PTP',
124:'ISIS over IPv4',
125:'FIRE',
126:'CRTP',
127:'CRUDP',
128:'SSCOPMCE',
129:'IPLT',
130:'SPS',
131:'PIPE',
132:'SCTP',
133:'FC',
134:'RSVP-E2E-IGNORE',
135:'Mobility Header',
136:'UDPLite',
137:'MPLS-in-IP',
138:'manet',
139:'HIP',
140:'Shim6',
141:'WESP',
142:'ROHC'}
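# Hedged usage sketch (this module only defines constants; the lookups below are illustrative):
#   ip_protos.get(6)                  # -> 'TCP'
#   ip_protos.get(17)                 # -> 'UDP'
#   ip_protos.get(200, 'UNASSIGNED')  # unknown protocol numbers need a fallback value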
|
the-stack_0_24603
|
"""
Valid Parentheses: Leetcode 20 / Balanced Brackets
https://www.algoexpert.io/questions/Balanced%20Brackets
"""
class Solution(object):
# O(n) time | O(n) space
def isValid(self, s):
myStack = []
count = 0 # keep track of number of elements
# used to find closing brackets
match = {
"(": ")",
"[": "]",
"{": "}"
}
for par in s:
# handle opening brackets
if par == "(" or par == "{" or par == "[":
myStack.append(par)
count += 1
# handle closing brackets
else:
# check if we can find the corresponding opening bracket
if count < 1 or match[myStack.pop()] != par:
return False
# the pop() above removed an element from the stack
count -= 1
# return true if stack is empty (all matched successfully)
return count == 0
class SimplerSolution(object):
def isValid(self, s):
myStack = []
match = {
"(": ")",
"[": "]",
"{": "}"
}
for par in s:
if par == "(" or par == "{" or par == "[":
myStack.append(par)
elif len(myStack) == 0 or match[myStack.pop()] != par:
return False
return len(myStack) == 0
def balancedBrackets(string):
opening_brackets = "([{"
matching_brackets = {")": "(", "]": "[", "}": "{"}
stack = []
for char in string:
if char not in matching_brackets and char in opening_brackets: # opening bracket
stack.append(char)
# closing brackets
elif char in matching_brackets and(not stack or matching_brackets[char] != stack.pop(-1)):
return False
return len(stack) == 0
print(balancedBrackets("([])(){}(())()()"))
print(balancedBrackets("([])(){}(()))()()"))
|
the-stack_0_24604
|
import numpy as np
from ray.rllib.agents.trainer import Trainer, with_common_config
from ray.rllib.utils.annotations import override
# yapf: disable
# __sphinx_doc_begin__
class RandomAgent(Trainer):
"""Policy that takes random actions and never learns."""
_name = "RandomAgent"
_default_config = with_common_config({
"rollouts_per_iteration": 10,
})
@override(Trainer)
def _init(self, config, env_creator):
self.env = env_creator(config["env_config"])
@override(Trainer)
def _train(self):
rewards = []
steps = 0
for _ in range(self.config["rollouts_per_iteration"]):
obs = self.env.reset()
done = False
reward = 0.0
while not done:
action = self.env.action_space.sample()
obs, r, done, info = self.env.step(action)
reward += r
steps += 1
rewards.append(reward)
return {
"episode_reward_mean": np.mean(rewards),
"timesteps_this_iter": steps,
}
# __sphinx_doc_end__
# don't enable yapf after, it's buggy here
if __name__ == "__main__":
trainer = RandomAgent(
env="CartPole-v0", config={"rollouts_per_iteration": 10})
result = trainer.train()
assert result["episode_reward_mean"] > 10, result
print("Test: OK")
|
the-stack_0_24605
|
# -*- coding: utf-8 -*-
import pygame
import numpy as np
class NPC:
""" Base class for NPC """
def __init__(self, pos_x, pos_y, size, radius, screen_width, screen_height,
speed=None, num=None, bullets=None, reload_delay=None,
bullet_size=None, bullet_radius=None, bullet_speed=None,
x_step=None, y_step=None):
self.rect = pygame.Rect(pos_x, pos_y, size, size)
self.size = size
self.radius = radius
self.screen_width = screen_width
self.screen_height = screen_height
if speed is not None:
self.speed = speed
if num is not None:
self.num = num
if bullets is not None:
self.bullets = []
if reload_delay is not None:
self.reload_delay = reload_delay
if bullet_size is not None:
self.bullet_size = bullet_size
if bullet_radius is not None:
self.bullet_radius = bullet_radius
if bullet_speed is not None:
self.bullet_speed = bullet_speed
if x_step is not None:
self.x_step = x_step
if y_step is not None:
self.y_step = y_step
@property
def x(self):
return self.rect.x
@property
def y(self):
return self.rect.y
def move(self, x_step=0, y_step=0, borders=True):
if borders: # bullets must ignore screen borders
# x moving
if self.x <= 0 or self.x >= self.screen_width - self.size:
x_step = 0
elif 0 < self.screen_width - self.x + self.size < self.speed:
x_step = self.screen_width - self.x + self.size
elif 0 < self.x < self.speed:
x_step = -(self.speed - self.x)
# y moving
if self.y <= 0 or self.y >= self.screen_height - self.size:
y_step = 0
elif 0 < self.screen_height - self.y + self.size < self.speed:
y_step = self.screen_height - self.y + self.size
elif 0 < self.y < self.speed:
y_step = -(self.speed - self.y)
self.rect = self.rect.move(x_step, y_step)
class Hero(NPC):
""" NPC teached by user """
bullets_created = 0
bullets = []
shoot_wait = 0
def process(self, doc):
shot_used = False
for item in doc['cmd_lst']:
if item['cmd'] == 'move':
x_step = y_step = 0
if item['xd'] == 1:
x_step = self.speed
elif item['xd'] == -1:
x_step = -self.speed
else:
x_step = 0
if item['yd'] == 1:
y_step = self.speed
elif item['yd'] == -1:
y_step = -self.speed
else:
y_step = 0
self.move(x_step, y_step)
elif item['cmd'] == 'shoot' and not shot_used:
if self.shoot_wait < 1:
self.shoot(item['x'], item['y'])
self.shoot_wait = self.reload_delay
shot_used = True
else:
self.shoot_wait -= 1
def shoot(self, x_target, y_target):
x, y = self.rect.center
dt = np.sqrt((x_target - x)**2 + (y_target - y)**2)
steps = dt / self.bullet_speed
x_step = (x_target - x) / steps
y_step = (y_target - y) / steps
bullet = HeroBullet(x, y, self.bullet_size, self.bullet_radius,
self.screen_width, self.screen_height,
self.bullet_speed, num=self.bullets_created,
x_step=x_step, y_step=y_step)
self.bullets_created += 1
self.bullets.append(bullet)
class HeroBullet(NPC):
""" Bullet created by Hero """
def process(self):
self.move(self.x_step, self.y_step, borders=False)
class Enemy(NPC):
""" Base class for npc enemy """
power = 2
def process(self, hero_x, hero_y):
x, y = self.rect.center
x_step = y_step = 0
# Simple move enemy to hero
if hero_x > x:
x_step = self.speed
elif hero_x < x:
x_step = -self.speed
else:
x_step = 0
if hero_y > y:
y_step = self.speed
elif hero_y < y:
y_step = -self.speed
else:
y_step = 0
self.move(x_step, y_step)
class EnemyGreen(Enemy):
""" Easy enemy """
enemy_class = 'green'
color = (51, 222, 32)
speed = 1
class EnemyYellow(Enemy):
""" Medium enemy """
enemy_class = 'yellow'
color = (255, 230, 0)
speed = 2
class EnemyRed(Enemy):
""" Hard enemy """
enemy_class = 'red'
color = (236, 0, 0)
speed = 3
|
the-stack_0_24606
|
#!/usr/bin/python
#
# (C) 2017 Riad S. Wahby <[email protected]>
#
# noninteractive proof via Fiat-Shamir
# zk via Pedersen commitments
# uses squashing trick to make V's checks "one-shot" for sum-checks
# uses sqrt witness trick inspired by Groth09
from itertools import izip
import libfennel.fiatshamir as fs
import libfennel.parse_pws
import libfennel.util as util
from libfennel.defs import Defs
from libfennel.circuitnizk import CircuitProverNIZK, CircuitVerifierNIZK
from libfennel.commit import PedVecCommit, WitnessCommit, WitnessLogCommit
from libfennel.gateprover import GateFunctionsPVC
from libfennel.iomlext import VerifierIOMLExt
from libfennel.rdlprover import RDLProver
def _compute_Jvec(j1val, jvals, wvals, nCopyBits, nInBits, half, rec=None):
# note: order in each round is const, linear, quadratic, cubic coeff
Jvec = []
prev_j = Defs.prime - j1val
tot_iters = 0
for (i, w) in enumerate(wvals):
this_j = jvals[i]
raccum = w
# const term
Jvec.append((this_j - 2 * prev_j) % Defs.prime)
# further terms
niter = 3 if i < nCopyBits else 2
tot_iters += niter
for _ in xrange(0, niter):
Jvec.append((raccum * this_j - prev_j) % Defs.prime)
raccum *= w
raccum %= Defs.prime
prev_j = this_j
if rec is not None:
rec.did_add(len(wvals))
rec.did_sub(len(wvals) + tot_iters)
rec.did_mul(2 * tot_iters)
if half:
assert len(Jvec) == (3 * nInBits)
else:
assert len(Jvec) == (4 * nCopyBits + 6 * nInBits)
return Jvec
class _NIZKWComMixin(object):
wcom = None
create_witness_proof = None
check_witness = None
def create_witness_proof_sqrt(self, rvals, r0val, szeta):
# we have the rvals now
self.wcom.set_rvals(rvals, r0val)
# start the proof
(aval, Cval) = self.wcom.eval_init()
aval = self.com.compress(aval)
Cval = self.com.compress(Cval)
self.fs.put((aval, Cval))
# finish proof
chal = self.fs.rand_scalar()
(zvals, zh, zc) = self.wcom.eval_finish(chal, szeta)
self.fs.put((zvals, zh, zc))
if Defs.track_fArith:
self.com_q_a.did_rng()
def create_witness_proof_log(self, rvals, r0val, szeta):
self.wcom.set_rvals_p(rvals, r0val, szeta)
cont = True
tot = 1
while cont:
self.fs.put(self.wcom.redc_init())
cont = self.wcom.redc_cont_p(self.fs.rand_scalar())
tot += 1
self.fs.put(self.wcom.fin_init())
self.fs.put(self.wcom.fin_finish(self.fs.rand_scalar()))
if Defs.track_fArith:
self.com_q_a.did_rng(tot)
def check_witness_sqrt(self, cvals, rvals, r0val, zeta, vxeval):
# set r and get beginning of proof
self.wcom.set_rvals(rvals, r0val)
(aval, Cval) = self.fs.take()[0]
# decompress points
cvals = [ self.com.decompress(cval) for cval in cvals ]
aval = self.com.decompress(aval)
Cval = self.com.decompress(Cval)
# get rest of proof
chal = self.fs.rand_scalar()
(zvals, zh, zc) = self.fs.take()[0]
if Defs.track_fArith:
self.com_q_a.did_rng()
# check proof
return self.wcom.eval_check(cvals, aval, Cval, zvals, zh, zc, chal, zeta, vxeval)
def check_witness_log(self, cvals, rvals, r0val, zeta, vxeval):
cvals = [ self.com.decompress(cval) for cval in cvals ]
self.wcom.set_rvals_v(rvals, r0val, cvals, zeta, vxeval)
# run reductions
cont = True
tot = 1
while cont:
rv = self.fs.take()[0]
cv = self.fs.rand_scalar()
cont = self.wcom.redc_cont_v(cv, rv)
tot += 1
# check proof
fin_init_val = self.fs.take()[0]
fin_chal = self.fs.rand_scalar()
fin_finish_val = self.fs.take()[0]
if Defs.track_fArith:
self.com_q_a.did_rng(tot)
return self.wcom.fin_check(fin_chal, fin_init_val, fin_finish_val)
def build_wcom(self, is_prv):
if self.fs.wDiv is None:
self.wcom = WitnessCommit(self.com)
if is_prv:
self.create_witness_proof = self.create_witness_proof_sqrt
else:
self.check_witness = self.check_witness_sqrt
else:
self.wcom = WitnessLogCommit(self.com, self.fs.wDiv)
if is_prv:
self.create_witness_proof = self.create_witness_proof_log
else:
self.check_witness = self.check_witness_log
class CircuitProverVecWitNIZK(CircuitProverNIZK, _NIZKWComMixin):
__metaclass__ = libfennel.parse_pws.FromPWS
rdl_prover = None
cat_label = "prv_nizk_vec_wit"
commit_type = PedVecCommit
def __init__(self, nCopies, nInputs, in0vv, in1vv, typvv, muxvv=None):
super(CircuitProverVecWitNIZK, self).__init__(nCopies, nInputs, in0vv, in1vv, typvv, muxvv)
if Defs.track_fArith:
self.rdl_sc_a = Defs.fArith().new_cat("%s_rdl_sc_a_%d" % (self.cat_label, hash(self)))
else:
self.rdl_sc_a = None
def create_witness_comm(self, wvals):
# self.wcom holds onto the svals
cvals = [ self.com.compress(cval) for cval in self.wcom.witness_commit(wvals) ]
self.fs.put(cvals)
def set_wdiv(self, n):
self.fs.wDiv = n
def run(self, inputs, muxbits=None):
self.build_prover()
self.build_wcom(True)
self.prover_fresh = False
assert Defs.prime == self.com.gops.q
######################
# 0. Run computation #
######################
assert self.prover is not None
# generate any nondet inputs
inputs = self.nondet_gen(inputs, muxbits)
# set muxbits and dump into transcript
if muxbits is not None:
self.prover.set_muxbits(muxbits)
self.fs.put(muxbits, True)
        # figure out the nondeterministic inputs
invals = []
invals_nd = []
for ins in inputs:
ins = list(ins) + [0] * (2**self.nInBits - len(ins))
if self.fs.ndb is not None:
loIdx = (2 ** self.nInBits) - (2 ** (self.nInBits - self.fs.ndb))
if self.fs.rvend is not None and self.fs.rvstart is not None:
ins[self.fs.rvstart:self.fs.rvend+1] = [0] * (self.fs.rvend - self.fs.rvstart + 1)
ins_nd = ins[loIdx:]
ins[loIdx:] = [0] * (2 ** (self.nInBits - self.fs.ndb))
invals_nd.extend(ins_nd)
invals.extend(ins)
# need to pad up to nCopies if we're not using an RDL
if self.rdl is None:
assert util.clog2(len(invals)) == self.nInBits + self.nCopyBits
invals += [0] * (2 ** (self.nInBits + self.nCopyBits) - len(invals))
self.fs.put(invals, True)
# commit to nondet inputs from prover
if invals_nd:
self.create_witness_comm(invals_nd)
# now V sets r_values if necessary
if self.fs.rvstart is not None and self.fs.rvend is not None:
r_values = [ self.fs.rand_scalar() for _ in xrange(self.fs.rvstart, self.fs.rvend + 1) ]
if self.rdl is None:
assert len(inputs) == self.nCopies
for inp in inputs:
inp[self.fs.rvstart:self.fs.rvend+1] = r_values
else:
assert len(inputs) == 1
inputs[0][self.fs.rvstart:self.fs.rvend+1] = r_values
if self.rdl is None:
self.prover.set_inputs(inputs)
else:
rdl_inputs = []
for r_ents in self.rdl:
rdl_inputs.append([ inputs[0][r_ent] for r_ent in r_ents ])
self.prover.set_inputs(rdl_inputs)
# now evaluate the AC and put the outputs in the transcript
outvals = util.flatten(self.prover.ckt_outputs)
nOutBits = util.clog2(len(self.in0vv[-1]))
assert util.clog2(len(outvals)) == nOutBits + self.nCopyBits
outvals += [0] * (2 ** (nOutBits + self.nCopyBits) - len(outvals))
self.fs.put(outvals, True)
# generate random point in (z1, z2) \in F^{nOutBits + nCopyBits}
z1 = [ self.fs.rand_scalar() for _ in xrange(0, nOutBits) ]
z1_2 = None
z2 = [ self.fs.rand_scalar() for _ in xrange(0, self.nCopyBits) ]
if Defs.track_fArith:
self.sc_a.did_rng(nOutBits + self.nCopyBits)
# to start, we reconcile with mlext of output
# V knows it, so computes g^{mlext}, i.e., Com(mlext; 0)
prev_rval = 0
muls = None
# if the AC has only one layer, tell P to give us H(.)
project_line = len(self.in0vv) == 1
self.prover.set_z(z1, z2, None, None, project_line)
##########################################
# 1. Interact with prover for each layer #
##########################################
for lay in xrange(0, len(self.in0vv)):
nInBits = self.layInBits[lay]
nOutBits = self.layOutBits[lay]
w1 = []
w2 = []
w3 = []
self.com.reset()
if Defs.track_fArith:
self.sc_a.did_rng(2*nInBits + self.nCopyBits)
###################
### A. Sumcheck ###
###################
for rd in xrange(0, 2 * nInBits + self.nCopyBits):
# get output from prv and check against expected value
outs = self.prover.get_outputs()
# 1. commit to these values
self.fs.put(self.com.compress(self.com.commitvec(outs)))
# 2. compute new rand value and go to next round
nrand = self.fs.rand_scalar()
self.prover.next_round(nrand)
if rd < self.nCopyBits:
assert len(outs) == 4
w3.append(nrand)
else:
assert len(outs) == 3
if rd < self.nCopyBits + nInBits:
w1.append(nrand)
else:
w2.append(nrand)
###############################
### B. Extend to next layer ###
###############################
outs = self.prover.get_outputs()
if project_line:
assert len(outs) == 1 + nInBits
assert lay == len(self.in0vv) - 1
# (1) commit to all values plus their sum
# (2) figure out c2val, r2val from above and outs[0] com
# (3) create prod com
# (4) send PoK of product for outs[0], c2val, prod
(outs_rvals, pr_rvals) = self.create_final_prod_pok(outs)
else:
# just need to do product PoK since we're sending tV(r1) and tV(r2)
assert len(outs) == 2
pr_rvals = self.create_prod_pok(outs)
### all claimed values are now in the transcript, so we can do vector proof
# put delvals in the transcript
self.fs.put([ self.com.compress(delval) for delval in self.com.vecpok_init() ])
# now we need the vector of J values. first, generate the per-row Js
j1val = self.fs.rand_scalar()
jvals = [ self.fs.rand_scalar() for _ in xrange(0, 2 * nInBits + self.nCopyBits) ]
# next, compute Jvec and put corresponding element in proof
Jvec = _compute_Jvec(j1val, jvals, w3 + w1 + w2, self.nCopyBits, nInBits, False, self.com_q_a)
self.fs.put(self.com.compress(self.com.vecpok_cont(Jvec)))
# next, need mlext evals to do PoK
(mlext_evals, mlx_z2) = self.eval_mlext(lay, z1, z2, w1, w2, w3, z1_2, muls)
xyzvals = [0, 0, 0, 0]
for (idx, elm) in enumerate(mlext_evals):
GateFunctionsPVC[idx](elm, jvals[-1], xyzvals, self.tV_a)
xyzvals = [ (mlx_z2 * v) % Defs.prime for v in xyzvals ]
# finally, run vecpok_finish to put zvals in transcript
chal = self.fs.rand_scalar()
self.fs.put(self.com.vecpok_finish(j1val, prev_rval, xyzvals, pr_rvals, chal))
if Defs.track_fArith:
self.com_q_a.did_rng(2*nInBits + self.nCopyBits + 1)
self.tV_a.did_mul(len(xyzvals))
self.com_q_a.did_rng()
project_next = (lay == len(self.in0vv) - 2) and (self.rdl is None)
if project_line:
tau = self.fs.rand_scalar()
muls = None
prev_rval = util.horner_eval(outs_rvals, tau)
z1 = [ (elm1 + (elm2 - elm1) * tau) % Defs.prime for (elm1, elm2) in izip(w1, w2) ]
z1_2 = None
if Defs.track_fArith:
self.nlay_a.did_sub(len(w1))
self.nlay_a.did_mul(len(w1))
self.nlay_a.did_add(len(w1))
self.sc_a.did_rng()
else:
muls = [self.fs.rand_scalar(), self.fs.rand_scalar()]
tau = None
prev_rval = (muls[0] * pr_rvals[0] + muls[1] * pr_rvals[1]) % Defs.prime
z1 = w1
z1_2 = w2
if Defs.track_fArith:
self.nlay_a.did_add()
self.nlay_a.did_mul(2)
self.sc_a.did_rng(2)
if lay < len(self.in0vv) - 1:
self.prover.next_layer(muls, project_next)
project_line = project_next
z2 = w3
self.prover = None # don't need this anymore
#############################
# 1.5. Run RDL if necessary #
#############################
if self.rdl is not None:
self.rdl_prover = RDLProver(self.rdl, self.nInBits)
self.rdl_prover.set_inputs(inputs)
self.rdl_prover.set_z(z1 + z2, z1_2 + z2, muls)
w1 = []
self.com.reset()
if Defs.track_fArith:
self.rdl_sc_a.did_rng(self.nInBits)
####################
# Sumcheck for RDL #
####################
for _ in xrange(0, self.nInBits):
# get outputs
outs = self.rdl_prover.compute_outputs()
# commit to these values
self.fs.put(self.com.compress(self.com.commitvec(outs)))
# compute new value and go to next round
nrand = self.fs.rand_scalar()
w1.append(nrand)
self.rdl_prover.next_round(nrand)
#######################
# Finish RDL sumcheck #
#######################
outs = self.rdl_prover.compute_outputs()
self.rdl_prover = None # don't need this any more; save the memory
# in this case, output is just claimed eval of V_0
assert len(outs) == 1
pr_rvals = self.create_pok(outs)
# all claimed values are now in the transcript, so we can do a vector proof
self.fs.put([ self.com.compress(delval) for delval in self.com.vecpok_init() ])
# now need vector of J values; generate per-row Js
j1val = self.fs.rand_scalar()
jvals = [ self.fs.rand_scalar() for _ in xrange(0, self.nInBits) ]
# compute Jvec and put corresponding element in proof
Jvec = _compute_Jvec(j1val, jvals, w1, 0, self.nInBits, True, self.com_q_a)
self.fs.put(self.com.compress(self.com.vecpok_cont(Jvec)))
# next, need mlext eval for PASS to do PoK
mlext_pass = self.eval_mlext_pass(z1, z1_2, z2, w1, muls)
xyzvals = [(mlext_pass * jvals[-1]) % Defs.prime]
# run vecpok_finish to put zvals in transcript
chal = self.fs.rand_scalar()
self.fs.put(self.com.vecpok_finish(j1val, prev_rval, xyzvals, pr_rvals, chal))
if Defs.track_fArith:
self.com_q_a.did_rng(self.nInBits + 1)
self.tP_a.did_mul()
self.com_q_a.did_rng()
# prepare variables for final check
muls = None
tau = None
prev_rval = pr_rvals[0]
z1 = w1
z1_2 = None
z2 = []
#############################
# 2. Proof of eq with input #
#############################
if invals_nd:
# do witness proof
r0val = reduce(lambda x, y: (x * y) % Defs.prime, z1[len(z1)-self.fs.ndb:], 1)
rvals = z1[:len(z1)-self.fs.ndb] + z2
self.create_witness_proof(rvals, r0val, prev_rval)
if Defs.track_fArith:
self.com_q_a.did_mul(self.fs.ndb)
else:
self.create_eq_proof(None, prev_rval)
########################
# 3. Return transcript #
########################
return self.fs.to_string()
class CircuitVerifierVecWitNIZK(CircuitVerifierNIZK, _NIZKWComMixin):
__metaclass__ = libfennel.parse_pws.FromPWS
fs = None
cat_label = "ver_nizk_vec_wit"
commit_type = PedVecCommit
def __init__(self, nCopies, nInputs, in0vv, in1vv, typvv, muxvv=None):
super(CircuitVerifierVecWitNIZK, self).__init__(nCopies, nInputs, in0vv, in1vv, typvv, muxvv)
if Defs.track_fArith:
self.rdl_sc_a = Defs.fArith().new_cat("%s_rdl_sc_a_%d" % (self.cat_label, hash(self)))
else:
self.rdl_sc_a = None
def set_rdl(self, rdl, nRDLInputs):
self.nInputs = nRDLInputs
self.nCktBits = self.nInBits
self.nInBits = util.clog2(nRDLInputs)
assert len(rdl) == self.nCopies
self.rdl = rdl
def run(self, pf, _=None):
assert Defs.prime == self.com.gops.q
self.fs = fs.FiatShamir.from_string(pf)
assert Defs.prime == self.fs.q
self.build_wcom(False)
####
# 0. Get i/o
####
# get inputs
self.muxbits = self.fs.take(True)
self.inputs = self.fs.take(True)
# get witness commitments
nd_cvals = None
if self.fs.ndb is not None:
nd_cvals = self.fs.take()
# now generate rvals
if self.fs.rvstart is not None and self.fs.rvend is not None:
r_values = [ self.fs.rand_scalar() for _ in xrange(self.fs.rvstart, self.fs.rvend + 1) ]
nCopies = 1
if self.rdl is None:
nCopies = self.nCopies
for idx in xrange(0, nCopies):
first = idx * (2 ** self.nInBits) + self.fs.rvstart
last = first + self.fs.rvend - self.fs.rvstart + 1
self.inputs[first:last] = r_values
# finally, get outputs
self.outputs = self.fs.take(True)
####
# 1. mlext of outs
####
nOutBits = util.clog2(len(self.in0vv[-1]))
assert util.clog2(len(self.outputs)) == nOutBits + self.nCopyBits
# z1 and z2 vals
z1 = [ self.fs.rand_scalar() for _ in xrange(0, nOutBits) ]
z1_2 = None
z2 = [ self.fs.rand_scalar() for _ in xrange(0, self.nCopyBits) ]
# instructions for P
muls = None
project_line = len(self.in0vv) == 1
expectNext = VerifierIOMLExt(z1 + z2, self.out_a).compute(self.outputs)
prev_cval = self.com.gops.pow_g(expectNext)
if Defs.track_fArith:
self.sc_a.did_rng(nOutBits + self.nCopyBits)
self.com_p_a.did_exp()
####
# 2. Simulate prover interactions
####
for lay in xrange(0, len(self.in0vv)):
nInBits = self.layInBits[lay]
nOutBits = self.layOutBits[lay]
w1 = []
w2 = []
w3 = []
alphas = []
if Defs.track_fArith:
self.sc_a.did_rng(2*nInBits + self.nCopyBits)
####
# A. Sumcheck
####
for rd in xrange(0, 2 * nInBits + self.nCopyBits):
# take next alpha value from transcript
alphas.append(self.com.decompress(self.fs.take()[0]))
# generate new rand value
nrand = self.fs.rand_scalar()
if rd < self.nCopyBits:
w3.append(nrand)
elif rd < self.nCopyBits + nInBits:
w1.append(nrand)
else:
w2.append(nrand)
####
# B. Extend to next layer
####
if project_line:
assert lay == len(self.in0vv) - 1
(cvals, c2val, c3val, is_ok) = self.check_final_prod_pok(nInBits)
if not is_ok:
raise RuntimeError("Verification of final product PoK failed")
pr_cvals = (cvals[0], c2val, c3val)
else:
(pr_cvals, is_ok) = self.check_prod_pok()
if not is_ok:
raise RuntimeError("Verification of product PoK failed in layer %d" % lay)
# get delvals from proof
delvals = [ self.com.decompress(delval) for delval in self.fs.take() ]
# generate vector of J values
j1val = self.fs.rand_scalar()
jvals = [ self.fs.rand_scalar() for _ in xrange(0, 2 * nInBits + self.nCopyBits) ]
# compute Jvec
Jvec = _compute_Jvec(j1val, jvals, w3 + w1 + w2, self.nCopyBits, nInBits, False, self.com_q_a)
Cval = self.com.decompress(self.fs.take()[0])
# mlext eval
(mlext_evals, mlx_z2) = self.eval_mlext(lay, z1, z2, w1, w2, w3, z1_2, muls)
xyzvals = [0, 0, 0, 0]
for (idx, elm) in enumerate(mlext_evals):
GateFunctionsPVC[idx](elm, jvals[-1], xyzvals, self.tV_a)
xyzvals = [ (mlx_z2 * v) % Defs.prime for v in xyzvals ]
# generate challenge and take zvals from transcript
chal = self.fs.rand_scalar()
zvals = self.fs.take()[0]
if Defs.track_fArith:
self.com_q_a.did_rng(2*nInBits + self.nCopyBits + 1)
self.tV_a.did_mul(len(xyzvals))
self.com_q_a.did_rng()
# check vector PoK
is_ok = self.com.vecpok_check_lay(alphas, delvals, zvals, Cval, prev_cval, pr_cvals, Jvec, j1val, xyzvals, chal)
if not is_ok:
raise ValueError("Sumcheck verification failed at layer %d" % lay)
project_next = (lay == len(self.in0vv) - 2) and (self.rdl is None)
if project_line:
tau = self.fs.rand_scalar()
muls = None
prev_cval = self.com.horner_eval(cvals, tau)
z1 = [ (elm1 + (elm2 - elm1) * tau) % Defs.prime for (elm1, elm2) in izip(w1, w2) ]
z1_2 = None
if Defs.track_fArith:
self.nlay_a.did_sub(len(w1))
self.nlay_a.did_mul(len(w1))
self.nlay_a.did_add(len(w1))
self.sc_a.did_rng()
else:
muls = [self.fs.rand_scalar(), self.fs.rand_scalar()]
tau = None
prev_cval = self.com.muls_eval(pr_cvals, muls)
z1 = w1
z1_2 = w2
if Defs.track_fArith:
self.sc_a.did_rng(2)
project_line = project_next
z2 = w3
####
# 2.5. do RDL if necessary
####
if self.rdl is not None:
w1 = []
alphas = []
if Defs.track_fArith:
self.rdl_sc_a.did_rng(self.nInBits)
####
# sumcheck for RDL
####
for _ in xrange(0, self.nInBits):
# take next val from transcript
alphas.append(self.com.decompress(self.fs.take()[0]))
# generate new rand value
w1.append(self.fs.rand_scalar())
# get PoK for V0 val
(pr_cvals, is_ok) = self.check_pok(1)
if not is_ok:
raise ValueError("Verification of V0 PoK failed in RDL")
# get delvals from proof
delvals = [ self.com.decompress(delval) for delval in self.fs.take() ]
# generate vector of J values
j1val = self.fs.rand_scalar()
jvals = [ self.fs.rand_scalar() for _ in xrange(0, self.nInBits) ]
# compute Jvec
Jvec = _compute_Jvec(j1val, jvals, w1, 0, self.nInBits, True, self.com_q_a)
Cval = self.com.decompress(self.fs.take()[0])
# mlext eval
mlext_pass = self.eval_mlext_pass(z1, z1_2, z2, w1, muls)
xyzvals = [(mlext_pass * jvals[-1]) % Defs.prime]
# generate challenge and take zvals from transcript
chal = self.fs.rand_scalar()
zvals = self.fs.take()[0]
if Defs.track_fArith:
self.com_q_a.did_rng(self.nInBits + 1)
self.tP_a.did_mul()
self.com_q_a.did_rng()
# check vector PoK
is_ok = self.com.vecpok_check_rdl(alphas, delvals, zvals, Cval, prev_cval, pr_cvals, Jvec, j1val, xyzvals, chal)
if not is_ok:
raise ValueError("Sumcheck verification failed for RDL")
# get variables right for finishing
muls = None
tau = None
prev_cval = pr_cvals[0]
z1 = w1
z1_2 = None
z2 = []
####
# 3. mlext of inputs
####
if self.rdl is None:
input_mlext_eval = VerifierIOMLExt(z1 + z2, self.in_a).compute(self.inputs)
else:
# we can reuse evaluation of compute_betas(w1) from eval_mlext_pass
input_mlext_eval = sum( 0 if inp == 0 else inp * mle % Defs.prime for (inp, mle) in izip(self.inputs, self.mlx_w1) ) % Defs.prime
if Defs.track_fArith:
nvals = sum( 1 if x != 0 else 0 for x in self.inputs )
self.in_a.did_add(nvals-1)
self.in_a.did_mul(nvals)
if nd_cvals is None:
is_ok = self.check_val_proof(prev_cval, input_mlext_eval)
else:
r0val = reduce(lambda x, y: (x * y) % Defs.prime, z1[len(z1)-self.fs.ndb:], 1)
rvals = z1[:len(z1)-self.fs.ndb] + z2
is_ok = self.check_witness(nd_cvals, rvals, r0val, prev_cval, input_mlext_eval)
if Defs.track_fArith:
self.com_q_a.did_mul(self.fs.ndb)
if not is_ok:
raise ValueError("Verification failed checking input mlext")
ProverClass = CircuitProverVecWitNIZK
VerifierClass = CircuitVerifierVecWitNIZK
|
the-stack_0_24607
|
# -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import errno
import io
import os
import fnmatch
from streamlit import env_util
from streamlit import util
from streamlit.string_util import is_binary_string
# Configuration and credentials are stored inside the ~/.streamlit folder
CONFIG_FOLDER_NAME = ".streamlit"
def get_encoded_file_data(data, encoding="auto"):
"""Coerce bytes to a BytesIO or a StringIO.
Parameters
----------
data : bytes
encoding : str
Returns
-------
BytesIO or StringIO
If the file's data is in a well-known textual format (or if the encoding
parameter is set), return a StringIO. Otherwise, return BytesIO.
"""
if encoding == "auto":
if is_binary_string(data):
encoding = None
else:
# If the file does not look like a pure binary file, assume
# it's utf-8. It would be great if we could guess it a little
# more smartly here, but it is what it is!
encoding = "utf-8"
if encoding:
return io.StringIO(data.decode(encoding))
return io.BytesIO(data)
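# Minimal usage sketch (added for illustration; not part of Streamlit's API).
# Text-like bytes are expected to come back as a StringIO, raw binary bytes as
# a BytesIO. The sample payloads below are arbitrary.
def _example_get_encoded_file_data():
    text_buf = get_encoded_file_data(b"hello world")      # expected: io.StringIO
    binary_buf = get_encoded_file_data(b"\x00\x01\x02")   # expected: io.BytesIO
    return isinstance(text_buf, io.StringIO), isinstance(binary_buf, io.BytesIO)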
@contextlib.contextmanager
def streamlit_read(path, binary=False):
"""Opens a context to read this file relative to the streamlit path.
For example:
with streamlit_read('foo.txt') as foo:
...
opens the file `%s/foo.txt`
path - the path to write to (within the streamlit directory)
binary - set to True for binary IO
""" % CONFIG_FOLDER_NAME
filename = get_streamlit_file_path(path)
if os.stat(filename).st_size == 0:
raise util.Error('Read zero byte file: "%s"' % filename)
mode = "r"
if binary:
mode += "b"
with open(os.path.join(CONFIG_FOLDER_NAME, path), mode) as handle:
yield handle
@contextlib.contextmanager
def streamlit_write(path, binary=False):
"""
    Opens a file for writing within the streamlit path, ensuring
    that the path exists. For example:
with streamlit_write('foo/bar.txt') as bar:
...
opens the file %s/foo/bar.txt for writing,
creating any necessary directories along the way.
path - the path to write to (within the streamlit directory)
binary - set to True for binary IO
""" % CONFIG_FOLDER_NAME
mode = "w"
if binary:
mode += "b"
path = get_streamlit_file_path(path)
try:
os.makedirs(os.path.dirname(path))
except Exception:
# Python 3 supports exist_ok=True which avoids the try/except,
# but Python 2 does not.
pass
try:
with open(path, mode) as handle:
yield handle
except OSError as e:
msg = ["Unable to write file: %s" % os.path.abspath(path)]
if e.errno == errno.EINVAL and env_util.IS_DARWIN:
msg.append(
"Python is limited to files below 2GB on OSX. "
"See https://bugs.python.org/issue24658"
)
raise util.Error("\n".join(msg))
def get_static_dir():
"""Get the folder where static HTML/JS/CSS files live."""
dirname = os.path.dirname(os.path.normpath(__file__))
return os.path.normpath(os.path.join(dirname, "static"))
def get_streamlit_file_path(*filepath):
"""Return the full path to a file in ~/.streamlit.
This doesn't guarantee that the file (or its directory) exists.
"""
# os.path.expanduser works on OSX, Linux and Windows
home = os.path.expanduser("~")
if home is None:
raise RuntimeError("No home directory.")
return os.path.join(home, CONFIG_FOLDER_NAME, *filepath)
def get_project_streamlit_file_path(*filepath):
"""Return the full path to a filepath in ${CWD}/.streamlit.
This doesn't guarantee that the file (or its directory) exists.
"""
return os.path.join(os.getcwd(), CONFIG_FOLDER_NAME, *filepath)
def file_is_in_folder_glob(filepath, folderpath_glob):
"""Test whether a file is in some folder with globbing support.
Parameters
----------
filepath : str
A file path.
folderpath_glob: str
A path to a folder that may include globbing.
"""
# Make the glob always end with "/*" so we match files inside subfolders of
# folderpath_glob.
if not folderpath_glob.endswith("*"):
if folderpath_glob.endswith("/"):
folderpath_glob += "*"
else:
folderpath_glob += "/*"
file_dir = os.path.dirname(filepath) + "/"
return fnmatch.fnmatch(file_dir, folderpath_glob)
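# Illustrative sketch (added; the paths are made up): two hand-checked cases
# for the globbing helper above. The trailing "/*" appended above is what lets
# a bare folder path match files in arbitrarily deep subfolders.
def _example_file_is_in_folder_glob():
    inside = file_is_in_folder_glob("/app/src/main.py", "/app")      # True
    outside = file_is_in_folder_glob("/elsewhere/main.py", "/app")   # False
    return inside, outside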
def file_in_pythonpath(filepath):
"""Test whether a filepath is in the same folder of a path specified in the PYTHONPATH env variable.
Parameters
----------
filepath : str
An absolute file path.
Returns
-------
boolean
True if contained in PYTHONPATH, False otherwise. False if PYTHONPATH is not defined or empty.
"""
pythonpath = os.environ.get("PYTHONPATH", "")
if len(pythonpath) == 0:
return False
absolute_paths = [os.path.abspath(path) for path in pythonpath.split(os.pathsep)]
return any(
file_is_in_folder_glob(os.path.normpath(filepath), path)
for path in absolute_paths
)
|
the-stack_0_24608
|
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import synapse.rest.admin
from synapse.rest.client.v1 import login, room
from tests import unittest
class IdentityTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
login.register_servlets,
]
def make_homeserver(self, reactor, clock):
config = self.default_config()
config.enable_3pid_lookup = False
self.hs = self.setup_test_homeserver(config=config)
return self.hs
def test_3pid_lookup_disabled(self):
self.hs.config.enable_3pid_lookup = False
self.register_user("kermit", "monkey")
tok = self.login("kermit", "monkey")
request, channel = self.make_request(
b"POST", "/createRoom", b"{}", access_token=tok
)
self.render(request)
self.assertEquals(channel.result["code"], b"200", channel.result)
room_id = channel.json_body["room_id"]
params = {
"id_server": "testis",
"medium": "email",
"address": "[email protected]",
}
request_data = json.dumps(params)
request_url = ("/rooms/%s/invite" % (room_id)).encode('ascii')
request, channel = self.make_request(
b"POST", request_url, request_data, access_token=tok
)
self.render(request)
self.assertEquals(channel.result["code"], b"403", channel.result)
|
the-stack_0_24609
|
import rpm # from python3-rpm
import os
import hashlib
import stat
import struct
from xml.sax import saxutils
from operator import itemgetter
from functools import cmp_to_key
_share_data_store = {}
def share_data(value):
""" Take a value and use the same value from the store,
if the value isn't in the store this one becomes the shared version. """
if isinstance( value, tuple):
return value
return _share_data_store.setdefault(value, value)
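# Illustrative note (added; not part of the original module): share_data
# interns values, so repeated calls with equal values resolve to the same
# stored object.
def _example_share_data():
    first = share_data("x86_64")
    second = share_data("x86_64")
    return first is second  # expected True: both refer to the stored value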
def hdrFromPackage(ts, package):
"""hand back the rpm header or raise an Error if the pkg is fubar"""
try:
fdno = os.open(package, os.O_RDONLY)
except OSError:
raise Exception('Unable to open file')
try:
hdr = ts.hdrFromFdno(fdno)
except rpm.error:
os.close(fdno)
raise Exception("RPM Error opening Package")
if type(hdr) != rpm.hdr:
os.close(fdno)
raise Exception("RPM Error opening Package (type)")
os.close(fdno)
return hdr
def to_xml(item, attrib=False):
item = item.rstrip()
if attrib:
        item = saxutils.escape(item, entities={'"': "&quot;"})
else:
item = saxutils.escape(item)
return item
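# Worked example (added for illustration): attribute text additionally escapes
# double quotes to &quot; so it can be embedded safely in XML attributes.
def _example_to_xml():
    body = to_xml('a < b & c')                 # 'a &lt; b &amp; c'
    attrib = to_xml('say "hi"', attrib=True)   # 'say &quot;hi&quot;'
    return body, attrib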
def re_primary_filename(filename):
""" Tests if a filename string, can be matched against just primary.
Note that this can produce false negatives (but not false
positives). Note that this is a superset of re_primary_dirname(). """
if re_primary_dirname(filename):
return True
if filename == '/usr/lib/sendmail':
return True
return False
def re_primary_dirname(dirname):
""" Tests if a dirname string, can be matched against just primary. Note
that this is a subset of re_primary_filename(). """
if 'bin/' in dirname:
return True
if dirname.startswith('/etc/'):
return True
return False
def decode_value( value ):
if isinstance( value, bytes ):
return value.decode()
if isinstance( value, list ):
return [ decode_value( i ) for i in value ]
return value
def flagToString(flags):
flags = flags & 0xf
if flags == 0:
return None
elif flags == 2:
return 'LT'
elif flags == 4:
return 'GT'
elif flags == 8:
return 'EQ'
elif flags == 10:
return 'LE'
elif flags == 12:
return 'GE'
return flags
def stringToVersion(verstring):
if verstring in [None, '']:
return (None, None, None)
i = verstring.find(':')
if i != -1:
try:
epoch = str(int(verstring[:i]))
except ValueError:
# look, garbage in the epoch field, how fun, kill it
epoch = '0' # this is our fallback, deal
else:
epoch = '0'
j = verstring.find('-')
if j != -1:
if verstring[i + 1:j] == '':
version = None
else:
version = verstring[i + 1:j]
release = verstring[j + 1:]
else:
if verstring[i + 1:] == '':
version = None
else:
version = verstring[i + 1:]
release = None
return (epoch, version, release)
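# Worked examples (added for illustration; the version strings are arbitrary):
# a missing epoch falls back to '0' and a missing release comes back as None,
# matching the parsing logic above.
def _example_stringToVersion():
    full = stringToVersion("1:2.3-4")    # ('1', '2.3', '4')
    no_epoch = stringToVersion("2.3-4")  # ('0', '2.3', '4')
    no_release = stringToVersion("2.3")  # ('0', '2.3', None)
    return full, no_epoch, no_release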
def compareEVR(xxx_todo_changeme, xxx_todo_changeme1):
# return 1: a is newer than b
# 0: a and b are the same version
# -1: b is newer than a
(e1, v1, r1) = xxx_todo_changeme
(e2, v2, r2) = xxx_todo_changeme1
if e1 is None:
e1 = '0'
else:
e1 = str(e1)
v1 = str(v1)
r1 = str(r1)
if e2 is None:
e2 = '0'
else:
e2 = str(e2)
v2 = str(v2)
r2 = str(r2)
rc = rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
return rc
def compareVerOnly(v1, v2):
"""compare version strings only using rpm vercmp"""
return compareEVR(('', v1, ''), ('', v2, ''))
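# Small sketch (added for illustration; requires the rpm bindings at runtime).
# The return convention follows compareEVR above: 1 / 0 / -1 for newer /
# equal / older. The version strings are arbitrary.
def _example_compareVerOnly():
    older = compareVerOnly("1.0", "2.0")  # expected -1 (second argument is newer)
    same = compareVerOnly("1.0", "1.0")   # expected 0
    return older, same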
class PackageObject(object):
"""Base Package Object - sets up the default storage dicts and the
most common returns"""
def __init__(self):
self.name = None
self.version = None
self.release = None
self.epoch = None
self.arch = None
# see: YumAvailablePackage.
class RpmBase(object):
"""return functions and storage for rpm-specific data"""
def __init__(self):
self.prco = {}
self.prco['obsoletes'] = [] # (name, flag, (e,v,r))
self.prco['conflicts'] = [] # (name, flag, (e,v,r))
self.prco['requires'] = [] # (name, flag, (e,v,r))
self.prco['provides'] = [] # (name, flag, (e,v,r))
self.files = {}
self.files['file'] = []
self.files['dir'] = []
self.files['ghost'] = []
self.licenses = []
self._hash = None
class YumAvailablePackage(PackageObject, RpmBase):
"""derived class for the packageobject and RpmBase packageobject yum
uses this for dealing with packages in a repository"""
def __init__(self):
PackageObject.__init__(self)
RpmBase.__init__(self)
self.state = None
self._loadedfiles = False
self._verify_local_pkg_cache = None
self._checksum = None
self.pkgtup = (self.name, self.arch, self.epoch, self.version, self.release)
# This is a tweak on YumAvailablePackage() and is a base class for packages
# which are actual rpms.
class YumHeaderPackage(YumAvailablePackage):
"""Package object built from an rpm header"""
def __init__(self, hdr):
"""hand in an rpm header, we'll assume it's installed and query from there"""
YumAvailablePackage.__init__(self)
self.hdr = hdr
self.name = share_data( self.hdr['name'].decode() )
this_a = self.hdr['arch'].decode()
if not this_a: # this should only happen on gpgkeys and other "odd" pkgs
this_a = 'noarch'
self.arch = share_data( this_a )
self.epoch = share_data( self.doepoch() )
self.version = share_data( self.hdr['version'].decode() )
self.release = share_data( self.hdr['release'].decode() )
self.ver = self.version
self.rel = self.release
self.pkgtup = (self.name, self.arch, self.epoch, self.version, self.release)
self._loaded_summary = None
self._loaded_description = None
self.pkgid = self.hdr[rpm.RPMTAG_SHA1HEADER].decode()
if not self.pkgid:
self.pkgid = "{0}.{1}".format(self.hdr['name'].decode(), self.hdr['buildtime'].decode())
self.packagesize = self.hdr['size']
self._mode_cache = {}
self._prcoPopulated = False
def doepoch(self):
tmpepoch = self.hdr['epoch']
if tmpepoch is None:
epoch = '0'
else:
epoch = str(tmpepoch)
return epoch
def __getattr__(self, thing):
# FIXME - if an error - return AttributeError, not KeyError
# ONLY FIX THIS AFTER THE API BREAK
if thing.startswith('__') and thing.endswith('__'):
# If these existed, then we wouldn't get here ...
# So these are missing.
raise AttributeError("{0} has no attribute {1}".format(self, thing))
try:
return decode_value( self.hdr[thing] )
except KeyError:
raise KeyError("{0} has no attribute {1}".format(self, thing))
except ValueError as e:
if e.args[0] == 'unknown header tag':
raise AttributeError("{0} has no attribute {1}".format(self, thing))
else:
raise e
class YumLocalPackage(YumHeaderPackage):
"""Class to handle an arbitrary package from a file path
this inherits most things from YumInstalledPackage because
installed packages and an arbitrary package on disk act very
much alike. init takes a ts instance and a filename/path
to the package."""
def __init__(self, filename, relpath):
print( 'Loading "{0}"...'.format(filename))
self.pkgtype = 'local'
self.localpath = filename
try:
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS)
hdr = hdrFromPackage(ts, self.localpath)
except Exception as e:
raise Exception('Could not open local rpm file: {0}: {1}'.format(self.localpath, e))
YumHeaderPackage.__init__(self, hdr)
self.id = self.pkgid
self._stat = os.stat(self.localpath)
self.filetime = str(self._stat[-2])
self.packagesize = str(self._stat[6])
self.arch = self.isSrpm()
self.pkgtup = (self.name, self.arch, self.epoch, self.ver, self.rel)
self._hdrstart = None
self._hdrend = None
self.checksum_type = 'sha256'
self.relpath = relpath
def isSrpm(self):
if self.sourcepackage == 1 or not self.sourcerpm:
return 'src'
else:
return self.arch
@property
def checksum( self ):
if self._checksum is not None:
return self._checksum
sha256 = hashlib.sha256()
wrk = open( self.localpath, 'rb' )
buff = wrk.read( 4096 )
while buff:
sha256.update( buff )
buff = wrk.read( 4096 )
self._checksum = sha256.hexdigest()
return self._checksum
@property
def changelog(self):
if len(self.hdr['changelogname']) > 0:
return list( zip(decode_value( self.hdr['changelogtime'] ), decode_value( self.hdr['changelogname'] ), decode_value( self.hdr['changelogtext'] ) ) )
return []
def _get_header_byte_range(self):
"""takes an rpm file or fileobject and returns byteranges for location of the header"""
if self._hdrstart and self._hdrend:
return (self._hdrstart, self._hdrend)
fo = open(self.localpath, 'rb')
# read in past lead and first 8 bytes of sig header
fo.seek(104)
# 104 bytes in
binindex = fo.read(4)
# 108 bytes in
(sigindex, ) = struct.unpack('>I', binindex)
bindata = fo.read(4)
# 112 bytes in
(sigdata, ) = struct.unpack('>I', bindata)
# each index is 4 32bit segments - so each is 16 bytes
sigindexsize = sigindex * 16
sigsize = sigdata + sigindexsize
# we have to round off to the next 8 byte boundary
disttoboundary = (sigsize % 8)
if disttoboundary != 0:
disttoboundary = 8 - disttoboundary
# 112 bytes - 96 == lead, 8 = magic and reserved, 8 == sig header data
hdrstart = 112 + sigsize + disttoboundary
fo.seek(hdrstart) # go to the start of the header
fo.seek(8, 1) # read past the magic number and reserved bytes
binindex = fo.read(4)
(hdrindex, ) = struct.unpack('>I', binindex)
bindata = fo.read(4)
(hdrdata, ) = struct.unpack('>I', bindata)
# each index is 4 32bit segments - so each is 16 bytes
hdrindexsize = hdrindex * 16
# add 16 to the hdrsize to account for the 16 bytes of misc data b/t the
# end of the sig and the header.
hdrsize = hdrdata + hdrindexsize + 16
# header end is hdrstart + hdrsize
hdrend = hdrstart + hdrsize
fo.close()
self._hdrstart = hdrstart
self._hdrend = hdrend
return (hdrstart, hdrend)
hdrend = property(fget=lambda self: self._get_header_byte_range()[1])
hdrstart = property(fget=lambda self: self._get_header_byte_range()[0])
def _populatePrco(self):
"Populate the package object with the needed PRCO interface."
tag2prco = { "OBSOLETE": share_data("obsoletes"),
"CONFLICT": share_data("conflicts"),
"REQUIRE": share_data("requires"),
"PROVIDE": share_data("provides") }
for tag in tag2prco:
name = decode_value( self.hdr[getattr(rpm, 'RPMTAG_%sNAME' % tag)] )
name = list(map(share_data, name))
if not name: # empty or none or whatever, doesn't matter
continue
lst = decode_value( self.hdr[getattr(rpm, 'RPMTAG_%sFLAGS' % tag)] )
flag = list(map(flagToString, lst))
flag = list(map(share_data, flag))
lst = decode_value( self.hdr[getattr(rpm, 'RPMTAG_%sVERSION' % tag)] )
vers = list(map(stringToVersion, lst))
vers = [(share_data(x[0]), share_data(x[1]), share_data(x[2])) for x in vers]
prcotype = tag2prco[tag]
self.prco[prcotype] = list(map(share_data, list(zip(name, flag, vers))))
def inPrcoRange(self, prcotype, reqtuple):
"""returns true if the package has a the prco that satisfies
the reqtuple range, assume false.
Takes: prcotype, requested prco tuple"""
return bool(self.matchingPrcos(prcotype, reqtuple))
def checkPrco(self, prcotype, prcotuple):
"""returns 1 or 0 if the pkg contains the requested tuple/tuple range"""
# get rid of simple cases - nothing
if prcotype not in self.prco:
return 0
# First try and exact match, then search
# Make it faster, if it's "big".
if len(self.prco[prcotype]) <= 8:
if prcotuple in self.prco[prcotype]:
return 1
else:
if not hasattr(self, '_prco_lookup'):
self._prco_lookup = {'obsoletes': None, 'conflicts': None,
'requires': None, 'provides': None}
if self._prco_lookup[prcotype] is None:
self._prco_lookup[prcotype] = set(self.prco[prcotype])
if prcotuple in self._prco_lookup[prcotype]:
return 1
# make us look it up and compare
(reqn, reqf, (reqe, reqv, reqr)) = prcotuple
if reqf is not None:
return self.inPrcoRange(prcotype, prcotuple)
else:
for (n, f, (e, v, r)) in self.returnPrco(prcotype):
if reqn.encode() == n.encode():
return 1
return 0
def returnPrco(self, prcotype, printable=False):
"""return list of provides, requires, conflicts or obsoletes"""
if not self._prcoPopulated:
self._populatePrco()
self._prcoPopulated = True
return self.prco.get(prcotype, [])
def returnPrcoNames(self, prcotype):
if not hasattr(self, '_cache_prco_names_' + prcotype):
data = [n for (n, f, v) in self.returnPrco(prcotype)]
setattr(self, '_cache_prco_names_' + prcotype, data)
return getattr(self, '_cache_prco_names_' + prcotype)
requires = property(fget=lambda self: self.returnPrco('requires'))
provides = property(fget=lambda self: self.returnPrco('provides'))
obsoletes = property(fget=lambda self: self.returnPrco('obsoletes'))
conflicts = property(fget=lambda self: self.returnPrco('conflicts'))
provides_names = property(fget=lambda self: self.returnPrcoNames('provides'))
requires_names = property(fget=lambda self: self.returnPrcoNames('requires'))
conflicts_names = property(fget=lambda self: self.returnPrcoNames('conflicts'))
obsoletes_names = property(fget=lambda self: self.returnPrcoNames('obsoletes'))
def _loadFiles(self):
files = decode_value( self.hdr['filenames'] )
fileflags = decode_value( self.hdr['fileflags'] )
filemodes = decode_value( self.hdr['filemodes'] )
filetuple = list(zip(files, filemodes, fileflags))
if not self._loadedfiles:
for (fn, mode, flag) in filetuple:
# garbage checks
if mode is None or mode == '':
if 'file' not in self.files:
self.files['file'] = []
self.files['file'].append(fn)
continue
if mode not in self._mode_cache:
self._mode_cache[mode] = stat.S_ISDIR(mode)
fkey = 'file'
if self._mode_cache[mode]:
fkey = 'dir'
elif flag is not None and (flag & 64):
fkey = 'ghost'
self.files.setdefault(fkey, []).append(fn)
self._loadedfiles = True
def returnFileEntries(self, ftype='file', primary_only=False):
"""return list of files based on type, you can pass primary_only=True
to limit to those files in the primary repodata"""
self._loadFiles()
if self.files:
if ftype in self.files:
if primary_only:
if ftype == 'dir':
match = re_primary_dirname
else:
match = re_primary_filename
return [fn for fn in self.files[ftype] if match(fn)]
return self.files[ftype]
return []
filelist = property(fget=lambda self: self.returnFileEntries(ftype='file'))
dirlist = property(fget=lambda self: self.returnFileEntries(ftype='dir'))
ghostlist = property(fget=lambda self: self.returnFileEntries(ftype='ghost'))
def _is_pre_req(self, flag):
"""check the flags for a requirement, return 1 or 0 whether or not requires
is a pre-requires or a not"""
# FIXME this should probably be put in rpmUtils.miscutils since
# - that's what it is
if flag is not None:
# Note: RPMSENSE_PREREQ == 0 since rpm-4.4'ish
if flag & (rpm.RPMSENSE_PREREQ |
rpm.RPMSENSE_SCRIPT_PRE |
rpm.RPMSENSE_SCRIPT_POST):
return 1
return 0
def _requires_with_pre(self):
"""returns requires with pre-require bit"""
name = decode_value( self.hdr[rpm.RPMTAG_REQUIRENAME] )
lst = decode_value( self.hdr[rpm.RPMTAG_REQUIREFLAGS] )
flag = list(map(flagToString, lst))
pre = list(map(self._is_pre_req, lst))
lst = decode_value( self.hdr[rpm.RPMTAG_REQUIREVERSION] )
vers = list(map(stringToVersion, lst))
if name is not None:
lst = list(zip(name, flag, vers, pre))
mylist = list(set(lst))
return mylist
def _dump_base_items(self):
packager = url = ''
if self.packager:
packager = to_xml(self.packager)
if self.url:
url = to_xml(self.url)
msg = """
<name>{0}</name>
<arch>{1}</arch>
<version epoch="{2}" ver="{3}" rel="{4}"/>
<checksum type="{5}" pkgid="YES">{6}</checksum>
<summary>{7}</summary>
<description>{8}</description>
<packager>{9}</packager>
<url>{10}</url>
<time file="{11}" build="{12}"/>
<size package="{13}" installed="{14}" archive="{15}"/>\n""".format(self.name, self.arch, self.epoch, self.ver, self.rel, 'sha256', self.checksum, to_xml(self.summary), to_xml(self.description), packager, url, self.filetime, self.buildtime, self.packagesize, self.size, self.archivesize)
msg += """<location href="{0}"/>\n""".format( to_xml(self.relpath, attrib=True) )
return msg
def _dump_format_items(self):
msg = " <format>\n"
if self.license:
msg += """ <rpm:license>{0}</rpm:license>\n""".format( to_xml(self.license) )
else:
msg += """ <rpm:license/>\n"""
if self.vendor:
msg += """ <rpm:vendor>{0}</rpm:vendor>\n""".format( to_xml(self.vendor) )
else:
msg += """ <rpm:vendor/>\n"""
if self.group:
msg += """ <rpm:group>{0}</rpm:group>\n""".format( to_xml(self.group) )
else:
msg += """ <rpm:group/>\n"""
if self.buildhost:
msg += """ <rpm:buildhost>{0}</rpm:buildhost>\n""".format( to_xml(self.buildhost) )
else:
msg += """ <rpm:buildhost/>\n"""
if self.sourcerpm:
msg += """ <rpm:sourcerpm>{0}</rpm:sourcerpm>\n""".format( to_xml(self.sourcerpm) )
        else:  # b/c yum 2.4.3 and OLD y-m-p will freak out if it is not there.
msg += """ <rpm:sourcerpm/>\n"""
msg += """ <rpm:header-range start="{0}" end="{1}"/>""".format( self.hdrstart, self.hdrend)
msg += self._dump_pco('provides')
msg += self._dump_requires()
msg += self._dump_pco('conflicts')
msg += self._dump_pco('obsoletes')
msg += self._dump_files(True)
if msg[-1] != '\n':
msg += """\n"""
msg += """ </format>"""
return msg
def _dump_pco(self, pcotype):
msg = ""
mylist = getattr(self, pcotype)
if mylist:
msg = "\n <rpm:{0}>\n".format( pcotype )
for (name, flags, (e, v, r)) in mylist:
pcostring = ''' <rpm:entry name="{0}"'''.format( to_xml(name, attrib=True) )
if flags:
pcostring += ''' flags="{0}"'''.format( to_xml(flags, attrib=True) )
if e:
pcostring += ''' epoch="{0}"'''.format( to_xml(e, attrib=True) )
if v:
pcostring += ''' ver="{0}"'''.format( to_xml(v, attrib=True) )
if r:
pcostring += ''' rel="{0}"'''.format( to_xml(r, attrib=True) )
pcostring += "/>\n"
msg += pcostring
if mylist:
msg += " </rpm:{0}>".format( pcotype )
return msg
def _dump_requires(self):
"""returns deps in XML format"""
mylist = self._requires_with_pre()
msg = ""
if mylist:
msg = "\n <rpm:requires>\n"
if getattr(self, '_collapse_libc_requires', False):
libc_requires = [x for x in mylist if x[0].startswith('libc.so.6')]
if libc_requires:
                rest = sorted(libc_requires, key=lambda x: cmp_to_key(compareVerOnly)(x[0]))  # rpm-style ordering of x[0]
best = rest.pop()
if len(rest) > 0 and best[0].startswith('libc.so.6()'): # rpmvercmp will sort this one as 'highest' so we need to remove it from the list
best = rest.pop()
newlist = []
for i in mylist:
if i[0].startswith('libc.so.6') and i != best:
continue
newlist.append(i)
mylist = newlist
for (name, flags, (e, v, r), pre) in mylist:
if name.startswith('rpmlib('):
continue
# this drops out requires that the pkg provides for itself.
if name in self.provides_names or (name.startswith('/') and (name in self.filelist or name in self.dirlist or name in self.ghostlist)):
if not flags:
continue
else:
if self.checkPrco('provides', (name, flags, (e, v, r))):
continue
prcostring = ''' <rpm:entry name="{0}"'''.format( to_xml(name, attrib=True) )
if flags:
prcostring += ''' flags="{0}"'''.format( to_xml(flags, attrib=True) )
if e:
prcostring += ''' epoch="{0}"'''.format( to_xml(e, attrib=True) )
if v:
prcostring += ''' ver="{0}"'''.format( to_xml(v, attrib=True) )
if r:
prcostring += ''' rel="{0}"'''.format( to_xml(r, attrib=True) )
if pre:
prcostring += ''' pre="{0}"'''.format( pre )
prcostring += "/>\n"
msg += prcostring
if mylist:
msg += " </rpm:requires>"
return msg
def _dump_files(self, primary=False):
msg = "\n"
if not primary:
files = self.returnFileEntries('file')
dirs = self.returnFileEntries('dir')
ghosts = self.returnFileEntries('ghost')
else:
files = self.returnFileEntries('file', primary_only=True)
dirs = self.returnFileEntries('dir', primary_only=True)
ghosts = self.returnFileEntries('ghost', primary_only=True)
for fn in files:
msg += """ <file>{0}</file>\n""".format( to_xml(fn) )
for fn in dirs:
msg += """ <file type="dir">{0}</file>\n""".format( to_xml(fn) )
for fn in ghosts:
msg += """ <file type="ghost">{0}</file>\n""".format( to_xml(fn) )
return msg
def _dump_changelog(self, clog_limit):
if not self.changelog:
return ""
msg = "\n"
# We need to output them "backwards", so the oldest is first
if not clog_limit:
clogs = self.changelog
else:
clogs = self.changelog[:clog_limit]
last_ts = 0
hack_ts = 0
for (ts, author, content) in reversed(clogs):
if ts != last_ts:
hack_ts = 0
else:
hack_ts += 1
last_ts = ts
ts += hack_ts
msg += """<changelog author="{0}" date="{1}">{2}</changelog>\n""".format( to_xml(author, attrib=True), to_xml(str(ts)), to_xml(content))
return msg
def xml_dump_primary_metadata(self):
msg = """\n<package type="rpm">"""
msg += self._dump_base_items()
msg += self._dump_format_items()
msg += """\n</package>"""
return msg
def xml_dump_filelists_metadata(self):
msg = """\n<package pkgid="{0}" name="{1}" arch="{2}">
<version epoch="{3}" ver="{4}" rel="{5}"/>\n""".format(self.checksum, self.name, self.arch, self.epoch, self.ver, self.rel)
msg += self._dump_files()
msg += "</package>\n"
return msg
def xml_dump_other_metadata(self, clog_limit=0):
msg = """\n<package pkgid="{0}" name="{1}" arch="{2}">
<version epoch="{3}" ver="{4}" rel="{5}"/>\n""".format( self.checksum, self.name, self.arch, self.epoch, self.ver, self.rel)
msg += "{0}\n</package>\n".format( self._dump_changelog(clog_limit) )
return msg
|
the-stack_0_24610
|
""" Helper functions """
import numpy as np
import rospy
from scipy.spatial import KDTree
from geometry_msgs.msg import Point, Pose, PoseStamped
from tf.transformations import euler_from_quaternion
from styx_msgs.msg import Waypoint, Lane
__all__ = ['Logger', 'WaypointTree', 'set_linear_speed_of',
'get_position_from', 'get_orientation_from', 'get_yaw_from']
LOG_THRESHOLD = 0.5 # [s], passed time to print next log messages
class Logger(object):
def __init__(self):
self._last_time = rospy.get_time()
self._active = None
@property
def active(self):
if self._active is None:
current_time = rospy.get_time()
if current_time - self._last_time > LOG_THRESHOLD:
self._last_time = current_time
self._active = True
else:
self._active = False
return self._active
def reset(self):
self._active = None
def info(self, message, *args):
if self.active:
rospy.loginfo(message, *args)
@staticmethod
def warn(message, *args):
print("")
rospy.logwarn(message, *args)
print("")
class WaypointTree(object):
def __init__(self, waypoints):
"""
Args:
waypoints (Lane)
"""
self.header = waypoints.header
self.waypoints = waypoints.waypoints
self.num_waypoints = len(self.waypoints)
self.xy = [get_position_from(wp)[:-1] for wp in self.waypoints]
self.tree = KDTree(self.xy)
def __getitem__(self, item):
""" Slice the refrence waypoints of the tree.
Indices exceeding the length of the waypoint list are wrapped to the
beginning of the list.
Args:
item (slice|int):
the slice's step attribute is not accounted
"""
if isinstance(item, slice):
start = item.start % self.num_waypoints
stop = item.stop % self.num_waypoints
if stop < start:
return self.waypoints[start:] + self.waypoints[:stop]
else:
return self.waypoints[start:stop]
else:
return self.waypoints[item]
def get_closest_idx_from(self, message):
""" Get reference waypoint index closest to provided message's position.
Args:
message (Pose|PoseStamped|Waypoint|list):
the message to get the closest waypoint index for. If a list is
passed, it must contain the position's coordinates.
Returns:
the index to the closest reference waypoint in the tree.
"""
if isinstance(message, list):
xy_position = message[:2]
else:
xy_position = get_position_from(message)[:-1]
return self.tree.query(xy_position, 1)[1]
def get_cumulated_dists_from(self, start, stop):
""" Get a vector of cumulated distances from a start to a stop waypoint.
Each entry in the vector is the distance from the respective waypoint to
the stop waypoint, computed by summing up the piecewise distances
of all waypoints in between. For instance, the first entry is the full
distance from the start waypoint to the stop waypoint.
Args:
start (int):
waypoint index of the start waypoint in the reference tree
stop (int):
waypoint index of the stop waypoint in the reference tree
Returns:
array of cumulated distances from the start to the stop waypoint
"""
p0 = [get_position_from(wp) for wp in self[start:stop]]
p1 = [get_position_from(wp) for wp in self[start + 1: stop + 1]]
piecewise_dists = np.linalg.norm(np.subtract(p1, p0), axis=1)
return np.cumsum(piecewise_dists[::-1])[::-1]
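# Illustrative sketch (added; the distances are made up): with three piecewise
# gaps of 1.0 between consecutive waypoints, get_cumulated_dists_from would
# yield [3.0, 2.0, 1.0] -- the first entry is the full start-to-stop distance,
# the last is the final gap. Note also that slicing a WaypointTree wraps past
# the end of the list, e.g. tree[n - 1:n + 1] returns the last and the first
# reference waypoint.
def _example_cumulated_dists(piecewise_dists=(1.0, 1.0, 1.0)):
    # same reversed-cumsum trick used in WaypointTree.get_cumulated_dists_from
    return np.cumsum(np.asarray(piecewise_dists)[::-1])[::-1]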
def set_linear_speed_of(waypoint, speed):
""" Set the linear speed component of the given waypoint's twist.
Args:
waypoint (Waypoint):
the waypoint to set the speed for
speed:
the linear speed to set
Returns:
the updated waypoint copy
"""
wp = Waypoint()
wp.pose = waypoint.pose
wp.twist.twist.linear.x = min(speed, waypoint.twist.twist.linear.x)
return wp
def get_position_from(message):
""" Extract and convert the `message's` position into a list.
Args:
message (Pose|PoseStamped|Waypoint):
the message to get the position from
Returns:
position list [x, y, z]
"""
if isinstance(message, (Pose, PoseStamped)):
return [message.pose.position.x,
message.pose.position.y,
message.pose.position.z]
elif isinstance(message, Waypoint):
return [message.pose.pose.position.x,
message.pose.pose.position.y,
message.pose.pose.position.z]
def get_orientation_from(pose):
""" Extract and convert the pose's orientation into a list.
Args:
pose (PoseStamped): the pose to extract the position from
Returns:
the orientation quaternion list [x, y, z, w]
"""
return [pose.pose.orientation.x,
pose.pose.orientation.y,
pose.pose.orientation.z,
pose.pose.orientation.w]
def get_yaw_from(pose):
""" Extract the yaw from the given pose
Args:
pose (PoseStamped|Pose): the pose to convert and extract the yaw from
Returns:
the pose's yaw
"""
return euler_from_quaternion(get_orientation_from(pose))[2]
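# Minimal sketch (added for illustration): an identity quaternion has zero
# roll, pitch and yaw, so get_yaw_from returns 0.0. Only the pose construction
# here is assumed; the helpers are the ones defined above.
def _example_identity_yaw():
    pose = PoseStamped()
    pose.pose.orientation.w = 1.0  # identity rotation
    return get_yaw_from(pose)      # expected 0.0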
|
the-stack_0_24615
|
def prepare_posts(request, *posts):
for post in posts:
        post.upvoted = False
post.downvoted = False
if request.user.is_authenticated:
if any(p.user == request.user for p in post.upvote_set.all()):
post.upvoted = True
if any(p.user == request.user for p in post.downvote_set.all()):
post.downvoted = True
return posts
|
the-stack_0_24616
|
import datetime
import json
from django.core.urlresolvers import resolve
from django.test import TestCase
from rest_framework.serializers import ValidationError
from rest_framework.test import APITestCase
from .models import Appointment
from .serializers import DATE_ERROR_MESSAGE, TIME_ERROR_MESSAGE
from .views import main_view
class AppointmentModelTestCase(TestCase):
"""docstring"""
appt_dict = {
'date': datetime.date.today().isoformat(),
'time_start': "13:30",
'non_recurring': "Anon",
'reason': "I broke a leg"
}
def test_appointment_saved_with_time_end(self):
existing = Appointment.objects.create(**self.appt_dict)
self.assertEqual(existing.time_end, datetime.time(13, 59))
class MainViewTestCase(TestCase):
"""Smoke tests"""
def test_index_resolve_correct_view(self):
view = resolve('/')
self.assertEqual(view.func, main_view)
def test_index_renders_correct_html(self):
resp = self.client.get('/')
self.assertIn(b'Dr. Dre\'s', resp.content)
class AppointmentAPITestCase(APITestCase):
"""docstring for AppointmentAPITestCase"""
endpoint = '/api/v1/appointment/'
appt_dict = {
'date': datetime.date.today().isoformat(),
'time_start': "13:30",
'non_recurring': "Anon",
'reason': "I broke a leg"
}
def test_anonymous_user_can_create_appointment(self):
resp = self.client.post(self.endpoint, self.appt_dict)
self.assertEqual(resp.status_code, 201)
appt = Appointment.objects.first()
self.assertEqual(appt.reason, self.appt_dict['reason'])
self.assertEqual(appt.non_recurring, self.appt_dict['non_recurring'])
self.assertIsNone(appt.visitor)
def test_appointments_cant_be_in_past(self):
appt_dict = dict(self.appt_dict)
yesterday = datetime.date.today() - datetime.timedelta(days=1)
appt_dict['date'] = yesterday.isoformat()
resp = self.client.post(self.endpoint, appt_dict)
self.assertJSONEqual(resp.content.decode('utf-8'),
{"date":[DATE_ERROR_MESSAGE]})
self.assertFalse(Appointment.objects.exists())
def test_appointments_cant_be_in_wrong_hours(self):
appt_dict = dict(self.appt_dict)
appt_dict['time_start'] = "07:00"
resp = self.client.post(self.endpoint, appt_dict)
self.assertJSONEqual(resp.content.decode('utf-8'),
{"time_start":[TIME_ERROR_MESSAGE]})
self.assertFalse(Appointment.objects.exists())
def test_appointments_cant_be_in_same_hours(self):
Appointment.objects.create(**self.appt_dict)
resp = self.client.post(self.endpoint, self.appt_dict)
self.assertEqual(Appointment.objects.count(), 1)
self.assertContains(resp, 'non_field_errors', status_code=400)
def test_appointments_cant_be_closer_than_30_mins(self):
self.appt_dict['time_end'] = '15:30'
Appointment.objects.create(**self.appt_dict)
before = dict(self.appt_dict)
before['time_start'] = "13:20"
after = dict(self.appt_dict)
after['time_start'] = "13:59"
another_after = dict(self.appt_dict)
another_after['time_start'] = "15:29"
resp = self.client.post(self.endpoint, before)
resp = self.client.post(self.endpoint, after)
resp = self.client.post(self.endpoint, another_after)
self.assertEqual(Appointment.objects.count(), 1)
def test_user_cant_edit_appointment(self):
existing = Appointment.objects.create(**self.appt_dict)
edit = {'reason': "Malicious edit"}
resp = self.client.patch(self.endpoint + str(existing.id), edit)
self.assertEqual(Appointment.objects.first().reason,
existing.reason)
# what's wrong with status?
# self.assertEqual(resp.status_code, 405)
def test_user_cant_delete_appointment(self):
existing = Appointment.objects.create(**self.appt_dict)
before = Appointment.objects.count()
resp = self.client.delete(self.endpoint + str(existing.id))
after = Appointment.objects.count()
self.assertTrue(Appointment.objects.exists())
self.assertEqual(before, after)
# what's wrong with status?
# self.assertEqual(resp.status_code, 405)
|
the-stack_0_24617
|
import numpy as np
from functools import partial
import os
import copy
from multiprocessing import Pool
from scipy.optimize import least_squares
from pyapprox.univariate_polynomials.orthonormal_polynomials import \
evaluate_orthonormal_polynomial_1d
from pyapprox.univariate_polynomials.orthonormal_recursions import \
jacobi_recurrence
def evaluate_core(sample, core_params, core_params_map, ranks,
recursion_coeffs):
"""
Evaluate a core of the function train at a sample
Parameters
----------
sample : float
The sample at which to evaluate the function train
univariate_params : [ np.ndarray (num_coeffs_i) ] (ranks[0]*ranks[2])
The coeffs of each univariate function. May be of different size
i.e. num_coeffs_i can be different for i=0,...,ranks[0]*ranks[1]
ranks : np.ndarray (2)
The ranks of the core [r_{k-1},r_k]
recursion_coeffs : np.ndarray (max_degree+1)
The recursion coefficients used to evaluate the univariate functions
which are assumed to polynomials defined by the recursion coefficients
Returns
-------
core_values : np.ndarray (ranks[0],ranks[1])
The values of each univariate function evaluated at the sample
Notes
-----
    If we assume each univariate function for variable ii is fixed
    we only need to compute the basis matrix once. This is also true
    if we compute the basis matrix for the max degree of the univariate
    functions of the ii variable. If the degree of a given univariate
    function is smaller we can just use a subset of the matrix. This comes
    at the cost of more storage but fewer computations than if the
    vandermonde was computed for each different degree. We build the
    max_degree vandermonde here.
"""
try:
from pyapprox.python.function_train import evaluate_core_pyx
return evaluate_core_pyx(sample, core_params, core_params_map, ranks,
recursion_coeffs)
# from pyapprox.weave.function_train import c_evalute_core
# return c_evaluate_core(sample, core_params, core_params_map, ranks,
# recursion_coeffs)
except:
pass
assert ranks.shape[0] == 2
assert np.isscalar(sample)
core_values = np.empty((ranks[0], ranks[1]), dtype=float)
max_degree = recursion_coeffs.shape[0]-1
basis_matrix = evaluate_orthonormal_polynomial_1d(
np.asarray([sample]), max_degree, recursion_coeffs)
for kk in range(ranks[1]):
for jj in range(ranks[0]):
params = get_params_of_univariate_function(
jj, kk, ranks, core_params, core_params_map)
degree = params.shape[0]-1
assert degree < recursion_coeffs.shape[0]
core_values[jj, kk] = np.dot(basis_matrix[:, :degree+1], params)
return core_values
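# Note (added for clarity): for a core with ranks = np.array([r0, r1]) the
# returned matrix satisfies core_values[jj, kk] = value at the sample of
# univariate function number kk*r0 + jj (column major ordering).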
def core_grad_left(sample, core_params, core_params_map, ranks,
recursion_coeffs, left_vals):
"""
    Evaluate the value and the intermediate derivatives, with respect to the
    univariate function basis parameters, of a core of the function train at
    a sample.
Parameters
----------
sample : float
The sample at which to evaluate the function train
    core_params : np.ndarray
        The flattened parameters of the ranks[0]*ranks[1] univariate
        functions of the core. The parameter vectors may have different
        sizes, i.e. num_params_i can differ for i=0,...,ranks[0]*ranks[1]-1
    core_params_map : np.ndarray (ranks[0]*ranks[1])
        The starting index of the parameters of each univariate function
        within core_params
ranks : np.ndarray (2)
The ranks of the core [r_{k-1},r_k]
recursion_coeffs : np.ndarray (max_degree+1)
The recursion coefficients used to evaluate the univariate functions
which are assumed to polynomials defined by the recursion coefficients
left_vals : np.ndarray (ranks[0])
The values of the product of all previous cores F_1F_2...F_{k-1}.
        If None no derivatives will be computed. Setting None is useful if
        one only wants function values or when computing derivatives of the
        first core.
Returns
-------
    core_values : np.ndarray (ranks[0]*ranks[1])
        The values of each univariate function evaluated at the sample,
        stored in column major ordering
    derivs : [ [] (num_params_i) ] (ranks[0]*ranks[1])
        The derivatives of each univariate function with respect to its
        basis parameters after the left pass of the algorithm.
        Derivatives of the univariate functions are in column major ordering
Notes
-----
    If we assume each univariate function of variable ii has a fixed basis we
    only need to compute the basis matrix once. This is also true if we
    compute the basis matrix for the maximum degree of the univariate
    functions of the ii-th variable. If the degree of a given univariate
    function is smaller we can just use a subset of the matrix. This comes at
    the cost of more storage but fewer computations than if the Vandermonde
    matrix was computed for each different degree. We build the max_degree
    Vandermonde matrix here.
"""
assert ranks.shape[0] == 2
assert np.isscalar(sample)
if left_vals is not None:
assert left_vals.ndim == 2 and left_vals.shape[0] == 1
core_values = np.empty((ranks[0]*ranks[1]), dtype=float)
core_derivs = np.empty_like(core_params)
max_degree = recursion_coeffs.shape[0]-1
basis_matrix = evaluate_orthonormal_polynomial_1d(
np.asarray([sample]), max_degree, recursion_coeffs)
cnt = 0
for kk in range(ranks[1]):
for jj in range(ranks[0]):
params = get_params_of_univariate_function(
jj, kk, ranks, core_params, core_params_map)
degree = params.shape[0]-1
assert degree < recursion_coeffs.shape[0]
univariate_function_num = get_univariate_function_number(
ranks[0], jj, kk)
core_values[univariate_function_num] = np.dot(
basis_matrix[:, :degree+1], params)
if left_vals is not None:
core_derivs[cnt:cnt+params.shape[0]] = \
left_vals[0, jj]*basis_matrix[:, :degree+1]
else:
core_derivs[cnt:cnt+params.shape[0]
] = basis_matrix[:, :degree+1]
cnt += params.shape[0]
return core_values, core_derivs
def evaluate_function_train(samples, ft_data, recursion_coeffs):
"""
Evaluate the function train
Parameters
----------
samples : np.ndarray (num_vars, num_samples)
The samples at which to evaluate the function train
    ft_data : list
        The function train data [ranks, ft_params, ft_params_map,
        ft_cores_map]. ft_params contains the parameters of the univariate
        functions of each core, stored core by core; within the ii-th core
        the jj-th univariate function, jj=0,...,ranks[ii]*ranks[ii+1]-1, is
        stored in column major ordering. ranks is np.ndarray (num_vars+1)
        and contains the ranks of the function train cores.
recursion_coeffs : np.ndarray (max_degree+1)
The recursion coefficients used to evaluate the univariate functions
which are assumed to polynomials defined by the recursion coefficients
Returns
-------
values : np.ndarray (num_samples,1)
The values of the function train at the samples
"""
ranks, ft_params, ft_params_map, ft_cores_map = ft_data
num_vars = len(ranks)-1
num_samples = samples.shape[1]
assert len(ranks) == num_vars+1
values = np.zeros((num_samples, 1), dtype=float)
for ii in range(num_samples):
core_params, core_params_map = get_all_univariate_params_of_core(
ft_params, ft_params_map, ft_cores_map, 0)
core_values = evaluate_core(
samples[0, ii], core_params, core_params_map, ranks[:2],
recursion_coeffs)
for dd in range(1, num_vars):
core_params, core_params_map = get_all_univariate_params_of_core(
ft_params, ft_params_map, ft_cores_map, dd)
core_values = np.dot(core_values, evaluate_core(
samples[dd, ii], core_params, core_params_map, ranks[dd:dd+2],
recursion_coeffs))
values[ii, 0] = core_values[0, 0]
return values
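# Illustrative usage sketch (added for clarity, not part of the original API):
# build an additive function train f(x)=f_1(x_1)+...+f_d(x_d) with a Legendre
# basis (an assumption) and evaluate it at random samples on [-1, 1]^d.
def _example_evaluate_additive_function_train(num_vars=3, num_params_1d=4,
                                               num_samples=10):
    recursion_coeffs = jacobi_recurrence(
        num_params_1d, alpha=0, beta=0, probability=True)
    univariate_function_params = [
        np.random.normal(0., 1., (num_params_1d)) for _ in range(num_vars)]
    ft_data = generate_additive_function_in_function_train_format(
        univariate_function_params, compress=False)
    samples = np.random.uniform(-1., 1., (num_vars, num_samples))
    # returned values have shape (num_samples, 1)
    return evaluate_function_train(samples, ft_data, recursion_coeffs)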
def core_grad_right(ranks, right_vals, intermediate_core_derivs,
core_params_map):
"""
Evaluate the gradient of a core of the function train at a sample using
precomputed intermediate_results
    This works because the derivative of the core (deriv_matrix) with respect
    to a single parameter of the univariate function indexed by ii,jj is
    zero except for one non-zero entry at (ii,jj).
    Thus, although in principle we would need to compute
    np.dot(deriv_matrix,right_vals) for each ii,jj, we actually only need to
    collect all the non-zero derivatives of each deriv_matrix into one vector
    and multiply this by right_vals
Parameters
----------
ranks : np.ndarray (2)
The ranks of the core [r_{k-1},r_k]
right_vals : np.ndarray (ranks[1])
        The values of the product of all following cores F_{k+1}F_{k+2}...F_d
intermediate_core_derivs : [ [] (num_params_i) ] (ranks[0]*ranks[1])
        The derivatives of each univariate function with respect to its
        basis parameters after the left pass of the algorithm.
        Derivatives of the univariate functions are in column major ordering
Returns
-------
derivs : [ [] (num_params_i) ] (ranks[0]*ranks[1])
        The derivatives of each univariate function with respect to its
        basis parameters. Derivatives of the univariate functions are in
        column major ordering
"""
assert ranks.shape[0] == 2
assert right_vals.ndim == 2 and right_vals.shape[1] == 1
core_derivs = np.empty((0), dtype=float)
for kk in range(ranks[1]):
for jj in range(ranks[0]):
lb, ub = get_index_bounds_of_univariate_function_params(
jj, kk, ranks, core_params_map,
intermediate_core_derivs.shape[0])
core_derivs = np.append(
core_derivs, intermediate_core_derivs[lb:ub]*right_vals[kk, 0])
return core_derivs
def evaluate_function_train_grad(sample, ft_data, recursion_coeffs):
"""
Evaluate the function train and its gradient
Parameters
----------
    sample : np.ndarray (num_vars, 1)
        The sample at which to evaluate the function train gradient
    ft_data : list
        The function train data [ranks, ft_params, ft_params_map,
        ft_cores_map]. ft_params contains the parameters of the univariate
        functions of each core, stored core by core; within the ii-th core
        the jj-th univariate function, jj=0,...,ranks[ii]*ranks[ii+1]-1, is
        stored in column major ordering. ranks is np.ndarray (num_vars+1)
        and contains the ranks of the function train cores.
recursion_coeffs : np.ndarray (max_degree+1)
The recursion coefficients used to evaluate the univariate functions
which are assumed to polynomials defined by the recursion coefficients
Returns
-------
value : float
The value of the function train at the sample
grad : np.ndarray(num_ft_params)
The derivative of the function train with respect to each coefficient
of each univariate core.
The gradient is stored for each core from first to last.
The gradient of each core is stored using column major ordering of
the univariate functions.
"""
value, values_of_cores, derivs_of_cores = \
evaluate_ft_gradient_forward_pass(sample, ft_data, recursion_coeffs)
ranks, ft_params, ft_params_map, ft_cores_map = ft_data
gradient = evaluate_ft_gradient_backward_pass(
        ranks, values_of_cores, derivs_of_cores, ft_params_map, ft_cores_map)
return value, gradient
def evaluate_ft_gradient_forward_pass(sample, ft_data, recursion_coeffs):
ranks, ft_params, ft_params_map, ft_cores_map = ft_data
num_vars = len(ranks)-1
assert sample.shape[1] == 1
assert sample.shape[0] == num_vars
core_params, core_params_map = get_all_univariate_params_of_core(
ft_params, ft_params_map, ft_cores_map, 0)
core_values, core_derivs = core_grad_left(
sample[0, 0], core_params, core_params_map, ranks[:2],
recursion_coeffs, None)
left_vals = core_values.copy()[np.newaxis, :]
values_of_cores = core_values.copy()
derivs_of_cores = core_derivs.copy()
for dd in range(1, num_vars):
core_params, core_params_map = get_all_univariate_params_of_core(
ft_params, ft_params_map, ft_cores_map, dd)
core_values, core_derivs = core_grad_left(
sample[dd, 0], core_params, core_params_map, ranks[dd:dd+2],
recursion_coeffs, left_vals)
left_vals = np.dot(left_vals, np.reshape(
core_values, (ranks[dd], ranks[dd+1]), order='F'))
values_of_cores = np.concatenate((values_of_cores, core_values))
derivs_of_cores = np.concatenate((derivs_of_cores, core_derivs))
value = left_vals[0]
return value, values_of_cores, derivs_of_cores
def evaluate_ft_gradient_backward_pass(ranks, values_of_cores,
                                       derivs_of_cores, ft_params_map,
                                       ft_cores_map):
num_vars = ranks.shape[0]-1
num_ft_params = derivs_of_cores.shape[0]
gradient = np.empty_like(derivs_of_cores)
# gradient of parameters of last core
cores_params_lb, cores_params_ub = get_index_bounds_of_core_params(
num_vars-1, ft_cores_map, ft_params_map, num_ft_params)[:2]
gradient[cores_params_lb:cores_params_ub] =\
derivs_of_cores[cores_params_lb:cores_params_ub]
values_lb, values_ub = get_index_bounds_of_core_univariate_functions(
num_vars-1, ft_cores_map, values_of_cores.shape[0])
right_vals = np.reshape(
values_of_cores[values_lb:values_ub],
(ranks[num_vars-1], ranks[num_vars]), order='F')
# gradient of parameters of each of the middle cores
for dd in range(num_vars-2, 0, -1):
core_derivs, core_params_map = get_all_univariate_params_of_core(
derivs_of_cores, ft_params_map, ft_cores_map, dd)
core_values = get_core_values(dd, values_of_cores, ft_cores_map)
core_params_lb, core_params_ub = get_index_bounds_of_core_params(
dd, ft_cores_map, ft_params_map, num_ft_params)[:2]
gradient[core_params_lb:core_params_ub] = core_grad_right(
ranks[dd:dd+2], right_vals, core_derivs, core_params_map)
core_values = np.reshape(
core_values, (ranks[dd], ranks[dd+1]), order='F')
right_vals = np.dot(core_values, right_vals)
# gradient of parameters of first core
core_derivs, core_params_map = get_all_univariate_params_of_core(
derivs_of_cores, ft_params_map, ft_cores_map, 0)
core_params_lb, core_params_ub = get_index_bounds_of_core_params(
0, ft_cores_map, ft_params_map, num_ft_params)[:2]
gradient[core_params_lb:core_params_ub] = core_grad_right(
ranks[:2], right_vals, core_derivs, core_params_map)
return gradient
def num_univariate_functions(ranks):
"""
    Compute the number of univariate functions in a function train.
Parameters
----------
ranks : np.ndarray (num_vars+1)
The ranks of the function train cores
Returns
-------
num_1d_functions : integer
The number of univariate functions
"""
num_1d_functions = 0
for ii in range(len(ranks)-1):
num_1d_functions += ranks[ii]*ranks[ii+1]
return int(num_1d_functions)
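# For example, an additive function train in three variables has
# ranks = [1, 2, 2, 1] and therefore 1*2 + 2*2 + 2*1 = 8 univariate functions.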
def generate_homogeneous_function_train(ranks, num_params_1d, ft_params):
"""
Generate a function train of a specified rank using the same
parameterization of each univariate function within a core and for all
cores.
Parameters
----------
ranks : np.ndarray (num_vars+1)
The ranks of the function train cores
num_params_1d : integer
The number of parameters of each univariate function, e.g.
the number of parameters in a polynomial basis
ft_params : np.ndarray (num_univariate_functions(ranks)*num_params)
Flatten array containing the parameter values of each univariate
function
Returns
-------
cores : [ [ [] univariate_params[ii][jj]] (ranks[ii]*ranks[ii+1]) ](num_vars)
Parameters of the univariate function of each core.
ii is the variable index, jj is the univariate function index within
the ii-th core. jj=0,...,ranks[ii]*ranks[ii+1]. Univariate functions
are assumed to be stored in column major ordering.
"""
num_vars = ranks.shape[0]-1
num_1d_functions = num_univariate_functions(ranks)
num_ft_parameters = num_params_1d*num_1d_functions
ft_params_map = np.arange(0, num_ft_parameters, num_params_1d, dtype=int)
ft_cores_map = np.zeros((1), dtype=int)
for ii in range(num_vars-1):
ft_cores_map = np.append(
ft_cores_map,
np.asarray(ft_cores_map[-1]+ranks[ii]*ranks[ii+1], dtype=int))
# return list so I can make assignments like
# ft_data[1]=...
return [ranks, ft_params, ft_params_map, ft_cores_map]
def get_index_bounds_of_core_univariate_functions(var_num, ft_cores_map,
num_univariate_functions):
num_vars = ft_cores_map.shape[0]
assert var_num < num_vars
core_map_lb = ft_cores_map[var_num]
if var_num == num_vars-1:
core_map_ub = num_univariate_functions
else:
core_map_ub = ft_cores_map[var_num+1]
return core_map_lb, core_map_ub
def get_index_bounds_of_core_params(var_num, ft_cores_map,
ft_params_map, num_ft_params):
num_vars = ft_cores_map.shape[0]
assert var_num < num_vars
core_map_lb, core_map_ub = get_index_bounds_of_core_univariate_functions(
var_num, ft_cores_map, ft_params_map.shape[0])
if var_num == num_vars-1:
params_map_ub = num_ft_params
else:
params_map_ub = ft_params_map[core_map_ub]
params_map_lb = ft_params_map[core_map_lb]
return params_map_lb, params_map_ub, core_map_lb, core_map_ub
def get_all_univariate_params_of_core(ft_params, ft_params_map, ft_cores_map,
var_num):
params_map_lb, params_map_ub, core_map_lb, core_map_ub = \
get_index_bounds_of_core_params(
var_num, ft_cores_map, ft_params_map, ft_params.shape[0])
core_params = ft_params[params_map_lb:params_map_ub]
core_params_map = ft_params_map[core_map_lb:core_map_ub]-params_map_lb
return core_params, core_params_map
def get_core_values(var_num, values_of_cores, ft_cores_map):
num_ft_univariate_functions = values_of_cores.shape[0]
lb, ub = get_index_bounds_of_core_univariate_functions(
var_num, ft_cores_map, num_ft_univariate_functions)
return values_of_cores[lb:ub]
def get_univariate_function_number(left_rank, ii, jj):
    # if left_rank is a numpy uint64 and ii, jj are python ints the
    # following operation returns a float64, so cast the result back to int
return int(jj*left_rank+ii)
def get_index_bounds_of_univariate_function_params(
ii, jj, ranks, core_params_map, num_core_params):
univariate_function_num = get_univariate_function_number(ranks[0], ii, jj)
num_univariate_functions = core_params_map.shape[0]
assert univariate_function_num < num_univariate_functions
lb = core_params_map[univariate_function_num]
if univariate_function_num == num_univariate_functions-1:
ub = num_core_params
else:
ub = core_params_map[univariate_function_num+1]
return lb, ub
def get_params_of_univariate_function(ii, jj, ranks, core_params,
core_params_map):
lb, ub = get_index_bounds_of_univariate_function_params(
ii, jj, ranks, core_params_map, core_params.shape[0])
univariate_function_params = core_params[lb:ub]
return univariate_function_params
def add_core(univariate_functions_params, ft_params, ft_params_map,
ft_cores_map):
if ft_params is None:
ft_params = np.empty((0), dtype=float)
ft_params_map = np.empty((0), dtype=int)
ft_cores_map = np.empty((0), dtype=int)
params_cnt = ft_params.shape[0]
params_map_cnt = ft_params_map.shape[0]
ft_cores_map = np.append(ft_cores_map, params_map_cnt)
num_univariate_functions = len(univariate_functions_params)
ft_params = np.concatenate(
(ft_params, np.concatenate(univariate_functions_params)))
param_indices = np.empty((num_univariate_functions), dtype=int)
param_indices[0] = params_cnt
for ii in range(1, num_univariate_functions):
params_cnt += univariate_functions_params[ii-1].shape[0]
param_indices[ii] = params_cnt
ft_params_map = np.concatenate((ft_params_map, param_indices))
return ft_params, ft_params_map, ft_cores_map
def generate_additive_function_in_function_train_format(
univariate_function_params, compress):
"""
Generate function train representation of
f(x) = f_1(x_1)+f_2(x_2)+...+f_d(x_d).
An additive function in tensor train format has ranks [1,2,2,...,2,2,1]
Parameters
----------
univariate_function_params : [np.ndarray(num_params_1d_i)] (num_vars)
The parameters of the univariate function f_i for each dimension
compress : boolean
True - return compressed representation of zero and one cores,
i.e. a coefficient vector for the univariate cores of length 1
False - return a coefficient vector that has zeros for all 1d params
of a univariate function. If False then the number of
parameters of all univariate parameters must be equal
Returns
-------
ranks : np.ndarray (num_vars+1)
The ranks of the function train cores
cores : [ [ [] univariate_params[ii][jj]] (ranks[ii]*ranks[ii+1]) ](num_vars)
Parameters of the univariate function of each core.
ii is the variable index, jj is the univariate function index within
the ii-th core. jj=0,...,ranks[ii]*ranks[ii+1]. Univariate functions
are assumed to be stored in column major ordering.
"""
num_vars = len(univariate_function_params)
ranks = ranks_vector(num_vars, 2)
if not compress:
num_params_1d = univariate_function_params[0].shape[0]
for dd in range(1, num_vars):
assert num_params_1d == univariate_function_params[dd].shape[0]
zero = np.zeros((num_params_1d), dtype=float)
one = zero.copy()
one[0] = 1.
else:
zero = np.asarray([0.])
one = np.asarray([1.])
# first core is |f_1 1|
core_univariate_functions_params = [univariate_function_params[0], one]
ft_params, ft_params_map, ft_cores_map = add_core(
core_univariate_functions_params, None, None, None)
# middle cores are | 1 0|
# |f_ii, 1|
for ii in range(1, num_vars-1):
core_univariate_functions_params = [
one, univariate_function_params[ii], zero, one]
ft_params, ft_params_map, ft_cores_map = add_core(
core_univariate_functions_params, ft_params, ft_params_map,
ft_cores_map)
# last core is | 1 |
# |f_d|
core_univariate_functions_params =\
[one, univariate_function_params[num_vars-1]]
ft_params, ft_params_map, ft_cores_map = add_core(
core_univariate_functions_params, ft_params, ft_params_map,
ft_cores_map)
# return list so I can make assignments like
# ft_data[1]=...
return [ranks, ft_params, ft_params_map, ft_cores_map]
def ft_parameter_finite_difference_gradient(
sample, ft_data, recursion_coeffs, eps=2*np.sqrt(np.finfo(float).eps)):
ft_params = ft_data[1]
value = evaluate_function_train(sample, ft_data, recursion_coeffs)
num_ft_parameters = ft_params.shape[0]
gradient = np.empty_like(ft_params)
for ii in range(num_ft_parameters):
perturbed_params = ft_params.copy()
perturbed_params[ii] += eps
ft_data = (ft_data[0], perturbed_params, ft_data[2], ft_data[3])
perturbed_value = evaluate_function_train(
sample, ft_data, recursion_coeffs)
gradient[ii] = (perturbed_value-value)/eps
return gradient
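# Illustrative sketch (added for clarity, not part of the original API):
# compare the analytic function train gradient with the finite-difference
# approximation above, assuming a Legendre basis built with jacobi_recurrence.
def _example_check_ft_gradient(num_vars=3, num_params_1d=4, seed=0):
    np.random.seed(seed)
    recursion_coeffs = jacobi_recurrence(
        num_params_1d, alpha=0, beta=0, probability=True)
    univariate_function_params = [
        np.random.normal(0., 1., (num_params_1d)) for _ in range(num_vars)]
    ft_data = generate_additive_function_in_function_train_format(
        univariate_function_params, compress=False)
    sample = np.random.uniform(-1., 1., (num_vars, 1))
    _, grad = evaluate_function_train_grad(sample, ft_data, recursion_coeffs)
    fd_grad = ft_parameter_finite_difference_gradient(
        sample, ft_data, recursion_coeffs)
    # the two gradients should agree up to the finite-difference error
    print('max gradient difference', np.absolute(grad - fd_grad).max())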
def ranks_vector(num_vars, rank):
ranks = np.ones((num_vars+1), dtype=int)
ranks[1:num_vars] = rank
return ranks
def print_function_train(ft_data):
"""
Print the parameters of the univariate functions of each core of a
function train.
"""
ranks, ft_params, ft_params_map, ft_cores_map = ft_data
num_vars = len(ranks)-1
print(('Function train d=%d' % num_vars, "rank:", ranks))
for ii in range(num_vars):
core_params, core_params_map = get_all_univariate_params_of_core(
ft_params, ft_params_map, ft_cores_map, ii)
for kk in range(ranks[ii+1]):
for jj in range(ranks[ii]):
params = get_params_of_univariate_function(
jj, kk, ranks, core_params, core_params_map)
print(("(%d,%d,%d)" % (ii, jj, kk), params))
def function_trains_equal(ft_data_1, ft_data_2, verbose=False):
"""
Check equality of two function trains
"""
ranks_1, ft_params_1, ft_params_map_1, ft_cores_map_1 = ft_data_1
ranks_2, ft_params_2, ft_params_map_2, ft_cores_map_2 = ft_data_2
num_vars = len(ranks_1)-1
if (num_vars != len(ranks_2)-1):
if verbose:
print("Inconsistent number of variables")
return False
if not np.allclose(ranks_1, ranks_2):
if verbose:
print("Inconsistent ranks")
return False
if not np.allclose(ft_params_1, ft_params_2):
if verbose:
print("Inconsistent univariate function parameters")
return False
if not np.allclose(ft_params_map_1, ft_params_map_2):
if verbose:
print("Inconsistent univariate functions parameters map")
return False
if not np.allclose(ft_cores_map_1, ft_cores_map_2):
if verbose:
print("Inconsistent cores map")
return False
return True
def ft_linear_least_squares_regression(samples, values, degree, perturb=None):
num_vars, num_samples = samples.shape
vandermonde = np.hstack((np.ones((num_samples, 1)), samples.copy().T))
sol = np.linalg.lstsq(vandermonde, values, rcond=None)[0]
univariate_function_params = []
for ii in range(num_vars):
params = np.zeros(degree+1)
# add small constant contribution to each core
params[0] = sol[0]/float(num_vars)
params[1] = sol[ii+1]
univariate_function_params.append(params)
linear_ft_data = generate_additive_function_in_function_train_format(
univariate_function_params, compress=False)
if perturb is not None:
II = np.where(linear_ft_data[1] == 0)[0]
linear_ft_data[1][II] = perturb
return linear_ft_data
def modify_and_evaluate_function_train(samples, ft_data, recursion_coeffs,
active_indices, ft_params):
if active_indices is not None:
ft_data[1][active_indices] = ft_params
else:
ft_data[1] = ft_params
ft_values = evaluate_function_train(samples, ft_data, recursion_coeffs)
return ft_values
def ft_least_squares_residual(samples, values, ft_data, recursion_coeffs,
active_indices, ft_params):
"""
    Warning: this only overwrites the parameters associated with the active
    indices; the rest of the parameters are taken from ft_data.
"""
ft_values = modify_and_evaluate_function_train(
samples, ft_data, recursion_coeffs, active_indices, ft_params)
assert ft_values.shape == values.shape
return (values-ft_values)[:, 0]
def ft_least_squares_jacobian(samples, values, ft_data, recursion_coeffs,
active_indices, ft_params):
"""
    Warning: this only overwrites the parameters associated with the active
    indices; the rest of the parameters are taken from ft_data.
"""
num_samples = samples.shape[1]
num_params = ft_params.shape[0]
if active_indices is not None:
ft_data[1][active_indices] = ft_params
nactive_params = active_indices.shape[0]
else:
ft_data[1] = ft_params
nactive_params = num_params
jacobian = np.empty((num_samples, nactive_params), dtype=float)
for ii in range(num_samples):
__, grad = evaluate_function_train_grad(
samples[:, ii:ii+1], ft_data, recursion_coeffs)
        if active_indices is None:
            jacobian[ii, :] = -grad
        else:
            jacobian[ii, :] = -grad[active_indices]
return jacobian
def apply_function_train_adjoint_jacobian(samples, ft_data, recursion_coeffs,
perturb, ft_params, vec):
"""
    Apply the adjoint (transpose) of the function train Jacobian to a vector.
    This is more memory efficient than simply computing the Jacobian and then
    using a matrix-vector multiply to compute its action.
Parameters
----------
perturb : float
Small positive perturbation to add to any zero valued function train
        parameters before computing the Jacobian. This is useful when
        computing Jacobians for greedy regression methods that start with
        only a small number of non-zero parameters, which can mean that the
        gradient is often zero unless the perturbation is applied.
"""
new_ft_data = copy.deepcopy(ft_data)
new_ft_data[1] = ft_params.copy()
new_ft_data[1][ft_params == 0] = perturb
result = np.zeros((ft_params.shape[0]), dtype=float)
num_samples = samples.shape[1]
for ii in range(num_samples):
__, ft_gradient = evaluate_function_train_grad(
samples[:, ii:ii+1], new_ft_data, recursion_coeffs)
for jj in range(ft_gradient.shape[0]):
result[jj] += -ft_gradient[jj]*vec[ii]
# print 'result',result
return result
def ft_non_linear_least_squares_regression(samples, values, ft_data,
recursion_coeffs,
initial_guess,
active_indices=None,
opts=dict()):
if active_indices is not None:
assert active_indices.shape[0] == initial_guess.shape[0]
assert values.ndim == 2 and values.shape[1] == 1
residual_func = partial(
ft_least_squares_residual, samples, values, ft_data, recursion_coeffs,
active_indices)
jacobian_func = partial(ft_least_squares_jacobian,
samples, values, ft_data, recursion_coeffs,
active_indices)
# jacobian_func='2-point'
result = least_squares(
residual_func, initial_guess, jac=jacobian_func,
gtol=opts.get('gtol', 1e-8), ftol=opts.get('ftol', 1e-8),
xtol=opts.get('xtol', 1e-8))
if opts.get('verbosity', 0) > 0:
print('non-linear least squares output')
print(('#fevals:', result['nfev']))
print(('#gevals:', result['njev']))
print(('obj:', result['cost']))
print(('status:', result['status']))
if active_indices is None:
return result['x']
else:
ft_params = ft_data[1].copy()
ft_params[active_indices] = result['x']
return ft_params
def generate_random_sparse_function_train(num_vars, rank, num_params_1d,
sparsity_ratio):
ranks = rank*np.ones(num_vars+1, dtype=int)
ranks[0] = 1
ranks[-1] = 1
num_1d_functions = num_univariate_functions(ranks)
num_ft_params = num_params_1d*num_1d_functions
sparsity = int(sparsity_ratio*num_ft_params)
ft_params = np.random.normal(0., 1., (num_ft_params))
II = np.random.permutation(num_ft_params)[sparsity:]
ft_params[II] = 0.
ft_data = generate_homogeneous_function_train(
ranks, num_params_1d, ft_params)
return ft_data
def get_random_compressible_vector(num_entries, decay_rate):
vec = np.random.normal(0., 1., (num_entries))
vec /= (np.arange(1, num_entries+1))**decay_rate
vec = vec[np.random.permutation(num_entries)]
return vec
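# For example, decay_rate=2 produces entries whose magnitudes decay roughly
# like 1/k**2 when sorted (before the random permutation applied above).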
def compare_compressible_vectors():
import matplotlib.pyplot as plt
decay_rates = [1, 2, 3, 4]
num_entries = 1000
II = np.arange(num_entries)
for decay_rate in decay_rates:
vec = get_random_compressible_vector(num_entries, decay_rate)
plt.loglog(II, np.sort(np.absolute(vec))[
::-1], 'o', label=r'$r=%1.1f$' % decay_rate)
plt.legend()
plt.show()
def sparsity_example(decay_rate, rank, num_vars, num_params_1d):
num_trials = 20
sparsity_fractions = np.arange(1, 10, dtype=float)/10.
assert sparsity_fractions[-1] != 1 # error will always be zero
ranks = ranks_vector(num_vars, rank)
num_1d_functions = num_univariate_functions(ranks)
num_ft_parameters = num_params_1d*num_1d_functions
alpha = 0
beta = 0
recursion_coeffs = jacobi_recurrence(
num_params_1d, alpha=alpha, beta=beta, probability=True)
assert int(os.environ['OMP_NUM_THREADS']) == 1
max_eval_concurrency = 4 # max(multiprocessing.cpu_count()-2,1)
pool = Pool(max_eval_concurrency)
partial_func = partial(
sparsity_example_engine, ranks=ranks,
num_ft_parameters=num_ft_parameters,
decay_rate=decay_rate, recursion_coeffs=recursion_coeffs,
sparsity_fractions=sparsity_fractions)
filename = 'function-train-sparsity-effect-%d-%d-%d-%1.1f-%d.npz' % (
num_vars, rank, num_params_1d, decay_rate, num_trials)
if not os.path.exists(filename):
result = pool.map(partial_func, [(num_params_1d)]*num_trials)
l2_errors = np.asarray(result).T # (num_sparsity_fractions,num_trials)
np.savez(
filename, l2_errors=l2_errors,
sparsity_fractions=sparsity_fractions,
num_ft_parameters=num_ft_parameters)
return filename
def compress_homogeneous_function_train(ft_data, tol=1e-15):
"""
Compress a function train such that only parameters with magnitude
greater than a tolerance are retained.
    If all parameters of a univariate function are below the tolerance
    then a single zero is used as the parameters of that function.
    Note this function will not reduce the rank, even if after compression
    the effective rank is smaller, e.g. we can have a column of all-zero
    functions.
"""
ranks = ft_data[0]
ft_params = ft_data[1]
num_funcs_1d = num_univariate_functions(ranks)
num_params_1d = ft_params.shape[0]//num_funcs_1d
num_vars = ranks.shape[0]-1
compressed_ft_params = None
compressed_ft_params_map = None
compressed_ft_cores_map = None
cnt = 0
for ii in range(num_vars):
core_univariate_functions_params = []
for jj in range(ranks[ii]):
for kk in range(ranks[ii+1]):
univariate_params = \
ft_params[cnt*num_params_1d:(cnt+1)*num_params_1d]
II = np.where(np.absolute(univariate_params) > tol)[0]
if II.shape[0] == 0:
compressed_univariate_params = np.array([0])
else:
compressed_univariate_params = univariate_params[II]
core_univariate_functions_params.append(
compressed_univariate_params)
cnt += 1
compressed_ft_params, compressed_ft_params_map, \
compressed_ft_cores_map = add_core(
core_univariate_functions_params, compressed_ft_params,
compressed_ft_params_map, compressed_ft_cores_map)
return [ranks, compressed_ft_params, compressed_ft_params_map,
compressed_ft_cores_map]
def plot_sparsity_example():
import matplotlib.pyplot as plt
num_vars = 10
num_params_1d = 10
rank = 2
decay_rates = [1, 2, 3, 4]
for decay_rate in decay_rates:
filename = sparsity_example(decay_rate, rank, num_vars, num_params_1d)
file_data = np.load(filename)
l2_errors = file_data['l2_errors']
sparsity_fractions = file_data['sparsity_fractions']
plt.semilogy(sparsity_fractions, np.median(l2_errors, axis=1))
plt.fill_between(
sparsity_fractions, l2_errors.min(axis=1), l2_errors.max(axis=1),
alpha=0.5, label='$r=%1.1f$' % decay_rate)
plt.legend()
figname = 'function-train-sparsity-effect-%d-%d-%d.png' % (
num_vars, rank, num_params_1d)
plt.savefig(figname, dpi=600)
# plt.show()
def sparsity_example_engine(num_params_1d, ranks, num_ft_parameters,
decay_rate, recursion_coeffs, sparsity_fractions):
np.random.seed()
num_vars = len(ranks)-1
ft_params = get_random_compressible_vector(
num_ft_parameters, decay_rate)*100
ft_data = generate_homogeneous_function_train(
ranks, num_params_1d, ft_params)
num_samples = 1000
samples = np.random.uniform(-1., 1., (num_vars, num_samples))
values = evaluate_function_train(samples, ft_data, recursion_coeffs)
# sort parameters in descending order
sorted_indices = np.argsort(np.absolute(ft_params))[::-1]
l2_errors = np.empty(len(sparsity_fractions), dtype=float)
for jj in range(len(sparsity_fractions)):
sparsity = int(num_ft_parameters*sparsity_fractions[jj])
# retain only the s largest parameters
assert sparsity <= num_ft_parameters
sparse_params = ft_params.copy()
sparse_params[sorted_indices[sparsity:]] = 0.
sparse_ft_data = generate_homogeneous_function_train(
ranks, num_params_1d, sparse_params)
sparse_values = evaluate_function_train(
samples, sparse_ft_data, recursion_coeffs)
l2_error = np.linalg.norm(sparse_values-values)/np.linalg.norm(values)
print(('num_ft_parameters', num_ft_parameters, 'sparsity', sparsity))
print(('l2 error', l2_error))
l2_errors[jj] = l2_error
return l2_errors
# if __name__=='__main__':
# #compare_compressible_vectors()
# #plot_sparsity_example()
# pass
|
the-stack_0_24618
|
import pytest
from stp_core.loop.eventually import eventually
from plenum.test.primary_election.helpers import checkNomination
from plenum.test.test_node import checkPoolReady, \
checkProtocolInstanceSetup
from plenum.test import waits
nodeCount = 4
@pytest.fixture()
def electContFixture(txnPoolNodeSet):
A, B, C, D = txnPoolNodeSet
for node in [B, C, D]:
node.delaySelfNomination(4)
# noinspection PyIncorrectDocstring
@pytest.mark.skip('Nodes use round robin primary selection')
def testPrimaryElectionWithAClearWinner(
electContFixture, looper, txnPoolNodeSet):
"""
Primary selection (Sunny Day)
A, B, C, D, E
A, B, C, D startup. E is lagging.
A sees the minimum number of nodes first, and then sends out a NOMINATE(A) message
B, C, D all see the NOMINATE(A) message from A, and respond with NOMINATE(A) message to all other nodes
A sees three other NOMINATE(A) votes (from B, C, D)
A sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes
B sees two more NOMINATE(A) votes (from C and D)
B sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes
C sees two more NOMINATE(A) votes (from B and D)
C sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes
D sees two more NOMINATE(A) votes (from B and C)
D sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes
    A sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary
    B sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary
    C sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary
    D sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary
"""
A, B, C, D = txnPoolNodeSet
nodesBCD = [B, C, D]
checkPoolReady(looper, txnPoolNodeSet)
# Checking whether one of the replicas of Node A nominated itself
timeout = waits.expectedPoolNominationTimeout(len(txnPoolNodeSet))
looper.run(eventually(checkNomination, A,
A.name, retryWait=1, timeout=timeout))
timeout = waits.expectedPoolNominationTimeout(len(txnPoolNodeSet))
for n in nodesBCD:
# Checking whether Node B, C and D nominated Node A
looper.run(eventually(checkNomination, n, A.name,
retryWait=1, timeout=timeout))
checkProtocolInstanceSetup(looper=looper, nodes=txnPoolNodeSet, retryWait=1)
assert A.hasPrimary
|
the-stack_0_24619
|
'''Logging utilities are stored in this file'''
import logging
import datetime, sys
# On module init
logger_made = False
g_logger = None
class MyConsoleHandler(logging.StreamHandler):
# Modify this when printing progress bar
terminator = logging.StreamHandler.terminator
@classmethod
def change_terminator(cls, repl):
cls.terminator = repl
@classmethod
def restore_terminator(cls):
cls.terminator = logging.StreamHandler.terminator
def get_logger():
global g_logger, logger_made
if not logger_made:
# if logger is not created, then create it
cur_time = str(datetime.datetime.now()).replace(':', '-').split('.')[0]
        # Gives 'YYYY-MM-DD HH-MM-SS' (colons replaced so it can be used in a filename)
# Set which log is required
console = True
file_log = False
# Logging setup
logger = logging.getLogger('Sheet-Disk')
logger.setLevel(logging.DEBUG)
if console:
# Handlers
c_handler = MyConsoleHandler(sys.stdout)
c_handler.setLevel(logging.INFO)
# Formatter
c_format = logging.Formatter('%(message)s')
c_handler.setFormatter(c_format)
# Add the handler
logger.addHandler(c_handler)
if file_log:
# Handlers
f_handler = logging.FileHandler('RunLog ' + cur_time + '.log', mode='w')
f_handler.setLevel(logging.DEBUG)
# Formatter
f_format = logging.Formatter('%(asctime)s - %(levelname)s - %(funcName)s - %(message)s')
f_handler.setFormatter(f_format)
# Add the handler
logger.addHandler(f_handler)
g_logger = logger
logger_made = True
return g_logger
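# Illustrative usage sketch (assumption: called from application code):
# logger = get_logger()
# logger.info('Uploading file...')
# MyConsoleHandler.change_terminator('\r')  # keep a progress bar on one line
# logger.info('progress 42%')
# MyConsoleHandler.restore_terminator()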
|
the-stack_0_24620
|
import os
import glob
import pytest
import shutil
import sys
import time
from pathlib import Path, PurePath
from pytest_mock import MockerFixture
# Import Niffler Module
niffler_modules_path = Path.cwd() / 'modules'
sys.path.append(str(niffler_modules_path / 'png-extraction'))
import ImageExtractor
@pytest.fixture
def mock_logger(mocker: MockerFixture):
"""
Mock module logging
"""
return mocker.patch('ImageExtractor.logging')
def create_out_dir_structure(out_dir: PurePath):
"""
Creates directory structure for cold-extraction output
"""
pytest.create_dirs(*[
out_dir / 'extracted-images',
out_dir / 'failed-dicom/1',
out_dir / 'failed-dicom/2',
out_dir / 'failed-dicom/3',
out_dir / 'failed-dicom/4',
out_dir / 'maps',
out_dir / 'meta'
])
return out_dir
class TestExecute:
"""
Tests for ImageExtractor.execute
"""
def generate_kwargs(self, out_dir: PurePath, **kwargs):
"""
Generates kwargs for ImageExtractor.execute
"""
kwargs_dict = {
'pickle_file': str(out_dir / 'ImageExtractor.pickle'),
'dicom_home': str(pytest.data_dir / 'png-extraction' / 'input'),
'output_directory': str(out_dir),
'print_images': True,
'print_only_common_headers': "True",
'depth': 0,
'processes': 0,
'flattened_to_level': 'study',
'email': '[email protected]',
'send_email': False,
'no_splits': 1,
'is16Bit': "True",
'png_destination': str(out_dir / 'extracted-images') + '/',
'failed': str(out_dir / 'failed-dicom') + '/',
'maps_directory': str(out_dir / 'maps') + '/',
'meta_directory': str(out_dir / 'meta') + '/',
'LOG_FILENAME': str(out_dir / 'ImageExtractor.out'),
'metadata_col_freq_threshold': 0.1,
't_start': time.time()
}
kwargs_dict.update(**kwargs)
return kwargs_dict
def setup_method(self):
"""
Setup for tests
"""
self.out_dir = pytest.out_dir / 'png-extraction/outputs/TestExecute'
self.out_dirs_test_success = create_out_dir_structure(
self.out_dir / 'test_success'
)
self.out_no_dicoms = create_out_dir_structure(
self.out_dir / 'test_no_dicoms'
)
def teardown_method(self):
"""
Cleanup after tests
"""
shutil.rmtree(self.out_dir)
def test_success(self, mock_logger):
"""
ImageExtractor.execute function executes successfully
Checks content of output dir
"""
execute_kwargs = self.generate_kwargs(
out_dir=self.out_dirs_test_success
)
ImageExtractor.execute(**execute_kwargs)
assert len(
glob.glob(
f"{execute_kwargs['png_destination']}**/*.png",
recursive=True
)
) != 0
def test_no_dicoms(self, mock_logger):
"""
ImageExtractor.execute function executes
Checks if script exited using SystemExit
"""
execute_kwargs = self.generate_kwargs(
out_dir=self.out_no_dicoms,
dicom_home=str(
pytest.data_dir / 'png-extraction' / 'no_input_files')
)
ImageExtractor.logging.basicConfig(
filename=execute_kwargs['LOG_FILENAME'], level=ImageExtractor.logging.DEBUG)
with pytest.raises(SystemExit):
ImageExtractor.execute(**execute_kwargs)
class TestImageExtractorModule:
"""
Tests for ImageExtractor.initialize_config_and_execute
"""
def generate_config(self, **kwargs):
"""
Generates kwargs for ImageExtractor.initialize_config_and_execute
"""
config = {
"DICOMHome": str(pytest.data_dir / 'png-extraction' / 'input'),
"OutputDirectory": str(self.default_out_dir),
"Depth": 0,
"SplitIntoChunks": 1,
"PrintImages": True,
"CommonHeadersOnly": False,
"UseProcesses": 0,
"FlattenedToLevel": "patient",
"is16Bit": True,
"SendEmail": False,
"YourEmail": "[email protected]"
}
config.update(**kwargs)
return config
def setup_method(self):
"""
Setup for tests
"""
self.default_out_dir = pytest.out_dir / \
'png-extraction/outputs-integration/TestImageExtractorModule'
def teardown_method(self):
"""
Cleanup after tests
"""
shutil.rmtree(self.default_out_dir)
def test_main(self):
"""
ImageExtractor.initialize_config_and_execute function executes successfully
Checks content of output dir
"""
config = self.generate_config()
ImageExtractor.initialize_config_and_execute(config)
assert len(
glob.glob(
f"{config['OutputDirectory']}/**/*.png",
recursive=True
)
) != 0
|
the-stack_0_24623
|
import os
import importlib
import ssl
from funcy import distinct, remove
from flask_talisman import talisman
from .helpers import fix_assets_path, array_from_string, parse_boolean, int_or_none, set_from_string
from .organization import DATE_FORMAT, TIME_FORMAT # noqa
REDIS_URL = os.environ.get('REDASH_REDIS_URL', os.environ.get('REDIS_URL', "redis://localhost:6379/0"))
PROXIES_COUNT = int(os.environ.get('REDASH_PROXIES_COUNT', "1"))
STATSD_HOST = os.environ.get('REDASH_STATSD_HOST', "127.0.0.1")
STATSD_PORT = int(os.environ.get('REDASH_STATSD_PORT', "8125"))
STATSD_PREFIX = os.environ.get('REDASH_STATSD_PREFIX', "redash")
STATSD_USE_TAGS = parse_boolean(os.environ.get('REDASH_STATSD_USE_TAGS', "false"))
# Connection settings for Redash's own database (where we store the queries, results, etc)
SQLALCHEMY_DATABASE_URI = os.environ.get("REDASH_DATABASE_URL", os.environ.get('DATABASE_URL', "postgresql:///postgres"))
SQLALCHEMY_MAX_OVERFLOW = int_or_none(os.environ.get("SQLALCHEMY_MAX_OVERFLOW"))
SQLALCHEMY_POOL_SIZE = int_or_none(os.environ.get("SQLALCHEMY_POOL_SIZE"))
SQLALCHEMY_DISABLE_POOL = parse_boolean(os.environ.get("SQLALCHEMY_DISABLE_POOL", "false"))
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
# Celery related settings
CELERY_BROKER = os.environ.get("REDASH_CELERY_BROKER", REDIS_URL)
CELERY_RESULT_BACKEND = os.environ.get(
"REDASH_CELERY_RESULT_BACKEND",
os.environ.get("REDASH_CELERY_BACKEND", CELERY_BROKER))
CELERY_RESULT_EXPIRES = int(os.environ.get(
"REDASH_CELERY_RESULT_EXPIRES",
os.environ.get("REDASH_CELERY_TASK_RESULT_EXPIRES", 3600 * 4)))
CELERY_INIT_TIMEOUT = int(os.environ.get(
"REDASH_CELERY_INIT_TIMEOUT", 10))
CELERY_BROKER_USE_SSL = CELERY_BROKER.startswith('rediss')
CELERY_SSL_CONFIG = {
'ssl_cert_reqs': int(os.environ.get("REDASH_CELERY_BROKER_SSL_CERT_REQS", ssl.CERT_OPTIONAL)),
'ssl_ca_certs': os.environ.get("REDASH_CELERY_BROKER_SSL_CA_CERTS"),
'ssl_certfile': os.environ.get("REDASH_CELERY_BROKER_SSL_CERTFILE"),
'ssl_keyfile': os.environ.get("REDASH_CELERY_BROKER_SSL_KEYFILE"),
} if CELERY_BROKER_USE_SSL else None
CELERY_WORKER_PREFETCH_MULTIPLIER = int(os.environ.get("REDASH_CELERY_WORKER_PREFETCH_MULTIPLIER", 1))
CELERY_ACCEPT_CONTENT = os.environ.get("REDASH_CELERY_ACCEPT_CONTENT", "json").split(",")
CELERY_TASK_SERIALIZER = os.environ.get("REDASH_CELERY_TASK_SERIALIZER", "json")
CELERY_RESULT_SERIALIZER = os.environ.get("REDASH_CELERY_RESULT_SERIALIZER", "json")
# The following enables periodic job (every 5 minutes) of removing unused query results.
QUERY_RESULTS_CLEANUP_ENABLED = parse_boolean(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "true"))
QUERY_RESULTS_CLEANUP_COUNT = int(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_COUNT", "100"))
QUERY_RESULTS_CLEANUP_MAX_AGE = int(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_MAX_AGE", "7"))
SCHEMAS_REFRESH_SCHEDULE = int(os.environ.get("REDASH_SCHEMAS_REFRESH_SCHEDULE", 30))
SCHEMAS_REFRESH_QUEUE = os.environ.get("REDASH_SCHEMAS_REFRESH_QUEUE", "celery")
AUTH_TYPE = os.environ.get("REDASH_AUTH_TYPE", "api_key")
INVITATION_TOKEN_MAX_AGE = int(os.environ.get("REDASH_INVITATION_TOKEN_MAX_AGE", 60 * 60 * 24 * 7))
# The secret key to use in the Flask app for various cryptographic features
SECRET_KEY = os.environ.get("REDASH_COOKIE_SECRET", "c292a0a3aa32397cdb050e233733900f")
# The secret key to use when encrypting data source options
DATASOURCE_SECRET_KEY = os.environ.get('REDASH_SECRET_KEY', SECRET_KEY)
# Whether and how to redirect non-HTTP requests to HTTPS. Disabled by default.
ENFORCE_HTTPS = parse_boolean(os.environ.get("REDASH_ENFORCE_HTTPS", "false"))
ENFORCE_HTTPS_PERMANENT = parse_boolean(
os.environ.get("REDASH_ENFORCE_HTTPS_PERMANENT", "false"))
# Whether file downloads are enforced or not.
ENFORCE_FILE_SAVE = parse_boolean(
os.environ.get("REDASH_ENFORCE_FILE_SAVE", "true"))
# Whether to use secure cookies by default.
COOKIES_SECURE = parse_boolean(
os.environ.get("REDASH_COOKIES_SECURE", str(ENFORCE_HTTPS)))
# Whether the session cookie is set to secure.
SESSION_COOKIE_SECURE = parse_boolean(
os.environ.get("REDASH_SESSION_COOKIE_SECURE") or str(COOKIES_SECURE))
# Whether the session cookie is set HttpOnly.
SESSION_COOKIE_HTTPONLY = parse_boolean(
os.environ.get("REDASH_SESSION_COOKIE_HTTPONLY", "true"))
# Whether the session cookie is set to secure.
REMEMBER_COOKIE_SECURE = parse_boolean(
os.environ.get("REDASH_REMEMBER_COOKIE_SECURE") or str(COOKIES_SECURE))
# Whether the remember cookie is set HttpOnly.
REMEMBER_COOKIE_HTTPONLY = parse_boolean(
os.environ.get("REDASH_REMEMBER_COOKIE_HTTPONLY", "true"))
# Doesn't set X-Frame-Options by default since it's highly dependent
# on the specific deployment.
# See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options
# for more information.
FRAME_OPTIONS = os.environ.get("REDASH_FRAME_OPTIONS", "deny")
FRAME_OPTIONS_ALLOW_FROM = os.environ.get(
"REDASH_FRAME_OPTIONS_ALLOW_FROM", "")
# Whether and how to send Strict-Transport-Security response headers.
# See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security
# for more information.
HSTS_ENABLED = parse_boolean(
os.environ.get("REDASH_HSTS_ENABLED") or str(ENFORCE_HTTPS))
HSTS_PRELOAD = parse_boolean(os.environ.get("REDASH_HSTS_PRELOAD", "false"))
HSTS_MAX_AGE = int(
os.environ.get("REDASH_HSTS_MAX_AGE", talisman.ONE_YEAR_IN_SECS))
HSTS_INCLUDE_SUBDOMAINS = parse_boolean(
os.environ.get("REDASH_HSTS_INCLUDE_SUBDOMAINS", "false"))
# Whether and how to send Content-Security-Policy response headers.
# See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy
# for more information.
# Overriding this value via an environment variable requires setting it
# as a string in the general CSP format of a semicolon separated list of
# individual CSP directives, see https://github.com/GoogleCloudPlatform/flask-talisman#example-7
# for more information. E.g.:
CONTENT_SECURITY_POLICY = os.environ.get(
"REDASH_CONTENT_SECURITY_POLICY",
"default-src 'self'; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-eval'; font-src 'self' data:; img-src 'self' http: https: data:; object-src 'none'; frame-ancestors 'none'; frame-src redash.io;"
)
CONTENT_SECURITY_POLICY_REPORT_URI = os.environ.get(
"REDASH_CONTENT_SECURITY_POLICY_REPORT_URI", "")
CONTENT_SECURITY_POLICY_REPORT_ONLY = parse_boolean(
os.environ.get("REDASH_CONTENT_SECURITY_POLICY_REPORT_ONLY", "false"))
CONTENT_SECURITY_POLICY_NONCE_IN = array_from_string(
os.environ.get("REDASH_CONTENT_SECURITY_POLICY_NONCE_IN", ""))
# Whether and how to send Referrer-Policy response headers. Defaults to
# 'strict-origin-when-cross-origin'.
# See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy
# for more information.
REFERRER_POLICY = os.environ.get(
"REDASH_REFERRER_POLICY", "strict-origin-when-cross-origin")
# Whether and how to send Feature-Policy response headers. Defaults to
# an empty value.
# See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Feature-Policy
# for more information.
FEATURE_POLICY = os.environ.get("REDASH_FEATURE_POLICY", "")
MULTI_ORG = parse_boolean(os.environ.get("REDASH_MULTI_ORG", "false"))
GOOGLE_CLIENT_ID = os.environ.get("REDASH_GOOGLE_CLIENT_ID", "")
GOOGLE_CLIENT_SECRET = os.environ.get("REDASH_GOOGLE_CLIENT_SECRET", "")
GOOGLE_OAUTH_ENABLED = bool(GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET)
# Enables the use of an externally-provided and trusted remote user via an HTTP
# header. The "user" must be an email address.
#
# By default the trusted header is X-Forwarded-Remote-User. You can change
# this by setting REDASH_REMOTE_USER_HEADER.
#
# Enabling this authentication method is *potentially dangerous*, and it is
# your responsibility to ensure that only a trusted frontend (usually on the
# same server) can talk to the redash backend server, otherwise people will be
# able to login as anyone they want by directly talking to the redash backend.
# You must *also* ensure that any special header in the original request is
# removed or always overwritten by your frontend, otherwise your frontend may
# pass it through to the backend unchanged.
#
# Note that redash will only check the remote user once, upon the first need
# for a login, and then set a cookie which keeps the user logged in. Dropping
# the remote user header after subsequent requests won't automatically log the
# user out. Doing so could be done with further work, but usually it's
# unnecessary.
#
# If you also set the organization setting auth_password_login_enabled to false,
# then your authentication will be seamless. Otherwise a link will be presented
# on the login page to trigger remote user auth.
REMOTE_USER_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_REMOTE_USER_LOGIN_ENABLED", "false"))
REMOTE_USER_HEADER = os.environ.get("REDASH_REMOTE_USER_HEADER", "X-Forwarded-Remote-User")
# If the organization setting auth_password_login_enabled is not false, then users will still be
# able to login through Redash instead of the LDAP server
LDAP_LOGIN_ENABLED = parse_boolean(os.environ.get('REDASH_LDAP_LOGIN_ENABLED', 'false'))
# Bind LDAP using SSL. Default is False
LDAP_SSL = parse_boolean(os.environ.get('REDASH_LDAP_USE_SSL', 'false'))
# Choose authentication method(SIMPLE, ANONYMOUS or NTLM). Default is SIMPLE
LDAP_AUTH_METHOD = os.environ.get('REDASH_LDAP_AUTH_METHOD', 'SIMPLE')
# The LDAP directory address (ex. ldap://10.0.10.1:389)
LDAP_HOST_URL = os.environ.get('REDASH_LDAP_URL', None)
# The DN & password used to connect to LDAP to determine the identity of the user being authenticated.
# For AD this should be "org\\user".
LDAP_BIND_DN = os.environ.get('REDASH_LDAP_BIND_DN', None)
LDAP_BIND_DN_PASSWORD = os.environ.get('REDASH_LDAP_BIND_DN_PASSWORD', '')
# AD/LDAP email and display name keys
LDAP_DISPLAY_NAME_KEY = os.environ.get('REDASH_LDAP_DISPLAY_NAME_KEY', 'displayName')
LDAP_EMAIL_KEY = os.environ.get('REDASH_LDAP_EMAIL_KEY', "mail")
# Prompt that should be shown above username/email field.
LDAP_CUSTOM_USERNAME_PROMPT = os.environ.get('REDASH_LDAP_CUSTOM_USERNAME_PROMPT', 'LDAP/AD/SSO username:')
# LDAP Search DN TEMPLATE (for AD this should be "(sAMAccountName=%(username)s)"")
LDAP_SEARCH_TEMPLATE = os.environ.get('REDASH_LDAP_SEARCH_TEMPLATE', '(cn=%(username)s)')
# The schema to bind to (ex. cn=users,dc=ORG,dc=local)
LDAP_SEARCH_DN = os.environ.get('REDASH_LDAP_SEARCH_DN', os.environ.get('REDASH_SEARCH_DN'))
STATIC_ASSETS_PATH = fix_assets_path(os.environ.get("REDASH_STATIC_ASSETS_PATH", "../client/dist/"))
JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600 * 12))
LOG_LEVEL = os.environ.get("REDASH_LOG_LEVEL", "INFO")
LOG_STDOUT = parse_boolean(os.environ.get('REDASH_LOG_STDOUT', 'false'))
LOG_PREFIX = os.environ.get('REDASH_LOG_PREFIX', '')
LOG_FORMAT = os.environ.get('REDASH_LOG_FORMAT', LOG_PREFIX + '[%(asctime)s][PID:%(process)d][%(levelname)s][%(name)s] %(message)s')
CELERYD_WORKER_LOG_FORMAT = os.environ.get(
"REDASH_CELERYD_WORKER_LOG_FORMAT",
os.environ.get('REDASH_CELERYD_LOG_FORMAT',
LOG_PREFIX + '[%(asctime)s][PID:%(process)d][%(levelname)s][%(processName)s] %(message)s'))
CELERYD_WORKER_TASK_LOG_FORMAT = os.environ.get(
"REDASH_CELERYD_WORKER_TASK_LOG_FORMAT",
os.environ.get('REDASH_CELERYD_TASK_LOG_FORMAT',
(LOG_PREFIX + '[%(asctime)s][PID:%(process)d][%(levelname)s][%(processName)s] '
'task_name=%(task_name)s '
'task_id=%(task_id)s %(message)s')))
# Mail settings:
MAIL_SERVER = os.environ.get('REDASH_MAIL_SERVER', 'localhost')
MAIL_PORT = int(os.environ.get('REDASH_MAIL_PORT', 25))
MAIL_USE_TLS = parse_boolean(os.environ.get('REDASH_MAIL_USE_TLS', 'false'))
MAIL_USE_SSL = parse_boolean(os.environ.get('REDASH_MAIL_USE_SSL', 'false'))
MAIL_USERNAME = os.environ.get('REDASH_MAIL_USERNAME', None)
MAIL_PASSWORD = os.environ.get('REDASH_MAIL_PASSWORD', None)
MAIL_DEFAULT_SENDER = os.environ.get('REDASH_MAIL_DEFAULT_SENDER', None)
MAIL_MAX_EMAILS = os.environ.get('REDASH_MAIL_MAX_EMAILS', None)
MAIL_ASCII_ATTACHMENTS = parse_boolean(os.environ.get('REDASH_MAIL_ASCII_ATTACHMENTS', 'false'))
def email_server_is_configured():
return MAIL_DEFAULT_SENDER is not None
HOST = os.environ.get('REDASH_HOST', '')
SEND_FAILURE_EMAIL_INTERVAL = int(os.environ.get('REDASH_SEND_FAILURE_EMAIL_INTERVAL', 60))
MAX_FAILURE_REPORTS_PER_QUERY = int(os.environ.get('REDASH_MAX_FAILURE_REPORTS_PER_QUERY', 100))
ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE = os.environ.get('REDASH_ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE', "({state}) {alert_name}")
# How many requests are allowed per IP to the login page before
# being throttled?
# See https://flask-limiter.readthedocs.io/en/stable/#rate-limit-string-notation
RATELIMIT_ENABLED = parse_boolean(os.environ.get('REDASH_RATELIMIT_ENABLED', 'true'))
THROTTLE_LOGIN_PATTERN = os.environ.get('REDASH_THROTTLE_LOGIN_PATTERN', '50/hour')
LIMITER_STORAGE = os.environ.get("REDASH_LIMITER_STORAGE", REDIS_URL)
# CORS settings for the Query Result API (and possibly future external APIs).
# In most cases all you need to do is set REDASH_CORS_ACCESS_CONTROL_ALLOW_ORIGIN
# to the calling domain (or domains in a comma separated list).
ACCESS_CONTROL_ALLOW_ORIGIN = set_from_string(os.environ.get("REDASH_CORS_ACCESS_CONTROL_ALLOW_ORIGIN", ""))
ACCESS_CONTROL_ALLOW_CREDENTIALS = parse_boolean(os.environ.get("REDASH_CORS_ACCESS_CONTROL_ALLOW_CREDENTIALS", "false"))
ACCESS_CONTROL_REQUEST_METHOD = os.environ.get("REDASH_CORS_ACCESS_CONTROL_REQUEST_METHOD", "GET, POST, PUT")
ACCESS_CONTROL_ALLOW_HEADERS = os.environ.get("REDASH_CORS_ACCESS_CONTROL_ALLOW_HEADERS", "Content-Type")
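# Example (illustrative): to allow a single external origin with credentials,
# set REDASH_CORS_ACCESS_CONTROL_ALLOW_ORIGIN="https://app.example.com" and
# REDASH_CORS_ACCESS_CONTROL_ALLOW_CREDENTIALS="true" in the environment.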
# Query Runners
default_query_runners = [
'redash.query_runner.athena',
'redash.query_runner.big_query',
'redash.query_runner.google_spreadsheets',
'redash.query_runner.graphite',
'redash.query_runner.mongodb',
'redash.query_runner.couchbase',
'redash.query_runner.mysql',
'redash.query_runner.pg',
'redash.query_runner.url',
'redash.query_runner.influx_db',
'redash.query_runner.elasticsearch',
'redash.query_runner.amazon_elasticsearch',
'redash.query_runner.presto',
'redash.query_runner.databricks',
'redash.query_runner.hive_ds',
'redash.query_runner.impala_ds',
'redash.query_runner.vertica',
'redash.query_runner.clickhouse',
'redash.query_runner.yandex_metrica',
'redash.query_runner.rockset',
'redash.query_runner.treasuredata',
'redash.query_runner.sqlite',
'redash.query_runner.dynamodb_sql',
'redash.query_runner.mssql',
'redash.query_runner.memsql_ds',
'redash.query_runner.mapd',
'redash.query_runner.jql',
'redash.query_runner.google_analytics',
'redash.query_runner.axibase_tsd',
'redash.query_runner.salesforce',
'redash.query_runner.query_results',
'redash.query_runner.prometheus',
'redash.query_runner.qubole',
'redash.query_runner.db2',
'redash.query_runner.druid',
'redash.query_runner.kylin',
'redash.query_runner.drill',
'redash.query_runner.uptycs',
'redash.query_runner.snowflake',
'redash.query_runner.phoenix',
'redash.query_runner.json_ds',
'redash.query_runner.cass',
'redash.query_runner.dgraph',
'redash.query_runner.azure_kusto',
]
enabled_query_runners = array_from_string(os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join(default_query_runners)))
additional_query_runners = array_from_string(os.environ.get("REDASH_ADDITIONAL_QUERY_RUNNERS", ""))
disabled_query_runners = array_from_string(os.environ.get("REDASH_DISABLED_QUERY_RUNNERS", ""))
QUERY_RUNNERS = remove(set(disabled_query_runners), distinct(enabled_query_runners + additional_query_runners))
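# Example (illustrative): with
# REDASH_ENABLED_QUERY_RUNNERS="redash.query_runner.pg,redash.query_runner.mysql"
# and REDASH_DISABLED_QUERY_RUNNERS="redash.query_runner.mysql",
# QUERY_RUNNERS resolves to ["redash.query_runner.pg"].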
dynamic_settings = importlib.import_module(os.environ.get('REDASH_DYNAMIC_SETTINGS_MODULE', 'redash.settings.dynamic_settings'))
# Destinations
default_destinations = [
'redash.destinations.email',
'redash.destinations.slack',
'redash.destinations.webhook',
'redash.destinations.hipchat',
'redash.destinations.mattermost',
'redash.destinations.chatwork',
'redash.destinations.pagerduty',
'redash.destinations.hangoutschat'
]
enabled_destinations = array_from_string(os.environ.get("REDASH_ENABLED_DESTINATIONS", ",".join(default_destinations)))
additional_destinations = array_from_string(os.environ.get("REDASH_ADDITIONAL_DESTINATIONS", ""))
DESTINATIONS = distinct(enabled_destinations + additional_destinations)
EVENT_REPORTING_WEBHOOKS = array_from_string(os.environ.get("REDASH_EVENT_REPORTING_WEBHOOKS", ""))
# Support for Sentry (https://getsentry.com/). Just set your Sentry DSN to enable it:
SENTRY_DSN = os.environ.get("REDASH_SENTRY_DSN", "")
# Client side toggles:
ALLOW_SCRIPTS_IN_USER_INPUT = parse_boolean(os.environ.get("REDASH_ALLOW_SCRIPTS_IN_USER_INPUT", "false"))
DASHBOARD_REFRESH_INTERVALS = map(int, array_from_string(os.environ.get("REDASH_DASHBOARD_REFRESH_INTERVALS", "60,300,600,1800,3600,43200,86400")))
QUERY_REFRESH_INTERVALS = map(int, array_from_string(os.environ.get("REDASH_QUERY_REFRESH_INTERVALS", "60, 300, 600, 900, 1800, 3600, 7200, 10800, 14400, 18000, 21600, 25200, 28800, 32400, 36000, 39600, 43200, 86400, 604800, 1209600, 2592000")))
PAGE_SIZE = int(os.environ.get('REDASH_PAGE_SIZE', 20))
PAGE_SIZE_OPTIONS = map(int, array_from_string(os.environ.get("REDASH_PAGE_SIZE_OPTIONS", "5,10,20,50,100")))
TABLE_CELL_MAX_JSON_SIZE = int(os.environ.get('REDASH_TABLE_CELL_MAX_JSON_SIZE', 50000))
# Features:
VERSION_CHECK = parse_boolean(os.environ.get("REDASH_VERSION_CHECK", "true"))
FEATURE_DISABLE_REFRESH_QUERIES = parse_boolean(os.environ.get("REDASH_FEATURE_DISABLE_REFRESH_QUERIES", "false"))
FEATURE_SHOW_QUERY_RESULTS_COUNT = parse_boolean(os.environ.get("REDASH_FEATURE_SHOW_QUERY_RESULTS_COUNT", "true"))
FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS = parse_boolean(os.environ.get("REDASH_FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS", "false"))
FEATURE_AUTO_PUBLISH_NAMED_QUERIES = parse_boolean(os.environ.get("REDASH_FEATURE_AUTO_PUBLISH_NAMED_QUERIES", "true"))
FEATURE_EXTENDED_ALERT_OPTIONS = parse_boolean(os.environ.get("REDASH_FEATURE_EXTENDED_ALERT_OPTIONS", "false"))
# BigQuery
BIGQUERY_HTTP_TIMEOUT = int(os.environ.get("REDASH_BIGQUERY_HTTP_TIMEOUT", "600"))
# Allow Parameters in Embeds
# WARNING: Deprecated!
# See https://discuss.redash.io/t/support-for-parameters-in-embedded-visualizations/3337 for more details.
ALLOW_PARAMETERS_IN_EMBEDS = parse_boolean(os.environ.get("REDASH_ALLOW_PARAMETERS_IN_EMBEDS", "false"))
# Enhance schema fetching
SCHEMA_RUN_TABLE_SIZE_CALCULATIONS = parse_boolean(os.environ.get("REDASH_SCHEMA_RUN_TABLE_SIZE_CALCULATIONS", "false"))
# kylin
KYLIN_OFFSET = int(os.environ.get('REDASH_KYLIN_OFFSET', 0))
KYLIN_LIMIT = int(os.environ.get('REDASH_KYLIN_LIMIT', 50000))
KYLIN_ACCEPT_PARTIAL = parse_boolean(os.environ.get("REDASH_KYLIN_ACCEPT_PARTIAL", "false"))
# sqlparse
SQLPARSE_FORMAT_OPTIONS = {
'reindent': parse_boolean(os.environ.get('SQLPARSE_FORMAT_REINDENT', 'true')),
'keyword_case': os.environ.get('SQLPARSE_FORMAT_KEYWORD_CASE', 'upper'),
}
|
the-stack_0_24624
|
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils import unittest
from rest_framework import generics, status, filters
from rest_framework.compat import django_filters, patterns, url
from rest_framework.tests.models import FilterableItem, BasicModel
factory = RequestFactory()
if django_filters:
# Basic filter on a list view.
class FilterFieldsRootView(generics.ListCreateAPIView):
model = FilterableItem
filter_fields = ['decimal', 'date']
filter_backend = filters.DjangoFilterBackend
    # These classes are used to test a filter class.
class SeveralFieldsFilter(django_filters.FilterSet):
text = django_filters.CharFilter(lookup_type='icontains')
decimal = django_filters.NumberFilter(lookup_type='lt')
date = django_filters.DateFilter(lookup_type='gt')
class Meta:
model = FilterableItem
fields = ['text', 'decimal', 'date']
class FilterClassRootView(generics.ListCreateAPIView):
model = FilterableItem
filter_class = SeveralFieldsFilter
filter_backend = filters.DjangoFilterBackend
# These classes are used to test a misconfigured filter class.
class MisconfiguredFilter(django_filters.FilterSet):
text = django_filters.CharFilter(lookup_type='icontains')
class Meta:
model = BasicModel
fields = ['text']
class IncorrectlyConfiguredRootView(generics.ListCreateAPIView):
model = FilterableItem
filter_class = MisconfiguredFilter
filter_backend = filters.DjangoFilterBackend
class FilterClassDetailView(generics.RetrieveAPIView):
model = FilterableItem
filter_class = SeveralFieldsFilter
filter_backend = filters.DjangoFilterBackend
urlpatterns = patterns('',
url(r'^(?P<pk>\d+)/$', FilterClassDetailView.as_view(), name='detail-view'),
url(r'^$', FilterClassRootView.as_view(), name='root-view'),
)
class CommonFilteringTestCase(TestCase):
def _serialize_object(self, obj):
return {'id': obj.id, 'text': obj.text, 'decimal': obj.decimal, 'date': obj.date}
def setUp(self):
"""
Create 10 FilterableItem instances.
"""
base_data = ('a', Decimal('0.25'), datetime.date(2012, 10, 8))
for i in range(10):
text = chr(i + ord(base_data[0])) * 3 # Produces string 'aaa', 'bbb', etc.
decimal = base_data[1] + i
date = base_data[2] - datetime.timedelta(days=i * 2)
FilterableItem(text=text, decimal=decimal, date=date).save()
self.objects = FilterableItem.objects
self.data = [
self._serialize_object(obj)
for obj in self.objects.all()
]
class IntegrationTestFiltering(CommonFilteringTestCase):
"""
Integration tests for filtered list views.
"""
@unittest.skipUnless(django_filters, 'django-filters not installed')
def test_get_filtered_fields_root_view(self):
"""
        GET requests to a ListCreateAPIView with filter_fields set should return filtered results.
"""
view = FilterFieldsRootView.as_view()
# Basic test with no filter.
request = factory.get('/')
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data)
# Tests that the decimal filter works.
search_decimal = Decimal('2.25')
request = factory.get('/?decimal=%s' % search_decimal)
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if f['decimal'] == search_decimal]
self.assertEqual(response.data, expected_data)
# Tests that the date filter works.
search_date = datetime.date(2012, 9, 22)
request = factory.get('/?date=%s' % search_date) # search_date str: '2012-09-22'
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if f['date'] == search_date]
self.assertEqual(response.data, expected_data)
@unittest.skipUnless(django_filters, 'django-filters not installed')
def test_get_filtered_class_root_view(self):
"""
GET requests to filtered ListCreateAPIView that have a filter_class set
should return filtered results.
"""
view = FilterClassRootView.as_view()
# Basic test with no filter.
request = factory.get('/')
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data)
# Tests that the decimal filter set with 'lt' in the filter class works.
search_decimal = Decimal('4.25')
request = factory.get('/?decimal=%s' % search_decimal)
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if f['decimal'] < search_decimal]
self.assertEqual(response.data, expected_data)
# Tests that the date filter set with 'gt' in the filter class works.
search_date = datetime.date(2012, 10, 2)
request = factory.get('/?date=%s' % search_date) # search_date str: '2012-10-02'
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if f['date'] > search_date]
self.assertEqual(response.data, expected_data)
# Tests that the text filter set with 'icontains' in the filter class works.
search_text = 'ff'
request = factory.get('/?text=%s' % search_text)
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if search_text in f['text'].lower()]
self.assertEqual(response.data, expected_data)
        # Tests that multiple filters work.
search_decimal = Decimal('5.25')
search_date = datetime.date(2012, 10, 2)
request = factory.get('/?decimal=%s&date=%s' % (search_decimal, search_date))
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if f['date'] > search_date and
f['decimal'] < search_decimal]
self.assertEqual(response.data, expected_data)
@unittest.skipUnless(django_filters, 'django-filters not installed')
def test_incorrectly_configured_filter(self):
"""
An error should be displayed when the filter class is misconfigured.
"""
view = IncorrectlyConfiguredRootView.as_view()
request = factory.get('/')
self.assertRaises(AssertionError, view, request)
@unittest.skipUnless(django_filters, 'django-filters not installed')
def test_unknown_filter(self):
"""
GET requests with filters that aren't configured should return 200.
"""
view = FilterFieldsRootView.as_view()
search_integer = 10
request = factory.get('/?integer=%s' % search_integer)
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
class IntegrationTestDetailFiltering(CommonFilteringTestCase):
"""
Integration tests for filtered detail views.
"""
urls = 'rest_framework.tests.filterset'
def _get_url(self, item):
return reverse('detail-view', kwargs=dict(pk=item.pk))
@unittest.skipUnless(django_filters, 'django-filters not installed')
def test_get_filtered_detail_view(self):
"""
GET requests to filtered RetrieveAPIView that have a filter_class set
should return filtered results.
"""
item = self.objects.all()[0]
data = self._serialize_object(item)
# Basic test with no filter.
response = self.client.get(self._get_url(item))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, data)
# Tests that the decimal filter set that should fail.
search_decimal = Decimal('4.25')
high_item = self.objects.filter(decimal__gt=search_decimal)[0]
response = self.client.get('{url}?decimal={param}'.format(url=self._get_url(high_item), param=search_decimal))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
# Tests that the decimal filter set that should succeed.
search_decimal = Decimal('4.25')
low_item = self.objects.filter(decimal__lt=search_decimal)[0]
low_item_data = self._serialize_object(low_item)
response = self.client.get('{url}?decimal={param}'.format(url=self._get_url(low_item), param=search_decimal))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, low_item_data)
        # Tests that multiple filters work.
search_decimal = Decimal('5.25')
search_date = datetime.date(2012, 10, 2)
valid_item = self.objects.filter(decimal__lt=search_decimal, date__gt=search_date)[0]
valid_item_data = self._serialize_object(valid_item)
response = self.client.get('{url}?decimal={decimal}&date={date}'.format(url=self._get_url(valid_item), decimal=search_decimal, date=search_date))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, valid_item_data)
|
the-stack_0_24626
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author:nikan
@file: __init__.py.py
@time: 22/01/2018 2:59 PM
"""
import inspect
import logging
import pkgutil
import sys
import time
import traceback
from datetime import datetime
from importlib import import_module, reload
from os.path import join
from urllib.parse import urlparse
from jinja2 import Template
from scrapy import Spider
from PyScraper.settings import SCRIPT_TEMPLATES_DIR, GOV_SPIDER_DIR, GOV_SPIDER_MODULE
logger = logging.getLogger(__name__)
def run_in_thread(func, *args, daemon=True, **kwargs):
"""Run function in thread, return a Thread object"""
from threading import Thread
thread = Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = daemon
thread.start()
return thread
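# Usage sketch for run_in_thread (the worker function below is hypothetical):
#
#   def crawl(url, retries=3):
#       ...
#
#   t = run_in_thread(crawl, "http://example.com", retries=5)  # daemon thread by default
#   t.join()  # optional: block until the worker finishes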
def import_class(cl):
d = cl.rfind(".")
classname = cl[d + 1:len(cl)]
module = import_module(cl[0:d])
return getattr(module, classname)
def get_class_source(kls_name):
cls = import_class(kls_name)
source = inspect.getsource(cls)
return source
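# Example with a pure-Python standard-library class, so it works without any
# project-specific imports:
#
#   cls = import_class("json.JSONDecoder")      # -> <class 'json.decoder.JSONDecoder'>
#   src = get_class_source("json.JSONDecoder")  # -> source text of that class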
def get_all_submodules(*, root_module_name):
"""
    Import the root module and load all of its submodules.
    :param root_module_name: dotted path of the root package to walk
    :return: a tuple of (loaded submodules, dotted submodule names)
"""
start = time.process_time()
mods = []
mod_names = []
mod = import_module(root_module_name)
sys.modules[root_module_name] = mod
for loader, module_name, is_pkg in pkgutil.walk_packages(mod.__path__, onerror=lambda x: print(x)):
try:
sub_module_name = module_name
loader.find_module(sub_module_name).load_module(sub_module_name)
if is_pkg:
                logger.debug("%s is pkg", sub_module_name)
pass
else:
mod_names.append(root_module_name + "." + sub_module_name)
mods.append(import_module(sub_module_name))
except Exception as e:
traceback.print_exc()
print('load_cost_time:', time.process_time() - start)
return mods, mod_names
def walk_modules(path):
"""Loads a module and all its submodules from the given module path and
returns them. If *any* module throws an exception while importing, that
exception is thrown back.
For example: walk_modules('scrapy.utils')
"""
mods = []
mod = import_module(path)
reload(mod)
mods.append(mod)
if hasattr(mod, '__path__'):
for _, subpath, ispkg in pkgutil.iter_modules(mod.__path__):
fullpath = path + '.' + subpath
if ispkg:
mods += walk_modules(fullpath)
else:
submod = import_module(fullpath)
mods.append(submod)
return mods
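# Worked example with a standard-library package (exact submodule list may vary
# by Python version):
#
#   walk_modules("json")
#   # -> [<module 'json'>, <module 'json.decoder'>, <module 'json.encoder'>,
#   #     <module 'json.scanner'>, <module 'json.tool'>]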
def get_classes_from_submodules(submodules, class_type=None):
"""
    Collect the classes defined in the given modules.
    :param submodules: an iterable of imported modules
    :param class_type: if given, only return strict subclasses of this type
    :return: a list of class objects
"""
classes = []
for module in submodules:
for name, obj in inspect.getmembers(module):
if inspect.isclass(obj):
if not class_type or (
class_type and issubclass(obj,
class_type) and obj.__qualname__ != class_type.__qualname__):
classes.append(obj)
return classes
def load_spiders(*, submodules, class_type=Spider):
return get_classes_from_submodules(submodules, class_type=class_type)
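# Usage sketch combining the helpers above; the package path is hypothetical:
#
#   submodules, names = get_all_submodules(root_module_name="PyScraper.spiders")
#   spiders = load_spiders(submodules=submodules)  # only scrapy.Spider subclasses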
def get_full_classname(klass):
return klass.__module__ + "." + klass.__qualname__
def create_script(*, script_name, rules, start_url, mail_to, script_type=None):
if script_type == 'gov':
return create_gov_script(script_name, rules, start_url, mail_to)
def create_gov_script(spider_name, rules, start_url, mail_to):
"""
    Create a new government error-correction spider script from the template.
:param spider_name: the spider name
:param rules: the rules for government error correction
:param start_url: start url for spider
:param mail_to: mail receiver when rule error occurs
    :return: dotted path to the generated spider class
"""
if not spider_name or not start_url or not mail_to:
raise Exception("parameters should not be None")
if not start_url.startswith("http"):
start_url = "http://" + start_url
allowed_domain = urlparse(start_url).netloc
timestamp = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
with open(SCRIPT_TEMPLATES_DIR + '/gov_template', 'r') as f:
script_template = Template(f.read())
result = script_template.render(spider_name=spider_name, rules=rules, start_url=start_url,
allowed_domain=allowed_domain,
mail_to=mail_to, datetime=timestamp)
today = str(datetime.today().date())
path = join(GOV_SPIDER_DIR, today + "_{spider_name}.py".format(spider_name=spider_name))
with open(path, 'w+') as f:
f.write(result)
spider_modulename = GOV_SPIDER_MODULE + ".{today}_{spider_name}.{spider_name}Spider".format(today=today,
spider_name=spider_name)
return spider_modulename
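# Usage sketch (all argument values are made up; the rules format depends on the
# gov_template, and the generated file lands under GOV_SPIDER_DIR):
#
#   module_path = create_script(
#       script_name="example_city",
#       rules=[...],
#       start_url="www.example.gov.cn",
#       mail_to="alerts@example.com",
#       script_type="gov")
#   spider_cls = import_class(module_path)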
|
the-stack_0_24632
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetArchivedStickers(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``123``
- ID: ``0x57f17692``
Parameters:
offset_id: ``int`` ``64-bit``
limit: ``int`` ``32-bit``
masks (optional): ``bool``
Returns:
:obj:`messages.ArchivedStickers <pyrogram.raw.base.messages.ArchivedStickers>`
"""
__slots__: List[str] = ["offset_id", "limit", "masks"]
ID = 0x57f17692
QUALNAME = "functions.messages.GetArchivedStickers"
def __init__(self, *, offset_id: int, limit: int, masks: Union[None, bool] = None) -> None:
self.offset_id = offset_id # long
self.limit = limit # int
self.masks = masks # flags.0?true
@staticmethod
def read(data: BytesIO, *args: Any) -> "GetArchivedStickers":
flags = Int.read(data)
masks = True if flags & (1 << 0) else False
offset_id = Long.read(data)
limit = Int.read(data)
return GetArchivedStickers(offset_id=offset_id, limit=limit, masks=masks)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
flags = 0
flags |= (1 << 0) if self.masks else 0
data.write(Int(flags))
data.write(Long(self.offset_id))
data.write(Int(self.limit))
return data.getvalue()
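# Usage sketch (illustrative only): on this API layer raw functions are usually
# dispatched through an already-started pyrogram.Client, here assumed to be `app`.
#
#   from pyrogram.raw.functions.messages import GetArchivedStickers
#
#   result = await app.send(GetArchivedStickers(offset_id=0, limit=20))
#   # -> a messages.ArchivedStickers object with up to 20 archived sticker sets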
|
the-stack_0_24634
|
from setuptools import setup, find_packages
__version__ = '1.3.1'
setup(
version=__version__,
name='ufs_sdk',
packages=find_packages(),
install_requires=[
'requests'
],
description='UFS Python SDK',
author='Travel Managment Consulting',
author_email='[email protected]',
url='https://github.com/tmconsulting/ufs-python-sdk',
download_url='https://github.com/tmconsulting/ufs-python-sdk/archive/%s.tar.gz' % __version__,
license='MIT License',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
the-stack_0_24635
|
"""HTTP client functional tests."""
import asyncio
import binascii
import cgi
import contextlib
import email.parser
import gc
import http.server
import io
import json
import logging
import os
import os.path
import re
import ssl
import sys
import threading
import traceback
import unittest
import urllib.parse
from http.cookies import SimpleCookie
from unittest import mock
from multidict import MultiDict
import aiohttp
import aiohttp.http
from aiohttp import client, helpers, test_utils, web
from aiohttp.multipart import MultipartWriter
from aiohttp.test_utils import run_briefly, unused_port
@contextlib.contextmanager
def run_server(loop, *, listen_addr=('127.0.0.1', 0),
use_ssl=False, router=None):
properties = {}
transports = []
class HttpRequestHandler:
def __init__(self, addr):
host, port = addr
self.host = host
self.port = port
self.address = addr
self._url = '{}://{}:{}'.format(
'https' if use_ssl else 'http', host, port)
def __getitem__(self, key):
return properties[key]
def __setitem__(self, key, value):
properties[key] = value
def url(self, *suffix):
return urllib.parse.urljoin(
self._url, '/'.join(str(s) for s in suffix))
async def handler(request):
if properties.get('close', False):
return
for hdr, val in request.message.headers.items():
if (hdr.upper() == 'EXPECT') and (val == '100-continue'):
request.writer.write(b'HTTP/1.0 100 Continue\r\n\r\n')
break
rob = router(properties, request)
return (await rob.dispatch())
class TestHttpServer(web.RequestHandler):
def connection_made(self, transport):
transports.append(transport)
super().connection_made(transport)
if use_ssl:
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
keyfile = os.path.join(here, 'sample.key')
certfile = os.path.join(here, 'sample.crt')
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.load_cert_chain(certfile, keyfile)
else:
sslcontext = None
def run(loop, fut):
thread_loop = asyncio.new_event_loop()
asyncio.set_event_loop(thread_loop)
host, port = listen_addr
server_coroutine = thread_loop.create_server(
lambda: TestHttpServer(
web.Server(handler, loop=thread_loop), keepalive_timeout=0.5),
host, port, ssl=sslcontext)
server = thread_loop.run_until_complete(server_coroutine)
waiter = thread_loop.create_future()
loop.call_soon_threadsafe(
fut.set_result, (thread_loop, waiter,
server.sockets[0].getsockname()))
try:
thread_loop.run_until_complete(waiter)
finally:
# call pending connection_made if present
run_briefly(thread_loop)
# close opened transports
for tr in transports:
tr.close()
run_briefly(thread_loop) # call close callbacks
server.close()
thread_loop.stop()
thread_loop.close()
gc.collect()
fut = loop.create_future()
server_thread = threading.Thread(target=run, args=(loop, fut))
server_thread.start()
thread_loop, waiter, addr = loop.run_until_complete(fut)
try:
yield HttpRequestHandler(addr)
finally:
thread_loop.call_soon_threadsafe(waiter.set_result, None)
server_thread.join()
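# Typical use inside a test case (the same pattern appears throughout the tests
# below): the context manager yields a handler whose .url() builds request URLs
# against the ephemeral server.
#
#   with run_server(self.loop, router=Functional) as httpd:
#       url = httpd.url('method', 'get')  # e.g. 'http://127.0.0.1:<port>/method/get'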
class Router:
_response_version = "1.1"
_responses = http.server.BaseHTTPRequestHandler.responses
def __init__(self, props, request):
# headers
self._headers = http.client.HTTPMessage()
for hdr, val in request.message.headers.items():
self._headers.add_header(hdr, val)
self._props = props
self._request = request
self._method = request.message.method
self._uri = request.message.path
self._version = request.message.version
self._compression = request.message.compression
self._body = request.content
url = urllib.parse.urlsplit(self._uri)
self._path = url.path
self._query = url.query
@staticmethod
def define(rmatch):
def wrapper(fn):
f_locals = sys._getframe(1).f_locals
mapping = f_locals.setdefault('_mapping', [])
mapping.append((re.compile(rmatch), fn.__name__))
return fn
return wrapper
async def dispatch(self): # pragma: no cover
for route, fn in self._mapping:
match = route.match(self._path)
if match is not None:
try:
return (await getattr(self, fn)(match))
except Exception:
out = io.StringIO()
traceback.print_exc(file=out)
return (await self._response(500, out.getvalue()))
return ()
return (await self._response(self._start_response(404)))
def _start_response(self, code):
return web.Response(status=code)
async def _response(self, response, body=None,
headers=None, chunked=False, write_body=None):
r_headers = {}
for key, val in self._headers.items():
key = '-'.join(p.capitalize() for p in key.split('-'))
r_headers[key] = val
encoding = self._headers.get('content-encoding', '').lower()
if 'gzip' in encoding: # pragma: no cover
cmod = 'gzip'
elif 'deflate' in encoding:
cmod = 'deflate'
else:
cmod = ''
resp = {
'method': self._method,
'version': '%s.%s' % self._version,
'path': self._uri,
'headers': r_headers,
'origin': self._request.transport.get_extra_info('addr', ' ')[0],
'query': self._query,
'form': {},
'compression': cmod,
'multipart-data': []
}
if body: # pragma: no cover
resp['content'] = body
else:
resp['content'] = (
await self._request.read()).decode('utf-8', 'ignore')
ct = self._headers.get('content-type', '').lower()
# application/x-www-form-urlencoded
if ct == 'application/x-www-form-urlencoded':
resp['form'] = urllib.parse.parse_qs(self._body.decode('latin1'))
# multipart/form-data
elif ct.startswith('multipart/form-data'): # pragma: no cover
out = io.BytesIO()
for key, val in self._headers.items():
out.write(bytes('{}: {}\r\n'.format(key, val), 'latin1'))
b = await self._request.read()
out.write(b'\r\n')
out.write(b)
out.write(b'\r\n')
out.seek(0)
message = email.parser.BytesParser().parse(out)
if message.is_multipart():
for msg in message.get_payload():
if msg.is_multipart():
logging.warning('multipart msg is not expected')
else:
key, params = cgi.parse_header(
msg.get('content-disposition', ''))
params['data'] = msg.get_payload()
params['content-type'] = msg.get_content_type()
cte = msg.get('content-transfer-encoding')
if cte is not None:
resp['content-transfer-encoding'] = cte
resp['multipart-data'].append(params)
body = json.dumps(resp, indent=4, sort_keys=True)
# default headers
hdrs = [('Connection', 'close'),
('Content-Type', 'application/json')]
if chunked:
hdrs.append(('Transfer-Encoding', 'chunked'))
else:
hdrs.append(('Content-Length', str(len(body))))
# extra headers
if headers:
hdrs.extend(headers.items())
# headers
for key, val in hdrs:
response.headers[key] = val
if chunked:
self._request.writer.enable_chunking()
await response.prepare(self._request)
# write payload
if write_body:
try:
write_body(response, body)
except Exception:
return
else:
response.write(body.encode('utf8'))
return response
class Functional(Router):
@Router.define('/method/([A-Za-z]+)$')
def method(self, match):
return self._response(self._start_response(200))
@Router.define('/keepalive$')
def keepalive(self, match):
transport = self._request.transport
transport._requests = getattr(transport, '_requests', 0) + 1
resp = self._start_response(200)
if 'close=' in self._query:
return self._response(
resp, 'requests={}'.format(transport._requests))
else:
return self._response(
resp, 'requests={}'.format(transport._requests),
headers={'CONNECTION': 'keep-alive'})
@Router.define('/cookies$')
def cookies(self, match):
cookies = SimpleCookie()
cookies['c1'] = 'cookie1'
cookies['c2'] = 'cookie2'
resp = self._start_response(200)
for cookie in cookies.output(header='').split('\n'):
resp.headers.extend({'Set-Cookie': cookie.strip()})
resp.headers.extend(
{'Set-Cookie':
'ISAWPLB{A7F52349-3531-4DA9-8776-F74BC6F4F1BB}='
'{925EC0B8-CB17-4BEB-8A35-1033813B0523}; HttpOnly; Path=/'})
return self._response(resp)
@Router.define('/cookies_partial$')
def cookies_partial(self, match):
cookies = SimpleCookie()
cookies['c1'] = 'other_cookie1'
resp = self._start_response(200)
for cookie in cookies.output(header='').split('\n'):
resp.add_header('Set-Cookie', cookie.strip())
return self._response(resp)
@Router.define('/broken$')
def broken(self, match):
resp = self._start_response(200)
def write_body(resp, body):
self._transport.close()
raise ValueError()
return self._response(
resp,
body=json.dumps({'t': (b'0' * 1024).decode('utf-8')}),
write_body=write_body)
class TestHttpClientFunctional(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
# just in case if we have transport close callbacks
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
def test_POST_DATA_with_charset(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
form = aiohttp.FormData()
form.add_field('name', 'текст',
content_type='text/plain; charset=koi8-r')
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request('post', url, data=form))
content = self.loop.run_until_complete(r.json())
self.assertEqual(1, len(content['multipart-data']))
field = content['multipart-data'][0]
self.assertEqual('name', field['name'])
self.assertEqual('текст', field['data'])
self.assertEqual(r.status, 200)
r.close()
session.close()
def test_POST_DATA_with_charset_pub_request(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
form = aiohttp.FormData()
form.add_field('name', 'текст',
content_type='text/plain; charset=koi8-r')
r = self.loop.run_until_complete(
aiohttp.request('post', url, data=form, loop=self.loop))
content = self.loop.run_until_complete(r.json())
self.assertEqual(1, len(content['multipart-data']))
field = content['multipart-data'][0]
self.assertEqual('name', field['name'])
self.assertEqual('текст', field['data'])
self.assertEqual(r.status, 200)
r.close()
def test_POST_DATA_with_content_transfer_encoding(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
form = aiohttp.FormData()
form.add_field('name', b'123',
content_transfer_encoding='base64')
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request('post', url, data=form))
content = self.loop.run_until_complete(r.json())
self.assertEqual(1, len(content['multipart-data']))
field = content['multipart-data'][0]
self.assertEqual('name', field['name'])
self.assertEqual(b'123', binascii.a2b_base64(field['data']))
# self.assertEqual('base64', field['content-transfer-encoding'])
self.assertEqual(r.status, 200)
r.close()
session.close()
def test_POST_MULTIPART(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
with MultipartWriter('form-data') as writer:
writer.append('foo')
writer.append_json({'bar': 'баз'})
writer.append_form([('тест', '4'), ('сетс', '2')])
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request('post', url, data=writer))
content = self.loop.run_until_complete(r.json())
self.assertEqual(3, len(content['multipart-data']))
self.assertEqual({'content-type': 'text/plain', 'data': 'foo'},
content['multipart-data'][0])
self.assertEqual({'content-type': 'application/json',
'data': '{"bar": "\\u0431\\u0430\\u0437"}'},
content['multipart-data'][1])
self.assertEqual(
{'content-type': 'application/x-www-form-urlencoded',
'data': '%D1%82%D0%B5%D1%81%D1%82=4&'
'%D1%81%D0%B5%D1%82%D1%81=2'},
content['multipart-data'][2])
self.assertEqual(r.status, 200)
r.close()
session.close()
def test_POST_STREAM_DATA(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname, 'rb') as f:
data = f.read()
fut = self.loop.create_future()
@aiohttp.streamer
async def stream(writer):
await fut
writer.write(data)
self.loop.call_later(0.01, fut.set_result, True)
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request(
'post', url, data=stream(),
headers={'Content-Length': str(len(data))}))
content = self.loop.run_until_complete(r.json())
r.close()
session.close()
self.assertEqual(str(len(data)),
content['headers']['Content-Length'])
self.assertEqual('application/octet-stream',
content['headers']['Content-Type'])
def test_POST_StreamReader(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname, 'rb') as f:
data = f.read()
stream = aiohttp.StreamReader(loop=self.loop)
stream.feed_data(data)
stream.feed_eof()
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request(
'post', url, data=stream,
headers={'Content-Length': str(len(data))}))
content = self.loop.run_until_complete(r.json())
r.close()
session.close()
self.assertEqual(str(len(data)),
content['headers']['Content-Length'])
def test_POST_DataQueue(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname, 'rb') as f:
data = f.read()
stream = aiohttp.DataQueue(loop=self.loop)
stream.feed_data(data[:100], 100)
stream.feed_data(data[100:], len(data[100:]))
stream.feed_eof()
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request(
'post', url, data=stream,
headers={'Content-Length': str(len(data))}))
content = self.loop.run_until_complete(r.json())
r.close()
session.close()
self.assertEqual(str(len(data)),
content['headers']['Content-Length'])
def test_POST_ChunksQueue(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname, 'rb') as f:
data = f.read()
stream = aiohttp.ChunksQueue(loop=self.loop)
stream.feed_data(data[:100], 100)
d = data[100:]
stream.feed_data(d, len(d))
stream.feed_eof()
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request(
'post', url, data=stream,
headers={'Content-Length': str(len(data))}))
content = self.loop.run_until_complete(r.json())
r.close()
session.close()
self.assertEqual(str(len(data)),
content['headers']['Content-Length'])
def test_request_conn_closed(self):
with run_server(self.loop, router=Functional) as httpd:
httpd['close'] = True
session = client.ClientSession(loop=self.loop)
with self.assertRaises(aiohttp.ServerDisconnectedError):
self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get')))
session.close()
def test_session_close(self):
conn = aiohttp.TCPConnector(loop=self.loop)
session = client.ClientSession(loop=self.loop, connector=conn)
with run_server(self.loop, router=Functional) as httpd:
r = self.loop.run_until_complete(
session.request(
'get', httpd.url('keepalive') + '?close=1'))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertEqual(content['content'], 'requests=1')
r.close()
r = self.loop.run_until_complete(
session.request('get', httpd.url('keepalive')))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertEqual(content['content'], 'requests=1')
r.close()
session.close()
conn.close()
def test_multidict_headers(self):
session = client.ClientSession(loop=self.loop)
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
data = b'sample data'
r = self.loop.run_until_complete(
session.request(
'post', url, data=data,
headers=MultiDict(
{'Content-Length': str(len(data))})))
content = self.loop.run_until_complete(r.json())
r.close()
self.assertEqual(str(len(data)),
content['headers']['Content-Length'])
session.close()
def test_dont_close_explicit_connector(self):
async def go(url):
connector = aiohttp.TCPConnector(loop=self.loop)
session = client.ClientSession(loop=self.loop, connector=connector)
r = await session.request('GET', url)
await r.read()
self.assertEqual(1, len(connector._conns))
connector.close()
session.close()
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('keepalive')
self.loop.run_until_complete(go(url))
def test_server_close_keepalive_connection(self):
class Proto(asyncio.Protocol):
def connection_made(self, transport):
self.transp = transport
self.data = b''
def data_received(self, data):
self.data += data
if data.endswith(b'\r\n\r\n'):
self.transp.write(
b'HTTP/1.1 200 OK\r\n'
b'CONTENT-LENGTH: 2\r\n'
b'CONNECTION: close\r\n'
b'\r\n'
b'ok')
self.transp.close()
def connection_lost(self, exc):
self.transp = None
async def go():
server = await self.loop.create_server(
Proto, '127.0.0.1', unused_port())
addr = server.sockets[0].getsockname()
connector = aiohttp.TCPConnector(loop=self.loop, limit=1)
session = client.ClientSession(loop=self.loop, connector=connector)
url = 'http://{}:{}/'.format(*addr)
for i in range(2):
r = await session.request('GET', url)
await r.read()
self.assertEqual(0, len(connector._conns))
session.close()
connector.close()
server.close()
await server.wait_closed()
self.loop.run_until_complete(go())
def test_handle_keepalive_on_closed_connection(self):
class Proto(asyncio.Protocol):
def connection_made(self, transport):
self.transp = transport
self.data = b''
def data_received(self, data):
self.data += data
if data.endswith(b'\r\n\r\n'):
self.transp.write(
b'HTTP/1.1 200 OK\r\n'
b'CONTENT-LENGTH: 2\r\n'
b'\r\n'
b'ok')
self.transp.close()
def connection_lost(self, exc):
self.transp = None
async def go():
server = await self.loop.create_server(
Proto, '127.0.0.1', unused_port())
addr = server.sockets[0].getsockname()
connector = aiohttp.TCPConnector(loop=self.loop, limit=1)
session = client.ClientSession(loop=self.loop, connector=connector)
url = 'http://{}:{}/'.format(*addr)
r = await session.request('GET', url)
await r.read()
self.assertEqual(1, len(connector._conns))
with self.assertRaises(aiohttp.ServerDisconnectedError):
await session.request('GET', url)
self.assertEqual(0, len(connector._conns))
session.close()
connector.close()
server.close()
await server.wait_closed()
self.loop.run_until_complete(go())
@mock.patch('aiohttp.client_reqrep.client_logger')
def test_session_cookies(self, m_log):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(loop=self.loop)
resp = self.loop.run_until_complete(
session.request('get', httpd.url('cookies')))
self.assertEqual(resp.cookies['c1'].value, 'cookie1')
self.assertEqual(resp.cookies['c2'].value, 'cookie2')
resp.close()
# Add the received cookies as shared for sending them to the test
# server, which is only accessible via IP
session.cookie_jar.update_cookies(resp.cookies)
# Assert, that we send those cookies in next requests
r = self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get')))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertEqual(
content['headers']['Cookie'], 'c1=cookie1; c2=cookie2')
r.close()
session.close()
def test_session_headers(self):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(
loop=self.loop, headers={
"X-Real-IP": "192.168.0.1"
})
r = self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get')))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertIn(
"X-Real-Ip", content['headers'])
self.assertEqual(
content['headers']["X-Real-Ip"], "192.168.0.1")
r.close()
session.close()
def test_session_headers_merge(self):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(
loop=self.loop, headers=[
("X-Real-IP", "192.168.0.1"),
("X-Sent-By", "requests")])
r = self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get'),
headers={"X-Sent-By": "aiohttp"}))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertIn(
"X-Real-Ip", content['headers'])
self.assertIn(
"X-Sent-By", content['headers'])
self.assertEqual(
content['headers']["X-Real-Ip"], "192.168.0.1")
self.assertEqual(
content['headers']["X-Sent-By"], "aiohttp")
r.close()
session.close()
def test_session_auth(self):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(
loop=self.loop, auth=helpers.BasicAuth("login", "pass"))
r = self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get')))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertIn(
"Authorization", content['headers'])
self.assertEqual(
content['headers']["Authorization"], "Basic bG9naW46cGFzcw==")
r.close()
session.close()
def test_session_auth_override(self):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(
loop=self.loop, auth=helpers.BasicAuth("login", "pass"))
r = self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get'),
auth=helpers.BasicAuth("other_login", "pass")))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertIn(
"Authorization", content['headers'])
self.assertEqual(
content['headers']["Authorization"],
"Basic b3RoZXJfbG9naW46cGFzcw==")
r.close()
session.close()
def test_session_auth_header_conflict(self):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(
loop=self.loop, auth=helpers.BasicAuth("login", "pass"))
headers = {'Authorization': "Basic b3RoZXJfbG9naW46cGFzcw=="}
with self.assertRaises(ValueError):
self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get'),
headers=headers))
session.close()
|
the-stack_0_24637
|
import os
import logging
LOG_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Note: os.path.join discards LOG_PATH here because the second argument is
# absolute, so the log always ends up at /tmp/rgw.log.
LOG_FILENAME = os.path.join(LOG_PATH, '/tmp/rgw.log')
if os.path.exists(LOG_FILENAME):
    os.unlink(LOG_FILENAME)
logging.basicConfig(format='%(asctime)s : %(levelname)s: %(message)s',
                    datefmt='[%m/%d/%Y - %I:%M:%S %p]', filename=LOG_FILENAME, level=logging.DEBUG)
def debug(debug_msg):
logging.debug(debug_msg)
print(debug_msg)
def error(error_msg):
logging.error(error_msg)
print(error_msg)
def info(information):
logging.info(information)
print(information)
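# Usage sketch (assuming this module is importable as `log` from the test
# utilities package):
#
#   import log
#   log.info("bucket created")      # written to /tmp/rgw.log and echoed to stdout
#   log.error("request failed")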
|
the-stack_0_24639
|
from django.contrib.admin.widgets import AutocompleteSelect
from django.contrib import admin
from django import forms
from django.contrib.auth.models import User
from .models import Dozen, DozensOfCortadores
from .models import DozensOfAparadores, DozensOfArmadores, DozensOfRematadores
class DozenCreateForm(forms.ModelForm):
class Meta:
model = Dozen
fields = '__all__'
labels = {'user': 'Elija al cortador de esta docena'}
exclude = ['status', 'model', 'size']
""" widgets = {
'model': AutocompleteSelect(
Dozen._meta.get_field('model').remote_field,
admin.site,
#attrs={'placeholder': 'Seleccione un modelo'}
)
} """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#self.fields['size'].queryset = User.objects.filter(groups__name='cortadores')
self.fields['material'].widget.attrs.update({
'class': 'selectpicker',
'data-style': 'btn btn-link',
})
self.fields['color'].widget.attrs.update({
'class': 'selectpicker',
'data-style': 'btn btn-link',
})
self.fields['note'].widget.attrs.update({
'rows': '3',
'class': 'pt-3 size-placeholder',
'placeholder': '\nejemplo: a esta docena le falta 3 pares de cinta color cafe porque no hay material',
})
class DozenUpdateForm(forms.ModelForm):
class Meta:
model = Dozen
exclude = ['code']
fields = '__all__'
labels = {
'status': 'El estado actual de esta docena es'
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['material'].widget.attrs.update({
'class': 'form-control',
})
self.fields['status'].widget.attrs.update({
'class': 'form-control',
})
self.fields['note'].widget.attrs.update({
'class': 'form-control',
'rows': '3',
})
class DozensOfCortadoresForm(forms.ModelForm):
class Meta:
model = DozensOfCortadores
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['note'].widget.attrs.update({
'rows': '3',
})
class DozensOfAparadoresForm(forms.ModelForm):
class Meta:
model = DozensOfAparadores
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['note'].widget.attrs.update({
'rows': '3',
})
class DozensOfArmadoresForm(forms.ModelForm):
class Meta:
model = DozensOfArmadores
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['note'].widget.attrs.update({
'rows': '3',
})
class DozensOfRematadoresForm(forms.ModelForm):
class Meta:
model = DozensOfRematadores
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['note'].widget.attrs.update({
'rows': '3',
})
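# Usage sketch for a function-based view (view, URL and template names are
# hypothetical; render/redirect come from django.shortcuts):
#
#   def dozen_create(request):
#       form = DozenCreateForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()
#           return redirect('dozen-list')
#       return render(request, 'dozens/create.html', {'form': form})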
|
the-stack_0_24640
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import datetime
import filecmp
import hashlib
import http.client
import os
import platform
import re
import shutil
import subprocess
import sys
import textwrap
import traceback
import urllib.error
import urllib.parse
import urllib.request
import xml.etree.ElementTree as ET
import zipfile
from collections import namedtuple
import scriptutil
# This tool expects to find /solr off the base URL. You
# must have a working gpg, tar in your path. This has been
# tested on Linux and on Cygwin under Windows 7.
cygwin = platform.system().lower().startswith('cygwin')
cygwinWindowsRoot = os.popen('cygpath -w /').read().strip().replace('\\','/') if cygwin else ''
def unshortenURL(url):
parsed = urllib.parse.urlparse(url)
if parsed[0] in ('http', 'https'):
h = http.client.HTTPConnection(parsed.netloc)
h.request('HEAD', parsed.path)
response = h.getresponse()
if int(response.status/100) == 3 and response.getheader('Location'):
return response.getheader('Location')
return url
# TODO
# - make sure jars exist inside bin release
# - make sure docs exist
reHREF = re.compile('<a href="(.*?)">(.*?)</a>')
# Set to False to avoid re-downloading the packages...
FORCE_CLEAN = True
def getHREFs(urlString):
# Deref any redirects
while True:
url = urllib.parse.urlparse(urlString)
if url.scheme == "http":
h = http.client.HTTPConnection(url.netloc)
elif url.scheme == "https":
h = http.client.HTTPSConnection(url.netloc)
else:
raise RuntimeError("Unknown protocol: %s" % url.scheme)
h.request('HEAD', url.path)
r = h.getresponse()
newLoc = r.getheader('location')
if newLoc is not None:
urlString = newLoc
else:
break
links = []
try:
html = load(urlString)
except:
print('\nFAILED to open url %s' % urlString)
traceback.print_exc()
raise
for subUrl, text in reHREF.findall(html):
#print("Got suburl %s and text %s" % (subUrl, text))
fullURL = urllib.parse.urljoin(urlString, subUrl)
links.append((text, fullURL))
return links
def load(urlString):
try:
raw_request = urllib.request.Request(urlString)
raw_request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0')
raw_request.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
content = urllib.request.urlopen(raw_request).read().decode('utf-8')
except Exception as e:
print('Retrying download of url %s after exception: %s' % (urlString, e))
content = urllib.request.urlopen(urlString).read().decode('utf-8')
return content
def noJavaPackageClasses(desc, file):
with zipfile.ZipFile(file) as z2:
for name2 in z2.namelist():
if name2.endswith('.class') and (name2.startswith('java/') or name2.startswith('javax/')):
raise RuntimeError('%s contains java or javax class "%s"' % (desc, name2))
def decodeUTF8(bytes):
return codecs.getdecoder('UTF-8')(bytes)[0]
MANIFEST_FILE_NAME = 'META-INF/MANIFEST.MF'
NOTICE_FILE_NAME = 'META-INF/NOTICE.txt'
LICENSE_FILE_NAME = 'META-INF/LICENSE.txt'
def checkJARMetaData(desc, jarFile, gitRevision, version):
with zipfile.ZipFile(jarFile, 'r') as z:
for name in (MANIFEST_FILE_NAME, NOTICE_FILE_NAME, LICENSE_FILE_NAME):
try:
# The Python docs state a KeyError is raised ... so this None
# check is just defensive:
if z.getinfo(name) is None:
raise RuntimeError('%s is missing %s' % (desc, name))
except KeyError:
raise RuntimeError('%s is missing %s' % (desc, name))
s = decodeUTF8(z.read(MANIFEST_FILE_NAME))
for verify in (
'Specification-Vendor: The Apache Software Foundation',
'Implementation-Vendor: The Apache Software Foundation',
'Specification-Title: Apache Solr Search Server:',
'Implementation-Title: org.apache.solr',
'X-Compile-Source-JDK: 11',
'X-Compile-Target-JDK: 11',
'Specification-Version: %s' % version,
'X-Build-JDK: 11.',
'Extension-Name: org.apache.solr'):
if type(verify) is not tuple:
verify = (verify,)
for x in verify:
if s.find(x) != -1:
break
else:
if len(verify) == 1:
raise RuntimeError('%s is missing "%s" inside its META-INF/MANIFEST.MF' % (desc, verify[0]))
else:
raise RuntimeError('%s is missing one of "%s" inside its META-INF/MANIFEST.MF' % (desc, verify))
if gitRevision != 'skip':
# Make sure this matches the version and git revision we think we are releasing:
match = re.search("Implementation-Version: (.+\r\n .+)", s, re.MULTILINE)
if match:
implLine = match.group(1).replace("\r\n ", "")
verifyRevision = '%s %s' % (version, gitRevision)
if implLine.find(verifyRevision) == -1:
raise RuntimeError('%s is missing "%s" inside its META-INF/MANIFEST.MF (wrong git revision?)' % \
(desc, verifyRevision))
else:
raise RuntimeError('%s is missing Implementation-Version inside its META-INF/MANIFEST.MF' % desc)
notice = decodeUTF8(z.read(NOTICE_FILE_NAME))
license = decodeUTF8(z.read(LICENSE_FILE_NAME))
if SOLR_LICENSE is None or SOLR_NOTICE is None:
raise RuntimeError('BUG in smokeTestRelease!')
if notice != SOLR_NOTICE:
raise RuntimeError('%s: %s contents doesn\'t match main NOTICE.txt' % \
(desc, NOTICE_FILE_NAME))
if license != SOLR_LICENSE:
raise RuntimeError('%s: %s contents doesn\'t match main LICENSE.txt' % \
(desc, LICENSE_FILE_NAME))
def normSlashes(path):
return path.replace(os.sep, '/')
def checkAllJARs(topDir, gitRevision, version):
print(' verify JAR metadata/identity/no javax.* or java.* classes...')
for root, dirs, files in os.walk(topDir): # pylint: disable=unused-variable
normRoot = normSlashes(root)
if normRoot.endswith('/server/lib'):
# Solr's example intentionally ships servlet JAR:
continue
for file in files:
if file.lower().endswith('.jar'):
if ((normRoot.endswith('/modules/extraction/lib') and file.startswith('jakarta.activation-'))
or (normRoot.endswith('/modules/extraction/lib') and file.startswith('jakarta.annotation-api-'))
or (normRoot.endswith('/modules/extraction/lib') and file.startswith('jakarta.xml.bind-api-'))
or (normRoot.endswith('/modules/extraction/lib') and file.startswith('unit-api-'))):
print(' **WARNING**: skipping check of %s/%s: it has javax.* classes' % (root, file))
continue
fullPath = '%s/%s' % (root, file)
noJavaPackageClasses('JAR file "%s"' % fullPath, fullPath)
if file.lower().find('solr') != -1:
checkJARMetaData('JAR file "%s"' % fullPath, fullPath, gitRevision, version)
def checkSigs(urlString, version, tmpDir, isSigned, keysFile):
print(' test basics...')
ents = getDirEntries(urlString)
artifact = None
changesURL = None
mavenURL = None
dockerURL = None
artifactURL = None
expectedSigs = []
if isSigned:
expectedSigs.append('asc')
expectedSigs.extend(['sha512'])
sigs = []
artifacts = []
for text, subURL in ents:
if text == '.gitrev':
continue # Allow this in the distribution build directory
if text == 'KEYS':
raise RuntimeError('solr: release dir should not contain a KEYS file - only toplevel /dist/solr/KEYS is used')
elif text == 'maven/':
mavenURL = subURL
elif text == 'docker/':
dockerURL = subURL
elif text.startswith('changes'):
if text not in ('changes/', 'changes-%s/' % version):
raise RuntimeError('solr: found %s vs expected changes-%s/' % (text, version))
changesURL = subURL
elif artifact is None:
artifact = text
artifactURL = subURL
expected = 'solr-%s' % version
if not artifact.startswith(expected):
raise RuntimeError('solr: unknown artifact %s: expected prefix %s' % (text, expected))
sigs = []
elif text.startswith(artifact + '.'):
sigs.append(subURL.rsplit(".")[-1:][0])
else:
if sigs != expectedSigs:
raise RuntimeError('solr: artifact %s has wrong sigs: expected %s but got %s' % (artifact, expectedSigs, sigs))
artifacts.append((artifact, artifactURL))
artifact = text
artifactURL = subURL
sigs = []
if sigs != []:
artifacts.append((artifact, artifactURL))
if sigs != expectedSigs:
raise RuntimeError('solr: artifact %s has wrong sigs: expected %s but got %s' % (artifact, expectedSigs, sigs))
expected = ['solr-%s-src.tgz' % version,
'solr-%s.tgz' % version]
actual = [x[0] for x in artifacts]
if expected != actual:
raise RuntimeError('solr: wrong artifacts: expected %s but got %s' % (expected, actual))
# Set up clean gpg world; import keys file:
gpgHomeDir = '%s/solr.gpg' % tmpDir
if os.path.exists(gpgHomeDir):
shutil.rmtree(gpgHomeDir)
os.makedirs(gpgHomeDir, 0o700)
run('gpg --homedir %s --import %s' % (gpgHomeDir, keysFile),
'%s/solr.gpg.import.log' % tmpDir)
if mavenURL is None:
raise RuntimeError('solr is missing maven')
if dockerURL is None:
raise RuntimeError('solr is missing docker')
if changesURL is None:
raise RuntimeError('solr is missing changes-%s' % version)
testChanges(version, changesURL)
for artifact, urlString in artifacts: # pylint: disable=redefined-argument-from-local
print(' download %s...' % artifact)
scriptutil.download(artifact, urlString, tmpDir, force_clean=FORCE_CLEAN)
verifyDigests(artifact, urlString, tmpDir)
if isSigned:
print(' verify sig')
# Test sig (this is done with a clean brand-new GPG world)
scriptutil.download(artifact + '.asc', urlString + '.asc', tmpDir, force_clean=FORCE_CLEAN)
sigFile = '%s/%s.asc' % (tmpDir, artifact)
artifactFile = '%s/%s' % (tmpDir, artifact)
logFile = '%s/solr.%s.gpg.verify.log' % (tmpDir, artifact)
run('gpg --homedir %s --display-charset utf-8 --verify %s %s' % (gpgHomeDir, sigFile, artifactFile),
logFile)
# Forward any GPG warnings, except the expected one (since it's a clean world)
with open(logFile) as f:
print("File: %s" % logFile)
for line in f.readlines():
if line.lower().find('warning') != -1 \
and line.find('WARNING: This key is not certified with a trusted signature') == -1:
print(' GPG: %s' % line.strip())
# Test trust (this is done with the real users config)
run('gpg --import %s' % (keysFile),
'%s/solr.gpg.trust.import.log' % tmpDir)
print(' verify trust')
logFile = '%s/solr.%s.gpg.trust.log' % (tmpDir, artifact)
run('gpg --display-charset utf-8 --verify %s %s' % (sigFile, artifactFile), logFile)
# Forward any GPG warnings:
with open(logFile) as f:
for line in f.readlines():
if line.lower().find('warning') != -1:
print(' GPG: %s' % line.strip())
def testChanges(version, changesURLString):
print(' check changes HTML...')
changesURL = None
for text, subURL in getDirEntries(changesURLString):
if text == 'Changes.html':
changesURL = subURL
if changesURL is None:
raise RuntimeError('did not see Changes.html link from %s' % changesURLString)
s = load(changesURL)
checkChangesContent(s, version, changesURL, True)
def testChangesText(dir, version):
"Checks all CHANGES.txt under this dir."
for root, dirs, files in os.walk(dir): # pylint: disable=unused-variable
# NOTE: O(N) but N should be smallish:
if 'CHANGES.txt' in files:
fullPath = '%s/CHANGES.txt' % root
#print 'CHECK %s' % fullPath
checkChangesContent(open(fullPath, encoding='UTF-8').read(), version, fullPath, False)
reChangesSectionHREF = re.compile('<a id="(.*?)".*?>(.*?)</a>', re.IGNORECASE)
reUnderbarNotDashHTML = re.compile(r'<li>(\s*(SOLR)_\d\d\d\d+)')
reUnderbarNotDashTXT = re.compile(r'\s+((SOLR)_\d\d\d\d+)', re.MULTILINE)
def checkChangesContent(s, version, name, isHTML):
currentVersionTuple = versionToTuple(version, name)
if isHTML and s.find('Release %s' % version) == -1:
raise RuntimeError('did not see "Release %s" in %s' % (version, name))
if isHTML:
r = reUnderbarNotDashHTML
else:
r = reUnderbarNotDashTXT
m = r.search(s)
if m is not None:
raise RuntimeError('incorrect issue (_ instead of -) in %s: %s' % (name, m.group(1)))
if s.lower().find('not yet released') != -1:
raise RuntimeError('saw "not yet released" in %s' % name)
if not isHTML:
sub = version
if s.find(sub) == -1:
# benchmark never seems to include release info:
if name.find('/benchmark/') == -1:
raise RuntimeError('did not see "%s" in %s' % (sub, name))
if isHTML:
# Make sure that a section only appears once under each release,
# and that each release is not greater than the current version
seenIDs = set()
seenText = set()
release = None
for id, text in reChangesSectionHREF.findall(s):
if text.lower().startswith('release '):
release = text[8:].strip()
seenText.clear()
releaseTuple = versionToTuple(release, name)
if releaseTuple > currentVersionTuple:
raise RuntimeError('Future release %s is greater than %s in %s' % (release, version, name))
if id in seenIDs:
raise RuntimeError('%s has duplicate section "%s" under release "%s"' % (name, text, release))
seenIDs.add(id)
if text in seenText:
raise RuntimeError('%s has duplicate section "%s" under release "%s"' % (name, text, release))
seenText.add(text)
reVersion = re.compile(r'(\d+)\.(\d+)(?:\.(\d+))?\s*(-alpha|-beta|final|RC\d+)?\s*(?:\[.*\])?', re.IGNORECASE)
def versionToTuple(version, name):
versionMatch = reVersion.match(version)
if versionMatch is None:
raise RuntimeError('Version %s in %s cannot be parsed' % (version, name))
versionTuple = versionMatch.groups()
while versionTuple[-1] is None or versionTuple[-1] == '':
versionTuple = versionTuple[:-1]
if versionTuple[-1].lower() == '-alpha':
versionTuple = versionTuple[:-1] + ('0',)
elif versionTuple[-1].lower() == '-beta':
versionTuple = versionTuple[:-1] + ('1',)
elif versionTuple[-1].lower() == 'final':
versionTuple = versionTuple[:-2] + ('100',)
elif versionTuple[-1].lower()[:2] == 'rc':
versionTuple = versionTuple[:-2] + (versionTuple[-1][2:],)
return tuple(int(x) if x is not None and x.isnumeric() else x for x in versionTuple)
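# Worked examples, following the regex and suffix handling above:
#
#   versionToTuple('9.0.0', 'CHANGES')        # -> (9, 0, 0)
#   versionToTuple('9.0.0-alpha', 'CHANGES')  # -> (9, 0, 0, 0)
#   versionToTuple('9.0.0-beta', 'CHANGES')   # -> (9, 0, 0, 1)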
reUnixPath = re.compile(r'\b[a-zA-Z_]+=(?:"(?:\\"|[^"])*"' + '|(?:\\\\.|[^"\'\\s])*' + r"|'(?:\\'|[^'])*')" \
+ r'|(/(?:\\.|[^"\'\s])*)' \
+ r'|("/(?:\\.|[^"])*")' \
+ r"|('/(?:\\.|[^'])*')")
def unix2win(matchobj):
if matchobj.group(1) is not None: return cygwinWindowsRoot + matchobj.group()
if matchobj.group(2) is not None: return '"%s%s' % (cygwinWindowsRoot, matchobj.group().lstrip('"'))
if matchobj.group(3) is not None: return "'%s%s" % (cygwinWindowsRoot, matchobj.group().lstrip("'"))
return matchobj.group()
def cygwinifyPaths(command):
# The problem: Native Windows applications running under Cygwin can't
# handle Cygwin's Unix-style paths. However, environment variable
# values are automatically converted, so only paths outside of
# environment variable values should be converted to Windows paths.
# Assumption: all paths will be absolute.
if '; gradlew ' in command: command = reUnixPath.sub(unix2win, command)
return command
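# Example of the rewrite (assuming cygwinWindowsRoot == 'C:/cygwin64'); only
# commands that run gradlew are touched:
#
#   cygwinifyPaths('cd /tmp/unpack; gradlew test')
#   # -> 'cd C:/cygwin64/tmp/unpack; gradlew test'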
def printFileContents(fileName):
# Assume log file was written in system's default encoding, but
# even if we are wrong, we replace errors ... the ASCII chars
# (which is what we mostly care about eg for the test seed) should
# still survive:
txt = codecs.open(fileName, 'r', encoding=sys.getdefaultencoding(), errors='replace').read()
# Encode to our output encoding (likely also system's default
# encoding):
bytes = txt.encode(sys.stdout.encoding, errors='replace')
# Decode back to string and print... we should hit no exception here
# since all errors have been replaced:
print(codecs.getdecoder(sys.stdout.encoding)(bytes)[0])
print()
def run(command, logFile):
if cygwin: command = cygwinifyPaths(command)
if os.system('%s > %s 2>&1' % (command, logFile)):
logPath = os.path.abspath(logFile)
print('\ncommand "%s" failed:' % command)
printFileContents(logFile)
raise RuntimeError('command "%s" failed; see log file %s' % (command, logPath))
def verifyDigests(artifact, urlString, tmpDir):
print(' verify sha512 digest')
sha512Expected, t = load(urlString + '.sha512').strip().split()
if t != '*'+artifact:
raise RuntimeError('SHA512 %s.sha512 lists artifact %s but expected *%s' % (urlString, t, artifact))
s512 = hashlib.sha512()
f = open('%s/%s' % (tmpDir, artifact), 'rb')
while True:
x = f.read(65536)
if len(x) == 0:
break
s512.update(x)
f.close()
sha512Actual = s512.hexdigest()
if sha512Actual != sha512Expected:
raise RuntimeError('SHA512 digest mismatch for %s: expected %s but got %s' % (artifact, sha512Expected, sha512Actual))
def getDirEntries(urlString):
if urlString.startswith('file:/') and not urlString.startswith('file://'):
# stupid bogus ant URI
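    # e.g. 'file:/tmp/smoke' becomes 'file:///tmp/smoke'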
urlString = "file:///" + urlString[6:]
if urlString.startswith('file://'):
path = urlString[7:]
if path.endswith('/'):
path = path[:-1]
if cygwin: # Convert Windows path to Cygwin path
path = re.sub(r'^/([A-Za-z]):/', r'/cygdrive/\1/', path)
l = []
for ent in os.listdir(path):
entPath = '%s/%s' % (path, ent)
if os.path.isdir(entPath):
entPath += '/'
ent += '/'
l.append((ent, 'file://%s' % entPath))
l.sort()
return l
else:
links = getHREFs(urlString)
for i, (text, subURL) in enumerate(links): # pylint: disable=unused-variable
if text == 'Parent Directory' or text == '..':
return links[(i+1):]
return None
def unpackAndVerify(java, tmpDir, artifact, gitRevision, version, testArgs):
destDir = '%s/unpack' % tmpDir
if os.path.exists(destDir):
shutil.rmtree(destDir)
os.makedirs(destDir)
os.chdir(destDir)
print(' unpack %s...' % artifact)
unpackLogFile = '%s/solr-unpack-%s.log' % (tmpDir, artifact)
if artifact.endswith('.tar.gz') or artifact.endswith('.tgz'):
run('tar xzf %s/%s' % (tmpDir, artifact), unpackLogFile)
# make sure it unpacks to proper subdir
l = os.listdir(destDir)
expected = 'solr-%s' % version
if l != [expected]:
raise RuntimeError('unpack produced entries %s; expected only %s' % (l, expected))
unpackPath = '%s/%s' % (destDir, expected)
verifyUnpacked(java, artifact, unpackPath, gitRevision, version, testArgs)
return unpackPath
SOLR_NOTICE = None
SOLR_LICENSE = None
def is_in_list(in_folder, files, indent=4):
for file_name in files:
print("%sChecking %s" % (" "*indent, file_name))
found = False
for f in [file_name, file_name + '.txt', file_name + '.md']:
if f in in_folder:
in_folder.remove(f)
found = True
if not found:
raise RuntimeError('file "%s" is missing' % file_name)
def verifyUnpacked(java, artifact, unpackPath, gitRevision, version, testArgs):
global SOLR_NOTICE
global SOLR_LICENSE
os.chdir(unpackPath)
isSrc = artifact.find('-src') != -1
# Check text files in release
print(" %s" % artifact)
in_root_folder = list(filter(lambda x: x[0] != '.', os.listdir(unpackPath)))
in_solr_folder = []
if isSrc:
in_solr_folder.extend(os.listdir(os.path.join(unpackPath, 'solr')))
is_in_list(in_root_folder, ['LICENSE', 'NOTICE', 'README'])
is_in_list(in_solr_folder, ['CHANGES', 'README'])
else:
is_in_list(in_root_folder, ['LICENSE', 'NOTICE', 'README', 'CHANGES'])
if SOLR_NOTICE is None:
SOLR_NOTICE = open('%s/NOTICE.txt' % unpackPath, encoding='UTF-8').read()
if SOLR_LICENSE is None:
SOLR_LICENSE = open('%s/LICENSE.txt' % unpackPath, encoding='UTF-8').read()
# if not isSrc:
# # TODO: we should add verifyModule/verifySubmodule (e.g. analysis) here and recurse through
# expectedJARs = ()
#
# for fileName in expectedJARs:
# fileName += '.jar'
# if fileName not in l:
# raise RuntimeError('solr: file "%s" is missing from artifact %s' % (fileName, artifact))
# in_root_folder.remove(fileName)
if isSrc:
expected_src_root_folders = ['buildSrc', 'dev-docs', 'dev-tools', 'gradle', 'help', 'solr']
expected_src_root_files = ['build.gradle', 'gradlew', 'gradlew.bat', 'settings.gradle', 'versions.lock', 'versions.props']
expected_src_solr_files = ['build.gradle']
expected_src_solr_folders = ['benchmark', 'bin', 'modules', 'core', 'docker', 'documentation', 'example', 'licenses', 'packaging', 'distribution', 'prometheus-exporter', 'server', 'solr-ref-guide', 'solrj', 'test-framework', 'webapp', '.gitignore', '.gitattributes']
is_in_list(in_root_folder, expected_src_root_folders)
is_in_list(in_root_folder, expected_src_root_files)
is_in_list(in_solr_folder, expected_src_solr_folders)
is_in_list(in_solr_folder, expected_src_solr_files)
if len(in_solr_folder) > 0:
raise RuntimeError('solr: unexpected files/dirs in artifact %s solr/ folder: %s' % (artifact, in_solr_folder))
else:
is_in_list(in_root_folder, ['bin', 'modules', 'docker', 'prometheus-exporter', 'docs', 'example', 'licenses', 'server'])
if len(in_root_folder) > 0:
raise RuntimeError('solr: unexpected files/dirs in artifact %s: %s' % (artifact, in_root_folder))
if isSrc:
print(' make sure no JARs/WARs in src dist...')
lines = os.popen('find . -name \\*.jar').readlines()
if len(lines) != 0:
print(' FAILED:')
for line in lines:
print(' %s' % line.strip())
raise RuntimeError('source release has JARs...')
lines = os.popen('find . -name \\*.war').readlines()
if len(lines) != 0:
print(' FAILED:')
for line in lines:
print(' %s' % line.strip())
raise RuntimeError('source release has WARs...')
validateCmd = './gradlew --no-daemon check -p solr/documentation'
print(' run "%s"' % validateCmd)
java.run_java11(validateCmd, '%s/validate.log' % unpackPath)
print(" run tests w/ Java 11 and testArgs='%s'..." % testArgs)
java.run_java11('./gradlew --no-daemon test %s' % testArgs, '%s/test.log' % unpackPath)
print(" run integration tests w/ Java 11")
java.run_java11('./gradlew --no-daemon integrationTest -Dversion.release=%s' % version, '%s/itest.log' % unpackPath)
print(" build binary release w/ Java 11")
java.run_java11('./gradlew --no-daemon dev -Dversion.release=%s' % version, '%s/assemble.log' % unpackPath)
testSolrExample("%s/solr/packaging/build/dev" % unpackPath, java.java11_home)
if java.run_java17:
print(" run tests w/ Java 17 and testArgs='%s'..." % testArgs)
java.run_java17('./gradlew --no-daemon clean test %s' % testArgs, '%s/test-java17.log' % unpackPath)
print(" run integration tests w/ Java 17")
java.run_java17('./gradlew --no-daemon integrationTest -Dversion.release=%s' % version, '%s/itest-java17.log' % unpackPath)
print(" build binary release w/ Java 17")
java.run_java17('./gradlew --no-daemon dev -Dversion.release=%s' % version, '%s/assemble-java17.log' % unpackPath)
testSolrExample("%s/solr/packaging/build/dev" % unpackPath, java.java17_home)
else:
# Binary tarball
checkAllJARs(os.getcwd(), gitRevision, version)
print(' copying unpacked distribution for Java 11 ...')
java11UnpackPath = '%s-java11' % unpackPath
if os.path.exists(java11UnpackPath):
shutil.rmtree(java11UnpackPath)
shutil.copytree(unpackPath, java11UnpackPath)
os.chdir(java11UnpackPath)
print(' test solr example w/ Java 11...')
testSolrExample(java11UnpackPath, java.java11_home)
if java.run_java17:
print(' copying unpacked distribution for Java 17 ...')
java17UnpackPath = '%s-java17' % unpackPath
if os.path.exists(java17UnpackPath):
shutil.rmtree(java17UnpackPath)
shutil.copytree(unpackPath, java17UnpackPath)
os.chdir(java17UnpackPath)
print(' test solr example w/ Java 17...')
testSolrExample(java17UnpackPath, java.java17_home)
os.chdir(unpackPath)
testChangesText('.', version)
def readSolrOutput(p, startupEvent, failureEvent, logFile):
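  # Intended to run on a background thread: tees Solr's stdout into logFile and sets startupEvent
  # once Jetty reports its HTTP connector on port 8983; failureEvent is set instead if the process
  # exits (or an exception occurs) before startup is detected.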
f = open(logFile, 'wb')
try:
while True:
line = p.stdout.readline()
if len(line) == 0:
p.poll()
if not startupEvent.isSet():
failureEvent.set()
startupEvent.set()
break
f.write(line)
f.flush()
#print('SOLR: %s' % line.strip())
if not startupEvent.isSet():
if line.find(b'Started ServerConnector@') != -1 and line.find(b'{HTTP/1.1}{0.0.0.0:8983}') != -1:
startupEvent.set()
elif p.poll() is not None:
failureEvent.set()
startupEvent.set()
break
except:
print()
print('Exception reading Solr output:')
traceback.print_exc()
failureEvent.set()
startupEvent.set()
finally:
f.close()
def is_port_in_use(port):
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0
def testSolrExample(binaryDistPath, javaPath):
# test solr using some examples it comes with
logFile = '%s/solr-example.log' % binaryDistPath
old_cwd = os.getcwd() # So we can back-track
os.chdir(binaryDistPath)
print(' start Solr instance (log=%s)...' % logFile)
env = {}
env.update(os.environ)
env['JAVA_HOME'] = javaPath
env['PATH'] = '%s/bin:%s' % (javaPath, env['PATH'])
# Stop Solr running on port 8983 (in case a previous run didn't shutdown cleanly)
try:
if not cygwin:
subprocess.call(['bin/solr','stop','-p','8983'])
else:
subprocess.call('env "PATH=`cygpath -S -w`:$PATH" bin/solr.cmd stop -p 8983', shell=True)
except:
    print('      Stop failed due to: %s' % sys.exc_info()[0])
print(' Running techproducts example on port 8983 from %s' % binaryDistPath)
try:
if not cygwin:
runExampleStatus = subprocess.call(['bin/solr','-e','techproducts'])
else:
runExampleStatus = subprocess.call('env "PATH=`cygpath -S -w`:$PATH" bin/solr.cmd -e techproducts', shell=True)
if runExampleStatus != 0:
raise RuntimeError('Failed to run the techproducts example, check log for previous errors.')
os.chdir('example')
print(' test utf8...')
run('sh ./exampledocs/test_utf8.sh http://localhost:8983/solr/techproducts', 'utf8.log')
print(' run query...')
s = load('http://localhost:8983/solr/techproducts/select/?q=video')
if s.find('"numFound":3,"start":0') == -1:
print('FAILED: response is:\n%s' % s)
raise RuntimeError('query on solr example instance failed')
s = load('http://localhost:8983/api/cores')
if s.find('"status":0,') == -1:
print('FAILED: response is:\n%s' % s)
raise RuntimeError('query api v2 on solr example instance failed')
finally:
# Stop server:
print(' stop server using: bin/solr stop -p 8983')
os.chdir(binaryDistPath)
if not cygwin:
subprocess.call(['bin/solr','stop','-p','8983'])
else:
subprocess.call('env "PATH=`cygpath -S -w`:$PATH" bin/solr.cmd stop -p 8983', shell=True)
os.chdir(old_cwd)
def removeTrailingZeros(version):
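  # e.g. '9.0.0' -> '9', '9.10.0' -> '9.10' (only trailing '.0' components are stripped)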
return re.sub(r'(\.0)*$', '', version)
def checkMaven(baseURL, tmpDir, gitRevision, version, isSigned, keysFile):
print(' download artifacts')
artifacts = []
artifactsURL = '%s/maven/org/apache/solr/' % baseURL
targetDir = '%s/maven/org/apache/solr' % tmpDir
if not os.path.exists(targetDir):
os.makedirs(targetDir)
crawl(artifacts, artifactsURL, targetDir)
print()
verifyPOMperBinaryArtifact(artifacts, version)
verifyMavenDigests(artifacts)
checkJavadocAndSourceArtifacts(artifacts, version)
verifyDeployedPOMsCoordinates(artifacts, version)
if isSigned:
verifyMavenSigs(tmpDir, artifacts, keysFile)
distFiles = getBinaryDistFiles(tmpDir, version, baseURL)
checkIdenticalMavenArtifacts(distFiles, artifacts, version)
checkAllJARs('%s/maven/org/apache/solr' % tmpDir, gitRevision, version)
def getBinaryDistFiles(tmpDir, version, baseURL):
distribution = 'solr-%s.tgz' % version
if not os.path.exists('%s/%s' % (tmpDir, distribution)):
distURL = '%s/solr/%s' % (baseURL, distribution)
print(' download %s...' % distribution, end=' ')
scriptutil.download(distribution, distURL, tmpDir, force_clean=FORCE_CLEAN)
destDir = '%s/unpack-solr-getBinaryDistFiles' % tmpDir
if os.path.exists(destDir):
shutil.rmtree(destDir)
os.makedirs(destDir)
os.chdir(destDir)
print(' unpack %s...' % distribution)
unpackLogFile = '%s/unpack-%s-getBinaryDistFiles.log' % (tmpDir, distribution)
run('tar xzf %s/%s' % (tmpDir, distribution), unpackLogFile)
distributionFiles = []
for root, dirs, files in os.walk(destDir): # pylint: disable=unused-variable
distributionFiles.extend([os.path.join(root, file) for file in files])
return distributionFiles
def checkJavadocAndSourceArtifacts(artifacts, version):
print(' check for javadoc and sources artifacts...')
for artifact in artifacts:
if artifact.endswith(version + '.jar'):
javadocJar = artifact[:-4] + '-javadoc.jar'
if javadocJar not in artifacts:
raise RuntimeError('missing: %s' % javadocJar)
sourcesJar = artifact[:-4] + '-sources.jar'
if sourcesJar not in artifacts:
raise RuntimeError('missing: %s' % sourcesJar)
def checkIdenticalMavenArtifacts(distFiles, artifacts, version):
print(' verify that Maven artifacts are same as in the binary distribution...')
reJarWar = re.compile(r'%s\.[wj]ar$' % version) # exclude *-javadoc.jar and *-sources.jar
distFilenames = dict()
for file in distFiles:
baseName = os.path.basename(file)
distFilenames[baseName] = file
for artifact in artifacts:
if reJarWar.search(artifact):
artifactFilename = os.path.basename(artifact)
if artifactFilename in ['solr-test-framework-%s.jar' % version]:
if artifactFilename in distFilenames:
          raise RuntimeError('      solr-test-framework artifact %s should not be present in the solr binary distribution' % artifact)
continue
if artifactFilename not in distFilenames:
raise RuntimeError(' Maven artifact %s is not present in solr binary distribution' % artifact)
else:
identical = filecmp.cmp(artifact, distFilenames[artifactFilename], shallow=False)
if not identical:
raise RuntimeError(' Maven artifact %s is not identical to %s in solr binary distribution'
% (artifact, distFilenames[artifactFilename]))
def verifyMavenDigests(artifacts):
print(" verify Maven artifacts' md5/sha1 digests...")
reJarWarPom = re.compile(r'\.(?:[wj]ar|pom)$')
for artifactFile in [a for a in artifacts if reJarWarPom.search(a)]:
if artifactFile + '.md5' not in artifacts:
raise RuntimeError('missing: MD5 digest for %s' % artifactFile)
if artifactFile + '.sha1' not in artifacts:
raise RuntimeError('missing: SHA1 digest for %s' % artifactFile)
with open(artifactFile + '.md5', encoding='UTF-8') as md5File:
md5Expected = md5File.read().strip()
with open(artifactFile + '.sha1', encoding='UTF-8') as sha1File:
sha1Expected = sha1File.read().strip()
md5 = hashlib.md5()
sha1 = hashlib.sha1()
inputFile = open(artifactFile, 'rb')
while True:
bytes = inputFile.read(65536)
if len(bytes) == 0:
break
md5.update(bytes)
sha1.update(bytes)
inputFile.close()
md5Actual = md5.hexdigest()
sha1Actual = sha1.hexdigest()
if md5Actual != md5Expected:
raise RuntimeError('MD5 digest mismatch for %s: expected %s but got %s'
% (artifactFile, md5Expected, md5Actual))
if sha1Actual != sha1Expected:
raise RuntimeError('SHA1 digest mismatch for %s: expected %s but got %s'
% (artifactFile, sha1Expected, sha1Actual))
def getPOMcoordinate(treeRoot):
namespace = '{http://maven.apache.org/POM/4.0.0}'
groupId = treeRoot.find('%sgroupId' % namespace)
if groupId is None:
groupId = treeRoot.find('{0}parent/{0}groupId'.format(namespace))
groupId = groupId.text.strip()
artifactId = treeRoot.find('%sartifactId' % namespace).text.strip()
version = treeRoot.find('%sversion' % namespace)
if version is None:
version = treeRoot.find('{0}parent/{0}version'.format(namespace))
version = version.text.strip()
packaging = treeRoot.find('%spackaging' % namespace)
packaging = 'jar' if packaging is None else packaging.text.strip()
return groupId, artifactId, packaging, version
def verifyMavenSigs(tmpDir, artifacts, keysFile):
print(' verify maven artifact sigs', end=' ')
# Set up clean gpg world; import keys file:
gpgHomeDir = '%s/solr.gpg' % tmpDir
if os.path.exists(gpgHomeDir):
shutil.rmtree(gpgHomeDir)
os.makedirs(gpgHomeDir, 0o700)
run('gpg --homedir %s --import %s' % (gpgHomeDir, keysFile),
'%s/solr.gpg.import.log' % tmpDir)
reArtifacts = re.compile(r'\.(?:pom|[jw]ar)$')
for artifactFile in [a for a in artifacts if reArtifacts.search(a)]:
artifact = os.path.basename(artifactFile)
sigFile = '%s.asc' % artifactFile
# Test sig (this is done with a clean brand-new GPG world)
logFile = '%s/solr.%s.gpg.verify.log' % (tmpDir, artifact)
run('gpg --display-charset utf-8 --homedir %s --verify %s %s' % (gpgHomeDir, sigFile, artifactFile),
logFile)
# Forward any GPG warnings, except the expected one (since it's a clean world)
print_warnings_in_file(logFile)
# Test trust (this is done with the real users config)
run('gpg --import %s' % keysFile,
'%s/solr.gpg.trust.import.log' % tmpDir)
logFile = '%s/solr.%s.gpg.trust.log' % (tmpDir, artifact)
run('gpg --display-charset utf-8 --verify %s %s' % (sigFile, artifactFile), logFile)
# Forward any GPG warnings:
print_warnings_in_file(logFile)
sys.stdout.write('.')
print()
def print_warnings_in_file(file):
with open(file) as f:
for line in f.readlines():
if line.lower().find('warning') != -1 \
and line.find('WARNING: This key is not certified with a trusted signature') == -1 \
and line.find('WARNING: using insecure memory') == -1:
print(' GPG: %s' % line.strip())
def verifyPOMperBinaryArtifact(artifacts, version):
print(' verify that each binary artifact has a deployed POM...')
reBinaryJarWar = re.compile(r'%s\.[jw]ar$' % re.escape(version))
for artifact in [a for a in artifacts if reBinaryJarWar.search(a)]:
POM = artifact[:-4] + '.pom'
if POM not in artifacts:
raise RuntimeError('missing: POM for %s' % artifact)
def verifyDeployedPOMsCoordinates(artifacts, version):
"""
verify that each POM's coordinate (drawn from its content) matches
its filepath, and verify that the corresponding artifact exists.
"""
print(" verify deployed POMs' coordinates...")
for POM in [a for a in artifacts if a.endswith('.pom')]:
treeRoot = ET.parse(POM).getroot()
groupId, artifactId, packaging, POMversion = getPOMcoordinate(treeRoot)
POMpath = '%s/%s/%s/%s-%s.pom' \
% (groupId.replace('.', '/'), artifactId, version, artifactId, version)
if not POM.endswith(POMpath):
raise RuntimeError("Mismatch between POM coordinate %s:%s:%s and filepath: %s"
% (groupId, artifactId, POMversion, POM))
# Verify that the corresponding artifact exists
artifact = POM[:-3] + packaging
if artifact not in artifacts:
raise RuntimeError('Missing corresponding .%s artifact for POM %s' % (packaging, POM))
def crawl(downloadedFiles, urlString, targetDir, exclusions=set()):
for text, subURL in getDirEntries(urlString):
if text not in exclusions:
path = os.path.join(targetDir, text)
if text.endswith('/'):
if not os.path.exists(path):
os.makedirs(path)
crawl(downloadedFiles, subURL, path, exclusions)
else:
if not os.path.exists(path) or FORCE_CLEAN:
scriptutil.download(text, subURL, targetDir, quiet=True, force_clean=FORCE_CLEAN)
downloadedFiles.append(path)
sys.stdout.write('.')
def make_java_config(parser, java17_home):
def _make_runner(java_home, version):
print('Java %s JAVA_HOME=%s' % (version, java_home))
if cygwin:
java_home = subprocess.check_output('cygpath -u "%s"' % java_home, shell=True).decode('utf-8').strip()
cmd_prefix = 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % \
(java_home, java_home, java_home)
s = subprocess.check_output('%s; java -version' % cmd_prefix,
shell=True, stderr=subprocess.STDOUT).decode('utf-8')
if s.find(' version "%s' % version) == -1:
parser.error('got wrong version for java %s:\n%s' % (version, s))
def run_java(cmd, logfile):
run('%s; %s' % (cmd_prefix, cmd), logfile)
return run_java
java11_home = os.environ.get('JAVA_HOME')
if java11_home is None:
parser.error('JAVA_HOME must be set')
run_java11 = _make_runner(java11_home, '11')
run_java17 = None
if java17_home is not None:
run_java17 = _make_runner(java17_home, '17')
jc = namedtuple('JavaConfig', 'run_java11 java11_home run_java17 java17_home')
return jc(run_java11, java11_home, run_java17, java17_home)
version_re = re.compile(r'(\d+\.\d+\.\d+(-ALPHA|-BETA)?)')
revision_re = re.compile(r'rev-([a-f\d]+)')
def parse_config():
epilogue = textwrap.dedent('''
Example usage:
python3 -u dev-tools/scripts/smokeTestRelease.py https://dist.apache.org/repos/dist/dev/solr/solr-9.0.0-RC1-rev-c7510a0...
''')
description = 'Utility to test a release.'
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--tmp-dir', metavar='PATH',
help='Temporary directory to test inside, defaults to /tmp/smoke_solr_$version_$revision')
parser.add_argument('--not-signed', dest='is_signed', action='store_false', default=True,
help='Indicates the release is not signed')
parser.add_argument('--local-keys', metavar='PATH',
help='Uses local KEYS file instead of fetching from https://archive.apache.org/dist/solr/KEYS')
parser.add_argument('--revision',
help='GIT revision number that release was built with, defaults to that in URL')
parser.add_argument('--version', metavar='X.Y.Z(-ALPHA|-BETA)?',
help='Version of the release, defaults to that in URL')
parser.add_argument('--test-java17', metavar='java17_home',
help='Path to Java17 home directory, to run tests with if specified')
parser.add_argument('--download-only', action='store_true', default=False,
help='Only perform download and sha hash check steps')
parser.add_argument('--dev-mode', action='store_true', default=False,
help='Enable dev mode, will not check branch compatibility')
parser.add_argument('url', help='Url pointing to release to test')
parser.add_argument('test_args', nargs=argparse.REMAINDER,
help='Arguments to pass to gradle for testing, e.g. -Dwhat=ever.')
c = parser.parse_args()
if c.version is not None:
if not version_re.match(c.version):
parser.error('version "%s" does not match format X.Y.Z[-ALPHA|-BETA]' % c.version)
else:
version_match = version_re.search(c.url)
if version_match is None:
parser.error('Could not find version in URL')
c.version = version_match.group(1)
if c.revision is None:
revision_match = revision_re.search(c.url)
if revision_match is None:
parser.error('Could not find revision in URL')
c.revision = revision_match.group(1)
print('Revision: %s' % c.revision)
if c.local_keys is not None and not os.path.exists(c.local_keys):
parser.error('Local KEYS file "%s" not found' % c.local_keys)
c.java = make_java_config(parser, c.test_java17)
if c.tmp_dir:
c.tmp_dir = os.path.abspath(c.tmp_dir)
else:
tmp = '/tmp/smoke_solr_%s_%s' % (c.version, c.revision)
c.tmp_dir = tmp
i = 1
while os.path.exists(c.tmp_dir):
c.tmp_dir = tmp + '_%d' % i
i += 1
return c
reVersion1 = re.compile(r'\>(\d+)\.(\d+)\.(\d+)(-alpha|-beta)?/\<', re.IGNORECASE)
reVersion2 = re.compile(r'-(\d+)\.(\d+)\.(\d+)(-alpha|-beta)?\.', re.IGNORECASE)
def main():
c = parse_config()
# Pick <major>.<minor> part of version and require script to be from same branch
scriptVersion = re.search(r'((\d+).(\d+)).(\d+)', scriptutil.find_current_version()).group(1).strip()
if not c.version.startswith(scriptVersion + '.') and not c.dev_mode:
raise RuntimeError('smokeTestRelease.py for %s.X is incompatible with a %s release.' % (scriptVersion, c.version))
print('NOTE: output encoding is %s' % sys.stdout.encoding)
smokeTest(c.java, c.url, c.revision, c.version, c.tmp_dir, c.is_signed, c.local_keys, ' '.join(c.test_args),
downloadOnly=c.download_only)
def smokeTest(java, baseURL, gitRevision, version, tmpDir, isSigned, local_keys, testArgs, downloadOnly=False):
startTime = datetime.datetime.now()
# Avoid @Nightly and @Badapple tests as they are slow and buggy
# Instead verify that the recent Jenkins tests pass
print('NOTE: Not running @Nightly or @BadApple tests. Please verify that recent Jenkins runs have passed.')
testArgs = '-Dtests.nightly=false -Dtests.badapples=false %s' % testArgs
if FORCE_CLEAN:
if os.path.exists(tmpDir):
raise RuntimeError('temp dir %s exists; please remove first' % tmpDir)
if not os.path.exists(tmpDir):
os.makedirs(tmpDir)
solrPath = None
print()
print('Load release URL "%s"...' % baseURL)
newBaseURL = unshortenURL(baseURL)
if newBaseURL != baseURL:
print(' unshortened: %s' % newBaseURL)
baseURL = newBaseURL
if baseURL.endswith('distribution/build/release'):
# Used when building release locally in Jenkins
solrPath = baseURL
else:
# An ordinary release has a 'solr' sub folder
for text, subURL in getDirEntries(baseURL):
if text.lower() == 'solr/':
solrPath = subURL
if solrPath is None:
raise RuntimeError('could not find solr subdir')
print()
print('Get KEYS...')
if local_keys is not None:
print(" Using local KEYS file %s" % local_keys)
keysFile = local_keys
else:
keysFileURL = "https://archive.apache.org/dist/solr/KEYS"
print(" Downloading online KEYS file %s" % keysFileURL)
scriptutil.download('KEYS', keysFileURL, tmpDir, force_clean=FORCE_CLEAN)
keysFile = '%s/KEYS' % (tmpDir)
if is_port_in_use(8983):
raise RuntimeError('Port 8983 is already in use. The smoketester needs it to test Solr')
print()
print('Test Solr...')
checkSigs(solrPath, version, tmpDir, isSigned, keysFile)
if not downloadOnly:
unpackAndVerify(java, tmpDir, 'solr-%s.tgz' % version, gitRevision, version, testArgs)
unpackAndVerify(java, tmpDir, 'solr-%s-src.tgz' % version, gitRevision, version, testArgs)
print()
print('Test Maven artifacts...')
checkMaven(solrPath, tmpDir, gitRevision, version, isSigned, keysFile)
else:
print("Solr test done (--download-only specified)")
print('\nSUCCESS! [%s]\n' % (datetime.datetime.now() - startTime))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Keyboard interrupt...exiting')
|
the-stack_0_24641
|
'''
This module performs a few early syntax check on the input AST.
It checks the conformance of the input code to Pythran specific
constraints.
'''
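# Illustrative usage, assuming a gast-parsed module:
#   import gast as ast
#   check_syntax(ast.parse("def foo(*args): pass"))
#   # -> raises PythranSyntaxError("Varargs not supported")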
from pythran.tables import MODULES
from pythran.intrinsic import Class
import gast as ast
class PythranSyntaxError(SyntaxError):
def __init__(self, msg, node=None):
SyntaxError.__init__(self, msg)
if node:
self.filename = getattr(node, 'filename', None)
self.lineno = node.lineno
self.offset = node.col_offset
class SyntaxChecker(ast.NodeVisitor):
"""
Visit an AST and raise a PythranSyntaxError upon unsupported construct.
Attributes
----------
attributes : {str}
Possible attributes from Pythonic modules/submodules.
"""
def __init__(self):
""" Gather attributes from MODULES content. """
self.attributes = set()
def save_attribute(module):
""" Recursively save Pythonic keywords as possible attributes. """
self.attributes.update(module.keys())
for signature in module.values():
if isinstance(signature, dict):
save_attribute(signature)
elif isinstance(signature, Class):
save_attribute(signature.fields)
for module in MODULES.values():
save_attribute(module)
def visit_Module(self, node):
err = ("Top level statements can only be assignments, strings,"
"functions, comments, or imports")
WhiteList = ast.FunctionDef, ast.Import, ast.ImportFrom, ast.Assign
for n in node.body:
if isinstance(n, ast.Expr) and isinstance(n.value, ast.Str):
continue
if isinstance(n, WhiteList):
continue
raise PythranSyntaxError(err, n)
self.generic_visit(node)
def visit_Interactive(self, node):
raise PythranSyntaxError("Interactive session not supported", node)
def visit_Expression(self, node):
raise PythranSyntaxError("Interactive expressions not supported", node)
def visit_Suite(self, node):
raise PythranSyntaxError(
"Suites are specific to Jython and not supported", node)
def visit_ClassDef(self, _):
raise PythranSyntaxError("Classes not supported")
def visit_Print(self, node):
self.generic_visit(node)
if node.dest:
raise PythranSyntaxError(
"Printing to a specific stream not supported", node.dest)
def visit_With(self, node):
raise PythranSyntaxError("With statements not supported", node)
def visit_Starred(self, node):
raise PythranSyntaxError("Call with star arguments not supported",
node)
def visit_keyword(self, node):
if node.arg is None:
raise PythranSyntaxError("Call with kwargs not supported", node)
def visit_Call(self, node):
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.generic_visit(node)
if node.args.vararg:
raise PythranSyntaxError("Varargs not supported", node)
if node.args.kwarg:
raise PythranSyntaxError("Keyword arguments not supported",
node)
def visit_Raise(self, node):
self.generic_visit(node)
if node.cause:
raise PythranSyntaxError(
"Cause in raise statements not supported",
node)
def visit_Attribute(self, node):
self.generic_visit(node)
if node.attr not in self.attributes:
raise PythranSyntaxError(
"Attribute '{0}' unknown".format(node.attr),
node)
def visit_Import(self, node):
""" Check if imported module exists in MODULES. """
for alias in node.names:
current_module = MODULES
# Recursive check for submodules
for path in alias.name.split('.'):
if path not in current_module:
raise PythranSyntaxError(
"Module '{0}' unknown.".format(alias.name),
node)
else:
current_module = current_module[path]
def visit_ImportFrom(self, node):
"""
Check validity of imported functions.
Check:
- no level specific value are provided.
- a module is provided
- module/submodule exists in MODULES
- imported function exists in the given module/submodule
"""
if node.level:
raise PythranSyntaxError("Relative import not supported", node)
if not node.module:
raise PythranSyntaxError("import from without module", node)
module = node.module
current_module = MODULES
# Check if module exists
for path in module.split('.'):
if path not in current_module:
raise PythranSyntaxError(
"Module '{0}' unknown.".format(module),
node)
else:
current_module = current_module[path]
# Check if imported functions exist
for alias in node.names:
if alias.name == '*':
continue
elif alias.name not in current_module:
raise PythranSyntaxError(
"identifier '{0}' not found in module '{1}'".format(
alias.name,
module),
node)
def visit_Exec(self, node):
raise PythranSyntaxError("Exec statement not supported", node)
def visit_Global(self, node):
raise PythranSyntaxError("Global variables not supported", node)
def check_syntax(node):
    '''Does nothing but raise PythranSyntaxError when needed'''
SyntaxChecker().visit(node)
def check_specs(mod, specs, renamings, types):
'''
    Does nothing but raise PythranSyntaxError if the specs
    are incompatible with the actual code
'''
from pythran.types.tog import unify, clone, tr
from pythran.types.tog import Function, TypeVariable, InferenceError
functions = {renamings.get(k, k): v for k, v in specs.functions.items()}
for fname, signatures in functions.items():
try:
ftype = types[fname]
except KeyError:
raise PythranSyntaxError(
"Invalid spec: exporting undefined function `{}`"
.format(fname))
for signature in signatures:
sig_type = Function([tr(p) for p in signature], TypeVariable())
try:
unify(clone(sig_type), clone(ftype))
except InferenceError:
raise PythranSyntaxError(
"Specification for `{}` does not match inferred type:\n"
"expected `{}`\n"
"got `Callable[[{}], ...]`".format(
fname,
ftype,
", ".join(map(str, sig_type.types[:-1])))
)
|
the-stack_0_24642
|
import random
import discord
import asyncio
import aiohttp
from time import perf_counter
from utils import checks
from TextToOwO import owo
from textwrap import dedent
from utils.db import SettingsDB
from discord.ext import commands
from datetime import datetime
from collections import Counter, OrderedDict
from utils.watora import globprefix, log, owner_id, ver, get_uptime, get_server_prefixes, is_basicpatron, is_patron, is_lover, get_str, sweet_bar, format_mentions, get_image_from_url
from cogs.gestion import cmd_help_msg as cmds
class Useful(commands.Cog):
"""The useful cog"""
def __init__(self, bot):
self.bot = bot
self.next_cost = ['80.00', '90.00', '105.00', '120.00', '150.00']
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
@commands.command(name="marry", aliases=["married", "mary", "mariage", "epouse", "epouser"])
async def _marry(self, ctx, *, user: discord.Member = None):
"""
{command_prefix}marry [user]
{help}
"""
husband_role_id = 501330901653782558
settings = await SettingsDB.get_instance().get_glob_settings()
embed = discord.Embed()
embed.color = 0xFF0000
if user == ctx.author:
return await ctx.send(get_str(ctx, "cmd-weeb-alone"))
if user == ctx.me:
if ctx.guild:
ids = [r.id for r in ctx.author.roles]
if husband_role_id in ids: # Watora's Husband can marry Watora
embed.title = "❤ " + get_str(ctx, "cmd-marry-happily")
embed.description = get_str(
ctx, "cmd-marry-success").format(f"**{ctx.author.name}**", f"**{user.name}**")
await ctx.send(embed=embed)
if str(user.id) in settings.marry:
before = settings.marry[str(user.id)]['id']
del settings.marry[before] # Watora divorces before
date = datetime.today().strftime("%d %b %Y")
settings.marry[str(ctx.author.id)] = {}
settings.marry[str(ctx.author.id)]['id'] = str(user.id)
settings.marry[str(ctx.author.id)]['date'] = date
                    # store the name in case the user is no longer in any of the bot's servers someday.
settings.marry[str(ctx.author.id)]['name'] = user.name
settings.marry[str(user.id)] = {}
settings.marry[str(user.id)]['id'] = str(ctx.author.id)
settings.marry[str(user.id)]['date'] = date
                    # store the name in case the user is no longer in any of the bot's servers someday.
settings.marry[str(user.id)]['name'] = ctx.author.name
await SettingsDB.get_instance().set_glob_settings(settings)
return
if not await is_lover(self.bot, ctx.author):
return await ctx.send(get_str(ctx, "cmd-weeb-dont-touch-me"))
else:
return await ctx.send(get_str(ctx, "cmd-marry-too-young") + " {}".format("<:WatoraHyperBlush:458349268944814080>"))
if not user:
if str(ctx.author.id) in settings.marry:
embed.title = "❤ {} ({})".format(get_str(ctx, "cmd-marry-married-to").format(
await self.bot.safe_fetch('user', int(settings.marry[str(ctx.author.id)]["id"]))
or settings.marry[str(ctx.author.id)]['name']),
settings.marry[str(ctx.author.id)]['date'])
try:
return await ctx.send(embed=embed)
except discord.Forbidden:
return await ctx.send(get_str(ctx, "need-embed-permission"), delete_after=20)
else:
return await self.bot.send_cmd_help(ctx)
embed.title = "💍 " + \
get_str(ctx, "cmd-marry-proposed").format(ctx.author.name, user.name)
if str(user.id) in settings.marry:
married_with = (await self.bot.safe_fetch('user', int(settings.marry[str(user.id)]['id']))
or settings.marry[str(user.id)]['name'])
married_since = settings.marry[str(user.id)]['date']
embed.description = "{} ({})".format(get_str(
ctx, "cmd-marry-user-a-married").format(user.name, married_with), married_since)
if married_with == ctx.author:
embed.description = get_str(ctx, "cmd-marry-a-together")
try:
return await ctx.send(embed=embed)
except discord.Forbidden:
return await ctx.send(get_str(ctx, "need-embed-permission"), delete_after=20)
elif str(ctx.author.id) in settings.marry:
embed.description = get_str(ctx, "cmd-marry-author-a-married").format(
"`{}divorce`".format(get_server_prefixes(ctx.bot, ctx.guild)))
try:
return await ctx.send(embed=embed)
except discord.Forbidden:
return await ctx.send(get_str(ctx, "need-embed-permission"), delete_after=20)
embed.description = get_str(
ctx, "cmd-marry-confirmation").format("`yes`", "`no`")
confirm_message = await ctx.send(embed=embed)
def check(m):
if m.author.bot or m.author != user:
return False
if m.channel != ctx.channel:
return False
if m.content:
return True
return False
try:
response_message = await self.bot.wait_for('message', timeout=120, check=check)
except asyncio.TimeoutError:
try:
await confirm_message.delete()
except discord.HTTPException:
pass
return
if response_message.author == ctx.author:
try:
await confirm_message.delete()
except discord.HTTPException:
pass
return
if response_message.content.lower().startswith('y'):
if str(user.id) in settings.marry: # 2nd check if it changed since the command call
married_with = (await self.bot.safe_fetch('user', int(settings.marry[str(user.id)]['id']))
or settings.marry[str(user.id)]['name'])
embed.description = get_str(
ctx, "cmd-marry-user-a-married").format(user.name, married_with)
if married_with == ctx.author:
embed.description = get_str(ctx, "cmd-marry-a-together")
try:
return await ctx.send(embed=embed)
except discord.Forbidden:
return await ctx.send(get_str(ctx, "need-embed-permission"), delete_after=20)
elif str(ctx.author.id) in settings.marry:
embed.description = get_str(ctx, "cmd-marry-author-a-married").format(
"`{}divorce`".format(get_server_prefixes(ctx.bot, ctx.guild)))
try:
return await ctx.send(embed=embed)
except discord.Forbidden:
return await ctx.send(get_str(ctx, "need-embed-permission"), delete_after=20)
embed.title = "❤ " + get_str(ctx, "cmd-marry-happily")
embed.description = get_str(
ctx, "cmd-marry-success").format(f"**{ctx.author.name}**", f"**{user.name}**")
await ctx.send(embed=embed)
date = datetime.today().strftime("%d %b %Y")
settings.marry[str(ctx.author.id)] = {}
settings.marry[str(ctx.author.id)]['id'] = str(user.id)
settings.marry[str(ctx.author.id)]['date'] = date
            # store the name in case the user is no longer in any of the bot's servers someday.
settings.marry[str(ctx.author.id)]['name'] = user.name
settings.marry[str(user.id)] = {}
settings.marry[str(user.id)]['id'] = str(ctx.author.id)
settings.marry[str(user.id)]['date'] = date
            # store the name in case the user is no longer in any of the bot's servers someday.
settings.marry[str(user.id)]['name'] = ctx.author.name
await SettingsDB.get_instance().set_glob_settings(settings)
elif response_message.content.lower().startswith('n'):
await ctx.send(get_str(ctx, "cmd-marry-declined").format(ctx.author.mention) + " <:WatoraDisappointed:458349267715883060>", delete_after=30)
else:
try:
await confirm_message.delete()
except discord.HTTPException:
pass
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
@commands.command(aliases=["divorcer", "divorces", "div", "cancelmarriage"])
async def divorce(self, ctx):
"""
{command_prefix}divorce
{help}
"""
settings = await SettingsDB.get_instance().get_glob_settings()
if str(ctx.author.id) not in settings.marry:
return await ctx.send(get_str(ctx, "cmd-divorce-a-single") + " <:WatoraDisappointed:458349267715883060>")
married = settings.marry[str(ctx.author.id)]
married_since = married['date']
married_with = await self.bot.safe_fetch('user', int(married["id"])) or married['name']
datetime_date = datetime.strptime(married_since, '%d %b %Y')
since_married = (ctx.message.created_at - datetime_date).days
since_married_full = "{} ({})".format(f"**{married_since}**", get_str(ctx, "cmd-userinfo-days-ago").format(
since_married) if since_married > 1 else get_str(ctx, "cmd-userinfo-day-ago").format(since_married))
confirm_message = await ctx.send(get_str(ctx, "cmd-divorce-confirmation").format(f"`{married_with}`", since_married_full, "`yes`", "`no`"))
def check(m):
if m.author.bot or m.author != ctx.author:
return False
if m.channel != ctx.channel:
return False
if m.content:
return True
return False
try:
response_message = await self.bot.wait_for('message', timeout=120, check=check)
except asyncio.TimeoutError:
try:
await confirm_message.delete()
except discord.HTTPException:
pass
return await ctx.send(get_str(ctx, "cmd-divorce-cancelled"), delete_after=30)
if response_message.content.lower().startswith('y'):
del settings.marry[str(ctx.author.id)]
try:
del settings.marry[married['id']]
except KeyError:
                log.error(
                    f"ERROR: {married['name']} ({married['id']}) does not appear to be married to {ctx.author}, but they just divorced")
await SettingsDB.get_instance().set_glob_settings(settings)
await ctx.send("☑ " + get_str(ctx, "cmd-divorce-success"))
else:
await ctx.send(get_str(ctx, "cmd-divorce-cancelled"), delete_after=30)
@commands.command(aliases=["aide", "command", "commands", "h"])
@commands.cooldown(rate=1, per=1.5, type=commands.BucketType.user)
async def help(self, ctx, *, command: str = None):
"""
{command_prefix}help [command]
{command_prefix}help [category]
{command_prefix}help
{help}
"""
if command:
command = command.strip('[]')
if command.lower() in cmds:
msg = "```apache\n"
a = 0
for cmd in cmds[command.lower()]:
if len(cmd) > a:
a = len(cmd)
for cmd in cmds[command.lower()]:
if len(cmd) < a:
for n in range(a - len(cmd)):
msg += " "
help = get_str(ctx, f"cmd-{cmd}-help").split("\n")[0]
msg += "{} : {}\n".format(cmd, help)
msg += "```"
return await ctx.send(msg)
if command.startswith(get_server_prefixes(ctx.bot, ctx.guild)):
command = command[len(
get_server_prefixes(ctx.bot, ctx.guild)):]
if not self.bot.get_command(command):
return await ctx.send(get_str(ctx, "cmd-help-cmd-not-found").format(f"`{command}`"))
else:
result = self.bot.get_command(command)
await self.bot.send_cmd_help(ctx, result)
else:
embed = discord.Embed()
embed.set_author(name=get_str(ctx, "cmd-help-title"),
url="https://docs.watora.xyz/commands/music", icon_url=self.bot.user.avatar_url)
if not ctx.guild:
embed.color = 0x71368a
else:
embed.color = ctx.me.color
embed.description = get_str(ctx, "cmd-help-description") + "\n" + get_str(
ctx, "cmd-help-support").format('[**World of Watora**](https://discord.gg/ArJgTpM).\n**__**')
if ctx.guild:
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
is_admin = ctx.channel.permissions_for(ctx.author).manage_guild
disabled = []
if str(ctx.channel.id) in settings.disabledchannels:
disabled = settings.disabledchannels[str(ctx.channel.id)]
for key in cmds:
try:
title = key[0].upper() + key[1:]
descrip = '**,** '.join([f"`{cm}`" for cm in cmds[key] if not ctx.guild or is_admin or (
(cm.lower() not in settings.disabledcommands) and (cm.lower() not in disabled))])
if descrip:
embed.add_field(
name=f'{title} ({len(cmds[key])})', value=descrip, inline=False)
except KeyError:
pass
embed.add_field(name="__", value=get_str(ctx, "cmd-help-more-info-cmd") + " **`{}help [command]`**".format(get_server_prefixes(
ctx.bot, ctx.guild)) + "\n" + get_str(ctx, "cmd-help-more-info-cat") + " **`{}help [category]`**".format(get_server_prefixes(ctx.bot, ctx.guild)))
try:
await ctx.send(embed=embed)
except discord.Forbidden:
await ctx.send(get_str(ctx, "need-embed-permission"))
@commands.command(aliases=['botinfo', 'infobot'])
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def stats(self, ctx):
"""
{command_prefix}stats
{help}
"""
settings = await SettingsDB.get_instance().get_glob_settings()
# users = len(set(self.bot.get_all_members())) # set removes duplicate # BLOCKING CODE FFS
servers = self.bot.guild_count
# channels = len([c for c in self.bot.get_all_channels()]) # BLOCKING CODE FFS
embed = discord.Embed()
owner = await self.bot.safe_fetch('user', owner_id) or str(owner_id)
embed.set_author(name=f"{self.bot.user.name} v{ver}",
icon_url=self.bot.user.avatar_url)
if isinstance(ctx.channel, discord.abc.GuildChannel):
embed.color = ctx.guild.me.color
# embed.add_field(name="Version", value=ver, inline=False)
embed.add_field(name="Library", value="Discord.py v{}".format(
str(discord.__version__)), inline=False)
embed.add_field(name="Uptime", value=str(get_uptime()))
embed.add_field(name="Guild{}".format(
"s" if servers > 1 else ""), value=servers)
# embed.add_field(name="Channels", value=channels)
embed.add_field(name="Shard{}".format(
"s" if self.bot.shard_count > 1 else ""), value=self.bot.shard_count)
# embed.add_field(name="Users", value=users)
embed.add_field(name="Owner", value=owner)
embed.add_field(name="Commands", value=len(self.bot.commands))
embed.add_field(name="Autoplaylists",
value=len(settings.autoplaylists))
embed.add_field(name="Donation",
value="[PayPal](https://www.paypal.me/watora)\n[Patreon](https://www.patreon.com/watora)")
embed.add_field(
name="Info", value="[Website](https://watora.xyz/)\n[FAQ](https://docs.watora.xyz/faq)")
embed.add_field(
name="Social", value="[Discord](https://discordapp.com/invite/ArJgTpM)\n[Twitter](https://twitter.com/watorabot)")
try:
await ctx.send(embed=embed)
except discord.Forbidden:
await ctx.send(get_str(ctx, "need-embed-permission"))
@commands.command()
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
async def ping(self, ctx):
"""
{command_prefix}ping
{help}
"""
embed = discord.Embed()
if isinstance(ctx.channel, discord.abc.GuildChannel):
embed.color = ctx.guild.me.color
embed.set_author(
name="Pong!", icon_url="https://cdn.discordapp.com/attachments/268495024801447936/349241478750404609/dmOYQuS.png")
start = perf_counter()
await ctx.channel.trigger_typing()
end = perf_counter()
embed.description = "%s ms!" % int((end - start) * 1000)
try:
await ctx.send(embed=embed)
except discord.Forbidden:
return await ctx.send(get_str(ctx, "need-embed-permission"))
@commands.command()
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
async def pong(self, ctx):
"""
{command_prefix}pong
        Wut ? Did you just find an easter egg ?
"""
await ctx.send("<:WatoraLost:458349268621721601>")
@commands.command()
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
@commands.guild_only()
async def shard(self, ctx):
"""
{command_prefix}shard
{help}
"""
await ctx.send(get_str(ctx, "cmd-shard").format(f"`{ctx.guild.shard_id}`"))
@commands.cooldown(rate=1, per=3, type=commands.BucketType.user)
@commands.command(aliases=["votes", "upvote", "upvotes"])
async def vote(self, ctx, user: discord.Member = None):
"""
{command_prefix}vote
        Allows you to upvote Watora on some bot lists.
"""
if not user:
user = ctx.author
e = discord.Embed()
if 'Update' in self.bot.cogs:
msg = ""
asyncio.ensure_future(self.bot.cogs['Update'].update())
votes = self.bot.cogs['Update'].votes
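            # Tally one count per vote entry and rank voters by number of votes (highest first).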
counter = Counter(k['id'] for k in votes if k.get('id'))
counter = OrderedDict(counter.most_common())
top5 = []
for i, id in enumerate(counter, start=1):
if i > 5:
break
top5.append(id)
member = await self.bot.safe_fetch('user', int(id)) or id
msg += f"`{i}` **{member}** : **{counter[id]}** vote{'s' if counter[id] > 1 else ''}\n"
month = datetime.now().strftime("%B")
if str(user.id) not in top5:
if str(user.id) in counter:
pos = list(counter).index(str(user.id)) + 1
nb = counter[str(user.id)]
if pos == 6:
msg += f"`{pos}` **{user}** : **{nb}** vote{'s' if nb > 1 else ''}\n"
else:
e.set_footer(
text=f"{pos} - {user} : {nb} vote{'s' if nb > 1 else ''}", icon_url=user.avatar_url)
if isinstance(ctx.channel, discord.abc.GuildChannel):
e.color = ctx.guild.me.color
e.set_thumbnail(url=self.bot.user.avatar_url)
e.set_author(
name=f"Top Voters of {month}:", url=f"https://discordbots.org/bot/{self.bot.user.id}/vote")
e.description = f"{msg}\n**[Vote for {self.bot.user.name} on Discord Bot List](https://discordbots.org/bot/{self.bot.user.id}/vote)**"
try:
await ctx.send(embed=e)
except discord.Forbidden:
return await ctx.send(get_str(ctx, "need-embed-permission"))
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
@commands.command(aliases=["sayd", "say", "watora", "talk", "write", "med"])
async def me(self, ctx, *, content=None):
"""
{command_prefix}say [content]
{help}
"""
if not content or 'd' in ctx.invoked_with.lower():
ctx.command.reset_cooldown(ctx)
try:
await ctx.message.delete()
except discord.HTTPException:
pass
return # just delete the message and go away
ctx.command.reset_cooldown(ctx)
content = await self.bot.format_cc(content, ctx.message)
content = format_mentions(content)
pic = get_image_from_url(content)
if pic:
e = discord.Embed()
e.set_image(url=pic)
content = content.replace(pic, '')
if self.bot.owo_map.get(ctx.guild.id, False):
content = owo.text_to_owo(content)
try:
return await ctx.send(embed=e, content=content)
except discord.Forbidden:
return await ctx.send(get_str(ctx, "need-embed-permission"))
if self.bot.owo_map.get(ctx.guild.id, False):
content = owo.text_to_owo(content)
await ctx.send(content)
@commands.command()
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
async def avatar(self, ctx, *, user: discord.Member = None):
"""
{command_prefix}avatar [user]
{command_prefix}avatar
{help}
"""
if not user:
user = ctx.author
embed = discord.Embed()
if not isinstance(ctx.channel, discord.abc.PrivateChannel):
embed.colour = user.colour
if user == self.bot.user:
embed.set_author(name=get_str(ctx, "cmd-avatar-my-avatar"))
else:
if ctx.author == user:
embed.set_author(name=get_str(ctx, "cmd-avatar-your-avatar"))
else:
if ctx.guild:
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
if settings.language == "french":
embed.set_author(name=get_str(ctx, "cmd-avatar-someone-avatar")
.format("'" if user.name.lower()[0] in ["a", "e", "i", "u", "y", "o"] else "e ", user))
else:
embed.set_author(name=get_str(
ctx, "cmd-avatar-someone-avatar").format(user))
ava = user.avatar_url
embed.set_image(url=ava or user.default_avatar_url)
embed.set_author(name=embed.author.name,
url=ava or user.default_avatar_url) # Hacky
try:
await ctx.send(embed=embed)
except discord.Forbidden:
await ctx.send(get_str(ctx, "need-embed-permission"))
@commands.guild_only()
@commands.command(aliases=["infouser", "profile", "ui", "profil", "memberinfo", "infomember", "whois"])
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
async def userinfo(self, ctx, *, user: discord.Member = None):
"""
{command_prefix}userinfo [user]
{command_prefix}userinfo
{help}
"""
if not user:
user = ctx.author
# shared = sum([1 for m in self.bot.get_all_members() if m.id == user.id])
if user.voice:
            other_people = len(user.voice.channel.members) - 1
voice_fmt = '%s' % (get_str(ctx, "cmd-userinfo-voice-members") if other_people > 1 else get_str(
ctx, "cmd-userinfo-voice-member")) if other_people else get_str(ctx, "cmd-userinfo-voice-alone")
voice = voice_fmt.format(user.voice.channel.name, other_people)
else:
voice = get_str(ctx, "cmd-userinfo-not-connected")
roles = [x.name for x in user.roles if x.name != "@everyone"]
joined_at = user.joined_at
since_created = (ctx.message.created_at - user.created_at).days
try:
since_joined = (ctx.message.created_at - joined_at).days
except TypeError:
since_joined = 0
user_joined = joined_at.strftime("%d %b %Y %H:%M")
user_created = user.created_at.strftime("%d %b %Y %H:%M")
try:
member_number = sorted(
ctx.guild.members, key=lambda m: m.joined_at).index(user) + 1
except TypeError:
member_number = 0
created_on = "{}\n(".format(user_created) + "{}".format(get_str(ctx, "cmd-userinfo-days-ago")
if since_created > 1 else get_str(ctx, "cmd-userinfo-day-ago")).format(since_created) + ")"
joined_on = "{}\n(".format(user_joined) + "{}".format(get_str(ctx, "cmd-userinfo-days-ago")
if since_joined > 1 else get_str(ctx, "cmd-userinfo-day-ago")).format(since_joined) + ")"
game = "{}".format(user.status)
game = game[0].upper() + game[1:]
if user.activity:
if isinstance(user.activity, discord.Spotify):
game = get_str(
ctx, "cmd-userinfo-listening-music").format(user.activity.title, user.activity.artist)
elif user.activity.type == discord.ActivityType.playing:
game = get_str(ctx, "cmd-userinfo-playing") + \
" {}".format(user.activity.name)
elif user.activity.type == discord.ActivityType.watching: # watching
game = get_str(ctx, "cmd-userinfo-watching") + \
" {}".format(user.activity.name)
elif user.activity.type == discord.ActivityType.streaming:
game = get_str(ctx, "cmd-userinfo-streaming") + \
" [{}]({})".format(user.activity, user.activity.url)
if roles:
try:
roles = sorted(roles, key=[
x.name for x in user.guild.roles[::-1] if x.name != "@everyone"].index)
except ValueError: # idk
pass
nbroles = len(roles)
roles = ", ".join(roles)
else:
roles = "None"
nbroles = 0
data = discord.Embed(description=game)
if not isinstance(ctx.channel, discord.abc.PrivateChannel):
data.colour = user.colour
data.add_field(name=get_str(
ctx, "cmd-userinfo-joined-discord"), value=created_on)
data.add_field(name=get_str(
ctx, "cmd-userinfo-joined-guild"), value=joined_on)
# data.add_field(name="{}".format(get_str(ctx, "cmd-userinfo-servers-shared") if shared > 2 else get_str(ctx, "cmd-userinfo-server-shared")), value=shared)
data.add_field(name=get_str(ctx, "cmd-userinfo-voice"), value=voice)
settings = await SettingsDB.get_instance().get_glob_settings()
if str(user.id) in settings.marry:
married_with = await self.bot.safe_fetch('user', int(settings.marry[str(user.id)]['id'])) or settings.marry[str(user.id)]['name']
married_since = settings.marry[str(user.id)]['date']
data.add_field(name=get_str(ctx, "cmd-userinfo-married-with"),
value=f"💕 {married_with} ({married_since})", inline=False)
data.add_field(name="{}".format(get_str(ctx, "cmd-userinfo-roles") if nbroles >
1 else get_str(ctx, "cmd-userinfo-role")) + " [%s]" % nbroles, value=roles, inline=False)
data.set_footer(text=get_str(ctx, "cmd-userinfo-member", can_owo=False) + " #{} | ".format(
member_number) + get_str(ctx, "cmd-userinfo-user-id", can_owo=False) + ":{}".format(user.id))
name = str(user)
name = " ~ ".join((name, user.nick)) if user.nick else name
if user.avatar_url:
data.set_author(name=name, url=user.avatar_url)
data.set_thumbnail(url=user.avatar_url)
else:
data.set_author(name=name)
try:
await ctx.send(embed=data)
except discord.HTTPException:
await ctx.send(get_str(ctx, "need-embed-permission"))
@commands.guild_only()
@commands.command(aliases=["guildinfo", "infoguild", "infoserver", "si", "gi", "sinfo", "ginfo"])
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
async def serverinfo(self, ctx, *, guild=None):
"""
{command_prefix}serverinfo
{help}
"""
if guild: # owner can see other's guild informations
if ctx.author.id == owner_id:
try:
guild = await self.bot.safe_fetch('guild', int(guild))
except ValueError:
return await ctx.send("Guild not found...")
if not guild:
return await ctx.send("Guild not found...")
else:
guild = ctx.guild
else:
guild = ctx.guild
online = len([m.status for m in guild.members
if m.status == discord.Status.online or
m.status == discord.Status.idle or
m.status == discord.Status.dnd])
total_users = len(guild.members)
total_bot = len([m for m in guild.members if m.bot])
text_channels = len([x for x in guild.channels
if type(x) == discord.TextChannel])
voice_channels = len([x for x in guild.channels
if type(x) == discord.VoiceChannel])
passed = (ctx.message.created_at - guild.created_at).days
created_at = get_str(ctx, "cmd-serverinfo-since", can_owo=False).format(
guild.created_at.strftime("%d %b %Y %H:%M"), passed)
colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])
colour = int(colour, 16)
data = discord.Embed(
description=created_at,
colour=discord.Colour(value=colour))
data.add_field(name=get_str(ctx, "cmd-serverinfo-region"),
value=str(guild.region))
data.add_field(name=get_str(ctx, "cmd-serverinfo-users"), value="{}/{} ({} bot{})".format(
            online, total_users, total_bot, 's' if total_bot > 1 else ''))
data.add_field(name=get_str(
ctx, "cmd-serverinfo-textchannels"), value=text_channels)
data.add_field(name=get_str(
ctx, "cmd-serverinfo-voicechannels"), value=voice_channels)
data.add_field(name=get_str(ctx, "cmd-serverinfo-roles"),
value=len(guild.roles))
data.add_field(name=get_str(ctx, "cmd-serverinfo-owner"),
value=str(guild.owner))
data.set_footer(text=get_str(
ctx, "cmd-serverinfo-server-id") + ": " + str(guild.id))
claimed = await self.bot.server_is_claimed(guild.id)
if claimed:
user_id = int(claimed[0])
claimed = list(claimed[1].items())[0]
user = await self.bot.safe_fetch('member', user_id, guild=guild) or user_id
data.add_field(
name="Patreon Server", value="Claimed by {}. Since {}".format(user, claimed[1]))
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
if settings.defaultnode:
member = ctx.guild.get_member(int(settings.defaultnode))
if member:
# TODO: Translations
data.add_field(name='Default music node',
value=f"Hosted by {member}", inline=False)
if guild.icon_url:
data.set_author(name=guild.name, url=guild.icon_url)
data.set_thumbnail(url=guild.icon_url)
else:
data.set_author(name=guild.name)
try:
await ctx.send(embed=data)
except discord.HTTPException:
await ctx.send(get_str(ctx, "need-embed-permission"))
@commands.guild_only()
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
@commands.command(aliases=["inforole"])
async def roleinfo(self, ctx, *, name):
"""
{command_prefix}roleinfo [role]
{help}
"""
role = self.bot.get_role(ctx, name)
if not role:
return await ctx.send(get_str(ctx, "cmd-joinclan-role-not-found").format(name))
role_count = 0
all_users = []
for user in ctx.guild.members:
if role in user.roles:
all_users.append('{}#{}'.format(user.name, user.discriminator))
role_count += 1
all_users.sort()
all_users = ', '.join(all_users)
em = discord.Embed(title=role.name, color=role.color)
em.add_field(name='ID', value=role.id, inline=False)
em.add_field(name='{}'.format(get_str(ctx, "cmd-roleinfo-users") if role_count >
1 else get_str(ctx, "cmd-roleinfo-user")), value=role_count)
if str(role.color) != "#000000":
em.add_field(name=get_str(ctx, "cmd-roleinfo-color"),
value=str(role.color))
em.set_thumbnail(url='http://www.colorhexa.com/%s.png' %
str(role.color).strip("#"))
em.add_field(name=get_str(ctx, "cmd-roleinfo-mentionable"), value=get_str(ctx,
"music-plsettings-yes") if role.mentionable else get_str(ctx, "music-plsettings-no"))
if 0 < role_count < 16:
em.add_field(name=get_str(ctx, "cmd-roleinfo-all-users"),
value=all_users, inline=False)
em.add_field(name=get_str(ctx, "cmd-roleinfo-creation"),
value=role.created_at.strftime("%Y-%m-%d"))
if str(role.color) != "#000000":
em.set_thumbnail(url='http://www.colorhexa.com/%s.png' %
str(role.color).strip("#"))
try:
await ctx.send(embed=em)
except discord.Forbidden:
await ctx.send(get_str(ctx, "need-embed-permission"))
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
@commands.command(aliases=["about"])
async def info(self, ctx):
"""
{command_prefix}about
{help}
"""
msg = get_str(ctx, "cmd-info")
try:
await ctx.author.send(msg)
except discord.HTTPException:
return await ctx.send(get_str(ctx, "cant-send-pm"))
try:
await ctx.message.add_reaction("☑")
except discord.Forbidden:
await ctx.send(get_str(ctx, "message-send-to-mp"))
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
@commands.command(aliases=["cred", "creds", "crds", "crd"])
async def credits(self, ctx):
"""
{command_prefix}credits
{help}
"""
em = discord.Embed(description=get_str(ctx, "cmd-credits-title"))
em.add_field(name="Zenrac", value="[{}]({})".format(
get_str(ctx, "cmd-credits-bot-dev"), "https://github.com/Zenrac"))
em.add_field(name="Rapptz - Danny", value='[discord.py]({})'.format(
"https://github.com/Rapptz/discord.py/tree/rewrite"))
em.add_field(name="Ifran-dahir",
value='[Jikan]({})'.format("https://jikan.moe/"))
em.add_field(name="Sedmelluq", value='[Lavaplayer]({})'.format(
"https://github.com/sedmelluq/"))
em.add_field(name="Frederikam", value='[Lavalink]({})'.format(
"https://github.com/Frederikam/Lavalink"))
em.add_field(name="Devoxin", value='[Lavalink.py]({})'.format(
"https://github.com/Devoxin/Lavalink.py"))
em.add_field(name="Wolke & Akio",
value='[weeb.sh API]({})'.format("https://weeb.sh/"))
em.add_field(name="AndyTempel", value='[weeb.sh Wrapper]({})'.format(
"https://github.com/AndyTempel/weebapi"))
em.add_field(name="Dank Memer", value='[Meme-Server]({})'.format(
"https://github.com/DankMemer/meme-server"))
em.add_field(name="RickBot IMGGEN", value='[Meme-Server]({})'.format(
"https://services.is-going-to-rickroll.me/"))
em.add_field(name="AndyTempel", value='[KSoft.Si API]({})'.format(
"https://api.ksoft.si/"))
em.add_field(name="Sworder & Ota",
value='[arcadia-api]({})'.format("https://arcadia-api.xyz"))
em.add_field(name="LazyShpee", value='[iode]({})'.format(
"https://github.com/LazyShpee"))
em.add_field(name="Akio", value='[MTCL]({})'.format(
"https://mctl.io/"))
em.add_field(name="Peko", value='[{}]({})'.format(
get_str(ctx, "cmd-credits-watora-designer"), "http://lumino.sakura.ne.jp"))
em.add_field(name=get_str(ctx, "cmd-current-translation-written-by", can_owo=False),
value=get_str(ctx, "cmd-current-translation-author", can_owo=False), inline=False)
try:
await ctx.send(embed=em)
except discord.Forbidden:
await ctx.send(get_str(ctx, "need-embed-permission"))
@commands.command(aliases=["patchnotes", "changlogs", "update", "patchnote"])
@commands.cooldown(rate=1, per=1.5, type=commands.BucketType.user)
async def changelog(self, ctx):
"""
{command_prefix}changelog
{help}
"""
patchchannel = self.bot.get_channel(340263164505620483)
try:
if ctx.guild:
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
if settings.language == "french":
patchchannel = self.bot.get_channel(268492317164437506)
except KeyError:
pass
if not patchchannel:
e = discord.Embed(
description='See all changelogs on my official server!')
if not ctx.guild:
e.color = 0x71368a
else:
e.color = ctx.me.color
e.set_thumbnail(url=self.bot.user.avatar_url)
e.add_field(name='{}:'.format(get_str(ctx, "cmd-invitation-my-server")),
value='[World of Watora]({})'.format("https://discord.gg/ArJgTpM"))
return await ctx.send(embed=e)
async for lmsg in patchchannel.history(limit=2):
if lmsg.author.id == owner_id:
msg = lmsg
break
await ctx.send(msg.content)
@commands.command(aliases=["dons", "donation", "donate", "donating", "donators", "donator"])
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
async def don(self, ctx, *, text=None):
"""
{command_prefix}don
{command_prefix}don [text]
{command_prefix}don off
{help}
"""
settings = None
if text:
if not await is_basicpatron(self.bot, ctx.author):
return await ctx.send(embed=discord.Embed(description="Sorry, you have to be Patron to set a custom message!\n\n**[Patreon](https://www.patreon.com/watora)**"))
settings = await SettingsDB.get_instance().get_glob_settings()
if 'donators' not in settings.donation:
settings.donation['donators'] = {}
if text.lower() in ['stop', 'off', 'disable', 'empty', 'remove']:
if str(ctx.author.id) in settings.donation['donators']:
settings.donation['donators'].pop(str(ctx.author.id))
await SettingsDB.get_instance().set_glob_settings(settings)
await ctx.send('Message removed!')
else:
settings.donation['donators'][str(ctx.author.id)] = text
await SettingsDB.get_instance().set_glob_settings(settings)
await ctx.send('Message set!')
e = discord.Embed().set_footer(text=get_str(
ctx, "cmd-don-thx-in-advance", can_owo=False))
try:
e.colour = ctx.author.colour
except AttributeError:
pass
topdona = pbar = ''
if not settings: # Only 1 DB call
settings = await SettingsDB.get_instance().get_glob_settings()
if 'top' in settings.donation:
topdona = settings.donation['top']
if 'bar' in settings.donation:
pbar = settings.donation['bar']
e.description = f"**{datetime.now().strftime('%B %Y')}**"
e.add_field(name=get_str(ctx, "cmd-don-make-a-donation"), value="[**Paypal**]({})".format(
"https://www.paypal.me/watora") + "\n[**Patreon**]({})".format("https://www.patreon.com/watora"), inline=False)
donators = settings.donation.get('donators', {})
if donators:
desc = ""
for k, v in donators.items():
fetched_member = await is_basicpatron(self.bot, int(k), fetch=True)
if fetched_member:
tier = 2
if await is_patron(self.bot, int(k), resp=fetched_member):
tier = 5
if await is_lover(self.bot, int(k), resp=fetched_member):
tier = 10
username = fetched_member['user']['username'] + \
'#' + fetched_member['user']['discriminator']
text = format_mentions(v)[:100]
desc += f'`{username}` **${tier}/m** : {text}\n'
if desc:
e.add_field(name="Current patrons", value=desc)
if pbar:
if "," in pbar:
pbar = pbar.replace(",", ".")
prog_bar_str = ''
pbar = pbar.split("/")
max_v, min_v = pbar[1], pbar[0]
max_value = float(pbar[1])
min_value = float(pbar[0])
prog_bar_str = sweet_bar(min_value, max_value)
pbar = "`{}€/{}€` {}".format(min_v, max_v, prog_bar_str)
e.add_field(name=get_str(
ctx, "cmd-don-server-cost") + ' :', value=pbar)
prog_bar_str = ''
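            # If the amount raised (min_value) has already passed the current goal (max_value),
            # show a second bar toward the next server-cost tier listed in self.next_cost.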
if max_value < min_value:
max_value = [m for m in self.next_cost if float(m) > max_value]
if max_value:
prog_bar_str = sweet_bar(min_value, float(max_value[0]))
pbar = "`{}€/{}€` {}".format(min_v,
max_value[0], prog_bar_str)
e.add_field(name="Upgrade server cost :", value=pbar)
await ctx.send(embed=e)
@commands.command(aliases=["infoperms", "permissionsinfo", "infopermissions", "aboutpermissions"])
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
async def permsinfo(self, ctx):
"""
{command_prefix}permsinfo
{help}
"""
msg = get_str(ctx, "cmd-perms-info").format(
get_server_prefixes(ctx.bot, ctx.guild) if ctx.guild else "=")
try:
await ctx.author.send(msg)
except discord.HTTPException:
return await ctx.send(get_str(ctx, "cant-send-pm"))
try:
await ctx.message.add_reaction("☑")
except discord.Forbidden:
await ctx.send(get_str(ctx, "message-send-to-mp"))
@commands.command()
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
async def id(self, ctx, *, usr: discord.Member = None):
"""
{command_prefix}id [user]
{command_prefix}id
{help}
"""
if not usr:
await ctx.send(get_str(ctx, "cmd-id-your-id").format(ctx.author.mention, f"`{ctx.author.id}`"))
elif usr == self.bot.user:
await ctx.send(get_str(ctx, "cmd-id-my-id") + f" `{usr.id}`.")
else:
try:
if ctx.guild:
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
if settings.language == "french":
return await ctx.send(get_str(ctx, "cmd-id-user-id").format("'" if usr.name.lower()[0] in ["a", "e", "i", "u", "y", "o"] else "e ", f"`{usr.name}`", f"`{usr.id}`"))
except KeyError:
pass
await ctx.send(get_str(ctx, "cmd-id-user-id").format(f"`{usr.name}`", f"`{usr.id}`"))
@commands.command()
@commands.is_owner()
async def leaveserver(self, ctx, *, args):
"""
{command_prefix}leaveserver
Makes me leave a specified server.
"""
try:
target = await self.bot.safe_fetch('guild', int(args))
except ValueError:
target = None
if not target:
target = discord.utils.get(self.bot.guilds, name=args)
if not target:
return await ctx.send("Could not find this guild.")
await target.leave()
await ctx.send("I have left **{0.name}**... ({0.id})".format(target))
@commands.command()
@commands.is_owner()
async def pbar(self, ctx, *, bar):
"""
{command_prefix}pbar
Sets the progress bar in the donation message.
"""
settings = await SettingsDB.get_instance().get_glob_settings()
settings.donation['bar'] = bar
await SettingsDB.get_instance().set_glob_settings(settings)
try:
# try to update status
await self.bot.cogs['Update'].message_status(bypass=True)
except KeyError:
pass
await ctx.send(":ok_hand:")
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
@commands.command(aliases=["invite", "invitations"])
async def invitation(self, ctx):
"""
{command_prefix}invitation
{help}
"""
if self.bot.user.bot:
e = discord.Embed()
if not ctx.guild:
e.color = 0x71368a
else:
e.color = ctx.me.color
e.set_thumbnail(url=self.bot.user.avatar_url)
url = f"https://discordapp.com/api/oauth2/authorize?client_id={self.bot.user.id}&scope=bot"
if self.bot.user.id == 220644154177355777:
url += "&redirect_uri=https%3A%2F%2Fwatora.xyz%2F%3Finvited%3Dyes" # redirect uri
e.add_field(name='{}:'.format(get_str(ctx, "cmd-invitation-add-me")),
value='[{}]({})'.format(get_str(ctx, "cmd-invitation"), url), inline=False)
e.add_field(name='{}:'.format(get_str(ctx, "cmd-invitation-my-server")),
value='[World of Watora]({})'.format("https://discord.gg/ArJgTpM"))
await ctx.send(embed=e)
@commands.cooldown(rate=1, per=600.0, type=commands.BucketType.user)
@commands.command(aliases=["suggest", "idea"])
async def suggestion(self, ctx, *, content):
"""
{command_prefix}suggestion [text]
{help}
"""
if len(content.split(" ")) < 6 and not await is_basicpatron(self.bot, ctx.author):
ctx.command.reset_cooldown(ctx)
return await ctx.send(get_str(ctx, "cmd-suggestion-useless"))
e = discord.Embed(title='Suggestion', colour=0x738bd7)
msg = ctx.message
channel = 268495043235545088
e.set_author(name=str(
msg.author), icon_url=msg.author.avatar_url or msg.author.default_avatar_url)
e.description = content
e.timestamp = msg.created_at
if msg.guild:
e.add_field(name='Server', value='{0.name} (ID: {0.id})'.format(
msg.guild), inline=False)
e.add_field(name='Channel', value='{0} (ID: {0.id})'.format(
msg.channel), inline=False)
e.set_footer(text='Author ID: ' + str(msg.author.id))
        confirm_message = await ctx.send("Your suggestion **about Watora** is going to be sent to Watora's developer. Are you sure?\nWrite `yes` or `no`.\n```diff\n- Warning: Any kind of abuse will get your account blacklisted from the bot (meaning you will no longer be able to use Watora).\n+ Please write it in ENGLISH (or French) only```")
def check(m):
if m.author.bot or m.author != ctx.author:
return False
if m.channel != ctx.channel:
return False
if m.content and m.content.lower()[0] in "yn" or m.content.lower().startswith(get_server_prefixes(ctx.bot, ctx.guild)) or m.content.startswith(m.guild.me.mention) or m.content.lower().startswith('exit'):
return True
return False
try:
response_message = await self.bot.wait_for('message', timeout=30, check=check)
except asyncio.TimeoutError:
try:
await confirm_message.delete()
except discord.HTTPException:
pass
ctx.command.reset_cooldown(ctx)
return await ctx.send("Suggestion cancelled.", delete_after=30)
if response_message.content.lower().startswith('y'):
smsg = await self.bot.http.send_message(channel, content='', embed=e.to_dict())
msg_id = smsg['id']
await self.bot.http.add_reaction(channel, msg_id, "☑")
await self.bot.http.add_reaction(channel, msg_id, "❌")
await ctx.send(get_str(ctx, "cmd-suggestion-sent") + " :mailbox_with_mail:")
else:
ctx.command.reset_cooldown(ctx)
await ctx.send("Suggestion cancelled.", delete_after=30)
@commands.cooldown(rate=1, per=600.0, type=commands.BucketType.user)
@commands.command(aliases=["problem"])
async def bug(self, ctx, *, content):
"""
{command_prefix}bug [text]
{help}
"""
if len(content.split(" ")) < 6 and not await is_basicpatron(self.bot, ctx.author):
ctx.command.reset_cooldown(ctx)
return await ctx.send(get_str(ctx, "cmd-bug-useless"))
e = discord.Embed(title='Bug', colour=0x723be7)
msg = ctx.message
channel = 268495081202384896
e.set_author(name=str(
msg.author), icon_url=msg.author.avatar_url or msg.author.default_avatar_url)
e.description = content
e.timestamp = msg.created_at
if msg.guild:
e.add_field(name='Server', value='{0.name} (ID: {0.id})'.format(
msg.guild), inline=False)
e.add_field(name='Channel', value='{0} (ID: {0.id})'.format(
msg.channel), inline=False)
e.set_footer(text='Author ID: ' + str(msg.author.id))
        confirm_message = await ctx.send("Your bug report **about Watora** is going to be sent to Watora's developer. Are you sure?\nWrite `yes` or `no`.\n```diff\n- Warning: Any kind of abuse will get your account blacklisted from the bot (meaning you will no longer be able to use Watora).\n+ Please write it in ENGLISH (or French) only```")
def check(m):
if m.author.bot or m.author != ctx.author:
return False
if m.channel != ctx.channel:
return False
if m.content and m.content.lower()[0] in "yn" or m.content.lower().startswith(get_server_prefixes(ctx.bot, ctx.guild)) or m.content.startswith(m.guild.me.mention) or m.content.lower().startswith('exit'):
return True
return False
try:
response_message = await self.bot.wait_for('message', timeout=30, check=check)
except asyncio.TimeoutError:
try:
await confirm_message.delete()
except discord.HTTPException:
pass
ctx.command.reset_cooldown(ctx)
return await ctx.send("Report cancelled.", delete_after=30)
if response_message.content.lower().startswith('y'):
smsg = await self.bot.http.send_message(channel, content='', embed=e.to_dict())
await ctx.send(get_str(ctx, "cmd-bug-sent") + " :mailbox_with_mail:")
else:
ctx.command.reset_cooldown(ctx)
await ctx.send("Report cancelled.", delete_after=30)
@commands.cooldown(rate=1, per=600.0, type=commands.BucketType.user)
@commands.command(aliases=["avis"])
async def feedback(self, ctx, *, content):
"""
{command_prefix}feedback [text]
{help}
"""
if len(content.split(" ")) < 6 and not await is_basicpatron(self.bot, ctx.author):
ctx.command.reset_cooldown(ctx)
return await ctx.send(get_str(ctx, "cmd-feedback-useless"))
e = discord.Embed(title='Feedback', colour=0x2ecc71)
msg = ctx.message
channel = 346251537217093632
e.set_author(name=str(
msg.author), icon_url=msg.author.avatar_url or msg.author.default_avatar_url)
e.description = content
e.timestamp = msg.created_at
if msg.guild:
e.add_field(name='Server', value='{0.name} (ID: {0.id})'.format(
msg.guild), inline=False)
e.add_field(name='Channel', value='{0} (ID: {0.id})'.format(
msg.channel), inline=False)
e.set_footer(text='Author ID: ' + str(msg.author.id))
        confirm_message = await ctx.send("Your feedback **about Watora** is going to be sent to Watora's developer. Are you sure?\nWrite `yes` or `no`.\n```diff\n- Warning: Any kind of abuse will get your account blacklisted from the bot (meaning you will no longer be able to use Watora).\n+ Please write it in ENGLISH (or French) only```")
def check(m):
if m.author.bot or m.author != ctx.author:
return False
if m.channel != ctx.channel:
return False
if m.content and m.content.lower()[0] in "yn" or m.content.lower().startswith(get_server_prefixes(ctx.bot, ctx.guild)) or m.content.startswith(m.guild.me.mention) or m.content.lower().startswith('exit'):
return True
return False
try:
response_message = await self.bot.wait_for('message', timeout=30, check=check)
except asyncio.TimeoutError:
try:
await confirm_message.delete()
except discord.HTTPException:
pass
ctx.command.reset_cooldown(ctx)
return await ctx.send("Feedback cancelled.", delete_after=30)
if response_message.content.lower().startswith('y'):
smsg = await self.bot.http.send_message(channel, content='', embed=e.to_dict())
await ctx.send(get_str(ctx, "cmd-feedback-sent") + " :mailbox_with_mail:")
else:
ctx.command.reset_cooldown(ctx)
await ctx.send("Feedback cancelled.", delete_after=30)
@commands.command(aliases=["ver"])
async def version(self, ctx):
"""
{command_prefix}ver
{help}
"""
await ctx.send(get_str(ctx, "cmd-ver-current") + f" **{ver}**.")
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
@commands.command(aliases=["infoshards", "shardinfo", "shardsinfo", "status", "shardstatus", "shardsstatus"])
async def infoshard(self, ctx):
"""
{command_prefix}infoshard
{help}
"""
nshards = len(self.bot.shards)
msg = "```xl\nCurrently on {} shard{} with {} guilds (all: {}).\n\n".format(
nshards, "s" if nshards > 1 else "", len(self.bot.guilds), self.bot.guild_count)
for i, n in enumerate(list(self.bot.shards.keys())):
gshard = 0
for s in self.bot.guilds:
if s.shard_id == n:
gshard += 1
            msg += f"[{n}] : {gshard} guilds. (latency : {round(self.bot.latencies[i][1] * 1000, 2)} ms)\n"
msg += "```"
await ctx.send(msg)
@commands.cooldown(rate=1, per=10.0, type=commands.BucketType.user)
@commands.command(aliases=["langinfo", "infolang"])
async def infolangages(self, ctx):
"""
{command_prefix}infolangages
        Where are the most people using Watora?
"""
c = Counter(x.region for x in self.bot.guilds)
msg = "```xl\nCurrently on {} guild{} (all: {}).\n\n".format(len(
self.bot.guilds), "s" if len(self.bot.guilds) > 1 else "", self.bot.guild_count)
for (x, y) in c.most_common():
msg += f"[{x}] : {y}\n"
msg += "```"
await ctx.send(msg)
@commands.is_owner()
@commands.command(aliases=["whereis", "whatserver"])
async def whereare(self, ctx, *, id: int):
"""
        {command_prefix}whereare [user_id]
        Displays the list of servers someone shares with me.
"""
n = 1985
servers = []
msg = "{} is on :\n\n".format(id)
for s in self.bot.guilds:
if s.get_member(id):
servers.append(s)
for c, s in enumerate(servers, start=1):
msg += "**{}** : **{}** ({})\n".format(c, s.name, s.id)
if servers:
for i in range(0, len(msg), n):
await ctx.send(msg[i:i + n])
else:
await ctx.send("On 0 server.")
@commands.is_owner()
@commands.command(aliases=["getinvitations", "getinvitation"])
async def getinvite(self, ctx, *, id):
"""
{command_prefix}getinvite [guild_id]
        Gets the available invitations of a guild.
"""
try:
            target = await self.bot.safe_fetch('guild', int(id))
except ValueError:
target = None
msg = ""
if not target:
target = discord.utils.get(self.bot.guilds, name=id)
if not target:
return await ctx.send("**{}** guild not found.".format(id))
try:
for e in await target.invites():
msg += e.url + '\n'
await ctx.author.send("Invitation : {}".format(msg))
except discord.HTTPException:
await ctx.send(":x: missing permissions...")
@commands.is_owner()
@commands.command(aliases=["lastmessage", "lastmsg"])
async def lastmessages(self, ctx, nb: int, *, id: int):
"""
        {command_prefix}lastmessages [number] [channel_id]
Gets the last messages in a channel.
"""
msg = []
n = 1985
patchchannel = self.bot.get_channel(id)
if not patchchannel:
return
async for lmsg in patchchannel.history(limit=nb):
msg.append(f"[{lmsg.created_at}] {lmsg.author}/ {lmsg.content}")
if lmsg.attachments:
for att in lmsg.attachments:
msg.append(att.url)
msg.reverse()
msg = '\n\n'.join(msg)
for i in range(0, len(msg), n):
await ctx.send(msg[i:i + n])
@checks.has_permissions(manage_roles=True)
@commands.guild_only()
@commands.command(aliases=["giveroles"])
async def giverole(self, ctx, role: discord.Role, *, user: discord.Member):
"""
{command_prefix}giverole [role] [user]
{help}
"""
if not ctx.channel.permissions_for(ctx.me).manage_roles:
return await ctx.send(get_str(ctx, "need-manage-roles-permission"))
if role not in user.roles:
            if role.position >= ctx.author.top_role.position and ctx.author.id != owner_id and ctx.author is not ctx.guild.owner:
return await ctx.send(get_str(ctx, "role-not-enough-high"))
if role.position >= ctx.me.top_role.position:
return await ctx.send(get_str(ctx, "not-enough-permissions"))
await user.add_roles(role)
try:
await ctx.message.add_reaction("☑")
except discord.Forbidden:
await ctx.send(get_str(ctx, "cmd-giverole-add").format(f"**{user}**", f"`{role}`"))
else:
            if role.position >= ctx.author.top_role.position and ctx.author.id != owner_id and ctx.author is not ctx.guild.owner:
return await ctx.send(get_str(ctx, "role-not-enough-high"))
if role.position >= ctx.me.top_role.position:
return await ctx.send(get_str(ctx, "not-enough-permissions"))
await user.remove_roles(role)
await ctx.send(get_str(ctx, "cmd-giverole-remove").format(f"`{role}`", f"**{user}**"))
@commands.group(name="getrole", aliases=["getroles"])
@commands.guild_only()
async def _getrole(self, ctx, *, name):
"""
{command_prefix}getrole [role]
{command_prefix}getrole add [role]
{command_prefix}getrole remove [role]
{help}
"""
adding = None
if name.split(' ')[0].lower() in ['add', 'remove']:
adding = True if (name.split(' ')[0].lower() == 'add') else False
name = ' '.join(name.split(' ')[1:])
if not name:
return await self.bot.send_cmd_help(ctx)
role = self.bot.get_role(ctx, name)
if not role:
return await ctx.send(get_str(ctx, "cmd-joinclan-role-not-found").format(name))
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
if not settings.roles:
return await ctx.send(get_str(ctx, "cmd-getrole-no-role-available").format(f"`{get_server_prefixes(ctx.bot, ctx.guild)}setroles`"))
if role.id not in settings.roles and (ctx.author.id != owner_id):
return await ctx.send(get_str(ctx, "cmd-getrole-role-not-obtainable").format(f"`{get_server_prefixes(ctx.bot, ctx.guild)}setroles`"))
if [r for r in ctx.author.roles if r == role]:
if adding: # can be None
return
try:
await ctx.author.remove_roles([r for r in ctx.author.roles if r == role][0])
except discord.Forbidden:
return await ctx.send(get_str(ctx, "cmd-getrole-not-enough-perm").format(ctx.author.mention, f"`{role}`"))
await ctx.send(get_str(ctx, "cmd-getrole-remove-success").format(ctx.author.mention, f"`{role}`"))
else:
if adding is False: # can be None
return
try:
await ctx.author.add_roles(role)
try:
await ctx.message.add_reaction("☑")
except discord.Forbidden:
await ctx.send(get_str(ctx, "cmd-getrole-add-success").format(role))
except discord.Forbidden:
await ctx.send(get_str(ctx, "cmd-getrole-not-enough-perm-add").format(f"`{role}`"))
@commands.guild_only()
@commands.group(name="setrole", aliases=["setroles"], invoke_without_command=True)
async def _setrole(self, ctx, *, name):
"""
{command_prefix}setrole create [role_name]
{command_prefix}setrole delete [role_name]
{command_prefix}setrole list
{help}
"""
if not ctx.invoked_subcommand:
await ctx.invoke(self.___add, ctx=ctx, name=name)
@checks.has_permissions(manage_guild=True)
@_setrole.command(name="add", aliases=["+", "new", "create"])
async def ___add(self, ctx, *, name):
"""
{command_prefix}setrole create [role_name]
{help}
"""
role = self.bot.get_role(ctx, name)
if not role:
return await ctx.send(get_str(ctx, "cmd-joinclan-role-not-found").format(name))
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
if role.id not in settings.roles:
settings.roles.append(role.id)
await SettingsDB.get_instance().set_guild_settings(settings)
await ctx.send(get_str(ctx, "cmd-setrole-added"))
else:
await ctx.send(get_str(ctx, "cmd-setrole-already"))
@checks.has_permissions(manage_guild=True)
@_setrole.command(name="reset", aliases=["removeall"])
async def ___reset(self, ctx):
"""
{command_prefix}setrole reset
{help}
"""
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
settings.roles = []
await SettingsDB.get_instance().set_guild_settings(settings)
await ctx.send(get_str(ctx, "cmd-setrole-reset").format("`{}getrole`".format(get_server_prefixes(ctx.bot, ctx.guild))))
@checks.has_permissions(manage_guild=True)
@_setrole.command(name="remove", aliases=["delete", "-"])
async def ___delete(self, ctx, *, name):
"""
{command_prefix}setrole delete [role_name]
{help}
"""
role = self.bot.get_role(ctx, name)
if not role:
return await ctx.send(get_str(ctx, "cmd-joinclan-role-not-found").format(name))
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
if settings.roles:
if role.id in settings.roles:
settings.roles.remove(role.id)
await SettingsDB.get_instance().set_guild_settings(settings)
await ctx.send(get_str(ctx, "cmd-setrole-removed"))
else:
await ctx.send(get_str(ctx, "cmd-setrole-already-d"))
else:
await ctx.send(get_str(ctx, "cmd-getrole-no-role-available").format(f"`{get_server_prefixes(ctx.bot, ctx.guild)}setrole`"))
@_setrole.command(name="list", aliases=["all", "view", "now"])
async def ___list(self, ctx):
"""
{command_prefix}setrole list
{help}
"""
n = 0
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
msg = [get_str(ctx, "cmd-setrole-list")]
if settings.roles:
for clan in settings.roles:
roles = [r for r in ctx.guild.roles if r.id == clan]
if not roles:
settings.roles.remove(clan)
await SettingsDB.get_instance().set_guild_settings(settings)
continue
role = roles[0]
n += 1
msg.append("``{}`` {}\n".format(n, role.name))
if len(msg) == 1:
return await ctx.send(get_str(ctx, "cmd-getrole-no-role-available").format(f"`{get_server_prefixes(ctx.bot, ctx.guild)}setrole`"))
to_send = ""
for line in msg:
if len(to_send) + len(line) > 1980: # TODO find a better way to do this
await ctx.send(to_send) # This is ugly
to_send = ""
to_send += line
if to_send:
await ctx.send(to_send)
else:
await ctx.send(get_str(ctx, "cmd-getrole-no-role-available").format(f"`{get_server_prefixes(ctx.bot, ctx.guild)}setrole`"))
@commands.command(aliases=['redeem'])
@commands.cooldown(rate=1, per=2.0, type=commands.BucketType.user)
async def claim(self, ctx, guild_id: int = None):
"""
{command_prefix}claim (guild_id)
        Allows you to claim a guild.
"""
if guild_id:
guild = await self.bot.safe_fetch('guild', guild_id)
if not guild:
# TODO: Translations
                return await ctx.send("I didn't find this guild. Check the ID, or use the command in the guild without specifying an ID.")
else:
guild = ctx.guild
if not await is_patron(self.bot, ctx.author):
# TODO: Translations
return await ctx.send("You need to be at least Super Patron on my server to claim a server!")
settings = await SettingsDB.get_instance().get_glob_settings()
claimed = await self.bot.server_is_claimed(guild.id)
if claimed:
if int(claimed[0]) == ctx.author.id:
# TODO: Translations
return await ctx.send(f"This server is already claimed by yourself. Use `{get_server_prefixes(ctx.bot, guild)}unclaim` if you want to unclaim it!")
claimer = await self.bot.safe_fetch('member', int(claimed[0]), guild=guild) or claimed[0]
# TODO: Translations
await ctx.send(f"This server is already claimed by {claimer}.")
for k, m in settings.claim.items():
if str(guild.id) in m:
if not await is_patron(self.bot, int(k)):
settings.claim[k].pop(str(guild.id))
# TODO: Translations
        confirm_message = await ctx.send("Are you sure you want to claim **{}** (id: {})? You won't be able to unclaim it for 7 days! Type `yes` or `no`.".format(guild.name, guild.id))
def check(m):
if m.author.bot or m.author != ctx.author:
return False
if m.channel != ctx.channel:
return False
if m.content:
return True
return False
try:
response_message = await self.bot.wait_for('message', timeout=120, check=check)
except asyncio.TimeoutError:
try:
await confirm_message.delete()
except discord.HTTPException:
pass
return
if not response_message.content.lower().startswith('y'):
# TODO: Translations
return await ctx.send("Claim cancelled.")
if str(ctx.author.id) not in settings.claim:
settings.claim[str(ctx.author.id)] = {str(
guild.id): datetime.today().strftime("%d %b %Y")}
else:
max_claim = 2
if await is_lover(self.bot, ctx.author):
max_claim = 5
if ctx.author.id == owner_id:
max_claim = 9e40
if len(settings.claim[str(ctx.author.id)]) >= max_claim:
# TODO: Translations
                return await ctx.send("You have reached your maximum number of claimed servers ({}).\n"
                                      "You can unclaim one of your claimed servers by issuing `{}unclaim (guild_id)`\n"
                                      "To see your currently claimed servers, use the command `{}claimlist`".format(max_claim, get_server_prefixes(ctx.bot, guild), get_server_prefixes(ctx.bot, guild)))
settings.claim[str(ctx.author.id)][str(guild.id)
] = datetime.today().strftime("%d %b %Y")
# TODO: Translations
        await ctx.send('Server successfully claimed!')
await SettingsDB.get_instance().set_glob_settings(settings)
@commands.command(aliases=['unredeem'])
@commands.cooldown(rate=1, per=2.0, type=commands.BucketType.user)
async def unclaim(self, ctx, guild_id: int = None):
"""
{command_prefix}unclaim (guild_id)
        Allows you to unclaim a guild.
"""
if not guild_id:
guild_id = str(ctx.guild.id)
else:
# param is type int just to ensure that it can be converted to int easily thanks to discord.py
guild_id = str(guild_id)
if not await is_patron(self.bot, ctx.author):
# TODO: Translations
return await ctx.send("You need to be at least Super Patron on my server to claim/unclaim a server!")
settings = await SettingsDB.get_instance().get_glob_settings()
if str(ctx.author.id) in settings.claim:
if guild_id in settings.claim[str(ctx.author.id)]:
claimed_since = settings.claim[str(ctx.author.id)][guild_id]
datetime_date = datetime.strptime(claimed_since, '%d %b %Y')
since_claimed = (ctx.message.created_at - datetime_date).days
if (since_claimed < 7) and ctx.author.id != owner_id:
# TODO: Translations
                    return await ctx.send("Sorry, you're on cooldown! You'll be able to unclaim this server in `{}` days!".format(7 - since_claimed))
settings.claim[str(ctx.author.id)].pop(guild_id)
# TODO: Translations
                await ctx.send('Server successfully unclaimed!')
return await SettingsDB.get_instance().set_glob_settings(settings)
# TODO: Translations
await ctx.send('This server is not in your claimed servers...')
@commands.command(aliases=['redeemlist'])
@commands.cooldown(rate=1, per=2.0, type=commands.BucketType.user)
async def claimlist(self, ctx, *, member: discord.Member = None):
"""
{command_prefix}claimlist
        Displays the list of your claimed guilds.
"""
if not member:
member = ctx.author
if not await is_patron(self.bot, member):
# TODO: Translations
return await ctx.send("You need to be at least Super Patron on my server to claim/unclaim a server!")
settings = await SettingsDB.get_instance().get_glob_settings()
if (str(member.id) not in settings.claim) or not settings.claim[str(member.id)]:
# TODO: Translations
            return await ctx.send("You don't have any claimed servers. Start adding some by using `{}claim`".format(get_server_prefixes(ctx.bot, ctx.guild)))
desc = ''
for i, m in enumerate(settings.claim[str(member.id)].items(), start=1):
guild = await self.bot.safe_fetch('guild', int(m[0]))
desc += f'`{i}. `' + (('**' + guild.name + '** ')
if guild else '') + f'(`{m[0]}`) ' + f'({m[1]})\n'
embed = discord.Embed(description=desc)
embed.set_author(name=member.name, icon_url=member.avatar_url)
max_claim = 2
if await is_lover(self.bot, member):
max_claim = 5
if member.id == owner_id:
max_claim = 9e40
embed.set_footer(
# TODO: Translations
text=f"Used claim {len(settings.claim[str(member.id)])}/{max_claim}")
await ctx.send(embed=embed)
@commands.guild_only()
@commands.command(aliases=["guildparam", "servparam", "serversettings", "setting", "config", "guildsettings", "set", "sets"])
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
async def settings(self, ctx, *, guild=None):
"""
{command_prefix}settings
{help}
"""
if guild: # owner can see other's guild settings
if ctx.author.id == owner_id:
try:
guild = await self.bot.safe_fetch('guild', int(guild))
except ValueError:
return await ctx.send("Guild not found...")
if not guild:
return await ctx.send("Guild not found...")
else:
guild = ctx.guild
else:
guild = ctx.guild
settings = await SettingsDB.get_instance().get_guild_settings(guild.id)
welcome_channels = []
goodbye_channels = []
ignore_channels = []
autorolemsg = []
getrolemsg = []
djmsg = []
set_dj_roles = set_autoroles_msg = set_roles_msg = ""
for channel in guild.channels:
if str(channel.id) in settings.welcomes:
welcome_channels.append(f"#{channel.name}")
welcome = ', '.join(welcome_channels)
if welcome_channels == []:
welcome = "❌"
for channel in guild.channels:
if str(channel.id) in settings.goodbyes:
goodbye_channels.append(f"#{channel.name}")
goodbye = ', '.join(goodbye_channels)
if goodbye_channels == []:
goodbye = "❌"
cc = len(settings.customcommands)
dc = len(settings.disabledcommands)
clan = len(settings.clans)
lang = settings.language
for channel in guild.channels:
if str(channel.id) in settings.disabledchannels:
cmd_disabled = len(settings.disabledchannels[str(channel.id)])
opt = '({} command{})'.format(cmd_disabled, 's'
if cmd_disabled != 1 else '') if cmd_disabled != 0 else ''
ignore_channels.append(f"#{channel.name} {opt}")
ignore = ', '.join(ignore_channels)
if ignore_channels == []:
ignore = "❌"
if settings.bound:
cid = settings.bound
allid = [c.id for c in guild.channels]
if int(cid) in allid:
bind = [c.name for c in guild.channels if c.id == int(cid)][0]
else:
bind = False
else:
bind = False
if settings.autoroles:
for id in settings.autoroles:
role = guild.get_role(id)
if role:
autorolemsg.append(role.name)
set_autoroles_msg = ', '.join(autorolemsg)
if not set_autoroles_msg:
set_autoroles_msg = "❌"
else:
set_autoroles_msg = "❌"
if settings.roles:
for id in settings.roles:
role = guild.get_role(id)
if role:
getrolemsg.append(role.name)
set_roles_msg = ', '.join(getrolemsg)
if not set_roles_msg:
set_roles_msg = "❌"
else:
set_roles_msg = "❌"
if settings.djs:
if "all" in settings.djs:
set_dj_roles = "@\u200beveryone"
else:
for id in settings.djs:
role = guild.get_role(id)
if role:
djmsg.append(role.name)
set_dj_roles = ', '.join(djmsg)
if not set_dj_roles:
set_dj_roles = "❌"
else:
set_dj_roles = "❌"
vol = f"{settings.volume}%"
vote = f"{settings.vote}%"
if not settings.timer:
timer = get_str(ctx, 'music-autoleave-never')
else:
timer = f"{settings.timer} {get_str(ctx, 'cmd-nextep-seconds')}"
if settings.channel:
channel = guild.get_channel(settings.channel)
if channel:
np = f"#{channel}"
else:
np = "❌"
elif settings.channel is None:
np = "☑ Auto"
else:
np = "❌"
embed = discord.Embed()
embed.set_author(name=get_str(
ctx, "cmd-settings-title"), icon_url=guild.icon_url)
if not guild:
embed.color = 0x71368a
else:
embed.color = ctx.me.color
if not settings.blacklisted:
bl_users = 0
else:
message_bl = []
for l in settings.blacklisted:
m = discord.utils.find(lambda m: m.id == int(l), ctx.guild.roles) or await self.bot.safe_fetch('member', int(l), guild=ctx.guild)
if m:
message_bl.append(f"`{m}`")
if message_bl:
bl_users = ', '.join(message_bl)
else:
bl_users = 0
ac_desc_list = []
for key in settings.autosongs.keys():
channel = ctx.guild.get_channel(int(key))
if channel:
ac_desc_list.append(channel.name)
if ac_desc_list:
ac_desc = ', '.join(ac_desc_list)
else:
ac_desc = get_str(ctx, "music-plsettings-no")
struct = "*{} :* **{}**"
embed.description = get_str(
ctx, "cmd-help-support").format('[World of Watora](https://discord.gg/ArJgTpM)')
msg = []
names = ['guild-pref', 'language', 'owo']
values = [get_server_prefixes(ctx.bot, guild), lang, [get_str(
ctx, "music-plsettings-no"), get_str(ctx, "music-plsettings-yes")][settings.owo]]
for i, name in enumerate(names):
msg.append(struct.format(
get_str(ctx, f"cmd-settings-{name}"), values[i]))
embed.add_field(name=get_str(ctx, "cmd-settings-glob"),
value='\n'.join(msg))
msg = []
names = ['clans', 'autoroles', 'o-roles', 'dj-roles']
values = [clan, set_autoroles_msg, set_roles_msg, set_dj_roles]
for i, name in enumerate(names):
msg.append(struct.format(
get_str(ctx, f"cmd-settings-{name}"), values[i]))
embed.add_field(name=get_str(ctx, "cmd-userinfo-roles"),
value='\n'.join(msg), inline=False)
msg = []
names = ['cc', 'dc']
values = [cc, dc]
for i, name in enumerate(names):
msg.append(struct.format(
get_str(ctx, f"cmd-settings-{name}"), values[i]))
embed.add_field(name=get_str(ctx, "cmd-settings-commands"),
value='\n'.join(msg), inline=False)
msg = []
names = ['bluser']
values = [bl_users]
if bind:
names.append('bind')
values.append(bind)
else:
names.append('ic')
values.append(ignore)
for i, name in enumerate(names):
msg.append(struct.format(
get_str(ctx, f"cmd-settings-{name}"), values[i]))
embed.add_field(name=get_str(ctx, "cmd-settings-permissions"),
value='\n'.join(msg), inline=False)
msg = []
names = ['wm', 'gm']
values = [welcome, goodbye]
for i, name in enumerate(names):
msg.append(struct.format(
get_str(ctx, f"cmd-settings-{name}"), values[i]))
embed.add_field(name=get_str(ctx, "cmd-settings-messages"),
value='\n'.join(msg), inline=False)
msg = []
names = ['autoplay', 'lazy', 'lvc', 'dv', 'vote', 'ac', 'np']
values = [[get_str(ctx, "music-plsettings-no"), get_str(ctx, "music-plsettings-yes")][settings.autoplay], [get_str(
ctx, "music-plsettings-no"), get_str(ctx, "music-plsettings-yes")][settings.lazy], timer, vol, vote, ac_desc, np]
for i, name in enumerate(names):
msg.append(struct.format(
get_str(ctx, f"cmd-settings-{name}"), values[i]))
if settings.defaultnode:
member = ctx.guild.get_member(int(settings.defaultnode))
if member:
# TODO: Translations and move it
msg.append(struct.format(
"Default music node", f'Hosted by {member}'))
embed.add_field(name=get_str(ctx, "cmd-settings-player"),
value='\n'.join(msg), inline=False)
try:
await ctx.send(embed=embed)
except discord.Forbidden:
await ctx.send(get_str(ctx, "need-embed-permission"))
def setup(bot):
bot.add_cog(Useful(bot))
|
the-stack_0_24643
|
from util import load_dataset, dump_dataset, print_defaultdict
from collections import defaultdict
import numpy as np
from skmultilearn.model_selection import IterativeStratification
import logging
class Dataset(object):
def __init__(self, args, data):
self.args = args
self.data = data
def prepare(self):
raise NotImplementedError()
def split_data(self, verbose=True):
'''
        Split the dataset into separate training/validation/test datasets.
        :return: dict with "train", "dev", "test" and "unlabeled" sample lists
'''
params = self.args["split"]
labels = params["labels"]
np.random.shuffle(self.data)
labeled, unlabeled = self.separate_unlabeled_samples(labels)
if params["difficulty_based"]:
distribution = self.split_on_difficulty(labeled)
dataset = distribution["Easy"] + distribution["Medium"] + distribution["Hard"]
train, dev, test = self.split_stratified(dataset)
train += distribution["Various"]
else:
train, dev, test = self.split_stratified(labeled)
data_split = {
"train" : self.flatten_samples(train),
"dev" : self.flatten_samples(dev),
"test" : self.flatten_samples(test),
"unlabeled": self.flatten_samples(unlabeled)
}
if verbose:
for split in data_split:
if split != "unlabeled":
logging.info("Stats for the {} data split:".format(split))
self.compute_tag_distribution(data_split[split])
return data_split
def flatten_samples(self, dataset):
"""
Remove irrelevant fields from samples and
create separate distinct samples if necessary
(e.g. list<list<solutions>> -> list<solutions>)
"""
raise NotImplementedError()
def serialize(self, ds_path=None):
dump_dataset(ds_path, self.data)
def deserialize(self, ds_path):
self.data = load_dataset(ds_path)
@staticmethod
def split_check_relevant_labels(sample, labels):
if "tags" not in sample:
return False
for tag in sample["tags"]:
if tag in labels:
return True
return False
def separate_unlabeled_samples(self, labels):
labeled, unlabeled = [], []
for sample in self.data:
if self.split_check_relevant_labels(sample, labels):
labeled.append(sample)
else:
unlabeled.append(sample)
for sample in labeled:
targets = [1 if label in sample["tags"] else 0
for label in labels]
sample["Y"] = targets
return labeled, unlabeled
@staticmethod
def split_on_difficulty(data):
distribution = defaultdict(list)
def store_sample(sample, difficulty_class):
distribution[difficulty_class].append(sample)
sample["difficulty_class"] = difficulty_class
for sample in data:
if "difficulty" not in sample or \
sample["difficulty"] is None:
store_sample(sample, "Various")
continue
diff = int(sample["difficulty"])
if diff <= 1500:
store_sample(sample, "Easy")
elif 1500 < diff < 2500:
store_sample(sample, "Medium")
else:
store_sample(sample, "Hard")
return distribution
def split_stratified(self, dataset):
Y = np.array([sample["Y"] for sample in dataset])
dataset = np.array(dataset)
percentage = self.args["split"]["percentage"]
stratifier = IterativeStratification(n_splits=2, order=2,
sample_distribution_per_fold=[percentage, 1.0 - percentage])
remaining_idx, test_idx = next(stratifier.split(dataset, Y))
X_test = dataset[test_idx]
dataset = dataset[remaining_idx]
Y = Y[remaining_idx]
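        # Rescale the held-out fraction to the remaining pool: e.g. with percentage=0.2,
        # the test split above takes 20% of all samples, so the dev split is rescaled to
        # 0.2 / 0.8 = 0.25 of what remains in order to also end up at ~20% of the total.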
percentage = percentage / (1.0 - percentage)
stratifier = IterativeStratification(n_splits=2, order=2,
sample_distribution_per_fold=[percentage, 1.0 - percentage])
train_idx, dev_idx = next(stratifier.split(dataset, Y))
X_train = dataset[train_idx]
X_dev = dataset[dev_idx]
return list(X_train), list(X_dev), list(X_test)
def compute_tag_distribution(self, dataset):
label_distro = defaultdict(int)
        logging.info("Num. of samples: {}".format(len(dataset)))
for sample in dataset:
for tag in sample["tags"]:
if tag in self.args["split"]["labels"]:
label_distro[tag] += 1
print_defaultdict(label_distro)
return label_distro
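# Minimal usage sketch (hypothetical subclass; the label names and percentage value
# below are placeholders, while "split"/"labels"/"percentage"/"difficulty_based" are
# the keys this class actually reads):
#
#   class ProblemDataset(Dataset):
#       def prepare(self):
#           pass
#       def flatten_samples(self, dataset):
#           return list(dataset)
#
#   args = {"split": {"labels": ["math", "graphs"],
#                     "percentage": 0.2,
#                     "difficulty_based": False}}
#   splits = ProblemDataset(args, data=load_dataset("problems.json")).split_data()
#   # -> {"train": [...], "dev": [...], "test": [...], "unlabeled": [...]}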
|
the-stack_0_24646
|
import os
# For Flask-Cache. Allow testing with caching enabled.
CACHE = {
'CACHE_TYPE': 'simple'
}
# Version timestamp, which can be used
# to get frontend clients to reload for an update
VERSION = 'TEST'
# Maximum amount of logs to send
MAX_LOGS = 200
# Show only logs from within the past
LOGS_AFTER = {
'days': 1
}
# Where the database and keys files are located
DB_PATH = ':memory:' # Hack to prevent creating a dummy DB file
KEYS_FILE = os.devnull # Hack to prevent creating a dummy key file
LABELS_PATH = os.devnull # Hack to prevent creating a dummy labels file
SSE_REDIS_URL = 'redis://localhost:6379'
# For querying coordinates for locations
GOOGLE_PLACES_API_KEY = os.getenv('SCANMAP_TEST_GOOGLE_PLACES_API_KEY', '<BLANK>')
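# The key is read from the environment before the test run, e.g. (hypothetical value):
#   export SCANMAP_TEST_GOOGLE_PLACES_API_KEY="my-places-key"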
LOCATIONS = {
'ny': {
'LIVE': True,
'EXTRAS': {
'CAMERAS': 'data/cams/ny.json',
},
'MAP': {
'CENTER': [-73.96161699999999, 40.678806],
'ZOOM': 12
},
'SEARCH': {
'FILTER': 'NY',
'CENTER': [40.678806,-73.96161699999999],
'RADIUS': 15000
},
'INFO': ''
}
}
|
the-stack_0_24647
|
from setuptools import setup
VERSION = '2.0.1'
setup(
name="protopigeon",
version=VERSION,
author='Jon Parrott',
author_email='[email protected]',
maintainer='Jon Parrott / Cloud Sherpas',
maintainer_email='[email protected]',
description="A helper library for working with Google's protorpc and App Engine's datastore",
url='https://github.com/jonparrott/Protopigeon',
license='Apache License 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
],
packages=['protopigeon'],
install_requires=[
],
)
|
the-stack_0_24648
|
#!/usr/bin/env python
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Arizona Robotics Research Group,
# University of Arizona. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of Arizona nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import roslib
roslib.load_manifest('smart_arm_controller')
import rospy
from std_msgs.msg import Float64
joint_names = ('shoulder_pan_controller',
'shoulder_pitch_controller',
'elbow_flex_controller',
'wrist_roll_controller',
'left_finger_controller',
'right_finger_controller')
joint_commands = (0.0, 1.972222, -1.972222, 0.0, 0.0, 0.0)
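# joint_commands[i] is published to joint_names[i] + '/command' below, so the two tuples
# must stay index-aligned (values are joint positions, presumably radians for these
# Dynamixel-style position controllers).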
if __name__ == '__main__':
pubs = [rospy.Publisher(name + '/command', Float64) for name in joint_names]
rospy.init_node('make_cobra_pose', anonymous=True)
for i in range(len(pubs)):
pubs[i].publish(joint_commands[i])
|
the-stack_0_24649
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# the above line is to avoid 'SyntaxError: Non-UTF-8 code starting with' error
'''
Created on Feb 27, 2018
Course work:
@author: raja
Source:
https://stackoverflow.com/questions/2835559/parsing-values-from-a-json-file
https://stackoverflow.com/questions/40325980/how-is-the-vader-compound-polarity-score-calculated-in-python-nltk
https://stackoverflow.com/questions/tagged/vader
https://github.com/cjhutto/vaderSentiment
https://pypi.python.org/pypi/vaderSentiment/0.5
https://web.stanford.edu/~jurafsky/pubs/neutrality.pdf
https://stackoverflow.com/questions/370357/python-variable-scope-error
https://stackoverflow.com/questions/26045779/python-how-to-turn-all-numbers-in-a-list-into-their-negative-counterparts
http://www.nltk.org/_modules/nltk/sentiment/vader.html
https://github.com/cjhutto/vaderSentiment
Cite:
Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model for
Sentiment Analysis of Social Media Text. Eighth International Conference on
Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
'''
# Import necessary modules
import json
from pprint import pprint
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
EMOTION_METER_PAR = 5
WORD_LENGTH_PAR = 4
LINES_PAR = 5
def get_lines_in_array(filename):
with open(filename) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
#print(content)
return content
def is_invalid_content(content):
# if only 3 words, ignore it
if(len(content.split()) < WORD_LENGTH_PAR):
return True
return False
def sort_dict_reverse(d):
s = [(k, d[k]) for k in sorted(d, key=d.get, reverse=True)]
return s
sid = SentimentIntensityAnalyzer()
def get_negative_meter(sentence):
#print(sentence)
ss = sid.polarity_scores(sentence)
#print(type(ss['pos']))
#positive_meter = round((ss['pos'] * 10), 2)
negative_meter = round((ss['neg'] * 10), 2)
'''
for k in sorted(ss):
#print(ss)
print('{0}: {1}, '.format(k, ss[k]), end = '')
'''
#print('positive : {0}, negative : {1}'.format(positive_meter, negative_meter))
#print()
return negative_meter
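# For reference, SentimentIntensityAnalyzer.polarity_scores() returns a dict with
# 'neg', 'neu', 'pos' (each in [0, 1]) and 'compound' (in [-1, 1]); get_negative_meter()
# rescales the 'neg' share to a 0-10 meter, so e.g. {'neg': 0.35, ...} maps to 3.5.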
def main():
#print(data)
contents = get_lines_in_array('cons.txt')
#print(contents)
neg_meter_dict = {}
lines_counter= 0
total_contents = len(contents)
for x in range(total_contents):
if(is_invalid_content(contents[x])):
continue
emotion_meter = get_negative_meter(contents[x])
# if meter is less than 5, ignore them
if(emotion_meter > EMOTION_METER_PAR):
continue
neg_meter_dict[contents[x]] = emotion_meter
#print('---')
#print(contents[x])
    sorted_scores = sort_dict_reverse(neg_meter_dict)
    #print(neg_meter_dict)
    for k, v in sorted_scores:
lines_counter = lines_counter + 1
if(lines_counter > LINES_PAR):
continue
print(k, v)
if __name__ == '__main__':
main()
|
the-stack_0_24650
|
# Library imports
import os
# Project imports
from pyStorageBackend.generic_backend import GenericJsonBackend
from pyStorageBackend.file_lock import FileLock
class LocalJsonBackend(GenericJsonBackend):
def __init__(self, settings: dict):
"""
Local JSON implementation of GenericBackend. JSON file contents are loaded into memory when opened.
All read/write operations are in memory. Memory contents are written to json file when sync() is called
:param settings: Not used
"""
super(LocalJsonBackend, self).__init__(settings)
self._file_lock = FileLock(self.settings["path"])
def _read(self):
with open(self.settings["path"], "r") as fp:
return fp.read()
def _overwrite(self, contents):
# Concat temp file path, by appending .tmp
tempname = self.settings["path"] + '.tmp'
# Try to open the temp file, and write contents
try:
fp = open(tempname, "w+")
fp.write(contents)
# Catch any exception, delete the temp file then re-raise exception
except:
os.remove(tempname)
raise
# Write temporary file was successful, replace the real file with the temp one
else:
try:
fp.close()
                os.replace(tempname, self.settings["path"])
except Exception as e:
exit()
            # Make sure the file pointer gets closed
finally:
fp.close()
pass
def _set_lock(self):
return self._file_lock.acquire()
def _release_lock(self):
self._file_lock.release()
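# Construction sketch (only the "path" key is read directly by this class; the public
# read/write/sync entry points live on GenericJsonBackend and are not shown in this file):
#
#   backend = LocalJsonBackend({"path": "/tmp/store.json"})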
|
the-stack_0_24651
|
import contextlib
from pathlib import Path
from typing import NamedTuple
import ir_datasets
from ir_datasets.util import GzipExtract, DownloadConfig, _DownloadConfig
from ir_datasets.datasets.base import Dataset, YamlDocumentation
from ir_datasets.formats import TsvDocs, CLIRMatrixQueries, CLIRMatrixQrels
NAME = 'clirmatrix'
_logger = ir_datasets.log.easy()
QRELS_DEFS = {
6: "6",
5: "5",
4: "4",
3: "3",
2: "2",
1: "1",
0: "0",
}
def _init():
LANGS = ('af', 'als', 'am', 'an', 'ar', 'arz', 'ast', 'az', 'azb', 'ba', 'bar', 'be', 'bg', 'bn', 'bpy', 'br', 'bs', 'bug', 'ca', 'cdo', 'ce', 'ceb', 'ckb', 'cs', 'cv', 'cy', 'da', 'de', 'diq', 'el', 'eml', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fo', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'he', 'hi', 'hr', 'hsb', 'ht', 'hu', 'hy', 'ia', 'id', 'ilo', 'io', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'kn', 'ko', 'ku', 'ky', 'la', 'lb', 'li', 'lmo', 'lt', 'lv', 'mai', 'mg', 'mhr', 'min', 'mk', 'ml', 'mn', 'mr', 'mrj', 'ms', 'my', 'mzn', 'nap', 'nds', 'ne', 'new', 'nl', 'nn', 'no', 'oc', 'or', 'os', 'pa', 'pl', 'pms', 'pnb', 'ps', 'pt', 'qu', 'ro', 'ru', 'sa', 'sah', 'scn', 'sco', 'sd', 'sh', 'si', 'simple', 'sk', 'sl', 'sq', 'sr', 'su', 'sv', 'sw', 'szl', 'ta', 'te', 'tg', 'th', 'tl', 'tr', 'tt', 'uk', 'ur', 'uz', 'vec', 'vi', 'vo', 'wa', 'war', 'wuu', 'xmf', 'yi', 'yo', 'zh')
LANG_REGEX = '(' + '|'.join(LANGS) + ')'
MULTI8_LANGS = ('ar', 'de', 'en', 'es', 'fr', 'ja', 'ru', 'zh')
MULTI8_LANG_REGEX = '(' + '|'.join(MULTI8_LANGS) + ')'
base_path = ir_datasets.util.home_path()/NAME
def _dlc_init():
dlc = DownloadConfig.context(NAME, base_path)
clirmatrix_dlc = _DownloadConfig(dlc['downloads'].path(), parser='json')
return clirmatrix_dlc
_dlc = ir_datasets.util.Lazy(_dlc_init)
_docs_cache = {}
def _docs_initializer(lang_code):
if lang_code not in _docs_cache:
dlc = _dlc().context("clirmatrix_docs", base_path)
docs = TsvDocs(GzipExtract(dlc[f'docs/{lang_code}']), namespace=f'{NAME}/{lang_code}', lang=lang_code)
_docs_cache[lang_code] = docs
return _docs_cache[lang_code]
def _initializer(args, dlc_context=None):
docs_lang, queries_lang, split = args
docs = _docs_initializer(docs_lang)
components = [docs]
if queries_lang: # queries & split are optional
dlc = _dlc().context(dlc_context, base_path)
dlc_key = f'queries/{queries_lang}_{docs_lang}/{split}'
qrel_dlc = GzipExtract(dlc[dlc_key])
qrels = CLIRMatrixQrels(qrel_dlc, QRELS_DEFS)
queries = CLIRMatrixQueries(qrel_dlc, queries_lang)
components += [queries, qrels]
return Dataset(*components)
def _multi8_initializer(args):
return _initializer(args, 'clirmatrix_multi8')
def _bi139_base_initializer(args):
return _initializer(args, 'clirmatrix_bi139_base')
def _bi139_full_initializer(args):
return _initializer(args, 'clirmatrix_bi139_full')
def _corpus_initializer(args):
return _initializer((args[0], None, None))
documentation = YamlDocumentation(f'docs/{NAME}.yaml')
base = Dataset(documentation('_'))
ir_datasets.registry.register(NAME, base)
ir_datasets.registry.register_pattern(rf'^{NAME}/{LANG_REGEX}$', _corpus_initializer)
ir_datasets.registry.register_pattern(rf'^{NAME}/{MULTI8_LANG_REGEX}/multi8/{MULTI8_LANG_REGEX}/(train|dev|test1|test2)$', _multi8_initializer)
ir_datasets.registry.register_pattern(rf'^{NAME}/{LANG_REGEX}/bi139-base/{LANG_REGEX}/(train|dev|test1|test2)$', _bi139_base_initializer)
ir_datasets.registry.register_pattern(rf'^{NAME}/{LANG_REGEX}/bi139-full/{LANG_REGEX}/(train|dev|test1|test2)$', _bi139_full_initializer)
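    # Registered dataset IDs therefore look like, for example:
    #   "clirmatrix/de"                            (docs only)
    #   "clirmatrix/en/bi139-base/de/train"        (docs lang / collection / query lang / split)
    #   "clirmatrix/en/multi8/fr/test1"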
return base
collection = _init()
|
the-stack_0_24652
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
import topi
import unittest
from tvm.contrib.nvcc import have_fp16, have_int8
from tvm.contrib import nvcc
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
def test_cuda_vectorize_add():
num_thread = 8
def check_cuda(dtype, n, lanes):
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
if dtype == "float16" and not have_fp16(tvm.gpu(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
if dtype == "int8" and not have_int8(tvm.gpu(0).compute_version):
print("skip because gpu does not support int8")
return
A = te.placeholder((n,), name='A', dtype="%sx%d" % (dtype, lanes))
B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name='B')
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, bx)
s[B].bind(xi, tx)
fun = tvm.build(s, [A, B], "cuda")
ctx = tvm.gpu(0)
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(
np.random.uniform(size=(n, lanes)))
c = tvm.nd.empty((n,), B.dtype, ctx)
fun(a, c)
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1)
check_cuda("float32", 64, 2)
check_cuda("float32", 64, 3)
check_cuda("float32", 64, 4)
check_cuda("int8", 64, 2)
check_cuda("int8", 64, 3)
check_cuda("int8", 64, 4)
check_cuda("uint8", 64, 2)
check_cuda("uint8", 64, 3)
check_cuda("uint8", 64, 4)
check_cuda("float16", 64, 2)
check_cuda("float16", 64, 4)
check_cuda("float16", 64, 6)
check_cuda("float16", 64, 8)
def test_cuda_multiply_add():
num_thread = 8
def check_cuda(dtype, n, lanes):
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
if dtype == "int8" and not have_int8(tvm.gpu(0).compute_version):
print("skip because gpu does not support int8")
return
A = te.placeholder((n,), name='A', dtype="%sx%d" % (dtype, lanes))
B = te.placeholder((n,), name='B', dtype="%sx%d" % (dtype, lanes))
C = te.placeholder((n,), name='C', dtype="int32")
D = te.compute((n,),
lambda i: tvm.tir.call_pure_extern("int32", "__dp4a", A[i], B[i], C[i]), name='D')
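        # __dp4a is CUDA's 4-way int8 dot-product-accumulate intrinsic: D[i] = dot(A[i], B[i]) + C[i],
        # which is exactly what the numpy reference (np_d) below computes.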
s = te.create_schedule(D.op)
xo, xi = s[D].split(D.op.axis[0], factor=num_thread)
s[D].bind(xo, bx)
s[D].bind(xi, tx)
fun = tvm.build(s, [A, B, C, D], "cuda")
np_a = np.random.randint(low=-128, high=127, size=(n,lanes))
np_b = np.random.randint(low=-128, high=127, size=(n,lanes))
np_c = np.random.randint(low=0, high=127, size=(n,))
np_d = [sum(x * y) + z for x, y, z in zip(np_a, np_b, np_c)]
ctx = tvm.gpu(0)
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(np_a)
b = tvm.nd.empty((n,), B.dtype, ctx).copyfrom(np_b)
c = tvm.nd.empty((n,), C.dtype, ctx).copyfrom(np_c)
d = tvm.nd.empty((n,), D.dtype, ctx)
fun(a, b, c, d)
tvm.testing.assert_allclose(d.asnumpy(), np_d)
check_cuda("int8", 64, 4)
def test_cuda_vectorize_load():
num_thread = 8
def check_cuda(dtype, n, lanes):
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
ctx = tvm.gpu(0)
A = te.placeholder((n,), name='A', dtype="%sx%d" % (dtype, lanes))
B = te.compute((n,), lambda i: A[i], name='B')
s = te.create_schedule(B.op)
block, thread = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(block, bx)
s[B].bind(thread, tx)
fun = tvm.build(s, [A, B], "cuda", name="vector_load")
np_a = np.random.randint(low=-128, high=127, size=(n,lanes))
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(np_a)
b = tvm.nd.empty((n,), B.dtype, ctx)
fun(a,b)
tvm.testing.assert_allclose(a.asnumpy(), b.asnumpy())
check_cuda("int8", 64, 2)
check_cuda("int8", 64, 3)
check_cuda("int8", 64, 4)
check_cuda("int8", 64, 8)
check_cuda("int8", 64, 16)
def test_cuda_make_int8():
def check_cuda(n, value, lanes):
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
dtype = 'int8'
ctx = tvm.gpu(0)
A = te.compute((n, lanes), lambda i,j: tvm.tir.const(value, dtype=dtype))
s = te.create_schedule(A.op)
y, x = s[A].op.axis
s[A].vectorize(x)
s[A].bind(y, bx)
fun = tvm.build(s, [A], "cuda", name="make_int8x4")
np_a = np.full((n, lanes), value, dtype=dtype)
a = tvm.nd.empty(np_a.shape, dtype, ctx)
fun(a)
np.testing.assert_equal(a.asnumpy(), np_a)
check_cuda(64, 0xAB, 4)
check_cuda(64, 0, 4)
check_cuda(64, -3, 4)
check_cuda(64, 0xAB, 3)
check_cuda(64, 0, 3)
check_cuda(64, -3, 3)
check_cuda(64, 0xAB, 2)
check_cuda(64, 0, 2)
check_cuda(64, -3, 2)
def test_cuda_inf_nan():
target = 'cuda'
def check_inf_nan(ctx, n, value, dtype):
A = te.placeholder((n,), name='A', dtype=dtype)
inf_value = tvm.tir.const(value, dtype=dtype)
C = te.compute((n,), lambda i: inf_value, name='C')
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], tx)
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, ctx)
c = tvm.nd.empty((n,), A.dtype, ctx)
# Only need to test compiling here
fun(a, c)
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
ctx = tvm.context(target, 0)
check_inf_nan(ctx, 1, -float('inf'), 'float32')
check_inf_nan(ctx, 1, -float('inf'), 'float64')
check_inf_nan(ctx, 1, float('inf'), 'float32')
check_inf_nan(ctx, 1, float('inf'), 'float64')
check_inf_nan(ctx, 1, float('nan'), 'float32')
check_inf_nan(ctx, 1, float('nan'), 'float64')
def test_cuda_shuffle():
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
idxm = tvm.tir.indexmod
a = te.placeholder((64, ), 'int32')
b = te.placeholder((64, ), 'int32')
c = te.compute((64, ), lambda x: a[x] + b[x - idxm(x, 4) + (3 - idxm(x, 4))])
sch = te.create_schedule(c.op)
x = c.op.axis[0]
xo, xi = sch[c].split(x, 4)
thrx = te.thread_axis("threadIdx.x")
sch[c].bind(xo, thrx)
sch[c].vectorize(xi)
def MyVectorize():
def vectorizer(op):
if op.for_type == tvm.tir.For.Vectorized:
four = tvm.tir.const(4, 'int32')
idx = tvm.tir.Ramp(thrx.var * four, tvm.tir.const(1, 'int32'), 4)
all_ones = tvm.tir.const(1, 'int32x4')
store = op.body
value = store.value
new_a = tvm.tir.Load('int32x4', value.a.buffer_var, idx, all_ones)
bs, ids = [], []
for i in range(4):
bs.append(tvm.tir.Load('int32', value.b.buffer_var, thrx.var * four + tvm.tir.const(i, 'int32')))
ids.append(tvm.tir.const(3 - i, 'int32'))
new_b = tvm.tir.Shuffle(bs, ids)
return tvm.tir.Store(store.buffer_var, new_a + new_b, idx, all_ones)
return None
def _transform(f, *_):
return f.with_body(
tvm.tir.stmt_functor.ir_transform(f.body, None, vectorizer, ['tir.For']))
return tvm.tir.transform.prim_func_pass(_transform, opt_level=0, name="MyVectorize")
with tvm.transform.PassContext(config={"tir.add_lower_pass": [(1, MyVectorize())]}):
module = tvm.build(sch, [a, b, c], target='cuda')
a_ = np.array(list(range(64)), dtype='int32')
b_ = np.array((list(range(4))[::-1]) * 16, dtype='int32')
c_ = np.zeros((64, ), dtype='int32')
ref = a_ + np.array((list(range(4))) * 16, dtype='int32')
nda, ndb, ndc = [tvm.nd.array(i, tvm.gpu(0)) for i in [a_, b_, c_]]
module(nda, ndb, ndc)
tvm.testing.assert_allclose(ndc.asnumpy(), ref)
def test_crossthread_reduction1():
def check(device):
ctx = tvm.context(device, 0)
if not ctx.exist or not tvm.runtime.enabled(device):
print("skip because", device, "is not enabled..")
return
n = te.var("n")
m = te.var("m")
A = te.placeholder((n, m), name='A')
k = te.reduce_axis((0, m), "m")
B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
def sched(nthd):
s = te.create_schedule(B.op)
ko, _ = s[B].split(B.op.reduce_axis[0], nparts=nthd)
s[B].bind(ko, te.thread_axis("threadIdx.x"))
s[B].bind(B.op.axis[0], te.thread_axis("blockIdx.x"))
func = tvm.build(s, [A, B], device)
return func
def verify(nthd):
func = sched(nthd)
nn = 3
# checks three typical cases
vals = [nthd-1, nthd, nthd+1]
for kk in [x for x in vals]:
size = (nn, kk)
a = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), ctx)
func(a, b)
tvm.testing.assert_allclose(b.asnumpy(), \
np.sum(a.asnumpy(), axis=1), rtol=1e-3)
verify(16)
verify(32)
verify(64)
check("cuda")
check("rocm")
def test_crossthread_reduction2():
def check(device):
ctx = tvm.context(device, 0)
if not ctx.exist or not tvm.runtime.enabled(device):
print("skip because", device, "is not enabled..")
return
n = te.var("n")
k0 = te.var("k0")
k1 = te.var("k1")
A = te.placeholder((n, k0, k1), name='A')
k0 = te.reduce_axis((0, k0), "k0")
k1 = te.reduce_axis((0, k1), "k1")
B = te.compute((n,), lambda i: te.sum(A[i, k0, k1], axis=(k0, k1)), name="B")
def sched(nthdx, nthdy):
s = te.create_schedule(B.op)
k0o, _ = s[B].split(B.op.reduce_axis[0], nparts=nthdx)
k1o, _ = s[B].split(B.op.reduce_axis[1], nparts=nthdy)
s[B].bind(k0o, te.thread_axis("threadIdx.x"))
s[B].bind(k1o, te.thread_axis("threadIdx.y"))
s[B].bind(B.op.axis[0], te.thread_axis("blockIdx.x"))
func = tvm.build(s, [A, B], device)
return func
def verify(nthdx, nthdy):
func = sched(nthdx, nthdy)
nn = 3
# checks three typical cases
vx = [nthdx-1, nthdx, nthdx+1]
vy = [nthdy-1, nthdy, nthdy+1]
for kk0, kk1 in [(x, y) for x in vx for y in vy]:
size = (nn, kk0, kk1)
a = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), ctx)
func(a, b)
tvm.testing.assert_allclose(b.asnumpy(), \
np.sum(a.asnumpy(), axis=(1, 2)), rtol=1e-3)
verify(16, 16)
verify(32, 32)
verify(16, 32)
verify(32, 16)
check("cuda")
check("rocm")
def test_cuda_reduction_binding():
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
k = te.reduce_axis((0, 32), 'k')
A = te.placeholder((96, 32), name='A')
B = te.compute( (96,), lambda m:
te.sum(A[m, k], axis=k),
name='B')
s = te.create_schedule(B.op)
s[B].reorder(B.op.reduce_axis[0], B.op.axis[0])
mo, _ = s[B].split(B.op.axis[0], 32)
s[B].bind(mo, te.thread_axis("blockIdx.x"))
fcuda = tvm.build(s, [A, B], "cuda")
def test_rfactor_predicates():
def check(device):
ctx = tvm.context(device, 0)
if not ctx.exist or not tvm.runtime.enabled(device):
print("skip because", device, "is not enabled..")
return
n = te.reduce_axis((0, 129), 'n')
A = te.placeholder((129,), name='A')
B = te.compute( (1, ), lambda b:
te.sum(A[n],
axis=n),
name='B'
)
s = te.create_schedule(B.op)
_, ni = s[B].split(s[B].op.reduce_axis[0], factor=8)
BF = s.rfactor(B, ni, 0)
s[B].set_store_predicate(tx.var.equal(0))
s[B].bind(s[B].op.reduce_axis[0], tx)
s[B].bind(s[B].op.axis[0], bx)
s[BF].compute_at(s[B], s[B].op.axis[0])
_, noi = s[BF].split(s[BF].op.reduce_axis[0], factor=2)
BF2 = s.rfactor(BF, noi, 0)
s[BF].bind(s[BF].op.axis[0], tx)
s[BF2].compute_at(s[BF], s[BF].op.axis[1])
fcuda = tvm.build(s, [A, B], device)
check("cuda")
check("rocm")
@unittest.skipIf(not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"), "skip because cuda is not enabled..")
def test_cuda_const_float_to_half():
    # This import is required so that nvcc is used to perform the code gen;
    # otherwise the code gen is done by nvrtc.
from tvm import autotvm
shape = (2, 3, 4)
a = te.placeholder(shape, dtype='float16', name='a')
b = tvm.tir.const(0.5, dtype='float16')
c = te.compute(shape, lambda i, j, k: a[i, j, k] > b, name='c')
s = te.create_schedule(c.op)
axes = [axis for axis in c.op.axis]
fused = s[c].fuse(*axes)
bx, tx = s[c].split(fused, factor=64)
s[c].bind(bx, te.thread_axis('blockIdx.x'))
s[c].bind(tx, te.thread_axis('threadIdx.x'))
func = tvm.build(s, [a, c], 'cuda')
ctx = tvm.gpu(0)
a_np = np.random.uniform(size=shape).astype(a.dtype)
c_np = np.zeros(shape=shape, dtype=c.dtype)
a = tvm.nd.array(a_np, ctx)
c = tvm.nd.array(c_np, ctx)
func(a, c)
np.testing.assert_equal(c.asnumpy(), a_np > b.value)
def test_cuda_reduction():
def check(device, dtype, m=32, n=32):
ctx = tvm.context(device, 0)
if not ctx.exist or not tvm.runtime.enabled(device):
print("skip because", device, "is not enabled..")
return
if dtype == "float16" and not have_fp16(ctx.compute_version):
print("Skip because gpu does not have fp16 support")
return
a = te.placeholder((m, n), name="a", dtype=dtype)
b = te.placeholder((m, n), name="b", dtype=dtype)
c = a + b
d = a * b
e = topi.elemwise_sum([c, d])
g = topi.sum(e)
with tvm.target.create(device):
sg = topi.cuda.schedule_reduce(g)
func = tvm.build(sg, [a, b, g], device)
a_np = np.random.uniform(size=(m, n)).astype(a.dtype)
b_np = np.random.uniform(size=(m, n)).astype(b.dtype)
g_np = np.sum(np.add(a_np * b_np, a_np + b_np))
a_nd = tvm.nd.array(a_np, ctx)
b_nd = tvm.nd.array(b_np, ctx)
g_nd = tvm.nd.array(np.zeros(g_np.shape, dtype=g_np.dtype), ctx)
func(a_nd, b_nd, g_nd)
tvm.testing.assert_allclose(g_nd.asnumpy(), g_np, rtol=1e-3)
check("cuda", "float32")
check("rocm", "float32")
check("cuda", "float16")
def test_cuda_mix_threaded_and_normal_reduction():
def check(device, dtype, m=32, n=32):
ctx = tvm.context(device, 0)
if not ctx.exist or not tvm.runtime.enabled(device):
print("skip because", device, "is not enabled..")
return
if dtype == "float16" and not have_fp16(ctx.compute_version):
print("Skip because gpu does not have fp16 support")
return
a = tvm.te.placeholder((m, n), name="a", dtype=dtype)
b = topi.sum(a)
with tvm.target.create(device):
sb = tvm.te.create_schedule(b.op)
i, _ = b.op.reduce_axis
sb[b].bind(i, tvm.te.thread_axis("threadIdx.x"))
func = tvm.build(sb, [a, b], device)
a_np = np.random.uniform(size=(m, n)).astype(a.dtype)
b_np = np.sum(a_np)
a_nd = tvm.nd.array(a_np, ctx)
b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), ctx)
func(a_nd, b_nd)
tvm.testing.assert_allclose(b_nd.asnumpy(), b_np, rtol=1e-3)
check("cuda", "float32")
check("rocm", "float32")
check("cuda", "float16")
def test_cuda_floordiv_with_vectorization():
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
with tvm.target.cuda():
# B[i] = A[floordiv(i, k)]
n = 256
k = 37
A = te.placeholder((n,), name='A')
B = te.compute((n,), lambda i: A[tvm.tir.floordiv(i, k)], name='B')
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], nparts=1)
xio, xii = s[B].split(xi, factor=4)
s[B].vectorize(xii)
s[B].bind(xo, bx)
s[B].bind(xio, tx)
func = tvm.build(s, [A, B], 'cuda')
ctx = tvm.gpu(0)
a_np = np.random.uniform(size=(n,)).astype(A.dtype)
b_np = np.array([a_np[i//k] for i in range(0, n)])
a_nd = tvm.nd.array(a_np, ctx)
b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), ctx)
func(a_nd, b_nd)
tvm.testing.assert_allclose(b_nd.asnumpy(), b_np, rtol=1e-3)
def test_vectorized_casts():
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
def check(t0, t1):
if (t0 == "float16" or t1 == "float16") and not have_fp16(tvm.gpu(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
# compute
n = 128
A = te.placeholder((n,), dtype=t0, name='A')
B = te.placeholder((n,), dtype=t1, name='B')
C = te.compute((n,), lambda i: A[i] + topi.cast(B[i], A.dtype), name='C')
# schedule
s = tvm.te.create_schedule(C.op)
ob, ib = s[C].split(s[C].op.axis[0], nparts=32)
_, iib = s[C].split(ib, factor=4)
s[C].vectorize(iib)
s[C].bind(ob, tx)
func = tvm.build(s, [A, B, C], "cuda")
# correctness
ctx = tvm.gpu(0)
low, high = (0, 20) if t0.startswith('u') or t1.startswith('u') else (-10, 10)
a_np = np.random.randint(low, high, size=n).astype(A.dtype)
b_np = np.random.randint(low, high, size=n).astype(B.dtype)
c_np = (a_np + b_np).astype(A.dtype)
a_nd = tvm.nd.array(a_np, ctx)
b_nd = tvm.nd.array(b_np, ctx)
c_nd = tvm.nd.array(np.zeros(c_np.shape, dtype=c_np.dtype), ctx)
func(a_nd, b_nd, c_nd)
tvm.testing.assert_allclose(c_nd.asnumpy(), c_np, rtol=1e-3)
def skip(t0, t1):
if t0 == t1:
return True
        # Casts where both types are among {float16, uint8, int8} are not exercised here.
skip_set = {"float16", "uint8", "int8"}
if t0 in skip_set and t1 in skip_set:
return True
return False
types = ["float16", "float32", "int8", "uint8", "int16", "uint16", "int32", "uint32"]
for t0, t1 in [(x, y) for x in types for y in types if not skip(x, y)]:
check(t0, t1)
def sched(B):
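    # For the n = 128 tests below this yields one block (io -> bx), 32 threads
    # (iio -> tx) and a 4-wide vectorized inner axis (iiii) per thread.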
s = te.create_schedule(B.op)
io, ii = s[B].split(s[B].op.axis[0], nparts=1)
iio, iii = s[B].split(ii, nparts=32)
_, iiii = s[B].split(iii, factor=4)
s[B].vectorize(iiii)
s[B].bind(io, bx)
s[B].bind(iio, tx)
return s
def test_vectorized_intrin1():
test_funcs = [
(tvm.tir.floor, lambda x : np.floor(x)),
(tvm.tir.ceil, lambda x : np.ceil(x)),
(tvm.tir.trunc, lambda x : np.trunc(x)),
(tvm.tir.abs, lambda x : np.fabs(x)),
(tvm.tir.round, lambda x : np.round(x)),
(tvm.tir.exp, lambda x : np.exp(x)),
(tvm.tir.exp2, lambda x : np.exp2(x)),
(tvm.tir.exp10, lambda x : np.power(10,x)),
(tvm.tir.log, lambda x : np.log(x)),
(tvm.tir.log2, lambda x : np.log2(x)),
(tvm.tir.log10, lambda x : np.log10(x)),
(tvm.tir.tan, lambda x : np.tan(x)),
(tvm.tir.cos, lambda x : np.cos(x)),
(tvm.tir.cosh, lambda x : np.cosh(x)),
(tvm.tir.sin, lambda x : np.sin(x)),
(tvm.tir.sinh, lambda x : np.sinh(x)),
(tvm.tir.atan, lambda x : np.arctan(x)),
(tvm.tir.tanh, lambda x : np.tanh(x)),
(tvm.tir.sqrt, lambda x : np.sqrt(x)),
]
def run_test(tvm_intrin, np_func, dtype):
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
if dtype == "float16" and not have_fp16(tvm.gpu(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
        # This set of intrinsics does not support fp16 yet.
skip_set = {tvm.tir.abs,
tvm.tir.round,
tvm.tir.tan,
tvm.tir.atan,
tvm.tir.tanh,
tvm.tir.cosh,
tvm.tir.sinh}
if dtype == "float16" and tvm_intrin in skip_set:
print("Skip because '{0}' does not support fp16 yet".format(tvm_intrin.__name__))
return
n = 128
A = te.placeholder((n,), dtype=dtype, name='A')
B = te.compute((n,), lambda *i: tvm_intrin(A(*i)), name='B')
s = sched(B)
f = tvm.build(s, [A, B], "cuda")
ctx = tvm.gpu(0)
a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(shape=(n,)).astype(A.dtype), ctx)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), np_func(a.asnumpy()), atol=1e-3, rtol=1e-3)
for func in test_funcs:
run_test(*func, "float32")
run_test(*func, "float16")
def test_vectorized_intrin2(dtype="float32"):
c2 = tvm.tir.const(2, dtype=dtype)
test_funcs = [
(tvm.tir.power, lambda x : np.power(x, 2.0)),
(tvm.tir.fmod, lambda x : np.fmod(x, 2.0))
]
def run_test(tvm_intrin, np_func):
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
n = 128
A = te.placeholder((n,), dtype=dtype, name='A')
B = te.compute((n,), lambda i: tvm_intrin(A[i], c2), name='B')
s = sched(B)
f = tvm.build(s, [A, B], "cuda")
ctx = tvm.gpu(0)
a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(shape=(n,)).astype(A.dtype), ctx)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), np_func(a.asnumpy()), atol=1e-3, rtol=1e-3)
for func in test_funcs:
run_test(*func)
def test_vectorized_popcount():
def ref_popcount(x):
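        # x & -x isolates the lowest set bit, so each iteration clears one bit;
        # e.g. x = 13 (0b1101) takes three iterations, giving cnt = 3.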
cnt = 0
while x:
x -= x & -x
cnt += 1
return cnt
def run_test(dtype):
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
n = 128
A = te.placeholder((n,), dtype=dtype, name='A')
B = te.compute((n,), lambda i: tvm.tir.popcount(A[i]), name='B')
s = sched(B)
f = tvm.build(s, [A, B], "cuda")
ctx = tvm.gpu(0)
a = tvm.nd.array(np.random.randint(0, 100000, size=n).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(shape=(n,)).astype(B.dtype), ctx)
f(a, b)
ref = np.vectorize(ref_popcount)(a.asnumpy())
tvm.testing.assert_allclose(b.asnumpy(), ref)
run_test("uint32")
run_test("uint64")
def test_cuda_vectorize_load_permute_pad():
def check_cuda(dtype, n, l, padding, lanes):
if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
if dtype == "float16" and not have_fp16(tvm.gpu(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
ctx = tvm.gpu(0)
A = tvm.te.placeholder((n, l), name='A', dtype=dtype)
B = tvm.te.compute((n // lanes, l + 2 * padding, lanes),
lambda i, j, k: tvm.te.if_then_else(
tvm.te.any(j < padding, j >= l + padding),
tvm.runtime.convert(0).astype(dtype), A[i * lanes + k, j - padding]),
name='B')
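        # B regroups every `lanes` consecutive rows of A into the innermost axis and
        # zero-pads the length-l axis by `padding` on each side (cf. the numpy reference below).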
s = te.create_schedule(B.op)
block, thread, vectorize = s[B].op.axis
s[B].bind(block, bx)
s[B].bind(thread, tx)
s[B].vectorize(vectorize)
fun = tvm.build(s, [A, B], "cuda", name="vector_load_permute_pad")
np_a = np.random.randint(
low=-128, high=127, size=(n, l)).astype(A.dtype)
a = tvm.nd.empty((n, l), A.dtype, ctx).copyfrom(np_a)
b = tvm.nd.empty((n // lanes, l + padding * 2, lanes), B.dtype, ctx)
fun(a, b)
np_a_reshape = np_a.reshape(n // lanes, lanes, l).transpose(0, 2, 1)
ref = np.pad(np_a_reshape, ((0, 0), (padding, padding),
(0, 0)), mode='constant', constant_values=0)
tvm.testing.assert_allclose(b.asnumpy(), ref)
check_cuda("int8", 64, 16, 3, 2)
check_cuda("uint8", 64, 16, 3, 2)
check_cuda("int8", 64, 16, 3, 4)
check_cuda("uint8", 64, 16, 3, 4)
check_cuda("int32", 64, 16, 3, 4)
check_cuda("float16", 64, 16, 3, 4)
check_cuda("float32", 64, 16, 3, 4)
if __name__ == "__main__":
test_cuda_vectorize_add()
test_cuda_multiply_add()
test_cuda_vectorize_load()
test_cuda_make_int8()
test_cuda_inf_nan()
test_cuda_shuffle()
test_vectorized_casts()
test_cuda_reduction_binding()
test_crossthread_reduction1()
test_crossthread_reduction2()
test_rfactor_predicates()
test_cuda_const_float_to_half()
test_cuda_reduction()
test_cuda_mix_threaded_and_normal_reduction()
test_cuda_floordiv_with_vectorization()
test_vectorized_intrin1()
test_vectorized_intrin2()
test_vectorized_popcount()
test_cuda_vectorize_load_permute_pad()
|
the-stack_0_24653
|
import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
def read_text(file_name: str):
return open(os.path.join(file_name)).read()
setuptools.setup(
name="neointerface", # This is the name of the package
version="3.1.2", # The initial release version
author="Alexey Kuznetsov, Julian West", # Full name of the authors
description="A Python interface to use the Neo4j graph database",
    long_description=long_description, # Long description read from the readme file
long_description_content_type="text/markdown",
packages=setuptools.find_packages(exclude=["tests", "utils"]), # List of all python modules to be installed
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
], # Information to filter the project on PyPi website
license=read_text("LICENSE"),
python_requires='>=3.6', # Minimum version requirement of the package
# package_dir={'':''}, # Directory of the source code of the package
install_requires=["numpy==1.19.5", "pandas==1.1.5", "neo4j==4.4.0", "requests==2.25.1"] # Install other dependencies if any
)
|
the-stack_0_24654
|
from collections import OrderedDict
import numpy as np
from gym.spaces import Dict , Box
import math
import os
import torch
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
from metaworld.envs.mujoco.utils.rotation import euler2quat
from pyquaternion import Quaternion
import cv2
class SawyerBlockEnv(SawyerXYZEnv):
def __init__(
self,
obj_low=None,
obj_high=None,
random_init=False,
tasks = [{'goal': np.array([0.1, 0.8, 0.2]), 'obj_init_pos':np.array([0, 0.6, 0.02]), 'obj_init_angle': 0.3}],
goal_low=None,
goal_high=None,
hand_init_pos = (0, 0.6, 0.0),
liftThresh = 0.04,
rewMode = 'orig',
rotMode='fixed',
low_dim=False,
hand_only=False,
problem="rand",
**kwargs
):
#self.quick_init(locals())
hand_low=(-0.2, 0.4, 0.0)
hand_high=(0.2, 0.8, 0.05)
obj_low=(-0.3, 0.4, 0.1)
obj_high=(0.3, 0.8, 0.3)
SawyerXYZEnv.__init__(
self,
frame_skip=5,
action_scale=1./10,
hand_low=hand_low,
hand_high=hand_high,
model_name=self.model_name,
**kwargs
)
if obj_low is None:
obj_low = self.hand_low
if goal_low is None:
goal_low = self.hand_low
if obj_high is None:
obj_high = self.hand_high
if goal_high is None:
goal_high = self.hand_high
self.epcount = 0
self.epsucc = []
self.problem = problem
self.low_dim = low_dim
self.hand_only = hand_only
self.random_init = random_init
self.liftThresh = liftThresh
self.max_path_length = 100
self.tasks = tasks
self.num_tasks = len(tasks)
self.rewMode = rewMode
self.rotMode = rotMode
self.randomize = True
self.hand_init_pos = np.array(hand_init_pos)
if rotMode == 'fixed':
self.action_space = Box(
np.array([-1, -1, -1, -1]),
np.array([1, 1, 1, 1]),
)
elif rotMode == 'rotz':
self.action_rot_scale = 1./50
self.action_space = Box(
np.array([-1, -1, -1, -np.pi, -1]),
np.array([1, 1, 1, np.pi, 1]),
)
elif rotMode == 'quat':
self.action_space = Box(
np.array([-1, -1, -1, 0, -1, -1, -1, -1]),
np.array([1, 1, 1, 2*np.pi, 1, 1, 1, 1]),
)
else:
self.action_space = Box(
np.array([-1, -1, -1, -np.pi/2, -np.pi/2, 0, -1]),
np.array([1, 1, 1, np.pi/2, np.pi/2, np.pi*2, 1]),
)
self.hand_and_obj_space = Box(
np.hstack((self.hand_low, obj_low)),
np.hstack((self.hand_high, obj_high)),
)
self.goal_space = Box(goal_low, goal_high)
self.observation_space = Box(0, 1.0, (64,64,6))
def get_site_pos(self, siteName):
_id = self.model.site_names.index(siteName)
return self.data.site_xpos[_id].copy()
@property
def model_name(self):
path = os.environ['ASSETS_PATH']
return path + "sawyer_xyz/sawyer_multiobject.xml"
def step(self, action):
if self.rotMode == 'euler':
action_ = np.zeros(7)
action_[:3] = action[:3]
action_[3:] = euler2quat(action[3:6])
self.set_xyz_action_rot(action_)
elif self.rotMode == 'fixed':
self.set_xyz_action(action[:3])
elif self.rotMode == 'rotz':
self.set_xyz_action_rotz(action[:4])
else:
self.set_xyz_action_rot(action[:7])
self.do_simulation([action[-1], -action[-1]])
ob = None
ob = self._get_obs()
reward = self.compute_reward()
self.curr_path_length +=1
if self.curr_path_length == self.max_path_length:
self._reset_hand()
done = True
else:
done = False
return ob, reward, done, {'pos': ob, 'hand': self.get_endeff_pos(), 'success':self.is_goal()}
def _get_obs(self):
im = self.sim.render(64, 64, camera_name="agentview")
obs = np.concatenate([im, self.goalim], 2)
return obs
def _set_obj_xyz(self, pos):
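        # Assumes the first 9 qpos entries belong to the arm/gripper joints and that each
        # block then contributes two planar (x, y) DOFs, hence the 9 + 2 * targetobj offset.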
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
start_id = 9 + self.targetobj*2
qpos[start_id:(start_id+2)] = pos.copy()
qvel[start_id:(start_id+2)] = 0
self.set_state(qpos, qvel)
def render(self, mode=""):
return self.sim.render(64, 64, camera_name="agentview")
def sample_goal(self):
start_id = 9 + self.targetobj*2
qpos = self.data.qpos.flat.copy()
ogpos = qpos[start_id:(start_id+2)]
self._reset_hand(goal=True)
goal_pos = np.random.uniform(
-0.3,
0.3,
size=(2,),
)
if (self.problem == "1"):
goal_pos = ogpos.copy()
goal_pos[1] += 0.15
elif (self.problem == "2"):
goal_pos = ogpos.copy()
goal_pos[0] += 0.15
self._state_goal = goal_pos
self._set_obj_xyz(goal_pos)
if self.problem == "2":
start_id2 = 9 + self.targetobj2*2
qpos = self.data.qpos.flat.copy()
ogpos2 = qpos[start_id2:(start_id2+2)]
goal_pos2 = ogpos2.copy()
goal_pos2[1] += 0.15
self._state_goal2 = goal_pos2
pl = self.targetobj
self.targetobj = self.targetobj2
self._set_obj_xyz(goal_pos2)
self.targetobj = pl
if not self.low_dim:
self.goalim = self.sim.render(64, 64, camera_name="agentview")
self._reset_hand()
self._set_obj_xyz(ogpos)
if self.problem == "2":
pl = self.targetobj
self.targetobj = self.targetobj2
self._set_obj_xyz(ogpos2)
self.targetobj = pl
def reset_model(self):
self._reset_hand()
buffer_dis = 0.04
block_pos = None
for i in range(3):
self.targetobj = i
init_pos = np.random.uniform(
-0.2,
0.2,
size=(2,),
)
if (self.problem == "1"):
init_pos[1] = 0.0
init_pos[0] = -0.15 + (0.15 * i)
elif (self.problem == "2"):
if i == 0:
init_pos[0] = 0.0
init_pos[1] = 0
if i == 1:
init_pos[0] = 0.2
init_pos[1] = -0.2
if i == 2:
init_pos[0] = 0.0
init_pos[1] = 0.15
self.obj_init_pos = init_pos
self._set_obj_xyz(self.obj_init_pos)
for _ in range(100):
self.do_simulation([0.0, 0.0])
if not self.randomize:
self.targetobj = 0
else:
self.targetobj = np.random.randint(3)
if self.problem == "2":
self.targetobj = 0
self.targetobj2 = 2
self.sample_goal()
place = self.targetobj
self.curr_path_length = 0
self.epcount += 1
o = self._get_obs()
#Can try changing this
return o
def _reset_hand(self, goal=False):
pos = self.hand_init_pos.copy()
if (self.problem == "1"):
if not goal:
pos[1] -= 0.3
else:
if self.targetobj == 0:
pos[0] = -0.2
elif self.targetobj == 1:
pos[0] = -0.0
else:
pos[0] = 0.2
pos[1] += 0.15
elif (self.problem == "2"):
if not goal:
pos[0] = -0.15
pos[1] = 0.50
else:
pos[0] = 0.1
pos[1] = 0.9
for _ in range(10):
self.data.set_mocap_pos('mocap', pos)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
self.do_simulation([-1,1], self.frame_skip)
rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger)/2
self.pickCompleted = False
def compute_reward(self):
start_id = 9 + self.targetobj*2
qpos = self.data.qpos.flat.copy()
ogpos = qpos[start_id:(start_id+2)]
dist = np.linalg.norm(ogpos - self._state_goal)
dist2 = 0
if self.problem == "2":
start_id2 = 9 + self.targetobj2*2
ogpos2 = qpos[start_id2:(start_id2+2)]
dist2 = np.linalg.norm(ogpos2 - self._state_goal2)
return - (dist + dist2)
def is_goal(self):
start_id = 9 + self.targetobj*2
qpos = self.data.qpos.flat.copy()
ogpos = qpos[start_id:(start_id+2)]
dist = np.linalg.norm(ogpos - self._state_goal)
dist2 = 0
if self.problem == "2":
start_id2 = 9 + self.targetobj2*2
ogpos2 = qpos[start_id2:(start_id2+2)]
dist2 = np.linalg.norm(ogpos2 - self._state_goal2)
if (dist < 0.1) and (dist2 < 0.1):
return 1
else:
return 0
if (dist < 0.08):
return 1
else:
return 0
|
the-stack_0_24655
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@created: 26.06.20
@author: felix
"""
from collections import defaultdict
if __name__ == '__main__':
d = defaultdict(list)
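    # For each of the m query words, print the 1-based positions at which it occurs
    # among the n stored words, or -1 if it does not occur.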
n, m = [int(i.strip()) for i in input().split()]
for i in range(n):
d['n'].append(input())
for i in range(m):
d['m'].append(input())
for val in d['m']:
if val in d['n']:
print(' '.join([str(i) for i, k in enumerate(d["n"], start=1) if k == val]))
else:
print(-1)
continue
|
the-stack_0_24656
|
# ADAPTED FROM https://github.com/openai/gym-http-api
import requests
import six.moves.urllib.parse as urlparse
import json
import os
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Client(object):
"""
Gym client to interface with gym_http_server
"""
def __init__(self, remote_base):
self.remote_base = remote_base
self.session = requests.Session()
self.session.headers.update({'Content-type': 'application/json'})
self.instance_id = None
def _parse_server_error_or_raise_for_status(self, resp):
j = {}
try:
j = resp.json()
except:
# Most likely json parse failed because of network error, not server error (server
# sends its errors in json). Don't let parse exception go up, but rather raise default
# error.
resp.raise_for_status()
if resp.status_code != 200 and "message" in j: # descriptive message from server side
raise ServerError(message=j["message"], status_code=resp.status_code)
resp.raise_for_status()
return j
def _post_request(self, route, data):
url = urlparse.urljoin(self.remote_base, route)
logger.info("POST {}\n{}".format(url, json.dumps(data)))
resp = self.session.post(urlparse.urljoin(self.remote_base, route),
data=json.dumps(data))
return self._parse_server_error_or_raise_for_status(resp)
def _get_request(self, route):
url = urlparse.urljoin(self.remote_base, route)
logger.info("GET {}".format(url))
resp = self.session.get(url)
return self._parse_server_error_or_raise_for_status(resp)
def env_create(self, token):
env_id = "Gait"
route = '/v1/envs/'
data = {'env_id': env_id,
'token': token}
resp = self._post_request(route, data)
self.instance_id = resp['instance_id']
self.env_monitor_start("tmp", force=True)
return self.env_reset()
def env_reset(self):
route = '/v1/envs/{}/reset/'.format(self.instance_id)
resp = self._post_request(route, None)
observation = resp['observation']
return observation
def env_step(self, action, render=False):
route = '/v1/envs/{}/step/'.format(self.instance_id)
data = {'action': action, 'render': render}
resp = self._post_request(route, data)
observation = resp['observation']
reward = resp['reward']
done = resp['done']
info = resp['info']
return [observation, reward, done, info]
def env_monitor_start(self, directory,
force=False, resume=False, video_callable=False):
route = '/v1/envs/{}/monitor/start/'.format(self.instance_id)
data = {'directory': directory,
'force': force,
'resume': resume,
'video_callable': video_callable}
self._post_request(route, data)
def submit(self):
route = '/v1/envs/{}/monitor/close/'.format(self.instance_id)
result = self._post_request(route, None)
if result['reward']:
print("Your total reward from this submission: %f" % result['reward'])
else:
print("There was an error in your submission. Please contact administrators.")
route = '/v1/envs/{}/close/'.format(self.instance_id)
self.env_close()
def env_close(self):
route = '/v1/envs/{}/close/'.format(self.instance_id)
self._post_request(route, None)
class ServerError(Exception):
def __init__(self, message, status_code=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
|
the-stack_0_24657
|
""" WASI API.
These are some of the WASI api functions implemented in python.
See also: https://wasi.dev
"""
import time
import os
import logging
import stat
import struct
from ... import ir
ESUCCESS = 0
E2BIG = 1
EACCES = 2
EADDRINUSE = 3
EADDRNOTAVAIL = 4
EAFNOSUPPORT = 5
EAGAIN = 6
EALREADY = 7
EBADF = 8
EBADMSG = 9
EBUSY = 10
ECANCELED = 11
ECHILD = 12
ECONNABORTED = 13
ECONNREFUSED = 14
ECONNRESET = 15
EDEADLK = 16
EDESTADDRREQ = 17
EDOM = 18
EDQUOT = 19
EEXIST = 20
EFAULT = 21
EFBIG = 22
EHOSTUNREACH = 23
EIDRM = 24
EILSEQ = 25
EINPROGRESS = 26
EINTR = 27
EINVAL = 28
EIO = 29
EISCONN = 30
EISDIR = 31
ELOOP = 32
EMFILE = 33
EMLINK = 34
EMSGSIZE = 35
EMULTIHOP = 36
PREOPENTYPE_DIR = 0
FILETYPE_BLOCK_DEVICE = 1
FILETYPE_CHARACTER_DEVICE = 2
FILETYPE_DIRECTORY = 3
FILETYPE_REGULAR_FILE = 4
FDFLAG_APPEND = 0x1
FDFLAG_DSYNC = 0x2
FDFLAG_NONBLOCK = 0x4
FDFLAG_RSYNC = 0x8
FDFLAG_SYNC = 0x10
class ProcExit(RuntimeError):
"""WASI process exit.
Having an exception for this allows catching it, and cleanly shutting down.
"""
def __init__(self, exit_code):
super().__init__()
self.exit_code = exit_code
def encode_strings(items):
"""Encode a sequence of strings into a binary blob
with zero terminated bytes and a list with offsets
into this blob.
"""
blob = bytearray()
offsets = []
for item in items:
offset = len(blob)
data = item.encode("utf8") + bytes([0])
blob.extend(data)
offsets.append(offset)
return blob, offsets
class WasiApi:
logger = logging.getLogger("wasi")
def __init__(self, args):
self._instance = None
self._trace_calls = True
self._args = ["prog1337.wasm"] + args
self._encoded_args = encode_strings(self._args)
self._env = {}
env_entries = ["{}={}".format(k, v) for k, v in self._env.items()]
self._encoded_env = encode_strings(env_entries)
# 0 -> stdin
# 1 -> stdout
# 2 -> stderr
self._available_fd = {
3: ".",
}
self._next_fd = 7
def _write_mem_u8(self, address: int, value: int):
self._write_mem_fmt(address, "<B", value)
def _write_mem_u16(self, address: int, value: int):
self._write_mem_fmt(address, "<H", value)
def _write_mem_u32(self, address: int, value: int):
self._write_mem_fmt(address, "<I", value)
def _write_mem_u64(self, address: int, value: int):
self._write_mem_fmt(address, "<Q", value)
def _write_mem_fmt(self, address: int, fmt: str, value: int):
data = struct.pack(fmt, value)
self._write_mem_data(address, data)
def _write_mem_data(self, address, data: bytes):
memory = self._instance.exports["memory"]
memory.write(address, data)
def _read_mem_u16(self, address: int) -> int:
return self._read_mem_fmt(address, "<H")
def _read_mem_u32(self, address: int) -> int:
return self._read_mem_fmt(address, "<I")
def _read_mem_u64(self, address: int) -> int:
return self._read_mem_fmt(address, "<Q")
def _read_mem_fmt(self, address: int, fmt: str):
size = struct.calcsize(fmt)
data = self._read_mem_data(address, size)
return struct.unpack(fmt, data)[0]
def _read_mem_data(self, address: int, size: int) -> bytes:
memory = self._instance.exports["memory"]
return memory.read(address, size)
def _read_string(self, address: int, size: int) -> str:
data = self._read_mem_data(address, size)
return data.decode("utf8")
def _trace(self, txt):
""" Trace execution. """
if self._trace_calls:
self.logger.debug(txt)
def args_sizes_get(
self, argc_ptr: ir.i32, argv_buf_size_ptr: ir.i32
) -> ir.i32:
"""Get sizes of arguments passed to the WASI program."""
self._trace(
"args_sizes_get(argc_ptr={}, {})".format(
argc_ptr, argv_buf_size_ptr
)
)
blob, offsets = self._encoded_args
argc = len(offsets)
buf_size = len(blob)
self._write_mem_u32(argc_ptr, argc)
self._write_mem_u32(argv_buf_size_ptr, buf_size)
return ESUCCESS
def args_get(self, argv: ir.i32, argv_buf: ir.i32) -> ir.i32:
self._trace("args_get(argv={}, {})".format(argv, argv_buf))
blob, offsets = self._encoded_args
# Table with pointers:
for nr, offset in enumerate(offsets):
address = argv_buf + offset
self._write_mem_u32(argv + 4 * nr, address)
# Actual buffer with the data:
self._write_mem_data(argv_buf, blob)
return ESUCCESS
def clock_time_get(
self, id: ir.i32, precision: ir.i64, timestamp_ptr: ir.i32
) -> ir.i32:
self._trace("clock_time_get(id={})".format(id))
nanos = int(time.time() * 1e9)
self._write_mem_u64(timestamp_ptr, nanos)
return ESUCCESS
def environ_sizes_get(
self, environc_ptr: ir.i32, environ_buf_size_ptr: ir.i32
) -> ir.i32:
self._trace(
"environ_sizes_get(environc_ptr={}, environ_buf_size_ptr={})".format(
environc_ptr, environ_buf_size_ptr
)
)
blob, offsets = self._encoded_env
self._write_mem_u32(environc_ptr, len(offsets))
self._write_mem_u32(environ_buf_size_ptr, len(blob))
return ESUCCESS
def environ_get(self, environ: ir.i32, environ_buf: ir.i32) -> ir.i32:
self._trace(
"environ_get(environ={}, environ_buf={})".format(
environ, environ_buf
)
)
blob, offsets = self._encoded_env
# Fill table with pointers:
for nr, offset in enumerate(offsets):
address = environ_buf + offset
self._write_mem_u32(environ + 4 * nr, address)
# Fill memory buffer:
self._write_mem_data(environ_buf, blob)
return ESUCCESS
def fd_prestat_get(self, fd: ir.i32, buf: ir.i32) -> ir.i32:
self._trace("fd_prestat_get(fd={}, buf={})".format(fd, buf))
if fd in self._available_fd:
path_str = self._available_fd[fd]
if isinstance(path_str, str):
self._write_mem_u32(buf, PREOPENTYPE_DIR)
name_len = len(path_str)
self._write_mem_u32(buf + 4, name_len)
return ESUCCESS
else:
return EBADF
else:
return EBADF
def fd_prestat_dir_name(
self, fd: ir.i32, path: ir.i32, path_len: ir.i32
) -> ir.i32:
self._trace(
"fd_prestat_dir_name(fd={}, {}, {})".format(fd, path, path_len)
)
if fd in self._available_fd:
path_str = self._available_fd[fd]
if isinstance(path_str, str):
assert path_len == len(path_str)
path_data = path_str.encode("ascii")
self._write_mem_data(path, path_data)
return ESUCCESS
else:
return EBADF
else:
return EBADF
def fd_fdstat_get(self, fd: ir.i32, fdstat: ir.i32) -> ir.i32:
self._trace("fd_fdstat_get(fd={}, fdstat={})".format(fd, fdstat))
# assert fd == 0
if fd in self._available_fd:
path_str = self._available_fd[fd]
if isinstance(path_str, str):
fs_filetype = FILETYPE_DIRECTORY
fs_flags = 0
fs_rights_base = 2 ** 64 - 1
fs_rights_inheriting = 2 ** 64 - 1
self._write_mem_u8(fdstat, fs_filetype)
self._write_mem_u16(fdstat + 2, fs_flags)
self._write_mem_u64(fdstat + 8, fs_rights_base)
self._write_mem_u64(fdstat + 16, fs_rights_inheriting)
return ESUCCESS
else:
return EBADF
else:
return EBADF
def fd_fdstat_set_flags(self, fd: ir.i32, fdflags: ir.i32) -> ir.i32:
self.logger.error(
"fd_fdstat_set_flags(fd=%s, fdflags=%s)", fd, fdflags
)
return EACCES
def fd_close(self, fd: ir.i32) -> ir.i32:
self._trace("fd_close(fd={})".format(fd))
if fd in self._available_fd:
py_f = self._available_fd[fd]
if hasattr(py_f, "close"):
py_f.close()
return ESUCCESS
else:
return EBADF
else:
return EACCES
def _read_iovecs(self, iovs_address: int, iovs_len: int):
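        # Each (c)iovec entry is 8 bytes in wasm32: a u32 buffer address followed by a u32 length.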
iovs = []
for nr in range(iovs_len):
ciovec_address = iovs_address + nr * 8
buf_addr = self._read_mem_u32(ciovec_address)
buf_size = self._read_mem_u32(ciovec_address + 4)
iovs.append((buf_addr, buf_size))
return iovs
def fd_read(
self, fd: ir.i32, iovs: ir.i32, iovs_len: ir.i32, nread: ir.i32
) -> ir.i32:
self._trace(
"fd_read(fd={}, iovs={}, iovs_len={})".format(fd, iovs, iovs_len)
)
# Check fd:
if fd == 0:
py_f = fd
elif fd in self._available_fd:
py_f = self._available_fd[fd]
if not hasattr(py_f, "read"):
return EACCES
else:
return EBADF
total_bytes = 0
for buf_addr, buf_size in self._read_iovecs(iovs, iovs_len):
if isinstance(py_f, int):
data = os.read(py_f, buf_size)
else:
data = py_f.read(buf_size)
assert len(data) <= buf_size
self._write_mem_data(buf_addr, data)
total_bytes += len(data)
self._write_mem_u32(nread, total_bytes)
return ESUCCESS
def fd_seek(
self, fd: ir.i32, offset: ir.i64, whence: ir.i32, new_pos_ptr: ir.i32
) -> ir.i32:
self._trace(
"fd_seek(fd={}, offset={}, whence={})".format(fd, offset, whence)
)
if fd in self._available_fd:
py_f = self._available_fd[fd]
if hasattr(py_f, "seek"):
if whence not in [0, 1, 2]:
return EINVAL
new_pos = py_f.seek(offset, whence)
self._write_mem_u64(new_pos_ptr, new_pos)
return ESUCCESS
else:
return EACCES
else:
return EBADF
def fd_write(
self, fd: ir.i32, iovs: ir.i32, iovs_len: ir.i32, n_written: ir.i32
) -> ir.i32:
self._trace("fd_write(fd={}, iovs_len={})".format(fd, iovs_len))
# Check fd:
if fd == 1 or fd == 2:
# Assume standard output / stderr:
py_f = fd
elif fd in self._available_fd:
py_f = self._available_fd[fd]
if not hasattr(py_f, "write"):
return EACCES
else:
return EACCES
total_bytes = 0
# Loop over all iovs:
for buf_addr, buf_size in self._read_iovecs(iovs, iovs_len):
self.logger.debug(
"Read %s bytes from address %s", buf_size, buf_addr
)
data = self._read_mem_data(buf_addr, buf_size)
total_bytes += len(data)
if isinstance(py_f, int):
os.write(py_f, data)
else:
py_f.write(data)
# print(data.decode("ascii", errors="ignore"), end="")
# sys.stdout.write(data)
self._write_mem_u32(n_written, total_bytes)
return ESUCCESS
def path_create_directory(
self,
fd: ir.i32,
path_buf: ir.i32,
path_len: ir.i32,
) -> ir.i32:
""" Similar to mkdirat in POSIX """
path = self._read_string(path_buf, path_len)
self._trace("path_create_directory(fd={}, path={})".format(fd, path))
# Check if we have a base folder:
if fd not in self._available_fd:
return EBADF
# Base folder must be string
base_folder = self._available_fd[fd]
if not isinstance(base_folder, str):
return EBADF
# This is the full path:
full_path = os.path.join(base_folder, path)
if os.path.exists(full_path):
return EEXIST
os.mkdir(full_path)
return ESUCCESS
def path_filestat_get(
self,
fd: ir.i32,
flags: ir.i32,
path_buf: ir.i32,
path_len: ir.i32,
buf: ir.i32,
) -> ir.i32:
""" Return attributes of file or directory. Similar to POSIX stat. """
path = self._read_string(path_buf, path_len)
self._trace(
"path_filestat_get(fd={}, flags={}, path={}, buf={})".format(
fd, flags, path, buf
)
)
# Check if we have a base folder:
if fd not in self._available_fd:
return EBADF
# Base folder must be string
base_folder = self._available_fd[fd]
if not isinstance(base_folder, str):
return EBADF
# This is the full path:
full_path = os.path.join(base_folder, path)
if not os.path.exists(full_path):
return EEXIST
# TODO: use flags
stat_res = os.stat(full_path)
device = stat_res.st_dev
inode = stat_res.st_ino
if stat.S_ISREG(stat_res.st_mode):
filetype = 4 # regular_file
elif stat.S_ISDIR(stat_res.st_mode):
filetype = 3 # directory
else:
filetype = 0 # unknown
nlink = stat_res.st_nlink
size = stat_res.st_size
atim = stat_res.st_atime_ns
mtim = stat_res.st_mtime_ns
ctim = stat_res.st_ctime_ns
# Fill filestat struct:
self._write_mem_u64(buf, device) # device ID (u64)
self._write_mem_u64(buf + 8, inode) # file inode (u64)
self._write_mem_u8(buf + 16, filetype) # u8
self._write_mem_u64(buf + 24, nlink) # number of hard links (u64)
self._write_mem_u64(buf + 32, size) # File size (u64)
self._write_mem_u64(buf + 40, atim) # time in nano seconds
self._write_mem_u64(buf + 48, mtim)
self._write_mem_u64(buf + 56, ctim)
        return ESUCCESS
def path_open(
self,
fd: ir.i32,
dirflags: ir.i32,
path_buf: ir.i32,
path_len: ir.i32,
oflags: ir.i32,
fs_rights_base: ir.i64,
fs_rights_inheriting: ir.i64,
fdflags: ir.i32,
opened_fd_ptr: ir.i32,
) -> ir.i32:
path = self._read_string(path_buf, path_len)
self._trace(
"path_open(fd={}, dirflags={}, path={}, oflags={}, fs_rights_base={}, fs_rights_inheriting={})".format(
fd,
dirflags,
path,
oflags,
fs_rights_base,
fs_rights_inheriting,
)
)
# Check if we have a base folder:
if fd not in self._available_fd:
return EBADF
# Base folder must be string
base_folder = self._available_fd[fd]
if not isinstance(base_folder, str):
return EBADF
# This is the full path:
full_path = os.path.join(base_folder, path)
# TODO: check rights_base and right inherting
# TODO: handle all flags!.
if oflags == 0:
# Read only mode!
if not os.path.exists(full_path):
return EEXIST
py_f = open(full_path, "rb")
elif oflags == 13:
# Write and create, fail when exists
if os.path.exists(full_path):
return EEXIST
py_f = open(full_path, "wb")
else:
return EACCES
new_fd = self._next_fd
assert new_fd not in self._available_fd
self._available_fd[new_fd] = py_f
self._next_fd += 1
self._write_mem_u32(opened_fd_ptr, new_fd)
return ESUCCESS
def path_symlink(
self,
old_path_buf: ir.i32,
old_path_len: ir.i32,
fd: ir.i32,
new_path_buf: ir.i32,
new_path_len: ir.i32,
) -> ir.i32:
old_path = self._read_string(old_path_buf, old_path_len)
new_path = self._read_string(new_path_buf, new_path_len)
self.logger.error(
"TODO: path_symlink(old_path={}, fd={}, new_path={})".format(
old_path, fd, new_path
)
)
return EACCES
def path_unlink_file(
self, fd: ir.i32, path_buf: ir.i32, path_len: ir.i32
) -> ir.i32:
path = self._read_string(path_buf, path_len)
self.logger.error(
"TODO: path_unlink_file(fd={}, path={})".format(fd, path)
)
# Check if we have a base folder:
if fd not in self._available_fd:
return EBADF
# Base folder must be string
base_folder = self._available_fd[fd]
if not isinstance(base_folder, str):
return EBADF
# This is the full path:
full_path = os.path.join(base_folder, path)
# TODO: actual removal!
return EACCES
def poll_oneoff(
self,
events_in: ir.i32,
events_out: ir.i32,
nsubscriptions: ir.i32,
nevents_ptr: ir.i32,
) -> ir.i32:
self.logger.error("TODO: poll_oneoff()")
return EACCES
def proc_exit(self, code: ir.i32) -> None:
"""Request program termination.
Strategy: raise an exception which can be catched elsewhere.
This ensures program termination.
TODO: this does not work with native code, since the
exception does not propagate through the native code.
"""
self._trace("proc_exit(code={})".format(code))
raise ProcExit(code)
def random_get(self, buf: ir.i32, buf_len: ir.i32) -> ir.i32:
""" Generate some random bytes into buf. """
noise = os.urandom(buf_len)
self._write_mem_data(buf, noise)
return ESUCCESS
|
the-stack_0_24658
|
from app.config import QueueNames
def test_queue_names_set_in_paas_app_wrapper():
with open("scripts/paas_app_wrapper.sh", 'r') as stream:
search = ' -Q '
watched_queues = set()
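        # A wrapper line such as "... -Q queue-a,queue-b 2> /dev/null" contributes
        # {"queue-a", "queue-b"} to watched_queues.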
for line in stream.readlines():
start_of_queue_arg = line.find(search)
if start_of_queue_arg > 0:
start_of_queue_names = start_of_queue_arg + len(search)
end_of_queue_names = line.find('2>') if '2>' in line else len(line)
watched_queues.update({q.strip() for q in line[start_of_queue_names:end_of_queue_names].split(',')})
# ses-callbacks isn't used in api (only used in SNS lambda)
ignored_queues = {'ses-callbacks'}
assert watched_queues == set(QueueNames.all_queues()) | ignored_queues
|
the-stack_0_24659
|
import sys
import time
import datetime
import mock
import elasticsearch
import lumbermill.utils.DictUtils as DictUtils
from tests.ModuleBaseTestCase import ModuleBaseTestCase
from lumbermill.utils.DynamicValues import mapDynamicValueInString
from lumbermill.output import ElasticSearch
class TestElasticSearch(ModuleBaseTestCase):
def setUp(self):
super(TestElasticSearch, self).setUp(ElasticSearch.ElasticSearch(mock.Mock()))
es_service = self.getElasticSeachService()
self.es_server = "%s:%s" % (es_service['server'], es_service['port'])
self.test_index_name = "test_index"
self.es = self.connect([self.es_server])
if not self.es.ping():
self.logger.error("Could not connect to %s" % self.es_server)
self.fail()
try:
if not self.es.indices.exists(index=self.test_index_name):
self.es.indices.create(index=self.test_index_name) # ignore=[400, 404]
except elasticsearch.exceptions.RequestError:
self.logger.error("Could not create index %s on %s." % (self.test_index_name, self.es_server))
self.fail()
return
def connect(self, nodes):
es = False
tries = 0
while tries < 5 and not es:
try:
# Connect to es node and round-robin between them.
es = elasticsearch.Elasticsearch(nodes,
connection_class=elasticsearch.connection.Urllib3HttpConnection,
sniff_on_start=False,
sniff_on_connection_fail=False,
sniff_timeout=5,
maxsize=20,
use_ssl=False,
http_auth=None)
except:
etype, evalue, etb = sys.exc_info()
self.logger.warning("Connection to %s failed. Exception: %s, Error: %s." % (nodes, etype, evalue))
self.logger.warning("Waiting %s seconds before retring to connect." % ((4 + tries)))
time.sleep(4 + tries)
tries += 1
continue
if not es:
self.logger.error("Connection to %s failed. Shutting down." % (nodes))
sys.exit()
return es
def testDefaultIndex(self):
self.test_object.configure({'nodes': [self.es_server],
'batch_size': 1})
self.checkConfiguration()
self.test_object.initAfterFork()
timestring = datetime.datetime.utcnow().strftime('%Y.%m.%d')
index_name = 'lumbermill-%s' % timestring
try:
self.es.indices.delete(index=index_name, ignore=[400, 404])
except:
pass
self.es.indices.create(index=index_name)
event = DictUtils.getDefaultEventDict({'McTeagle': "But it was with more simple, homespun verses that McTeagle's unique style first flowered."})
doc_id = event['lumbermill']['event_id']
self.test_object.receiveEvent(event)
self.test_object.shutDown()
time.sleep(1)
try:
result = self.es.get(index=index_name, id=doc_id)
except elasticsearch.exceptions.NotFoundError as e:
self.fail(e)
self.assertEqual(type(result), dict)
self.assertEqual(event, result['_source'])
self.es.indices.delete(index=index_name, ignore=[400, 404])
def testDefaultDocId(self):
self.test_object.configure({'nodes': [self.es_server],
'index_name': self.test_index_name,
'batch_size': 1})
self.checkConfiguration()
self.test_object.initAfterFork()
event = DictUtils.getDefaultEventDict({'McTeagle': "But it was with more simple, homespun verses that McTeagle's unique style first flowered."})
doc_id = event['lumbermill']['event_id']
self.test_object.receiveEvent(event)
self.test_object.shutDown()
time.sleep(1)
try:
result = self.es.get(index=self.test_index_name, id=doc_id)
except elasticsearch.exceptions.NotFoundError as e:
self.fail(e)
self.assertEqual(type(result), dict)
self.assertEqual(event, result['_source'])
def testCustomDocId(self):
self.test_object.configure({'nodes': [self.es_server],
'index_name': self.test_index_name,
'doc_id': '$(event_doc_id)',
'sniff_on_start': False,
'store_interval_in_secs': 1})
self.checkConfiguration()
self.test_object.initAfterFork()
event = DictUtils.getDefaultEventDict({'McTeagle': "But it was with more simple, homespun verses that McTeagle's unique style first flowered.",
'event_doc_id': 'Ewan'})
self.test_object.receiveEvent(event)
self.test_object.shutDown()
result = self.es.get(index=self.test_index_name, id='Ewan')
self.assertEqual(type(result), dict)
self.assertEqual(event, result['_source'])
def testCustomIndexName(self):
self.test_object.configure({'nodes': [self.es_server],
'index_name': 'testindex-%Y.%m.%d-$(lumbermill.event_type)',
'sniff_on_start': False,
'store_interval_in_secs': 1})
self.checkConfiguration()
self.test_object.initAfterFork()
event = DictUtils.getDefaultEventDict({'McTeagle': "But it was with more simple, homespun verses that McTeagle's unique style first flowered."})
doc_id = event['lumbermill']['event_id']
self.test_object.receiveEvent(event)
self.test_object.shutDown()
index_name = mapDynamicValueInString('testindex-%Y.%m.%d-%(lumbermill.event_type)s', event, use_strftime=True).lower()
result = self.es.get(index=index_name, id=doc_id)
self.assertEqual(type(result), dict)
self.assertEqual(event, result['_source'])
self.es.indices.delete(index=index_name, ignore=[400, 404])
def __testStorageTTL(self):
"""
Does not seem to be testable without waiting for at least 60 seconds.
That seems to be the smallest interval the purger thread is running, no matter what I set ttl.interval to.
The documentation @http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-indices.html#indices-ttl
does not say anything about a lower limit but testing leads me to the assumption that 60s is the lowest limit.
"""
self.test_object.configure({'nodes': [self.es_server],
'index_name': self.test_index_name,
'ttl': 100,
'sniff_on_start': False,
'store_interval_in_secs': 1})
self.checkConfiguration()
self.test_object.initAfterFork()
# Enable ttl mapping.
self.es.indices.close(index=self.test_index_name)
self.es.indices.put_settings(index=self.test_index_name, body='{"ttl": {"interval" : "1s"}}')
self.es.indices.open(index=self.test_index_name)
self.es.indices.put_mapping(index=self.test_index_name, doc_type='Unknown', body='{"_ttl" : { "enabled" : true }}')
event = DictUtils.getDefaultEventDict({'McTeagle': "But it was with more simple, homespun verses that McTeagle's unique style first flowered."})
doc_id = event['lumbermill']['event_id']
self.test_object.receiveEvent(event)
self.test_object.shutDown()
try:
result = self.es.get(index=self.test_index_name, id=doc_id)
except elasticsearch.NotFoundError:
self.fail("Document was not found.")
self.assertEqual(type(result), dict)
self.assertEqual(event, result['_source'])
time.sleep(2)
try:
result = self.es.get(index=self.test_index_name, id=doc_id)
self.fail("Document was not deleted after ttl.")
except elasticsearch.NotFoundError:
pass
def testSelectedFields(self):
self.test_object.configure({'nodes': [self.es_server],
'fields': ['sheep'],
'doc_id': '$(id)',
'doc_type': '$(type)',
'batch_size': 1})
self.checkConfiguration()
self.test_object.initAfterFork()
timestring = datetime.datetime.utcnow().strftime('%Y.%m.%d')
index_name = 'lumbermill-%s' % timestring
try:
self.es.indices.delete(index=index_name, ignore=[400, 404])
except:
pass
self.es.indices.create(index=index_name)
event = DictUtils.getDefaultEventDict({'McTeagle': "But it was with more simple, homespun verses that McTeagle's unique style first flowered.",
'sheep': {'flying': 'scotsman',
'id': '12345',
'type': 'pirate'}})
doc_id = event['sheep.id']
self.test_object.receiveEvent(event)
self.test_object.shutDown()
time.sleep(1)
try:
result = self.es.get(index=index_name, id=doc_id)
except elasticsearch.exceptions.NotFoundError as e:
self.fail(e)
self.assertEqual(type(result), dict)
self.assertEqual(event['sheep'], result['_source'])
self.es.indices.delete(index=index_name, ignore=[400, 404])
def tearDown(self):
self.es.indices.delete(index=self.test_index_name, ignore=[400, 404])
ModuleBaseTestCase.tearDown(self)
|
the-stack_0_24660
|
# Copyright 2017 SUSE LINUX GmbH, Nuernberg, Germany.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from .utils import TestUtils
@pytest.mark.admin
class TestKubicAdmin(object):
"""docstring for TestBaseEnv"""
@pytest.mark.bootstrapped
@pytest.mark.parametrize("service", [
"docker",
"kubelet",
])
def test_services_running(self, host, service):
host_service = host.service(service)
assert host_service.is_running
@pytest.mark.bootstrapped
@pytest.mark.parametrize("service", [
"docker",
"kubelet",
])
def test_services_enabled(self, host, service):
host_service = host.service(service)
assert host_service.is_enabled
@pytest.mark.bootstrapped
@pytest.mark.parametrize("service", [
"container-feeder",
])
def test_service_non_registry(self, host, service):
"""Test service is only running when not using registry."""
registry_conf = TestUtils.load_registry_configuration(host)
if not registry_conf['use_registry']:
host_service = host.service(service)
assert host_service.is_running
@pytest.mark.bootstrapped
def test_salt_role(self, host):
assert 'admin' in host.salt("grains.get", "roles")
@pytest.mark.bootstrapped
def test_etcd_aliveness(self, host):
cmd = "etcdctl cluster-health"
health = host.run_expect([0], cmd)
assert "cluster is healthy" in health.stdout
|