the-stack_0_22974 | '''
proxy-synthesis
Copyright (c) 2021-present NAVER Corp.
Apache License v2.0
'''
import os
import sys
import glob
import random
import shutil
import argparse
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
# from torch.utils.tensorboard import SummaryWriter
import net
import loss
import utils
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["CUDA_VISIBLE_DEVICES"]="1"
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('-j', '--workers', default=4, type=int,
help='number of data loading workers')
parser.add_argument('--backbone', default='resnet50', type=str, # TODO: change this
help='bninception, resnet18, resnet34, resnet50, resnet101')
parser.add_argument('--pooling_type', default='GAP', type=str,
help='GAP | GMP | GAP,GMP')
parser.add_argument('--input_size', default=224, type=int,
help='the size of the input images')
parser.add_argument('--do_nmi', default=True, action='store_true', help='do nmi or not')
parser.add_argument('--freeze_BN', default=True, action='store_true', help='freeze bn')
parser.add_argument('-b', '--batch_size', default=32, type=int, help='mini-batch size')
parser.add_argument('--dim', default=512, type=int, help='dimensionality of embeddings')
parser.add_argument('--loss', default='Norm_SoftMax', type=str, help='loss you want')
parser.add_argument('-C', default=1171, type=int, help='number of classes in the training set')
parser.add_argument('--data', default='/home/ruofan/PycharmProjects/SoftTriple/datasets/logo2k', help='path to dataset')
parser.add_argument('--data_name', default='logo2k', type=str, help='dataset name')
parser.add_argument('--save_path', default='logs/logo2k_512_NormSoftmax',
type=str, help='where your models will be saved')
parser.add_argument('--gpu', default=1, type=int, help='GPU id to use.')
parser.add_argument('--k_list', default='1,2,4,8', type=str, help='Recall@k list')
def main():
args = parser.parse_args()
'''Set number of classes'''
if args.data_name.lower() in ["car", "cars", "cars196"]:
args.C = 98
elif args.data_name.lower() in ["sop", "stanfordonlineproducts"]:
args.C = 11318
elif args.data_name.lower() in ["cub", "cub200"]:
args.C = 100
elif args.data_name.lower() in ['inshop']:
args.C = 3997
else:
print("Using custom dataset")
## create data_loader
# load data
testdir = os.path.join(args.data, 'test')
if 'resnet' in args.backbone:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
scale_value = 1
else:
normalize = transforms.Normalize(mean=[104., 117., 128.],
std=[1., 1., 1.])
scale_value = 255
test_transforms = transforms.Compose([
# transforms.Lambda(utils.RGB2BGR),
transforms.Resize(256),
transforms.CenterCrop(args.input_size),
transforms.ToTensor(),
# transforms.Lambda(lambda x: x.mul(scale_value)),
normalize, ])
test_image = datasets.ImageFolder(testdir, test_transforms)
test_class_dict, max_r = utils.get_class_dict(test_image)
args.test_class_dict = test_class_dict
args.max_r = max_r
test_loader = torch.utils.data.DataLoader(
test_image,
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.data_name.lower() == 'inshop':
image_info = np.array(test_image.imgs)
print('\tcheck: gallery == %s, query == %s\n' % (
image_info[0, 0].split('/')[-3], image_info[-1, 0].split('/')[-3]))
args.query_labels = np.array(
[info[0].split('/')[-2] for info in image_info[image_info[:, 1] == '1']]) # 14218 images
args.gallery_labels = np.array(
[info[0].split('/')[-2] for info in image_info[image_info[:, 1] == '0']]) # 12612 images
if len(args.query_labels) != 14218 or len(args.gallery_labels) != 12612:
print('check your inshop DB')
exit()
'''Create model'''
# define backbone
if args.backbone == 'bninception':
model = net.bninception().cuda()
else: # resnet family
model = net.Resnet(resnet_type=args.backbone).cuda()
# define pooling method
pooling = net.pooling(pooling_type=args.pooling_type.split(',')).cuda()
# define embedding method
embedding = net.embedding(input_dim=model.output_dim, output_dim=args.dim).cuda()
state_dict = torch.load(os.path.join(args.save_path, 'model_00050.pth'), map_location='cpu')
model.load_state_dict(state_dict['model_state'])
embedding.load_state_dict(state_dict['embedding_state'])
k_list = [int(k) for k in args.k_list.split(',')] # [1, 2, 4, 8]
nmi, recall, MAP, features, labels = validate(test_loader, model, pooling, embedding, k_list, args)
return nmi, recall, MAP
def validate(test_loader, model, pooling, embedding, k_list, args):
# switch to evaluation mode
model.eval()
embedding.eval()
testdata = torch.Tensor()
testdata_l2 = torch.Tensor()
testlabel = torch.LongTensor()
with torch.no_grad():
for i, (input, target) in tqdm(enumerate(test_loader), total=len(test_loader)):
if args.gpu is not None:
input = input.cuda()
# compute output
output = model(input)
output = pooling(output)
output = embedding(output)
output_l2 = F.normalize(output, p=2, dim=1)
testdata = torch.cat((testdata, output.cpu()), 0)
testdata_l2 = torch.cat((testdata_l2, output_l2.cpu()), 0)
testlabel = torch.cat((testlabel, target))
features = testdata.cpu().numpy().astype('float32')
features_l2 = testdata_l2.cpu().numpy().astype('float32')
labels = testlabel.cpu().numpy().astype('float32')
nmi, recall, MAP = utils.evaluation(features_l2, labels, k_list, args)
return nmi, recall, MAP, features, labels
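# A minimal sketch of how Recall@K can be computed from the L2-normalized
# embeddings returned above. Illustrative only: the script relies on
# utils.evaluation, whose actual implementation is not shown here.
def _recall_at_k_sketch(features_l2, labels, k_list):
    sims = features_l2 @ features_l2.T          # cosine similarity (rows are L2-normalized)
    np.fill_diagonal(sims, -np.inf)             # a query must not retrieve itself
    nn_idx = np.argsort(-sims, axis=1)          # neighbours sorted by decreasing similarity
    recalls = []
    for k in k_list:
        hits = (labels[nn_idx[:, :k]] == labels[:, None]).any(axis=1)
        recalls.append(float(hits.mean()))
    return recalls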
if __name__ == '__main__':
main()
|
the-stack_0_22977 | '''
Based off https://github.com/pimutils/python-vdir, which is itself based off
vdirsyncer.
'''
import errno
import os
import uuid
from typing import Optional # noqa
from atomicwrites import atomic_write
class cached_property:
'''A read-only @property that is only evaluated once. Only usable on class
instances' methods.
'''
def __init__(self, fget, doc=None):
self.__name__ = fget.__name__
self.__module__ = fget.__module__
self.__doc__ = doc or fget.__doc__
self.fget = fget
def __get__(self, obj, cls):
if obj is None: # pragma: no cover
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
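# Usage sketch (illustrative): the wrapped method runs once per instance, and the
# result is then served from the instance __dict__ on every later access:
#
#   class Report:
#       @cached_property
#       def stats(self):
#           print("computing...")
#           return {"rows": 42}
#
#   r = Report()
#   r.stats   # prints "computing...", returns {'rows': 42}
#   r.stats   # served from the cache, nothing is recomputed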
def to_unicode(x, encoding='ascii'):
if not isinstance(x, str):
return x.decode(encoding)
return x
def to_bytes(x, encoding='ascii'):
if not isinstance(x, bytes):
return x.encode(encoding)
return x
SAFE_UID_CHARS = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789_.-+')
def _href_safe(uid, safe=SAFE_UID_CHARS):
return not bool(set(uid) - set(safe))
def _generate_href(uid=None, safe=SAFE_UID_CHARS):
if not uid or not _href_safe(uid, safe):
return to_unicode(uuid.uuid4().hex)
else:
return uid
def get_etag_from_file(f):
'''Get mtime-based etag from a filepath, file-like object or raw file
descriptor.
This function will flush/sync the file as much as necessary to obtain a
correct mtime.
'''
close_f = False
if hasattr(f, 'read'):
f.flush()
f = f.fileno()
elif isinstance(f, str):
flags = 0
if os.path.isdir(f):
flags = os.O_DIRECTORY
f = os.open(f, flags)
close_f = True
# assure that all internal buffers associated with this file are
# written to disk
try:
os.fsync(f)
stat = os.fstat(f)
finally:
if close_f:
os.close(f)
mtime = getattr(stat, 'st_mtime_ns', None)
if mtime is None:
mtime = stat.st_mtime
return f'{mtime:.9f}'
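# Illustrative use: etags are derived from the mtime, so re-reading an unchanged
# file yields the same etag, while a write normally bumps it. This is what
# update() and delete() below rely on for optimistic concurrency (path is hypothetical):
#
#   etag = get_etag_from_file("/tmp/collection/item.vcf")
#   ... some other process rewrites the file ...
#   etag == get_etag_from_file("/tmp/collection/item.vcf")   # now False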
class VdirError(IOError):
def __init__(self, *args, **kwargs):
for key, value in kwargs.items():
if getattr(self, key, object()) is not None: # pragma: no cover
raise TypeError(f'Invalid argument: {key}')
setattr(self, key, value)
super().__init__(*args)
class NotFoundError(VdirError):
pass
class CollectionNotFoundError(VdirError):
pass
class WrongEtagError(VdirError):
pass
class AlreadyExistingError(VdirError):
existing_href = None # type: Optional[str]
class Item:
def __init__(self, raw):
assert isinstance(raw, str)
self.raw = raw
@cached_property
def uid(self):
uid = ''
lines = iter(self.raw.splitlines())
for line in lines:
if line.startswith('UID:'):
uid += line[4:].strip()
break
for line in lines:
if not line.startswith(' '):
break
uid += line[1:]
return uid or None
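# Example (illustrative): folded UID lines, i.e. continuation lines starting with
# a single space as in iCalendar/vCard, are unfolded by the loops above:
#
#   Item("BEGIN:VCARD\nUID:1234-\n 5678\nEND:VCARD").uid   # -> "1234-5678"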
def _normalize_meta_value(value):
return to_unicode(value or '').strip()
class VdirBase:
item_class = Item
default_mode = 0o750
def __init__(self, path, fileext, encoding='utf-8'):
if not os.path.isdir(path):
raise CollectionNotFoundError(path)
self.path = path
self.encoding = encoding
self.fileext = fileext
@classmethod
def discover(cls, path, **kwargs):
try:
collections = os.listdir(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return
for collection in collections:
collection_path = os.path.join(path, collection)
if os.path.isdir(collection_path):
yield cls(path=collection_path, **kwargs)
@classmethod
def create(cls, collection_name, **kwargs):
kwargs = dict(kwargs)
path = kwargs['path']
path = os.path.join(path, collection_name)
if not os.path.exists(path):
os.makedirs(path, mode=cls.default_mode)
elif not os.path.isdir(path):
raise OSError(f'{repr(path)} is not a directory.')
kwargs['path'] = path
return kwargs
def _get_filepath(self, href):
return os.path.join(self.path, href)
def _get_href(self, uid):
return _generate_href(uid) + self.fileext
def list(self):
for fname in os.listdir(self.path):
fpath = os.path.join(self.path, fname)
if os.path.isfile(fpath) and fname.endswith(self.fileext):
yield fname, get_etag_from_file(fpath)
def get(self, href):
fpath = self._get_filepath(href)
try:
with open(fpath, 'rb') as f:
return (Item(f.read().decode(self.encoding)),
get_etag_from_file(fpath))
except OSError as e:
if e.errno == errno.ENOENT:
raise NotFoundError(href)
else:
raise
def upload(self, item):
if not isinstance(item.raw, str):
raise TypeError('item.raw must be a unicode string.')
try:
href = self._get_href(item.uid)
fpath, etag = self._upload_impl(item, href)
except OSError as e:
if e.errno in (
errno.ENAMETOOLONG, # Unix
errno.ENOENT # Windows
):
# random href instead of UID-based
href = self._get_href(None)
fpath, etag = self._upload_impl(item, href)
else:
raise
return href, etag
def _upload_impl(self, item, href):
fpath = self._get_filepath(href)
try:
with atomic_write(fpath, mode='wb', overwrite=False) as f:
f.write(item.raw.encode(self.encoding))
return fpath, get_etag_from_file(f)
except OSError as e:
if e.errno == errno.EEXIST:
raise AlreadyExistingError(existing_href=href)
else:
raise
def update(self, href, item, etag):
fpath = self._get_filepath(href)
if not os.path.exists(fpath):
raise NotFoundError(item.uid)
actual_etag = get_etag_from_file(fpath)
if etag != actual_etag:
raise WrongEtagError(etag, actual_etag)
if not isinstance(item.raw, str):
raise TypeError('item.raw must be a unicode string.')
with atomic_write(fpath, mode='wb', overwrite=True) as f:
f.write(item.raw.encode(self.encoding))
etag = get_etag_from_file(f)
return etag
def delete(self, href, etag):
fpath = self._get_filepath(href)
if not os.path.isfile(fpath):
raise NotFoundError(href)
actual_etag = get_etag_from_file(fpath)
if etag != actual_etag:
raise WrongEtagError(etag, actual_etag)
os.remove(fpath)
def get_meta(self, key):
fpath = os.path.join(self.path, key)
try:
with open(fpath, 'rb') as f:
return f.read().decode(self.encoding).strip() or None
except OSError as e:
if e.errno == errno.ENOENT:
return None
else:
raise
def set_meta(self, key, value):
value = value or ''
assert isinstance(value, str)
fpath = os.path.join(self.path, key)
with atomic_write(fpath, mode='wb', overwrite=True) as f:
f.write(value.encode(self.encoding))
class Color:
def __init__(self, x):
if not x:
raise ValueError('Color is false-ish.')
if not x.startswith('#'):
raise ValueError('Color must start with a #.')
if len(x) != 7:
raise ValueError('Color must not have shortcuts. '
'#ffffff instead of #fff')
self.raw = x.upper()
@cached_property
def rgb(self):
x = self.raw
r = x[1:3]
g = x[3:5]
b = x[5:8]
if len(r) == len(g) == len(b) == 2:
return int(r, 16), int(g, 16), int(b, 16)
else:
raise ValueError(f'Unable to parse color value: {self.raw}')
class ColorMixin:
color_type = Color
def get_color(self):
try:
return self.color_type(self.get_meta('color'))
except ValueError:
return None
def set_color(self, value):
self.set_meta('color', self.color_type(value).raw)
class DisplayNameMixin:
def get_displayname(self):
return self.get_meta('displayname')
def set_displayname(self, value):
self.set_meta('displayname', value)
class Vdir(VdirBase, ColorMixin, DisplayNameMixin):
pass
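# End-to-end usage sketch (illustrative; the directory layout and file names
# below are hypothetical):
#
#   kwargs = Vdir.create("contacts", path="/tmp/vdirs", fileext=".vcf")
#   coll = Vdir(**kwargs)
#   href, etag = coll.upload(Item("BEGIN:VCARD\nUID:alice\nEND:VCARD"))   # href == "alice.vcf"
#   item, etag = coll.get(href)
#   etag = coll.update(href, Item("BEGIN:VCARD\nUID:alice\nFN:Alice\nEND:VCARD"), etag)
#   coll.set_displayname("My contacts")
#   coll.delete(href, etag)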
|
the-stack_0_22979 | # system imports
import os
import time
import sys
sys.dont_write_bytecode = True #stop generating .pyc files
if not sys.warnoptions: #stop generating warnings
import warnings
warnings.simplefilter("ignore")
import datetime
# common matrix manipulation
import numpy as np
# plotting, Image showing, Image string operations
import matplotlib.pyplot as plt
# import Image
from PIL import Image
# Image loading from disk
import cv2
# Progress bar
from tqdm import tqdm
# Pytorch
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torchvision import transforms
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
# model
from model.model import DispNet_sequential as DispNet
# loss
from loss.loss import reconstruct_using_disparity
from loss.disparity_smoothness_loss import disparity_smoothness, temporal_disparity_smoothness
from loss.ssim_loss import SSIM_loss
# Pyramid generation
from utils.pyramid import scale_pyramid
from dataset.dataset import KITTIDataset_from_folder_for_temporal_smoothness, KITTIDataset_from_txt_for_temporal_smoothness
from params import *
if __name__ == '__main__':
print("\n \n --- Monocular Depth Estimation train code --- \n \n")
# load dataset
if source_of_training == "txt":
dataset = KITTIDataset_from_txt_for_temporal_smoothness()
else:
dataset = KITTIDataset_from_folder_for_temporal_smoothness()
TrainLoader = torch.utils.data.DataLoader(dataset, batch_size = BATCH_SIZE, shuffle = True, num_workers = MANY_WORKERS)
# load model
net = DispNet()
net.to(torch.device(compute_device))
if resume_trining:
print("\n Loading previous weights from ", pth_file_location)
net.load_state_dict(torch.load(pth_file_location))
# configure loss
print("\n \nTraining with the following loss parmeters:")
print("appearance_matching_loss_weight: ",appearance_matching_loss_weight)
print("LR_loss_weight: ", LR_loss_weight)
print("disparity_smoothness_loss_weight: ", disparity_smoothness_loss_weight)
print("alpha_appearance_matching_loss: ", alpha_appearance_matching_loss)
print("temporal_disparity_smoothness_loss_weight: ", temporal_disparity_smoothness_loss_weight)
print("\n")
if is_gpu_available:
loss_function = nn.L1Loss().cuda()
else:
loss_function = nn.L1Loss()
optimizer = optim.Adam(net.parameters(), lr = LEARNING_RATE)
scheduler = StepLR(optimizer, step_size=15, gamma=0.1)
current_datetime = datetime.datetime.now()
print("Training Started @ ", current_datetime.strftime("%Y-%m-%d %H:%M:%S"))
for epoch in range(1, EPOCH+1):
for batch_data in tqdm(TrainLoader):
# retrieve stereo images
left_original = batch_data["left_img"]
prev_left_original = batch_data["prev_left_img"]
right_original = batch_data["right_img"]
prev_right_original = batch_data["prev_right_img"]
# send to CUDA device
if is_gpu_available:
left = left_original.type(torch.FloatTensor).cuda()
prev_left = prev_left_original.type(torch.FloatTensor).cuda()
right = right_original.type(torch.FloatTensor).cuda()
prev_right = prev_right_original.type(torch.FloatTensor).cuda()
else:
left = left_original.type(torch.FloatTensor)
prev_left = prev_left_original.type(torch.FloatTensor)
right = right_original.type(torch.FloatTensor)
prev_right = prev_right_original.type(torch.FloatTensor)
# generate pyramid
left_pyramid = scale_pyramid(left,4)
right_pyramid = scale_pyramid(right,4)
prev_left_pyramid = scale_pyramid(prev_left,4)
prev_right_pyramid = scale_pyramid(prev_right,4)
# forward pass with left image
output = net.forward(left)
prev_output = net.forward(prev_left)
# collect disparities from the model
left_disp = [output[i][:, 0, :, :] for i in range(4)]
right_disp = [output[i][:, 1, :, :] for i in range(4)]
prev_left_disp = [prev_output[i][:, 0, :, :] for i in range(4)]
prev_right_disp = [prev_output[i][:, 1, :, :] for i in range(4)]
# reconstruct the corresponding images using the predicted disparities
right_reconstuct = [reconstruct_using_disparity(left_pyramid[i], right_disp[i]) for i in range(4)]
left_reconstuct = [reconstruct_using_disparity(right_pyramid[i], left_disp[i]) for i in range(4)]
"""
calculate L1 loss
"""
# TODO: weight the pyramid losses so that errors at smaller scales contribute more
left_L1loss = [loss_function(left_pyramid[i], left_reconstuct[i]) for i in range(4)]
right_L1loss = [loss_function(right_pyramid[i], right_reconstuct[i]) for i in range(4)]
if is_gpu_available:
total_L1_loss = torch.FloatTensor([0]).cuda()
total_SSIM_loss = torch.FloatTensor([0]).cuda()
else:
total_L1_loss = torch.FloatTensor([0])
total_SSIM_loss = torch.FloatTensor([0])
for i in range(4):
total_L1_loss += (left_L1loss[i] + right_L1loss[i])
total_L1_loss /= 4
"""
calculate SSIM loss
"""
left_SSIM_loss = [torch.mean(SSIM_loss(left_pyramid[i], left_reconstuct[i])) for i in range(4)] #Reconstructed Image and Original Image
right_SSIM_loss = [torch.mean(SSIM_loss(right_pyramid[i], right_reconstuct[i])) for i in range(4)]
for i in range(4):
total_SSIM_loss += (left_SSIM_loss[i] + right_SSIM_loss[i])
total_SSIM_loss /= 4
"""
Total appearance matching loss
"""
appearance_matching_loss = (alpha_appearance_matching_loss * total_SSIM_loss) + (1- alpha_appearance_matching_loss)*total_L1_loss
# print("appearance matching loss: ", appearance_matching_loss)
# append axis of channel to treat disparities as images
left_disp[0] = left_disp[0].view([-1, 1, 256, 512])
left_disp[1] = left_disp[1].view([-1, 1, 128, 256])
left_disp[2] = left_disp[2].view([-1, 1, 64, 128])
left_disp[3] = left_disp[3].view([-1, 1, 32, 64])
prev_left_disp[0] = prev_left_disp[0].view([-1, 1, 256, 512])
prev_left_disp[1] = prev_left_disp[1].view([-1, 1, 128, 256])
prev_left_disp[2] = prev_left_disp[2].view([-1, 1, 64, 128])
prev_left_disp[3] = prev_left_disp[3].view([-1, 1, 32, 64])
right_disp[0] = right_disp[0].view([-1, 1, 256, 512])
right_disp[1] = right_disp[1].view([-1, 1, 128, 256])
right_disp[2] = right_disp[2].view([-1, 1, 64, 128])
right_disp[3] = right_disp[3].view([-1, 1, 32, 64])
prev_right_disp[0] = prev_right_disp[0].view([-1, 1, 256, 512])
prev_right_disp[1] = prev_right_disp[1].view([-1, 1, 128, 256])
prev_right_disp[2] = prev_right_disp[2].view([-1, 1, 64, 128])
prev_right_disp[3] = prev_right_disp[3].view([-1, 1, 32, 64])
"""
Calculate L-R consistency loss
"""
reconstruct_left = [reconstruct_using_disparity(right_disp[i], left_disp[i]) for i in range(4)]
reconstruct_right = [reconstruct_using_disparity(left_disp[i], right_disp[i]) for i in range(4)]
# L-R consistency compares predicted and projected disparities via their mean absolute difference
LR_loss_left = [torch.mean(torch.abs(left_disp[i]-reconstruct_left[i])) for i in range(4)]
LR_loss_right = [torch.mean(torch.abs(right_disp[i]-reconstruct_right[i])) for i in range(4)]
if is_gpu_available:
total_LR_loss = torch.FloatTensor([0]).cuda()
else:
total_LR_loss = torch.FloatTensor([0])
for i in range(4):
total_LR_loss += LR_loss_left[i] + LR_loss_right[i]
total_LR_loss /= 4
"""
Disparity smoothness loss
"""
disparity_smoothnesss_loss_left = disparity_smoothness(left_pyramid,left_disp)
disparity_smoothness_loss_right = disparity_smoothness(right_pyramid,right_disp)
disparity_smoothness_loss = sum(disparity_smoothnesss_loss_left + disparity_smoothness_loss_right)
"""
Temporal Disparity Smoothness loss
"""
temporal_disparity_smoothness_loss_left = temporal_disparity_smoothness(prev_left_pyramid, left_pyramid, prev_left_disp, left_disp)
temporal_disparity_smoothness_loss_right = temporal_disparity_smoothness(prev_right_pyramid, right_pyramid, prev_right_disp, right_disp)
temporal_disparity_smoothness_loss = sum(temporal_disparity_smoothness_loss_left + temporal_disparity_smoothness_loss_right)
loss = (appearance_matching_loss_weight * appearance_matching_loss+ \
LR_loss_weight * total_LR_loss + \
disparity_smoothness_loss_weight * disparity_smoothness_loss +\
temporal_disparity_smoothness_loss_weight* temporal_disparity_smoothness_loss)/BATCH_SIZE
loss.backward()
optimizer.step()
net.zero_grad()
scheduler.step()
#TO DO: Query same image and see how it evolves over epochs
print("Epoch : ", epoch, " Loss: ", loss)
rgb = right_disp[0][0].detach().cpu().numpy()
fig = plt.figure(1)
plt.imshow(rgb[0],cmap='plasma')
plt.savefig(save_images_in + str(epoch))
torch.save(net.state_dict(), pth_file_location)
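# For reference, the objective minimised above is a weighted sum of four terms
# (all weights, and alpha, come from params.py, which is not shown here):
#
#   appearance = alpha * SSIM + (1 - alpha) * L1                (averaged over the 4 scales)
#   loss = (appearance_matching_loss_weight * appearance
#           + LR_loss_weight * LR_consistency
#           + disparity_smoothness_loss_weight * disparity_smoothness
#           + temporal_disparity_smoothness_loss_weight * temporal_disparity_smoothness) / BATCH_SIZE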
|
the-stack_0_22980 | """
File: mirror_lake.py
name: HsuenChi Chiu
----------------------------------
This file reads in mt-rainier.jpg and
makes a new image that creates a mirror
lake vibe by placing the inverse image of
mt-rainier.jpg below the original one
"""
from simpleimage import SimpleImage
def reflect(filename):
"""
:param filename: original image
:return: a new image with the original on top and its vertical reflection below
"""
img = SimpleImage(filename)
# create new image
new_img = SimpleImage.blank(img.width, img.height*2)
for x in range(img.width):
for y in range(img.height):
img_pixel = img.get_pixel(x,y)
new_pixel1 = new_img.get_pixel(x,y)
# find the vertically reflect pixel
new_pixel2 = new_img.get_pixel(x,new_img.height-1-y)
new_pixel1.red = img_pixel.red
new_pixel1.green = img_pixel.green
new_pixel1.blue = img_pixel.blue
new_pixel2.red = img_pixel.red
new_pixel2.green = img_pixel.green
new_pixel2.blue = img_pixel.blue
return new_img
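# Index mapping sketch (illustrative, assuming img.height == 4 so new_img.height == 8):
#   y = 0 -> rows 0 and 7,  y = 1 -> rows 1 and 6,
#   y = 2 -> rows 2 and 5,  y = 3 -> rows 3 and 4
# so the top half is a copy of the original and the bottom half is its vertical mirror.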
def main():
"""
the program helps reflect the original image vertically.
"""
original_mt = SimpleImage('images/mt-rainier.jpg')
original_mt.show()
reflected = reflect('images/mt-rainier.jpg')
reflected.show()
if __name__ == '__main__':
main()
|
the-stack_0_22981 | from typing import List, Dict, Tuple, Any
import os
import sys
import re
import csv
import string
import math
import tarfile
import types
from pathlib import Path
import random
from tqdm.auto import tqdm
from transformers import PreTrainedTokenizerBase, AutoTokenizer
from allennlp.predictors.predictor import Predictor
from allennlp.common.util import JsonDict
from allennlp.data import Instance
import argtyped
from scripts.helpers import (
download_file,
save_txt,
load_txt,
load_json,
save_json,
load_tsv,
save_tsv,
)
random.seed(0)
class Arguments(argtyped.Arguments, underscore=True):
source: Path
output: Path
noun_phrases: Path = Path("noun_phrases.json")
cache_dir: Path = Path.home() / ".cache" / "vln"
categories: Path = Path("categories.txt")
matterport: Path = Path("matterport_categories.tsv")
places365: Path = Path("places365_categories.tsv")
parser: Path = Path.home() / ".allennlp" / "elmo"
forbidden_words: Tuple[str, ...] =("turn",)
min_tokens: int = 1
max_tokens: int = 5
max_instr_length: int = 200
batch_size: int = 100
start: int = 0
num_splits: int = 1
num_workers: int = 1
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Override the function from ConstituencyParserPredictor
"""
spacy_tokens = self._tokenizer.tokenize(json_dict["sentence"])
spacy_tokens = spacy_tokens[: self.max_length]
sentence_text = [token.text for token in spacy_tokens]
pos_tags = [token.tag_ for token in spacy_tokens]
return self._dataset_reader.text_to_instance(sentence_text, pos_tags)
def clean_sentence(stc: str) -> str:
return stc.strip(". ,\n").lower()
def create_token(
tree: Dict,
tokenizer: PreTrainedTokenizerBase,
min_tokens: int,
max_tokens: int,
forbidden_words: Tuple[str, ...],
):
if tree["nodeType"] in ("NP", "NNP", "FRAG"):
proposal = clean_sentence(tree["word"])
num_tokens = len(tokenizer.tokenize(proposal))
if (
"." not in proposal
and min_tokens <= num_tokens
and num_tokens <= max_tokens
and all(word not in proposal for word in forbidden_words)
):
return proposal
return None
def retrieve_noun_phrases(
sentence: str,
tree: Dict,
tokenizer: PreTrainedTokenizerBase,
min_tokens: int,
max_tokens: int,
forbidden_words: Tuple[str, ...],
):
"""
Return the list of noun phrases found in the constituency tree.
"""
token = create_token(tree, tokenizer, min_tokens, max_tokens, forbidden_words)
if token is not None:
return [token]
if "children" not in tree:
return []
noun_phrases = []
for children in tree["children"]:
if children["nodeType"] not in ("ADJP", "PP"):
noun_phrases += retrieve_noun_phrases(
sentence, children, tokenizer, min_tokens, max_tokens, forbidden_words
)
return noun_phrases
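# A minimal illustration of the hierplane-style tree consumed above (the dict
# format produced by the AllenNLP constituency parser), using a hypothetical
# whitespace tokenizer stand-in:
#
#   class _WhitespaceTok:
#       def tokenize(self, text):
#           return text.split()
#
#   tree = {
#       "nodeType": "S", "word": "the red door is open",
#       "children": [
#           {"nodeType": "NP", "word": "the red door"},
#           {"nodeType": "VP", "word": "is open", "children": [
#               {"nodeType": "VBZ", "word": "is"},
#               {"nodeType": "ADJP", "word": "open"},
#           ]},
#       ],
#   }
#   retrieve_noun_phrases("the red door is open", tree, _WhitespaceTok(),
#                         min_tokens=1, max_tokens=5, forbidden_words=("turn",))
#   # -> ["the red door"]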
def is_empty(sentence: str) -> bool:
return sentence.strip() == ""
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx : min(ndx + n, l)]
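# Example (illustrative): list(batch([1, 2, 3, 4, 5], n=2)) -> [[1, 2], [3, 4], [5]]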
def extracting_noun_phrases(
sentences: List[Dict[str, Any]], args: Arguments, cuda_device: int
):
"""
Extract all noun phrases from the given sentences (stored in each sentence dict).
"""
# load models
predictor = Predictor.from_path(
args.cache_dir / args.parser, cuda_device=cuda_device
)
predictor.max_length = args.max_instr_length # type: ignore
predictor._json_to_instance = types.MethodType(_json_to_instance, predictor) # type: ignore
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
if not isinstance(tokenizer, PreTrainedTokenizerBase):
raise ValueError(f"Unexpected type for tokenizer {type(tokenizer)}")
# extract the noun phrases
inputs = []
for stc in sentences:
if is_empty(stc["sentence"]):
stc["noun_phrases"] = []
else:
inputs.append(stc)
total = math.ceil(len(sentences) / args.batch_size)
for sub in tqdm(batch(inputs, n=args.batch_size), total=total,):
preds = predictor.predict_batch_json(sub)
for pred, s in zip(preds, sub):
s["noun_phrases"] = retrieve_noun_phrases(
s["sentence"],
pred["hierplane_tree"]["root"],
tokenizer,
args.min_tokens,
args.max_tokens,
args.forbidden_words,
)
def select_best_noun_phrases(
samples: List[Dict[str, Any]], args: Arguments
):
"""
Given the candidate noun phrases of each sample, select the best one (or reject them all).
"""
# "turn" causes a lot of confusion for the parser
forbidden_words: Tuple = ("turn",)
for i, s in enumerate(tqdm(samples)):
samples[i]["noun_phrases"] = [
n for n in s["noun_phrases"] if not any(w in n for w in forbidden_words)
]
# we want to prioritize phrases that refer to known objects
objects_and_rooms: List[str] = load_txt(args.cache_dir / args.categories)
for i, sample in enumerate(samples):
if sample["noun_phrases"] == []:
samples[i]["sentence"] = ""
continue
flags = [any(w in n for w in objects_and_rooms) for n in sample["noun_phrases"]]
if sum(flags) > 0:
samples[i]["sentence"] = random.choice(
[n for n, f in zip(sample["noun_phrases"], flags) if f]
)
elif sum(flags) == 0:
samples[i]["sentence"] = random.choice(sample["noun_phrases"])
def clean_category(stc):
stc = re.sub(";|\?|[0-9]", "", stc)
stc = re.sub("\((.*)\)", "\1", stc)
stc = re.sub(" ", " ", stc)
return stc.strip()
def build_categories(args: Arguments):
if not (args.cache_dir / args.matterport).is_file():
download_file(
"https://github.com/niessner/Matterport/raw/master/metadata/category_mapping.tsv",
args.cache_dir / args.matterport,
)
if not (args.cache_dir / args.places365).is_file():
download_file(
"https://raw.githubusercontent.com/CSAILVision/places365/master/categories_places365.txt",
args.cache_dir / args.places365,
)
categories = []
with open(args.cache_dir / args.matterport, newline="") as fid:
reader = csv.reader(fid, delimiter="\t")
fieldnames = next(reader)
for row in reader:
item = dict(zip(fieldnames, row))
cat = item["raw_category"].replace("\\", "/").split("/")
cat = [clean_category(c) for c in cat]
cat = [c for c in cat if len(c) > 2]
categories += cat
with open(args.cache_dir / args.places365) as fid:
for line in fid.readlines():
name = line[3:].replace("_", " ")
name = re.sub(r"\d", "", name)
name = name.split("/")[0]
name = name.strip()
if len(name) > 2:
categories.append(name)
save_txt(set(categories), args.cache_dir / args.categories)
def run_extraction(args: Arguments, local_rank: int):
fieldnames = ["listing_id", "photo_id", "url", "sentence"]
if not (args.cache_dir / args.categories).is_file():
build_categories(args)
if not (args.parser).is_dir():
args.parser.mkdir(parents=True)
download_file(
"https://storage.googleapis.com/allennlp-public-models/elmo-constituency-parser-2020.02.10.tar.gz",
args.parser / "parser.tar.gz",
)
tf = tarfile.open(args.parser / "parser.tar.gz")
tf.extractall(args.parser)
# Load sentences
start = max(local_rank, 0) + args.start
data = load_tsv(args.source, fieldnames)
print(start, args.num_splits, len(data), len(data[start:: args.num_splits]))
data = data[start:: args.num_splits]
for sample in data:
sample["sentence"] = clean_sentence(sample["sentence"])
extracting_noun_phrases(data, args, start % args.num_workers)
select_best_noun_phrases(data, args)
print("Exporting noun phrases to ", args.output)
output = args.output.parent / f"{args.output.stem}.part-{start}{args.output.suffix}"
save_tsv(data, output, fieldnames)
if __name__ == "__main__":
args = Arguments()
# local_rank was never defined in the original script; assume it comes from a
# distributed launcher via the LOCAL_RANK environment variable (-1 = single process)
local_rank = int(os.environ.get("LOCAL_RANK", "-1"))
if local_rank <= 0:
print(args.to_string(width=80))
args.cache_dir.mkdir(exist_ok=True, parents=True)
run_extraction(args, local_rank)
|
the-stack_0_22984 | from pwnlib.constants.constant import Constant
__NR_exit = Constant('__NR_exit',1)
__NR_fork = Constant('__NR_fork',2)
__NR_read = Constant('__NR_read',3)
__NR_write = Constant('__NR_write',4)
__NR_open = Constant('__NR_open',5)
__NR_close = Constant('__NR_close',6)
__NR_waitpid = Constant('__NR_waitpid',7)
__NR_creat = Constant('__NR_creat',8)
__NR_link = Constant('__NR_link',9)
__NR_unlink = Constant('__NR_unlink',10)
__NR_execve = Constant('__NR_execve',11)
__NR_chdir = Constant('__NR_chdir',12)
__NR_time = Constant('__NR_time',13)
__NR_mknod = Constant('__NR_mknod',14)
__NR_chmod = Constant('__NR_chmod',15)
__NR_lchown = Constant('__NR_lchown',16)
__NR_break = Constant('__NR_break',17)
__NR_oldstat = Constant('__NR_oldstat',18)
__NR_lseek = Constant('__NR_lseek',19)
__NR_getpid = Constant('__NR_getpid',20)
__NR_mount = Constant('__NR_mount',21)
__NR_umount = Constant('__NR_umount',22)
__NR_setuid = Constant('__NR_setuid',23)
__NR_getuid = Constant('__NR_getuid',24)
__NR_stime = Constant('__NR_stime',25)
__NR_ptrace = Constant('__NR_ptrace',26)
__NR_alarm = Constant('__NR_alarm',27)
__NR_oldfstat = Constant('__NR_oldfstat',28)
__NR_pause = Constant('__NR_pause',29)
__NR_utime = Constant('__NR_utime',30)
__NR_stty = Constant('__NR_stty',31)
__NR_gtty = Constant('__NR_gtty',32)
__NR_access = Constant('__NR_access',33)
__NR_nice = Constant('__NR_nice',34)
__NR_ftime = Constant('__NR_ftime',35)
__NR_sync = Constant('__NR_sync',36)
__NR_kill = Constant('__NR_kill',37)
__NR_rename = Constant('__NR_rename',38)
__NR_mkdir = Constant('__NR_mkdir',39)
__NR_rmdir = Constant('__NR_rmdir',40)
__NR_dup = Constant('__NR_dup',41)
__NR_pipe = Constant('__NR_pipe',42)
__NR_times = Constant('__NR_times',43)
__NR_prof = Constant('__NR_prof',44)
__NR_brk = Constant('__NR_brk',45)
__NR_setgid = Constant('__NR_setgid',46)
__NR_getgid = Constant('__NR_getgid',47)
__NR_signal = Constant('__NR_signal',48)
__NR_geteuid = Constant('__NR_geteuid',49)
__NR_getegid = Constant('__NR_getegid',50)
__NR_acct = Constant('__NR_acct',51)
__NR_umount2 = Constant('__NR_umount2',52)
__NR_lock = Constant('__NR_lock',53)
__NR_ioctl = Constant('__NR_ioctl',54)
__NR_fcntl = Constant('__NR_fcntl',55)
__NR_mpx = Constant('__NR_mpx',56)
__NR_setpgid = Constant('__NR_setpgid',57)
__NR_ulimit = Constant('__NR_ulimit',58)
__NR_oldolduname = Constant('__NR_oldolduname',59)
__NR_umask = Constant('__NR_umask',60)
__NR_chroot = Constant('__NR_chroot',61)
__NR_ustat = Constant('__NR_ustat',62)
__NR_dup2 = Constant('__NR_dup2',63)
__NR_getppid = Constant('__NR_getppid',64)
__NR_getpgrp = Constant('__NR_getpgrp',65)
__NR_setsid = Constant('__NR_setsid',66)
__NR_sigaction = Constant('__NR_sigaction',67)
__NR_sgetmask = Constant('__NR_sgetmask',68)
__NR_ssetmask = Constant('__NR_ssetmask',69)
__NR_setreuid = Constant('__NR_setreuid',70)
__NR_setregid = Constant('__NR_setregid',71)
__NR_sigsuspend = Constant('__NR_sigsuspend',72)
__NR_sigpending = Constant('__NR_sigpending',73)
__NR_sethostname = Constant('__NR_sethostname',74)
__NR_setrlimit = Constant('__NR_setrlimit',75)
__NR_getrlimit = Constant('__NR_getrlimit',76)
__NR_getrusage = Constant('__NR_getrusage',77)
__NR_gettimeofday = Constant('__NR_gettimeofday',78)
__NR_settimeofday = Constant('__NR_settimeofday',79)
__NR_getgroups = Constant('__NR_getgroups',80)
__NR_setgroups = Constant('__NR_setgroups',81)
__NR_select = Constant('__NR_select',82)
__NR_symlink = Constant('__NR_symlink',83)
__NR_oldlstat = Constant('__NR_oldlstat',84)
__NR_readlink = Constant('__NR_readlink',85)
__NR_uselib = Constant('__NR_uselib',86)
__NR_swapon = Constant('__NR_swapon',87)
__NR_reboot = Constant('__NR_reboot',88)
__NR_readdir = Constant('__NR_readdir',89)
__NR_mmap = Constant('__NR_mmap',90)
__NR_munmap = Constant('__NR_munmap',91)
__NR_truncate = Constant('__NR_truncate',92)
__NR_ftruncate = Constant('__NR_ftruncate',93)
__NR_fchmod = Constant('__NR_fchmod',94)
__NR_fchown = Constant('__NR_fchown',95)
__NR_getpriority = Constant('__NR_getpriority',96)
__NR_setpriority = Constant('__NR_setpriority',97)
__NR_profil = Constant('__NR_profil',98)
__NR_statfs = Constant('__NR_statfs',99)
__NR_fstatfs = Constant('__NR_fstatfs',100)
__NR_ioperm = Constant('__NR_ioperm',101)
__NR_socketcall = Constant('__NR_socketcall',102)
__NR_syslog = Constant('__NR_syslog',103)
__NR_setitimer = Constant('__NR_setitimer',104)
__NR_getitimer = Constant('__NR_getitimer',105)
__NR_stat = Constant('__NR_stat',106)
__NR_lstat = Constant('__NR_lstat',107)
__NR_fstat = Constant('__NR_fstat',108)
__NR_olduname = Constant('__NR_olduname',109)
__NR_iopl = Constant('__NR_iopl',110)
__NR_vhangup = Constant('__NR_vhangup',111)
__NR_idle = Constant('__NR_idle',112)
__NR_vm86 = Constant('__NR_vm86',113)
__NR_wait4 = Constant('__NR_wait4',114)
__NR_swapoff = Constant('__NR_swapoff',115)
__NR_sysinfo = Constant('__NR_sysinfo',116)
__NR_ipc = Constant('__NR_ipc',117)
__NR_fsync = Constant('__NR_fsync',118)
__NR_sigreturn = Constant('__NR_sigreturn',119)
__NR_clone = Constant('__NR_clone',120)
__NR_setdomainname = Constant('__NR_setdomainname',121)
__NR_uname = Constant('__NR_uname',122)
__NR_modify_ldt = Constant('__NR_modify_ldt',123)
__NR_adjtimex = Constant('__NR_adjtimex',124)
__NR_mprotect = Constant('__NR_mprotect',125)
__NR_sigprocmask = Constant('__NR_sigprocmask',126)
__NR_create_module = Constant('__NR_create_module',127)
__NR_init_module = Constant('__NR_init_module',128)
__NR_delete_module = Constant('__NR_delete_module',129)
__NR_get_kernel_syms = Constant('__NR_get_kernel_syms',130)
__NR_quotactl = Constant('__NR_quotactl',131)
__NR_getpgid = Constant('__NR_getpgid',132)
__NR_fchdir = Constant('__NR_fchdir',133)
__NR_bdflush = Constant('__NR_bdflush',134)
__NR_sysfs = Constant('__NR_sysfs',135)
__NR_personality = Constant('__NR_personality',136)
__NR_afs_syscall = Constant('__NR_afs_syscall',137)
__NR_setfsuid = Constant('__NR_setfsuid',138)
__NR_setfsgid = Constant('__NR_setfsgid',139)
__NR__llseek = Constant('__NR__llseek',140)
__NR_getdents = Constant('__NR_getdents',141)
__NR__newselect = Constant('__NR__newselect',142)
__NR_flock = Constant('__NR_flock',143)
__NR_msync = Constant('__NR_msync',144)
__NR_readv = Constant('__NR_readv',145)
__NR_writev = Constant('__NR_writev',146)
__NR_getsid = Constant('__NR_getsid',147)
__NR_fdatasync = Constant('__NR_fdatasync',148)
__NR__sysctl = Constant('__NR__sysctl',149)
__NR_mlock = Constant('__NR_mlock',150)
__NR_munlock = Constant('__NR_munlock',151)
__NR_mlockall = Constant('__NR_mlockall',152)
__NR_munlockall = Constant('__NR_munlockall',153)
__NR_sched_setparam = Constant('__NR_sched_setparam',154)
__NR_sched_getparam = Constant('__NR_sched_getparam',155)
__NR_sched_setscheduler = Constant('__NR_sched_setscheduler',156)
__NR_sched_getscheduler = Constant('__NR_sched_getscheduler',157)
__NR_sched_yield = Constant('__NR_sched_yield',158)
__NR_sched_get_priority_max = Constant('__NR_sched_get_priority_max',159)
__NR_sched_get_priority_min = Constant('__NR_sched_get_priority_min',160)
__NR_sched_rr_get_interval = Constant('__NR_sched_rr_get_interval',161)
__NR_nanosleep = Constant('__NR_nanosleep',162)
__NR_mremap = Constant('__NR_mremap',163)
__NR_setresuid = Constant('__NR_setresuid',164)
__NR_getresuid = Constant('__NR_getresuid',165)
__NR_query_module = Constant('__NR_query_module',166)
__NR_poll = Constant('__NR_poll',167)
__NR_nfsservctl = Constant('__NR_nfsservctl',168)
__NR_setresgid = Constant('__NR_setresgid',169)
__NR_getresgid = Constant('__NR_getresgid',170)
__NR_prctl = Constant('__NR_prctl',171)
__NR_rt_sigreturn = Constant('__NR_rt_sigreturn',172)
__NR_rt_sigaction = Constant('__NR_rt_sigaction',173)
__NR_rt_sigprocmask = Constant('__NR_rt_sigprocmask',174)
__NR_rt_sigpending = Constant('__NR_rt_sigpending',175)
__NR_rt_sigtimedwait = Constant('__NR_rt_sigtimedwait',176)
__NR_rt_sigqueueinfo = Constant('__NR_rt_sigqueueinfo',177)
__NR_rt_sigsuspend = Constant('__NR_rt_sigsuspend',178)
__NR_pread = Constant('__NR_pread',179)
__NR_pwrite = Constant('__NR_pwrite',180)
__NR_chown = Constant('__NR_chown',181)
__NR_getcwd = Constant('__NR_getcwd',182)
__NR_capget = Constant('__NR_capget',183)
__NR_capset = Constant('__NR_capset',184)
__NR_sigaltstack = Constant('__NR_sigaltstack',185)
__NR_sendfile = Constant('__NR_sendfile',186)
__NR_getpmsg = Constant('__NR_getpmsg',187)
__NR_putpmsg = Constant('__NR_putpmsg',188)
__NR_vfork = Constant('__NR_vfork',189)
__NR_ugetrlimit = Constant('__NR_ugetrlimit',190)
__NR_readahead = Constant('__NR_readahead',191)
__NR_mmap2 = Constant('__NR_mmap2',192)
__NR_truncate64 = Constant('__NR_truncate64',193)
__NR_ftruncate64 = Constant('__NR_ftruncate64',194)
__NR_stat64 = Constant('__NR_stat64',195)
__NR_lstat64 = Constant('__NR_lstat64',196)
__NR_fstat64 = Constant('__NR_fstat64',197)
__NR_pciconfig_read = Constant('__NR_pciconfig_read',198)
__NR_pciconfig_write = Constant('__NR_pciconfig_write',199)
__NR_pciconfig_iobase = Constant('__NR_pciconfig_iobase',200)
__NR_multiplexer = Constant('__NR_multiplexer',201)
__NR_getdents64 = Constant('__NR_getdents64',202)
__NR_pivot_root = Constant('__NR_pivot_root',203)
__NR_fcntl64 = Constant('__NR_fcntl64',204)
__NR_madvise = Constant('__NR_madvise',205)
__NR_mincore = Constant('__NR_mincore',206)
__NR_gettid = Constant('__NR_gettid',207)
__NR_tkill = Constant('__NR_tkill',208)
__NR_setxattr = Constant('__NR_setxattr',209)
__NR_lsetxattr = Constant('__NR_lsetxattr',210)
__NR_fsetxattr = Constant('__NR_fsetxattr',211)
__NR_getxattr = Constant('__NR_getxattr',212)
__NR_lgetxattr = Constant('__NR_lgetxattr',213)
__NR_fgetxattr = Constant('__NR_fgetxattr',214)
__NR_listxattr = Constant('__NR_listxattr',215)
__NR_llistxattr = Constant('__NR_llistxattr',216)
__NR_flistxattr = Constant('__NR_flistxattr',217)
__NR_removexattr = Constant('__NR_removexattr',218)
__NR_lremovexattr = Constant('__NR_lremovexattr',219)
__NR_fremovexattr = Constant('__NR_fremovexattr',220)
__NR_futex = Constant('__NR_futex',221)
__NR_sched_setaffinity = Constant('__NR_sched_setaffinity',222)
__NR_sched_getaffinity = Constant('__NR_sched_getaffinity',223)
__NR_tuxcall = Constant('__NR_tuxcall',225)
__NR_sendfile64 = Constant('__NR_sendfile64',226)
__NR_io_setup = Constant('__NR_io_setup',227)
__NR_io_destroy = Constant('__NR_io_destroy',228)
__NR_io_getevents = Constant('__NR_io_getevents',229)
__NR_io_submit = Constant('__NR_io_submit',230)
__NR_io_cancel = Constant('__NR_io_cancel',231)
__NR_set_tid_address = Constant('__NR_set_tid_address',232)
__NR_fadvise64 = Constant('__NR_fadvise64',233)
__NR_exit_group = Constant('__NR_exit_group',234)
__NR_lookup_dcookie = Constant('__NR_lookup_dcookie',235)
__NR_epoll_create = Constant('__NR_epoll_create',236)
__NR_epoll_ctl = Constant('__NR_epoll_ctl',237)
__NR_epoll_wait = Constant('__NR_epoll_wait',238)
__NR_remap_file_pages = Constant('__NR_remap_file_pages',239)
__NR_timer_create = Constant('__NR_timer_create',240)
__NR_timer_settime = Constant('__NR_timer_settime',241)
__NR_timer_gettime = Constant('__NR_timer_gettime',242)
__NR_timer_getoverrun = Constant('__NR_timer_getoverrun',243)
__NR_timer_delete = Constant('__NR_timer_delete',244)
__NR_clock_settime = Constant('__NR_clock_settime',245)
__NR_clock_gettime = Constant('__NR_clock_gettime',246)
__NR_clock_getres = Constant('__NR_clock_getres',247)
__NR_clock_nanosleep = Constant('__NR_clock_nanosleep',248)
__NR_swapcontext = Constant('__NR_swapcontext',249)
__NR_tgkill = Constant('__NR_tgkill',250)
__NR_utimes = Constant('__NR_utimes',251)
__NR_statfs64 = Constant('__NR_statfs64',252)
__NR_fstatfs64 = Constant('__NR_fstatfs64',253)
__NR_fadvise64_64 = Constant('__NR_fadvise64_64',254)
__NR_rtas = Constant('__NR_rtas',255)
__NR_sys_debug_setcontext = Constant('__NR_sys_debug_setcontext',256)
__NR_mq_open = Constant('__NR_mq_open',262)
__NR_mq_unlink = Constant('__NR_mq_unlink',263)
__NR_mq_timedsend = Constant('__NR_mq_timedsend',264)
__NR_mq_timedreceive = Constant('__NR_mq_timedreceive',265)
__NR_mq_notify = Constant('__NR_mq_notify',266)
__NR_mq_getsetattr = Constant('__NR_mq_getsetattr',267)
__NR_kexec_load = Constant('__NR_kexec_load',268)
__NR_add_key = Constant('__NR_add_key',269)
__NR_request_key = Constant('__NR_request_key',270)
__NR_keyctl = Constant('__NR_keyctl',271)
__NR_waitid = Constant('__NR_waitid',272)
__NR_ioprio_set = Constant('__NR_ioprio_set',273)
__NR_ioprio_get = Constant('__NR_ioprio_get',274)
__NR_inotify_init = Constant('__NR_inotify_init',275)
__NR_inotify_add_watch = Constant('__NR_inotify_add_watch',276)
__NR_inotify_rm_watch = Constant('__NR_inotify_rm_watch',277)
__NR_spu_run = Constant('__NR_spu_run',278)
__NR_spu_create = Constant('__NR_spu_create',279)
__NR_pselect6 = Constant('__NR_pselect6',280)
__NR_ppoll = Constant('__NR_ppoll',281)
__NR_unshare = Constant('__NR_unshare',282)
__NR_splice = Constant('__NR_splice',283)
__NR_tee = Constant('__NR_tee',284)
__NR_vmsplice = Constant('__NR_vmsplice',285)
__NR_openat = Constant('__NR_openat',286)
__NR_mkdirat = Constant('__NR_mkdirat',287)
__NR_mknodat = Constant('__NR_mknodat',288)
__NR_fchownat = Constant('__NR_fchownat',289)
__NR_futimesat = Constant('__NR_futimesat',290)
__NR_fstatat64 = Constant('__NR_fstatat64',291)
__NR_unlinkat = Constant('__NR_unlinkat',292)
__NR_renameat = Constant('__NR_renameat',293)
__NR_linkat = Constant('__NR_linkat',294)
__NR_symlinkat = Constant('__NR_symlinkat',295)
__NR_readlinkat = Constant('__NR_readlinkat',296)
__NR_fchmodat = Constant('__NR_fchmodat',297)
__NR_faccessat = Constant('__NR_faccessat',298)
__NR_get_robust_list = Constant('__NR_get_robust_list',299)
__NR_set_robust_list = Constant('__NR_set_robust_list',300)
__NR_move_pages = Constant('__NR_move_pages',301)
__NR_getcpu = Constant('__NR_getcpu',302)
__NR_epoll_pwait = Constant('__NR_epoll_pwait',303)
__NR_utimensat = Constant('__NR_utimensat',304)
__NR_signalfd = Constant('__NR_signalfd',305)
__NR_timerfd = Constant('__NR_timerfd',306)
__NR_eventfd = Constant('__NR_eventfd',307)
__NR_sync_file_range2 = Constant('__NR_sync_file_range2',308)
__NR_fallocate = Constant('__NR_fallocate',309)
__NR_subpage_prot = Constant('__NR_subpage_prot',310)
__NR_timerfd_settime = Constant('__NR_timerfd_settime',311)
__NR_timerfd_gettime = Constant('__NR_timerfd_gettime',312)
__NR_signalfd4 = Constant('__NR_signalfd4',313)
__NR_eventfd2 = Constant('__NR_eventfd2',314)
__NR_epoll_create1 = Constant('__NR_epoll_create1',315)
__NR_dup3 = Constant('__NR_dup3',316)
__NR_pipe2 = Constant('__NR_pipe2',317)
__NR_inotify_init1 = Constant('__NR_inotify_init1',318)
__NR_perf_event_open = Constant('__NR_perf_event_open',319)
__NR_preadv = Constant('__NR_preadv',320)
__NR_pwritev = Constant('__NR_pwritev',321)
__NR_rt_tgsigqueueinfo = Constant('__NR_rt_tgsigqueueinfo',322)
__NR_fanotify_init = Constant('__NR_fanotify_init',323)
__NR_fanotify_mark = Constant('__NR_fanotify_mark',324)
__NR_prlimit64 = Constant('__NR_prlimit64',325)
__NR_socket = Constant('__NR_socket',326)
__NR_bind = Constant('__NR_bind',327)
__NR_connect = Constant('__NR_connect',328)
__NR_listen = Constant('__NR_listen',329)
__NR_accept = Constant('__NR_accept',330)
__NR_getsockname = Constant('__NR_getsockname',331)
__NR_getpeername = Constant('__NR_getpeername',332)
__NR_socketpair = Constant('__NR_socketpair',333)
__NR_send = Constant('__NR_send',334)
__NR_sendto = Constant('__NR_sendto',335)
__NR_recv = Constant('__NR_recv',336)
__NR_recvfrom = Constant('__NR_recvfrom',337)
__NR_shutdown = Constant('__NR_shutdown',338)
__NR_setsockopt = Constant('__NR_setsockopt',339)
__NR_getsockopt = Constant('__NR_getsockopt',340)
__NR_sendmsg = Constant('__NR_sendmsg',341)
__NR_recvmsg = Constant('__NR_recvmsg',342)
__NR_recvmmsg = Constant('__NR_recvmmsg',343)
__NR_accept4 = Constant('__NR_accept4',344)
__NR_name_to_handle_at = Constant('__NR_name_to_handle_at',345)
__NR_open_by_handle_at = Constant('__NR_open_by_handle_at',346)
__NR_clock_adjtime = Constant('__NR_clock_adjtime',347)
__NR_syncfs = Constant('__NR_syncfs',348)
__NR_sendmmsg = Constant('__NR_sendmmsg',349)
__NR_setns = Constant('__NR_setns',350)
__NR_process_vm_readv = Constant('__NR_process_vm_readv',351)
__NR_process_vm_writev = Constant('__NR_process_vm_writev',352)
__NR_finit_module = Constant('__NR_finit_module',353)
__NR_kcmp = Constant('__NR_kcmp',354)
__NR_sched_setattr = Constant('__NR_sched_setattr',355)
__NR_sched_getattr = Constant('__NR_sched_getattr',356)
__NR_renameat2 = Constant('__NR_renameat2',357)
__NR_seccomp = Constant('__NR_seccomp',358)
__NR_getrandom = Constant('__NR_getrandom',359)
__NR_memfd_create = Constant('__NR_memfd_create',360)
__NR_bpf = Constant('__NR_bpf',361)
__NR_execveat = Constant('__NR_execveat',362)
__NR_switch_endian = Constant('__NR_switch_endian',363)
__NR_userfaultfd = Constant('__NR_userfaultfd',364)
__NR_membarrier = Constant('__NR_membarrier',365)
__NR_mlock2 = Constant('__NR_mlock2',378)
__NR_copy_file_range = Constant('__NR_copy_file_range',379)
__NR_preadv2 = Constant('__NR_preadv2',380)
__NR_pwritev2 = Constant('__NR_pwritev2',381)
__NR_kexec_file_load = Constant('__NR_kexec_file_load',382)
__NR_statx = Constant('__NR_statx',383)
__NR_pkey_alloc = Constant('__NR_pkey_alloc',384)
__NR_pkey_free = Constant('__NR_pkey_free',385)
__NR_pkey_mprotect = Constant('__NR_pkey_mprotect',386)
__NR_rseq = Constant('__NR_rseq',387)
__NR_io_pgetevents = Constant('__NR_io_pgetevents',388)
__NR_semget = Constant('__NR_semget',393)
__NR_semctl = Constant('__NR_semctl',394)
__NR_shmget = Constant('__NR_shmget',395)
__NR_shmctl = Constant('__NR_shmctl',396)
__NR_shmat = Constant('__NR_shmat',397)
__NR_shmdt = Constant('__NR_shmdt',398)
__NR_msgget = Constant('__NR_msgget',399)
__NR_msgsnd = Constant('__NR_msgsnd',400)
__NR_msgrcv = Constant('__NR_msgrcv',401)
__NR_msgctl = Constant('__NR_msgctl',402)
__NR_clock_gettime64 = Constant('__NR_clock_gettime64',403)
__NR_clock_settime64 = Constant('__NR_clock_settime64',404)
__NR_clock_adjtime64 = Constant('__NR_clock_adjtime64',405)
__NR_clock_getres_time64 = Constant('__NR_clock_getres_time64',406)
__NR_clock_nanosleep_time64 = Constant('__NR_clock_nanosleep_time64',407)
__NR_timer_gettime64 = Constant('__NR_timer_gettime64',408)
__NR_timer_settime64 = Constant('__NR_timer_settime64',409)
__NR_timerfd_gettime64 = Constant('__NR_timerfd_gettime64',410)
__NR_timerfd_settime64 = Constant('__NR_timerfd_settime64',411)
__NR_utimensat_time64 = Constant('__NR_utimensat_time64',412)
__NR_pselect6_time64 = Constant('__NR_pselect6_time64',413)
__NR_ppoll_time64 = Constant('__NR_ppoll_time64',414)
__NR_io_pgetevents_time64 = Constant('__NR_io_pgetevents_time64',416)
__NR_recvmmsg_time64 = Constant('__NR_recvmmsg_time64',417)
__NR_mq_timedsend_time64 = Constant('__NR_mq_timedsend_time64',418)
__NR_mq_timedreceive_time64 = Constant('__NR_mq_timedreceive_time64',419)
__NR_semtimedop_time64 = Constant('__NR_semtimedop_time64',420)
__NR_rt_sigtimedwait_time64 = Constant('__NR_rt_sigtimedwait_time64',421)
__NR_futex_time64 = Constant('__NR_futex_time64',422)
__NR_sched_rr_get_interval_time64 = Constant('__NR_sched_rr_get_interval_time64',423)
__NR_pidfd_send_signal = Constant('__NR_pidfd_send_signal',424)
__NR_io_uring_setup = Constant('__NR_io_uring_setup',425)
__NR_io_uring_enter = Constant('__NR_io_uring_enter',426)
__NR_io_uring_register = Constant('__NR_io_uring_register',427)
__NR_open_tree = Constant('__NR_open_tree',428)
__NR_move_mount = Constant('__NR_move_mount',429)
__NR_fsopen = Constant('__NR_fsopen',430)
__NR_fsconfig = Constant('__NR_fsconfig',431)
__NR_fsmount = Constant('__NR_fsmount',432)
__NR_fspick = Constant('__NR_fspick',433)
__NR_pidfd_open = Constant('__NR_pidfd_open',434)
__NR_clone3 = Constant('__NR_clone3',435)
__NR_openat2 = Constant('__NR_openat2',437)
__NR_pidfd_getfd = Constant('__NR_pidfd_getfd',438)
MAP_32BIT = Constant('MAP_32BIT',0x40)
INADDR_ANY = Constant('INADDR_ANY',0)
INADDR_BROADCAST = Constant('INADDR_BROADCAST',0xffffffff)
INADDR_NONE = Constant('INADDR_NONE',0xffffffff)
INADDR_LOOPBACK = Constant('INADDR_LOOPBACK',0x7f000001)
EPERM = Constant('EPERM',1)
ENOENT = Constant('ENOENT',2)
ESRCH = Constant('ESRCH',3)
EINTR = Constant('EINTR',4)
EIO = Constant('EIO',5)
ENXIO = Constant('ENXIO',6)
E2BIG = Constant('E2BIG',7)
ENOEXEC = Constant('ENOEXEC',8)
EBADF = Constant('EBADF',9)
ECHILD = Constant('ECHILD',10)
EAGAIN = Constant('EAGAIN',11)
ENOMEM = Constant('ENOMEM',12)
EACCES = Constant('EACCES',13)
EFAULT = Constant('EFAULT',14)
ENOTBLK = Constant('ENOTBLK',15)
EBUSY = Constant('EBUSY',16)
EEXIST = Constant('EEXIST',17)
EXDEV = Constant('EXDEV',18)
ENODEV = Constant('ENODEV',19)
ENOTDIR = Constant('ENOTDIR',20)
EISDIR = Constant('EISDIR',21)
EINVAL = Constant('EINVAL',22)
ENFILE = Constant('ENFILE',23)
EMFILE = Constant('EMFILE',24)
ENOTTY = Constant('ENOTTY',25)
ETXTBSY = Constant('ETXTBSY',26)
EFBIG = Constant('EFBIG',27)
ENOSPC = Constant('ENOSPC',28)
ESPIPE = Constant('ESPIPE',29)
EROFS = Constant('EROFS',30)
EMLINK = Constant('EMLINK',31)
EPIPE = Constant('EPIPE',32)
EDOM = Constant('EDOM',33)
ERANGE = Constant('ERANGE',34)
EDEADLK = Constant('EDEADLK',35)
ENAMETOOLONG = Constant('ENAMETOOLONG',36)
ENOLCK = Constant('ENOLCK',37)
ENOSYS = Constant('ENOSYS',38)
ENOTEMPTY = Constant('ENOTEMPTY',39)
ELOOP = Constant('ELOOP',40)
EWOULDBLOCK = Constant('EWOULDBLOCK',11)
ENOMSG = Constant('ENOMSG',42)
EIDRM = Constant('EIDRM',43)
ECHRNG = Constant('ECHRNG',44)
EL2NSYNC = Constant('EL2NSYNC',45)
EL3HLT = Constant('EL3HLT',46)
EL3RST = Constant('EL3RST',47)
ELNRNG = Constant('ELNRNG',48)
EUNATCH = Constant('EUNATCH',49)
ENOCSI = Constant('ENOCSI',50)
EL2HLT = Constant('EL2HLT',51)
EBADE = Constant('EBADE',52)
EBADR = Constant('EBADR',53)
EXFULL = Constant('EXFULL',54)
ENOANO = Constant('ENOANO',55)
EBADRQC = Constant('EBADRQC',56)
EBADSLT = Constant('EBADSLT',57)
EDEADLOCK = Constant('EDEADLOCK',35)
EBFONT = Constant('EBFONT',59)
ENOSTR = Constant('ENOSTR',60)
ENODATA = Constant('ENODATA',61)
ETIME = Constant('ETIME',62)
ENOSR = Constant('ENOSR',63)
ENONET = Constant('ENONET',64)
ENOPKG = Constant('ENOPKG',65)
EREMOTE = Constant('EREMOTE',66)
ENOLINK = Constant('ENOLINK',67)
EADV = Constant('EADV',68)
ESRMNT = Constant('ESRMNT',69)
ECOMM = Constant('ECOMM',70)
EPROTO = Constant('EPROTO',71)
EMULTIHOP = Constant('EMULTIHOP',72)
EDOTDOT = Constant('EDOTDOT',73)
EBADMSG = Constant('EBADMSG',74)
EOVERFLOW = Constant('EOVERFLOW',75)
ENOTUNIQ = Constant('ENOTUNIQ',76)
EBADFD = Constant('EBADFD',77)
EREMCHG = Constant('EREMCHG',78)
ELIBACC = Constant('ELIBACC',79)
ELIBBAD = Constant('ELIBBAD',80)
ELIBSCN = Constant('ELIBSCN',81)
ELIBMAX = Constant('ELIBMAX',82)
ELIBEXEC = Constant('ELIBEXEC',83)
EILSEQ = Constant('EILSEQ',84)
ERESTART = Constant('ERESTART',85)
ESTRPIPE = Constant('ESTRPIPE',86)
EUSERS = Constant('EUSERS',87)
ENOTSOCK = Constant('ENOTSOCK',88)
EDESTADDRREQ = Constant('EDESTADDRREQ',89)
EMSGSIZE = Constant('EMSGSIZE',90)
EPROTOTYPE = Constant('EPROTOTYPE',91)
ENOPROTOOPT = Constant('ENOPROTOOPT',92)
EPROTONOSUPPORT = Constant('EPROTONOSUPPORT',93)
ESOCKTNOSUPPORT = Constant('ESOCKTNOSUPPORT',94)
EOPNOTSUPP = Constant('EOPNOTSUPP',95)
ENOTSUP = Constant('ENOTSUP',95)
EPFNOSUPPORT = Constant('EPFNOSUPPORT',96)
EAFNOSUPPORT = Constant('EAFNOSUPPORT',97)
EADDRINUSE = Constant('EADDRINUSE',98)
EADDRNOTAVAIL = Constant('EADDRNOTAVAIL',99)
ENETDOWN = Constant('ENETDOWN',100)
ENETUNREACH = Constant('ENETUNREACH',101)
ENETRESET = Constant('ENETRESET',102)
ECONNABORTED = Constant('ECONNABORTED',103)
ECONNRESET = Constant('ECONNRESET',104)
ENOBUFS = Constant('ENOBUFS',105)
EISCONN = Constant('EISCONN',106)
ENOTCONN = Constant('ENOTCONN',107)
ESHUTDOWN = Constant('ESHUTDOWN',108)
ETOOMANYREFS = Constant('ETOOMANYREFS',109)
ETIMEDOUT = Constant('ETIMEDOUT',110)
ECONNREFUSED = Constant('ECONNREFUSED',111)
EHOSTDOWN = Constant('EHOSTDOWN',112)
EHOSTUNREACH = Constant('EHOSTUNREACH',113)
EALREADY = Constant('EALREADY',114)
EINPROGRESS = Constant('EINPROGRESS',115)
ESTALE = Constant('ESTALE',116)
EUCLEAN = Constant('EUCLEAN',117)
ENOTNAM = Constant('ENOTNAM',118)
ENAVAIL = Constant('ENAVAIL',119)
EISNAM = Constant('EISNAM',120)
EREMOTEIO = Constant('EREMOTEIO',121)
EDQUOT = Constant('EDQUOT',122)
ENOMEDIUM = Constant('ENOMEDIUM',123)
EMEDIUMTYPE = Constant('EMEDIUMTYPE',124)
ECANCELED = Constant('ECANCELED',125)
ENOKEY = Constant('ENOKEY',126)
EKEYEXPIRED = Constant('EKEYEXPIRED',127)
EKEYREVOKED = Constant('EKEYREVOKED',128)
EKEYREJECTED = Constant('EKEYREJECTED',129)
EOWNERDEAD = Constant('EOWNERDEAD',130)
ENOTRECOVERABLE = Constant('ENOTRECOVERABLE',131)
ERFKILL = Constant('ERFKILL',132)
EHWPOISON = Constant('EHWPOISON',133)
__SYS_NERR = Constant('__SYS_NERR',((133) + 1))
__LITTLE_ENDIAN = Constant('__LITTLE_ENDIAN',1234)
__BIG_ENDIAN = Constant('__BIG_ENDIAN',4321)
__BYTE_ORDER = Constant('__BYTE_ORDER',4321)
__FLOAT_WORD_ORDER = Constant('__FLOAT_WORD_ORDER',4321)
LITTLE_ENDIAN = Constant('LITTLE_ENDIAN',1234)
BIG_ENDIAN = Constant('BIG_ENDIAN',4321)
BYTE_ORDER = Constant('BYTE_ORDER',4321)
__WORDSIZE = Constant('__WORDSIZE',32)
INT8_MAX = Constant('INT8_MAX',(127))
INT16_MAX = Constant('INT16_MAX',(32767))
INT32_MAX = Constant('INT32_MAX',(2147483647))
INT64_MAX = Constant('INT64_MAX',(9223372036854775807))
INT8_MIN = Constant('INT8_MIN',(-1 - (127)))
INT16_MIN = Constant('INT16_MIN',(-1 - (32767)))
INT32_MIN = Constant('INT32_MIN',(-1 - (2147483647)))
INT64_MIN = Constant('INT64_MIN',(-1 - (9223372036854775807)))
INT_LEAST8_MAX = Constant('INT_LEAST8_MAX',(127))
INT_LEAST8_MIN = Constant('INT_LEAST8_MIN',(-1 - (127)))
INT_LEAST16_MAX = Constant('INT_LEAST16_MAX',(32767))
INT_LEAST16_MIN = Constant('INT_LEAST16_MIN',(-1 - (32767)))
INT_LEAST32_MAX = Constant('INT_LEAST32_MAX',(2147483647))
INT_LEAST32_MIN = Constant('INT_LEAST32_MIN',(-1 - (2147483647)))
INT_LEAST64_MAX = Constant('INT_LEAST64_MAX',(9223372036854775807))
INT_LEAST64_MIN = Constant('INT_LEAST64_MIN',(-1 - (9223372036854775807)))
UINT8_MAX = Constant('UINT8_MAX',0xff)
UINT16_MAX = Constant('UINT16_MAX',0xffff)
UINT32_MAX = Constant('UINT32_MAX',0xffffffff)
UINT64_MAX = Constant('UINT64_MAX',0xffffffffffffffff)
UINT_LEAST8_MAX = Constant('UINT_LEAST8_MAX',0xff)
UINT_LEAST16_MAX = Constant('UINT_LEAST16_MAX',0xffff)
UINT_LEAST32_MAX = Constant('UINT_LEAST32_MAX',0xffffffff)
UINT_LEAST64_MAX = Constant('UINT_LEAST64_MAX',0xffffffffffffffff)
INTPTR_MIN = Constant('INTPTR_MIN',(-1 - (2147483647)))
INTPTR_MAX = Constant('INTPTR_MAX',(2147483647))
UINTPTR_MAX = Constant('UINTPTR_MAX',0xffffffff)
SIZE_MAX = Constant('SIZE_MAX',0xffffffff)
PTRDIFF_MIN = Constant('PTRDIFF_MIN',(-1 - (2147483647)))
PTRDIFF_MAX = Constant('PTRDIFF_MAX',(2147483647))
INTMAX_MIN = Constant('INTMAX_MIN',(-1 - (9223372036854775807)))
INTMAX_MAX = Constant('INTMAX_MAX',(9223372036854775807))
UINTMAX_MAX = Constant('UINTMAX_MAX',0xffffffffffffffff)
INT_FAST8_MIN = Constant('INT_FAST8_MIN',(-1 - (127)))
INT_FAST8_MAX = Constant('INT_FAST8_MAX',(127))
INT_FAST64_MIN = Constant('INT_FAST64_MIN',(-1 - (9223372036854775807)))
INT_FAST64_MAX = Constant('INT_FAST64_MAX',(9223372036854775807))
UINT_FAST8_MAX = Constant('UINT_FAST8_MAX',0xff)
UINT_FAST64_MAX = Constant('UINT_FAST64_MAX',0xffffffffffffffff)
INT_FAST16_MIN = Constant('INT_FAST16_MIN',(-1 - (2147483647)))
INT_FAST16_MAX = Constant('INT_FAST16_MAX',(2147483647))
UINT_FAST16_MAX = Constant('UINT_FAST16_MAX',0xffffffff)
INT_FAST32_MIN = Constant('INT_FAST32_MIN',(-1 - (2147483647)))
INT_FAST32_MAX = Constant('INT_FAST32_MAX',(2147483647))
UINT_FAST32_MAX = Constant('UINT_FAST32_MAX',0xffffffff)
WINT_MIN = Constant('WINT_MIN',0)
__FSUID_H = Constant('__FSUID_H',1)
NSIG = Constant('NSIG',32)
_NSIG = Constant('_NSIG',65)
SIGHUP = Constant('SIGHUP',1)
SIGINT = Constant('SIGINT',2)
SIGQUIT = Constant('SIGQUIT',3)
SIGILL = Constant('SIGILL',4)
SIGTRAP = Constant('SIGTRAP',5)
SIGABRT = Constant('SIGABRT',6)
SIGIOT = Constant('SIGIOT',6)
SIGFPE = Constant('SIGFPE',8)
SIGKILL = Constant('SIGKILL',9)
SIGSEGV = Constant('SIGSEGV',11)
SIGPIPE = Constant('SIGPIPE',13)
SIGALRM = Constant('SIGALRM',14)
SIGTERM = Constant('SIGTERM',15)
SIGUNUSED = Constant('SIGUNUSED',31)
SIGRTMIN = Constant('SIGRTMIN',32)
SIGRTMAX = Constant('SIGRTMAX',(65-1))
SA_NOCLDSTOP = Constant('SA_NOCLDSTOP',0x00000001)
SA_NOCLDWAIT = Constant('SA_NOCLDWAIT',0x00000002)
SA_SIGINFO = Constant('SA_SIGINFO',0x00000004)
SA_RESTORER = Constant('SA_RESTORER',0x04000000)
SA_ONSTACK = Constant('SA_ONSTACK',0x08000000)
SA_RESTART = Constant('SA_RESTART',0x10000000)
SA_INTERRUPT = Constant('SA_INTERRUPT',0x20000000)
SA_NODEFER = Constant('SA_NODEFER',0x40000000)
SA_RESETHAND = Constant('SA_RESETHAND',0x80000000)
SA_NOMASK = Constant('SA_NOMASK',0x40000000)
SA_ONESHOT = Constant('SA_ONESHOT',0x80000000)
SS_ONSTACK = Constant('SS_ONSTACK',1)
SS_DISABLE = Constant('SS_DISABLE',2)
MINSIGSTKSZ = Constant('MINSIGSTKSZ',2048)
SIGSTKSZ = Constant('SIGSTKSZ',8192)
SIG_BLOCK = Constant('SIG_BLOCK',0)
SIG_UNBLOCK = Constant('SIG_UNBLOCK',1)
SIG_SETMASK = Constant('SIG_SETMASK',2)
SI_MAX_SIZE = Constant('SI_MAX_SIZE',128)
SIGEV_SIGNAL = Constant('SIGEV_SIGNAL',0)
SIGEV_NONE = Constant('SIGEV_NONE',1)
SIGEV_THREAD = Constant('SIGEV_THREAD',2)
SIGEV_THREAD_ID = Constant('SIGEV_THREAD_ID',4)
SIGEV_MAX_SIZE = Constant('SIGEV_MAX_SIZE',64)
_SYS_TIME_H = Constant('_SYS_TIME_H',1)
ITIMER_REAL = Constant('ITIMER_REAL',0)
ITIMER_VIRTUAL = Constant('ITIMER_VIRTUAL',1)
ITIMER_PROF = Constant('ITIMER_PROF',2)
FD_SETSIZE = Constant('FD_SETSIZE',1024)
R_OK = Constant('R_OK',4)
W_OK = Constant('W_OK',2)
X_OK = Constant('X_OK',1)
F_OK = Constant('F_OK',0)
SEEK_SET = Constant('SEEK_SET',0)
SEEK_CUR = Constant('SEEK_CUR',1)
SEEK_END = Constant('SEEK_END',2)
STDIN_FILENO = Constant('STDIN_FILENO',0)
STDOUT_FILENO = Constant('STDOUT_FILENO',1)
STDERR_FILENO = Constant('STDERR_FILENO',2)
_CS_PATH = Constant('_CS_PATH',1)
_SC_CLK_TCK = Constant('_SC_CLK_TCK',1)
_SC_ARG_MAX = Constant('_SC_ARG_MAX',2)
_SC_NGROUPS_MAX = Constant('_SC_NGROUPS_MAX',3)
_SC_OPEN_MAX = Constant('_SC_OPEN_MAX',4)
_SC_PAGESIZE = Constant('_SC_PAGESIZE',5)
_SC_NPROCESSORS_ONLN = Constant('_SC_NPROCESSORS_ONLN',6)
_SC_NPROCESSORS_CONF = Constant('_SC_NPROCESSORS_CONF',6)
_SC_PHYS_PAGES = Constant('_SC_PHYS_PAGES',7)
_SC_GETPW_R_SIZE_MAX = Constant('_SC_GETPW_R_SIZE_MAX',8)
_SC_GETGR_R_SIZE_MAX = Constant('_SC_GETGR_R_SIZE_MAX',9)
_PC_PATH_MAX = Constant('_PC_PATH_MAX',1)
_PC_VDISABLE = Constant('_PC_VDISABLE',2)
L_cuserid = Constant('L_cuserid',17)
_POSIX_VERSION = Constant('_POSIX_VERSION',199506)
F_ULOCK = Constant('F_ULOCK',0)
F_LOCK = Constant('F_LOCK',1)
F_TLOCK = Constant('F_TLOCK',2)
F_TEST = Constant('F_TEST',3)
_POSIX_MAPPED_FILES = Constant('_POSIX_MAPPED_FILES',200809)
S_IFMT = Constant('S_IFMT',0o0170000)
S_IFSOCK = Constant('S_IFSOCK',0o140000)
S_IFLNK = Constant('S_IFLNK',0o120000)
S_IFREG = Constant('S_IFREG',0o100000)
S_IFBLK = Constant('S_IFBLK',0o060000)
S_IFDIR = Constant('S_IFDIR',0o040000)
S_IFCHR = Constant('S_IFCHR',0o020000)
S_IFIFO = Constant('S_IFIFO',0o010000)
S_ISUID = Constant('S_ISUID',0o004000)
S_ISGID = Constant('S_ISGID',0o002000)
S_ISVTX = Constant('S_ISVTX',0o001000)
S_IRWXU = Constant('S_IRWXU',0o0700)
S_IRUSR = Constant('S_IRUSR',0o0400)
S_IWUSR = Constant('S_IWUSR',0o0200)
S_IXUSR = Constant('S_IXUSR',0o0100)
S_IRWXG = Constant('S_IRWXG',0o0070)
S_IRGRP = Constant('S_IRGRP',0o0040)
S_IWGRP = Constant('S_IWGRP',0o0020)
S_IXGRP = Constant('S_IXGRP',0o0010)
S_IRWXO = Constant('S_IRWXO',0o0007)
S_IROTH = Constant('S_IROTH',0o0004)
S_IWOTH = Constant('S_IWOTH',0o0002)
S_IXOTH = Constant('S_IXOTH',0o0001)
S_IREAD = Constant('S_IREAD',0o0400)
S_IWRITE = Constant('S_IWRITE',0o0200)
S_IEXEC = Constant('S_IEXEC',0o0100)
_SYS_UIO = Constant('_SYS_UIO',1)
SOL_SOCKET = Constant('SOL_SOCKET',1)
SO_DEBUG = Constant('SO_DEBUG',1)
SO_REUSEADDR = Constant('SO_REUSEADDR',2)
SO_TYPE = Constant('SO_TYPE',3)
SO_ERROR = Constant('SO_ERROR',4)
SO_DONTROUTE = Constant('SO_DONTROUTE',5)
SO_BROADCAST = Constant('SO_BROADCAST',6)
SO_SNDBUF = Constant('SO_SNDBUF',7)
SO_RCVBUF = Constant('SO_RCVBUF',8)
SO_KEEPALIVE = Constant('SO_KEEPALIVE',9)
SO_OOBINLINE = Constant('SO_OOBINLINE',10)
SO_NO_CHECK = Constant('SO_NO_CHECK',11)
SO_PRIORITY = Constant('SO_PRIORITY',12)
SO_LINGER = Constant('SO_LINGER',13)
SO_BSDCOMPAT = Constant('SO_BSDCOMPAT',14)
SO_REUSEPORT = Constant('SO_REUSEPORT',15)
SO_PASSCRED = Constant('SO_PASSCRED',16)
SO_PEERCRED = Constant('SO_PEERCRED',17)
SO_RCVLOWAT = Constant('SO_RCVLOWAT',18)
SO_SNDLOWAT = Constant('SO_SNDLOWAT',19)
SO_RCVTIMEO = Constant('SO_RCVTIMEO',20)
SO_SNDTIMEO = Constant('SO_SNDTIMEO',21)
SO_SECURITY_AUTHENTICATION = Constant('SO_SECURITY_AUTHENTICATION',22)
SO_SECURITY_ENCRYPTION_TRANSPORT = Constant('SO_SECURITY_ENCRYPTION_TRANSPORT',23)
SO_SECURITY_ENCRYPTION_NETWORK = Constant('SO_SECURITY_ENCRYPTION_NETWORK',24)
SO_BINDTODEVICE = Constant('SO_BINDTODEVICE',25)
SO_ATTACH_FILTER = Constant('SO_ATTACH_FILTER',26)
SO_DETACH_FILTER = Constant('SO_DETACH_FILTER',27)
SO_GET_FILTER = Constant('SO_GET_FILTER',26)
SO_PEERNAME = Constant('SO_PEERNAME',28)
SO_TIMESTAMP = Constant('SO_TIMESTAMP',29)
SCM_TIMESTAMP = Constant('SCM_TIMESTAMP',29)
SO_ACCEPTCONN = Constant('SO_ACCEPTCONN',30)
SO_PEERSEC = Constant('SO_PEERSEC',31)
SO_SNDBUFFORCE = Constant('SO_SNDBUFFORCE',32)
SO_RCVBUFFORCE = Constant('SO_RCVBUFFORCE',33)
SO_PASSSEC = Constant('SO_PASSSEC',34)
SO_TIMESTAMPNS = Constant('SO_TIMESTAMPNS',35)
SCM_TIMESTAMPNS = Constant('SCM_TIMESTAMPNS',35)
SO_MARK = Constant('SO_MARK',36)
SO_TIMESTAMPING = Constant('SO_TIMESTAMPING',37)
SCM_TIMESTAMPING = Constant('SCM_TIMESTAMPING',37)
SO_PROTOCOL = Constant('SO_PROTOCOL',38)
SO_DOMAIN = Constant('SO_DOMAIN',39)
SO_RXQ_OVFL = Constant('SO_RXQ_OVFL',40)
SO_WIFI_STATUS = Constant('SO_WIFI_STATUS',41)
SCM_WIFI_STATUS = Constant('SCM_WIFI_STATUS',41)
SO_PEEK_OFF = Constant('SO_PEEK_OFF',42)
SO_NOFCS = Constant('SO_NOFCS',43)
SO_LOCK_FILTER = Constant('SO_LOCK_FILTER',44)
SO_SELECT_ERR_QUEUE = Constant('SO_SELECT_ERR_QUEUE',45)
SO_BUSY_POLL = Constant('SO_BUSY_POLL',46)
SO_MAX_PACING_RATE = Constant('SO_MAX_PACING_RATE',47)
SO_BPF_EXTENSIONS = Constant('SO_BPF_EXTENSIONS',48)
SO_INCOMING_CPU = Constant('SO_INCOMING_CPU',49)
SO_ATTACH_BPF = Constant('SO_ATTACH_BPF',50)
SO_DETACH_BPF = Constant('SO_DETACH_BPF',27)
SO_ATTACH_REUSEPORT_CBPF = Constant('SO_ATTACH_REUSEPORT_CBPF',51)
SO_ATTACH_REUSEPORT_EBPF = Constant('SO_ATTACH_REUSEPORT_EBPF',52)
SO_CNX_ADVICE = Constant('SO_CNX_ADVICE',53)
SCM_TIMESTAMPING_OPT_STATS = Constant('SCM_TIMESTAMPING_OPT_STATS',54)
SO_MEMINFO = Constant('SO_MEMINFO',55)
SO_INCOMING_NAPI_ID = Constant('SO_INCOMING_NAPI_ID',56)
SO_COOKIE = Constant('SO_COOKIE',57)
SCM_TIMESTAMPING_PKTINFO = Constant('SCM_TIMESTAMPING_PKTINFO',58)
SO_PEERGROUPS = Constant('SO_PEERGROUPS',59)
SO_ZEROCOPY = Constant('SO_ZEROCOPY',60)
SOCK_STREAM = Constant('SOCK_STREAM',1)
SOCK_DGRAM = Constant('SOCK_DGRAM',2)
SOCK_RAW = Constant('SOCK_RAW',3)
SOCK_RDM = Constant('SOCK_RDM',4)
SOCK_SEQPACKET = Constant('SOCK_SEQPACKET',5)
SOCK_DCCP = Constant('SOCK_DCCP',6)
SOCK_PACKET = Constant('SOCK_PACKET',10)
UIO_FASTIOV = Constant('UIO_FASTIOV',8)
UIO_MAXIOV = Constant('UIO_MAXIOV',1024)
SCM_RIGHTS = Constant('SCM_RIGHTS',0x01)
SCM_CREDENTIALS = Constant('SCM_CREDENTIALS',0x02)
SCM_CONNECT = Constant('SCM_CONNECT',0x03)
AF_UNSPEC = Constant('AF_UNSPEC',0)
AF_UNIX = Constant('AF_UNIX',1)
AF_LOCAL = Constant('AF_LOCAL',1)
AF_INET = Constant('AF_INET',2)
AF_AX25 = Constant('AF_AX25',3)
AF_IPX = Constant('AF_IPX',4)
AF_APPLETALK = Constant('AF_APPLETALK',5)
AF_NETROM = Constant('AF_NETROM',6)
AF_BRIDGE = Constant('AF_BRIDGE',7)
AF_ATMPVC = Constant('AF_ATMPVC',8)
AF_X25 = Constant('AF_X25',9)
AF_INET6 = Constant('AF_INET6',10)
AF_ROSE = Constant('AF_ROSE',11)
AF_DECnet = Constant('AF_DECnet',12)
AF_NETBEUI = Constant('AF_NETBEUI',13)
AF_SECURITY = Constant('AF_SECURITY',14)
AF_KEY = Constant('AF_KEY',15)
AF_NETLINK = Constant('AF_NETLINK',16)
AF_ROUTE = Constant('AF_ROUTE',16)
AF_PACKET = Constant('AF_PACKET',17)
AF_ASH = Constant('AF_ASH',18)
AF_ECONET = Constant('AF_ECONET',19)
AF_ATMSVC = Constant('AF_ATMSVC',20)
AF_SNA = Constant('AF_SNA',22)
AF_IRDA = Constant('AF_IRDA',23)
AF_PPPOX = Constant('AF_PPPOX',24)
AF_WANPIPE = Constant('AF_WANPIPE',25)
AF_LLC = Constant('AF_LLC',26)
AF_IB = Constant('AF_IB',27)
AF_MPLS = Constant('AF_MPLS',28)
AF_CAN = Constant('AF_CAN',29)
AF_TIPC = Constant('AF_TIPC',30)
AF_BLUETOOTH = Constant('AF_BLUETOOTH',31)
AF_IUCV = Constant('AF_IUCV',32)
AF_RXRPC = Constant('AF_RXRPC',33)
AF_ISDN = Constant('AF_ISDN',34)
AF_PHONET = Constant('AF_PHONET',35)
AF_IEEE802154 = Constant('AF_IEEE802154',36)
AF_CAIF = Constant('AF_CAIF',37)
AF_ALG = Constant('AF_ALG',38)
AF_NFC = Constant('AF_NFC',39)
AF_VSOCK = Constant('AF_VSOCK',40)
AF_KCM = Constant('AF_KCM',41)
AF_QIPCRTR = Constant('AF_QIPCRTR',42)
AF_SMC = Constant('AF_SMC',43)
AF_MAX = Constant('AF_MAX',44)
PF_UNSPEC = Constant('PF_UNSPEC',0)
PF_UNIX = Constant('PF_UNIX',1)
PF_LOCAL = Constant('PF_LOCAL',1)
PF_INET = Constant('PF_INET',2)
PF_AX25 = Constant('PF_AX25',3)
PF_IPX = Constant('PF_IPX',4)
PF_APPLETALK = Constant('PF_APPLETALK',5)
PF_NETROM = Constant('PF_NETROM',6)
PF_BRIDGE = Constant('PF_BRIDGE',7)
PF_ATMPVC = Constant('PF_ATMPVC',8)
PF_X25 = Constant('PF_X25',9)
PF_INET6 = Constant('PF_INET6',10)
PF_ROSE = Constant('PF_ROSE',11)
PF_DECnet = Constant('PF_DECnet',12)
PF_NETBEUI = Constant('PF_NETBEUI',13)
PF_SECURITY = Constant('PF_SECURITY',14)
PF_KEY = Constant('PF_KEY',15)
PF_NETLINK = Constant('PF_NETLINK',16)
PF_ROUTE = Constant('PF_ROUTE',16)
PF_PACKET = Constant('PF_PACKET',17)
PF_ASH = Constant('PF_ASH',18)
PF_ECONET = Constant('PF_ECONET',19)
PF_ATMSVC = Constant('PF_ATMSVC',20)
PF_SNA = Constant('PF_SNA',22)
PF_IRDA = Constant('PF_IRDA',23)
PF_PPPOX = Constant('PF_PPPOX',24)
PF_WANPIPE = Constant('PF_WANPIPE',25)
PF_LLC = Constant('PF_LLC',26)
PF_IB = Constant('PF_IB',27)
PF_MPLS = Constant('PF_MPLS',28)
PF_CAN = Constant('PF_CAN',29)
PF_TIPC = Constant('PF_TIPC',30)
PF_BLUETOOTH = Constant('PF_BLUETOOTH',31)
PF_IUCV = Constant('PF_IUCV',32)
PF_RXRPC = Constant('PF_RXRPC',33)
PF_ISDN = Constant('PF_ISDN',34)
PF_PHONET = Constant('PF_PHONET',35)
PF_IEEE802154 = Constant('PF_IEEE802154',36)
PF_CAIF = Constant('PF_CAIF',37)
PF_ALG = Constant('PF_ALG',38)
PF_NFC = Constant('PF_NFC',39)
PF_VSOCK = Constant('PF_VSOCK',40)
PF_KCM = Constant('PF_KCM',41)
PF_QIPCRTR = Constant('PF_QIPCRTR',42)
PF_SMC = Constant('PF_SMC',43)
PF_MAX = Constant('PF_MAX',44)
SOMAXCONN = Constant('SOMAXCONN',128)
MSG_OOB = Constant('MSG_OOB',1)
MSG_PEEK = Constant('MSG_PEEK',2)
MSG_DONTROUTE = Constant('MSG_DONTROUTE',4)
MSG_TRYHARD = Constant('MSG_TRYHARD',4)
MSG_CTRUNC = Constant('MSG_CTRUNC',8)
MSG_PROBE = Constant('MSG_PROBE',0x10)
MSG_TRUNC = Constant('MSG_TRUNC',0x20)
MSG_DONTWAIT = Constant('MSG_DONTWAIT',0x40)
MSG_EOR = Constant('MSG_EOR',0x80)
MSG_WAITALL = Constant('MSG_WAITALL',0x100)
MSG_FIN = Constant('MSG_FIN',0x200)
MSG_SYN = Constant('MSG_SYN',0x400)
MSG_CONFIRM = Constant('MSG_CONFIRM',0x800)
MSG_RST = Constant('MSG_RST',0x1000)
MSG_ERRQUEUE = Constant('MSG_ERRQUEUE',0x2000)
MSG_NOSIGNAL = Constant('MSG_NOSIGNAL',0x4000)
MSG_MORE = Constant('MSG_MORE',0x8000)
MSG_WAITFORONE = Constant('MSG_WAITFORONE',0x10000)
MSG_SENDPAGE_NOTLAST = Constant('MSG_SENDPAGE_NOTLAST',0x20000)
MSG_BATCH = Constant('MSG_BATCH',0x40000)
MSG_EOF = Constant('MSG_EOF',0x200)
MSG_ZEROCOPY = Constant('MSG_ZEROCOPY',0x4000000)
MSG_FASTOPEN = Constant('MSG_FASTOPEN',0x20000000)
MSG_CMSG_CLOEXEC = Constant('MSG_CMSG_CLOEXEC',0x40000000)
SOL_IP = Constant('SOL_IP',0)
SOL_TCP = Constant('SOL_TCP',6)
SOL_UDP = Constant('SOL_UDP',17)
SOL_IPV6 = Constant('SOL_IPV6',41)
SOL_ICMPV6 = Constant('SOL_ICMPV6',58)
SOL_SCTP = Constant('SOL_SCTP',132)
SOL_UDPLITE = Constant('SOL_UDPLITE',136)
SOL_RAW = Constant('SOL_RAW',255)
SOL_IPX = Constant('SOL_IPX',256)
SOL_AX25 = Constant('SOL_AX25',257)
SOL_ATALK = Constant('SOL_ATALK',258)
SOL_NETROM = Constant('SOL_NETROM',259)
SOL_ROSE = Constant('SOL_ROSE',260)
SOL_DECNET = Constant('SOL_DECNET',261)
SOL_X25 = Constant('SOL_X25',262)
SOL_PACKET = Constant('SOL_PACKET',263)
SOL_ATM = Constant('SOL_ATM',264)
SOL_AAL = Constant('SOL_AAL',265)
SOL_IRDA = Constant('SOL_IRDA',266)
SOL_NETBEUI = Constant('SOL_NETBEUI',267)
SOL_LLC = Constant('SOL_LLC',268)
SOL_DCCP = Constant('SOL_DCCP',269)
SOL_NETLINK = Constant('SOL_NETLINK',270)
SOL_TIPC = Constant('SOL_TIPC',271)
SOL_RXRPC = Constant('SOL_RXRPC',272)
SOL_PPPOL2TP = Constant('SOL_PPPOL2TP',273)
SOL_BLUETOOTH = Constant('SOL_BLUETOOTH',274)
SOL_PNPIPE = Constant('SOL_PNPIPE',275)
SOL_RDS = Constant('SOL_RDS',276)
SOL_IUCV = Constant('SOL_IUCV',277)
SOL_CAIF = Constant('SOL_CAIF',278)
SOL_ALG = Constant('SOL_ALG',279)
SOL_NFC = Constant('SOL_NFC',280)
SOL_KCM = Constant('SOL_KCM',281)
SOL_TLS = Constant('SOL_TLS',282)
IPX_TYPE = Constant('IPX_TYPE',1)
SHUT_RD = Constant('SHUT_RD',0)
SHUT_WR = Constant('SHUT_WR',1)
SHUT_RDWR = Constant('SHUT_RDWR',2)
NI_NOFQDN = Constant('NI_NOFQDN',1)
NI_NUMERICHOST = Constant('NI_NUMERICHOST',2)
NI_NAMEREQD = Constant('NI_NAMEREQD',4)
NI_NUMERICSERV = Constant('NI_NUMERICSERV',8)
NI_DGRAM = Constant('NI_DGRAM',16)
EAI_FAMILY = Constant('EAI_FAMILY',-1)
EAI_SOCKTYPE = Constant('EAI_SOCKTYPE',-2)
EAI_BADFLAGS = Constant('EAI_BADFLAGS',-3)
EAI_NONAME = Constant('EAI_NONAME',-4)
EAI_SERVICE = Constant('EAI_SERVICE',-5)
EAI_ADDRFAMILY = Constant('EAI_ADDRFAMILY',-6)
EAI_NODATA = Constant('EAI_NODATA',-7)
EAI_MEMORY = Constant('EAI_MEMORY',-8)
EAI_FAIL = Constant('EAI_FAIL',-9)
EAI_AGAIN = Constant('EAI_AGAIN',-10)
EAI_SYSTEM = Constant('EAI_SYSTEM',-11)
AI_NUMERICHOST = Constant('AI_NUMERICHOST',1)
AI_CANONNAME = Constant('AI_CANONNAME',2)
AI_PASSIVE = Constant('AI_PASSIVE',4)
AI_NUMERICSERV = Constant('AI_NUMERICSERV',8)
AI_ADDRCONFIG = Constant('AI_ADDRCONFIG',16)
AI_V4MAPPED = Constant('AI_V4MAPPED',32)
AI_ALL = Constant('AI_ALL',64)
SIOCADDRT = Constant('SIOCADDRT',0x890B)
SIOCDELRT = Constant('SIOCDELRT',0x890C)
SIOCRTMSG = Constant('SIOCRTMSG',0x890D)
SIOCGIFNAME = Constant('SIOCGIFNAME',0x8910)
SIOCSIFLINK = Constant('SIOCSIFLINK',0x8911)
SIOCGIFCONF = Constant('SIOCGIFCONF',0x8912)
SIOCGIFFLAGS = Constant('SIOCGIFFLAGS',0x8913)
SIOCSIFFLAGS = Constant('SIOCSIFFLAGS',0x8914)
SIOCGIFADDR = Constant('SIOCGIFADDR',0x8915)
SIOCSIFADDR = Constant('SIOCSIFADDR',0x8916)
SIOCGIFDSTADDR = Constant('SIOCGIFDSTADDR',0x8917)
SIOCSIFDSTADDR = Constant('SIOCSIFDSTADDR',0x8918)
SIOCGIFBRDADDR = Constant('SIOCGIFBRDADDR',0x8919)
SIOCSIFBRDADDR = Constant('SIOCSIFBRDADDR',0x891a)
SIOCGIFNETMASK = Constant('SIOCGIFNETMASK',0x891b)
SIOCSIFNETMASK = Constant('SIOCSIFNETMASK',0x891c)
SIOCGIFMETRIC = Constant('SIOCGIFMETRIC',0x891d)
SIOCSIFMETRIC = Constant('SIOCSIFMETRIC',0x891e)
SIOCGIFMEM = Constant('SIOCGIFMEM',0x891f)
SIOCSIFMEM = Constant('SIOCSIFMEM',0x8920)
SIOCGIFMTU = Constant('SIOCGIFMTU',0x8921)
SIOCSIFMTU = Constant('SIOCSIFMTU',0x8922)
SIOCSIFNAME = Constant('SIOCSIFNAME',0x8923)
SIOCSIFHWADDR = Constant('SIOCSIFHWADDR',0x8924)
SIOCGIFENCAP = Constant('SIOCGIFENCAP',0x8925)
SIOCSIFENCAP = Constant('SIOCSIFENCAP',0x8926)
SIOCGIFHWADDR = Constant('SIOCGIFHWADDR',0x8927)
SIOCGIFSLAVE = Constant('SIOCGIFSLAVE',0x8929)
SIOCSIFSLAVE = Constant('SIOCSIFSLAVE',0x8930)
SIOCADDMULTI = Constant('SIOCADDMULTI',0x8931)
SIOCDELMULTI = Constant('SIOCDELMULTI',0x8932)
SIOCGIFINDEX = Constant('SIOCGIFINDEX',0x8933)
SIOGIFINDEX = Constant('SIOGIFINDEX',0x8933)
SIOCSIFPFLAGS = Constant('SIOCSIFPFLAGS',0x8934)
SIOCGIFPFLAGS = Constant('SIOCGIFPFLAGS',0x8935)
SIOCDIFADDR = Constant('SIOCDIFADDR',0x8936)
SIOCSIFHWBROADCAST = Constant('SIOCSIFHWBROADCAST',0x8937)
SIOCGIFCOUNT = Constant('SIOCGIFCOUNT',0x8938)
SIOCGIFBR = Constant('SIOCGIFBR',0x8940)
SIOCSIFBR = Constant('SIOCSIFBR',0x8941)
SIOCGIFTXQLEN = Constant('SIOCGIFTXQLEN',0x8942)
SIOCSIFTXQLEN = Constant('SIOCSIFTXQLEN',0x8943)
SIOCGIFDIVERT = Constant('SIOCGIFDIVERT',0x8944)
SIOCSIFDIVERT = Constant('SIOCSIFDIVERT',0x8945)
SIOCETHTOOL = Constant('SIOCETHTOOL',0x8946)
SIOCDARP = Constant('SIOCDARP',0x8953)
SIOCGARP = Constant('SIOCGARP',0x8954)
SIOCSARP = Constant('SIOCSARP',0x8955)
SIOCDRARP = Constant('SIOCDRARP',0x8960)
SIOCGRARP = Constant('SIOCGRARP',0x8961)
SIOCSRARP = Constant('SIOCSRARP',0x8962)
SIOCGIFMAP = Constant('SIOCGIFMAP',0x8970)
SIOCSIFMAP = Constant('SIOCSIFMAP',0x8971)
SIOCADDDLCI = Constant('SIOCADDDLCI',0x8980)
SIOCDELDLCI = Constant('SIOCDELDLCI',0x8981)
SIOCDEVPRIVATE = Constant('SIOCDEVPRIVATE',0x89F0)
F_LINUX_SPECIFIC_BASE = Constant('F_LINUX_SPECIFIC_BASE',1024)
F_SETOWN_EX = Constant('F_SETOWN_EX',15)
F_GETOWN_EX = Constant('F_GETOWN_EX',16)
F_GETOWNER_UIDS = Constant('F_GETOWNER_UIDS',17)
F_OFD_GETLK = Constant('F_OFD_GETLK',36)
F_OFD_SETLK = Constant('F_OFD_SETLK',37)
F_OFD_SETLKW = Constant('F_OFD_SETLKW',38)
F_OWNER_TID = Constant('F_OWNER_TID',0)
F_OWNER_PID = Constant('F_OWNER_PID',1)
F_OWNER_PGRP = Constant('F_OWNER_PGRP',2)
AT_FDCWD = Constant('AT_FDCWD',-100)
AT_SYMLINK_NOFOLLOW = Constant('AT_SYMLINK_NOFOLLOW',0x100)
AT_REMOVEDIR = Constant('AT_REMOVEDIR',0x200)
AT_SYMLINK_FOLLOW = Constant('AT_SYMLINK_FOLLOW',0x400)
AT_NO_AUTOMOUNT = Constant('AT_NO_AUTOMOUNT',0x800)
AT_EMPTY_PATH = Constant('AT_EMPTY_PATH',0x1000)
AT_EACCESS = Constant('AT_EACCESS',0x200)
MREMAP_MAYMOVE = Constant('MREMAP_MAYMOVE',1)
MREMAP_FIXED = Constant('MREMAP_FIXED',2)
PROT_READ = Constant('PROT_READ',0x1)
PROT_WRITE = Constant('PROT_WRITE',0x2)
PROT_EXEC = Constant('PROT_EXEC',0x4)
PROT_SEM = Constant('PROT_SEM',0x8)
PROT_NONE = Constant('PROT_NONE',0x0)
PROT_GROWSDOWN = Constant('PROT_GROWSDOWN',0x01000000)
PROT_GROWSUP = Constant('PROT_GROWSUP',0x02000000)
MAP_SHARED = Constant('MAP_SHARED',0x01)
MAP_PRIVATE = Constant('MAP_PRIVATE',0x02)
MAP_TYPE = Constant('MAP_TYPE',0xf)
MADV_REMOVE = Constant('MADV_REMOVE',9)
MADV_DONTFORK = Constant('MADV_DONTFORK',10)
MADV_DOFORK = Constant('MADV_DOFORK',11)
MADV_MERGEABLE = Constant('MADV_MERGEABLE',12)
MADV_UNMERGEABLE = Constant('MADV_UNMERGEABLE',13)
MADV_HUGEPAGE = Constant('MADV_HUGEPAGE',14)
MADV_NOHUGEPAGE = Constant('MADV_NOHUGEPAGE',15)
MADV_DONTDUMP = Constant('MADV_DONTDUMP',16)
MADV_DODUMP = Constant('MADV_DODUMP',17)
MADV_HWPOISON = Constant('MADV_HWPOISON',100)
MADV_SOFT_OFFLINE = Constant('MADV_SOFT_OFFLINE',101)
MLOCK_ONFAULT = Constant('MLOCK_ONFAULT',1)
MAP_FILE = Constant('MAP_FILE',0)
PTRACE_TRACEME = Constant('PTRACE_TRACEME',0)
PTRACE_PEEKTEXT = Constant('PTRACE_PEEKTEXT',1)
PTRACE_PEEKDATA = Constant('PTRACE_PEEKDATA',2)
PTRACE_PEEKUSR = Constant('PTRACE_PEEKUSR',3)
PTRACE_PEEKUSER = Constant('PTRACE_PEEKUSER',3)
PTRACE_POKETEXT = Constant('PTRACE_POKETEXT',4)
PTRACE_POKEDATA = Constant('PTRACE_POKEDATA',5)
PTRACE_POKEUSR = Constant('PTRACE_POKEUSR',6)
PTRACE_POKEUSER = Constant('PTRACE_POKEUSER',6)
PTRACE_CONT = Constant('PTRACE_CONT',7)
PTRACE_KILL = Constant('PTRACE_KILL',8)
PTRACE_SINGLESTEP = Constant('PTRACE_SINGLESTEP',9)
PTRACE_ATTACH = Constant('PTRACE_ATTACH',0x10)
PTRACE_DETACH = Constant('PTRACE_DETACH',0x11)
PTRACE_SYSCALL = Constant('PTRACE_SYSCALL',24)
PTRACE_GETEVENTMSG = Constant('PTRACE_GETEVENTMSG',0x4201)
PTRACE_GETSIGINFO = Constant('PTRACE_GETSIGINFO',0x4202)
PTRACE_SETSIGINFO = Constant('PTRACE_SETSIGINFO',0x4203)
PTRACE_O_TRACESYSGOOD = Constant('PTRACE_O_TRACESYSGOOD',0x00000001)
PTRACE_O_TRACEFORK = Constant('PTRACE_O_TRACEFORK',0x00000002)
PTRACE_O_TRACEVFORK = Constant('PTRACE_O_TRACEVFORK',0x00000004)
PTRACE_O_TRACECLONE = Constant('PTRACE_O_TRACECLONE',0x00000008)
PTRACE_O_TRACEEXEC = Constant('PTRACE_O_TRACEEXEC',0x00000010)
PTRACE_O_TRACEVFORKDONE = Constant('PTRACE_O_TRACEVFORKDONE',0x00000020)
PTRACE_O_TRACEEXIT = Constant('PTRACE_O_TRACEEXIT',0x00000040)
PTRACE_O_MASK = Constant('PTRACE_O_MASK',0x0000007f)
PTRACE_EVENT_FORK = Constant('PTRACE_EVENT_FORK',1)
PTRACE_EVENT_VFORK = Constant('PTRACE_EVENT_VFORK',2)
PTRACE_EVENT_CLONE = Constant('PTRACE_EVENT_CLONE',3)
PTRACE_EVENT_EXEC = Constant('PTRACE_EVENT_EXEC',4)
PTRACE_EVENT_VFORK_DONE = Constant('PTRACE_EVENT_VFORK_DONE',5)
PTRACE_EVENT_EXIT = Constant('PTRACE_EVENT_EXIT',6)
PT_TRACE_ME = Constant('PT_TRACE_ME',0)
PT_READ_I = Constant('PT_READ_I',1)
PT_READ_D = Constant('PT_READ_D',2)
PT_READ_U = Constant('PT_READ_U',3)
PT_WRITE_I = Constant('PT_WRITE_I',4)
PT_WRITE_D = Constant('PT_WRITE_D',5)
PT_WRITE_U = Constant('PT_WRITE_U',6)
PT_CONTINUE = Constant('PT_CONTINUE',7)
PT_KILL = Constant('PT_KILL',8)
PT_STEP = Constant('PT_STEP',9)
PT_ATTACH = Constant('PT_ATTACH',0x10)
PT_DETACH = Constant('PT_DETACH',0x11)
SYS_accept = Constant('SYS_accept',330)
SYS_accept4 = Constant('SYS_accept4',344)
SYS_access = Constant('SYS_access',33)
SYS_acct = Constant('SYS_acct',51)
SYS_add_key = Constant('SYS_add_key',269)
SYS_adjtimex = Constant('SYS_adjtimex',124)
SYS_afs_syscall = Constant('SYS_afs_syscall',137)
SYS_alarm = Constant('SYS_alarm',27)
SYS_bdflush = Constant('SYS_bdflush',134)
SYS_bind = Constant('SYS_bind',327)
SYS_bpf = Constant('SYS_bpf',361)
SYS_break = Constant('SYS_break',17)
SYS_brk = Constant('SYS_brk',45)
SYS_capget = Constant('SYS_capget',183)
SYS_capset = Constant('SYS_capset',184)
SYS_chdir = Constant('SYS_chdir',12)
SYS_chmod = Constant('SYS_chmod',15)
SYS_chown = Constant('SYS_chown',181)
SYS_chroot = Constant('SYS_chroot',61)
SYS_clock_adjtime = Constant('SYS_clock_adjtime',347)
SYS_clock_adjtime64 = Constant('SYS_clock_adjtime64',405)
SYS_clock_getres = Constant('SYS_clock_getres',247)
SYS_clock_getres_time64 = Constant('SYS_clock_getres_time64',406)
SYS_clock_gettime = Constant('SYS_clock_gettime',246)
SYS_clock_gettime64 = Constant('SYS_clock_gettime64',403)
SYS_clock_nanosleep = Constant('SYS_clock_nanosleep',248)
SYS_clock_nanosleep_time64 = Constant('SYS_clock_nanosleep_time64',407)
SYS_clock_settime = Constant('SYS_clock_settime',245)
SYS_clock_settime64 = Constant('SYS_clock_settime64',404)
SYS_clone = Constant('SYS_clone',120)
SYS_clone3 = Constant('SYS_clone3',435)
SYS_close = Constant('SYS_close',6)
SYS_connect = Constant('SYS_connect',328)
SYS_copy_file_range = Constant('SYS_copy_file_range',379)
SYS_creat = Constant('SYS_creat',8)
SYS_create_module = Constant('SYS_create_module',127)
SYS_delete_module = Constant('SYS_delete_module',129)
SYS_dup = Constant('SYS_dup',41)
SYS_dup2 = Constant('SYS_dup2',63)
SYS_dup3 = Constant('SYS_dup3',316)
SYS_epoll_create = Constant('SYS_epoll_create',236)
SYS_epoll_create1 = Constant('SYS_epoll_create1',315)
SYS_epoll_ctl = Constant('SYS_epoll_ctl',237)
SYS_epoll_pwait = Constant('SYS_epoll_pwait',303)
SYS_epoll_wait = Constant('SYS_epoll_wait',238)
SYS_eventfd = Constant('SYS_eventfd',307)
SYS_eventfd2 = Constant('SYS_eventfd2',314)
SYS_execve = Constant('SYS_execve',11)
SYS_execveat = Constant('SYS_execveat',362)
SYS_exit = Constant('SYS_exit',1)
SYS_exit_group = Constant('SYS_exit_group',234)
SYS_faccessat = Constant('SYS_faccessat',298)
SYS_fadvise64 = Constant('SYS_fadvise64',233)
SYS_fadvise64_64 = Constant('SYS_fadvise64_64',254)
SYS_fallocate = Constant('SYS_fallocate',309)
SYS_fanotify_init = Constant('SYS_fanotify_init',323)
SYS_fanotify_mark = Constant('SYS_fanotify_mark',324)
SYS_fchdir = Constant('SYS_fchdir',133)
SYS_fchmod = Constant('SYS_fchmod',94)
SYS_fchmodat = Constant('SYS_fchmodat',297)
SYS_fchown = Constant('SYS_fchown',95)
SYS_fchownat = Constant('SYS_fchownat',289)
SYS_fcntl = Constant('SYS_fcntl',55)
SYS_fcntl64 = Constant('SYS_fcntl64',204)
SYS_fdatasync = Constant('SYS_fdatasync',148)
SYS_fgetxattr = Constant('SYS_fgetxattr',214)
SYS_finit_module = Constant('SYS_finit_module',353)
SYS_flistxattr = Constant('SYS_flistxattr',217)
SYS_flock = Constant('SYS_flock',143)
SYS_fork = Constant('SYS_fork',2)
SYS_fremovexattr = Constant('SYS_fremovexattr',220)
SYS_fsconfig = Constant('SYS_fsconfig',431)
SYS_fsetxattr = Constant('SYS_fsetxattr',211)
SYS_fsmount = Constant('SYS_fsmount',432)
SYS_fsopen = Constant('SYS_fsopen',430)
SYS_fspick = Constant('SYS_fspick',433)
SYS_fstat = Constant('SYS_fstat',108)
SYS_fstat64 = Constant('SYS_fstat64',197)
SYS_fstatat64 = Constant('SYS_fstatat64',291)
SYS_fstatfs = Constant('SYS_fstatfs',100)
SYS_fstatfs64 = Constant('SYS_fstatfs64',253)
SYS_fsync = Constant('SYS_fsync',118)
SYS_ftime = Constant('SYS_ftime',35)
SYS_ftruncate = Constant('SYS_ftruncate',93)
SYS_ftruncate64 = Constant('SYS_ftruncate64',194)
SYS_futex = Constant('SYS_futex',221)
SYS_futex_time64 = Constant('SYS_futex_time64',422)
SYS_futimesat = Constant('SYS_futimesat',290)
SYS_getcpu = Constant('SYS_getcpu',302)
SYS_getcwd = Constant('SYS_getcwd',182)
SYS_getdents = Constant('SYS_getdents',141)
SYS_getdents64 = Constant('SYS_getdents64',202)
SYS_getegid = Constant('SYS_getegid',50)
SYS_geteuid = Constant('SYS_geteuid',49)
SYS_getgid = Constant('SYS_getgid',47)
SYS_getgroups = Constant('SYS_getgroups',80)
SYS_getitimer = Constant('SYS_getitimer',105)
SYS_get_kernel_syms = Constant('SYS_get_kernel_syms',130)
SYS_getpeername = Constant('SYS_getpeername',332)
SYS_getpgid = Constant('SYS_getpgid',132)
SYS_getpgrp = Constant('SYS_getpgrp',65)
SYS_getpid = Constant('SYS_getpid',20)
SYS_getpmsg = Constant('SYS_getpmsg',187)
SYS_getppid = Constant('SYS_getppid',64)
SYS_getpriority = Constant('SYS_getpriority',96)
SYS_getrandom = Constant('SYS_getrandom',359)
SYS_getresgid = Constant('SYS_getresgid',170)
SYS_getresuid = Constant('SYS_getresuid',165)
SYS_getrlimit = Constant('SYS_getrlimit',76)
SYS_get_robust_list = Constant('SYS_get_robust_list',299)
SYS_getrusage = Constant('SYS_getrusage',77)
SYS_getsid = Constant('SYS_getsid',147)
SYS_getsockname = Constant('SYS_getsockname',331)
SYS_getsockopt = Constant('SYS_getsockopt',340)
SYS_gettid = Constant('SYS_gettid',207)
SYS_gettimeofday = Constant('SYS_gettimeofday',78)
SYS_getuid = Constant('SYS_getuid',24)
SYS_getxattr = Constant('SYS_getxattr',212)
SYS_gtty = Constant('SYS_gtty',32)
SYS_idle = Constant('SYS_idle',112)
SYS_init_module = Constant('SYS_init_module',128)
SYS_inotify_add_watch = Constant('SYS_inotify_add_watch',276)
SYS_inotify_init = Constant('SYS_inotify_init',275)
SYS_inotify_init1 = Constant('SYS_inotify_init1',318)
SYS_inotify_rm_watch = Constant('SYS_inotify_rm_watch',277)
SYS_io_cancel = Constant('SYS_io_cancel',231)
SYS_ioctl = Constant('SYS_ioctl',54)
SYS_io_destroy = Constant('SYS_io_destroy',228)
SYS_io_getevents = Constant('SYS_io_getevents',229)
SYS_ioperm = Constant('SYS_ioperm',101)
SYS_io_pgetevents = Constant('SYS_io_pgetevents',388)
SYS_io_pgetevents_time64 = Constant('SYS_io_pgetevents_time64',416)
SYS_iopl = Constant('SYS_iopl',110)
SYS_ioprio_get = Constant('SYS_ioprio_get',274)
SYS_ioprio_set = Constant('SYS_ioprio_set',273)
SYS_io_setup = Constant('SYS_io_setup',227)
SYS_io_submit = Constant('SYS_io_submit',230)
SYS_io_uring_enter = Constant('SYS_io_uring_enter',426)
SYS_io_uring_register = Constant('SYS_io_uring_register',427)
SYS_io_uring_setup = Constant('SYS_io_uring_setup',425)
SYS_ipc = Constant('SYS_ipc',117)
SYS_kcmp = Constant('SYS_kcmp',354)
SYS_kexec_file_load = Constant('SYS_kexec_file_load',382)
SYS_kexec_load = Constant('SYS_kexec_load',268)
SYS_keyctl = Constant('SYS_keyctl',271)
SYS_kill = Constant('SYS_kill',37)
SYS_lchown = Constant('SYS_lchown',16)
SYS_lgetxattr = Constant('SYS_lgetxattr',213)
SYS_link = Constant('SYS_link',9)
SYS_linkat = Constant('SYS_linkat',294)
SYS_listen = Constant('SYS_listen',329)
SYS_listxattr = Constant('SYS_listxattr',215)
SYS_llistxattr = Constant('SYS_llistxattr',216)
SYS__llseek = Constant('SYS__llseek',140)
SYS_lock = Constant('SYS_lock',53)
SYS_lookup_dcookie = Constant('SYS_lookup_dcookie',235)
SYS_lremovexattr = Constant('SYS_lremovexattr',219)
SYS_lseek = Constant('SYS_lseek',19)
SYS_lsetxattr = Constant('SYS_lsetxattr',210)
SYS_lstat = Constant('SYS_lstat',107)
SYS_lstat64 = Constant('SYS_lstat64',196)
SYS_madvise = Constant('SYS_madvise',205)
SYS_membarrier = Constant('SYS_membarrier',365)
SYS_memfd_create = Constant('SYS_memfd_create',360)
SYS_mincore = Constant('SYS_mincore',206)
SYS_mkdir = Constant('SYS_mkdir',39)
SYS_mkdirat = Constant('SYS_mkdirat',287)
SYS_mknod = Constant('SYS_mknod',14)
SYS_mknodat = Constant('SYS_mknodat',288)
SYS_mlock = Constant('SYS_mlock',150)
SYS_mlock2 = Constant('SYS_mlock2',378)
SYS_mlockall = Constant('SYS_mlockall',152)
SYS_mmap = Constant('SYS_mmap',90)
SYS_mmap2 = Constant('SYS_mmap2',192)
SYS_modify_ldt = Constant('SYS_modify_ldt',123)
SYS_mount = Constant('SYS_mount',21)
SYS_move_mount = Constant('SYS_move_mount',429)
SYS_move_pages = Constant('SYS_move_pages',301)
SYS_mprotect = Constant('SYS_mprotect',125)
SYS_mpx = Constant('SYS_mpx',56)
SYS_mq_getsetattr = Constant('SYS_mq_getsetattr',267)
SYS_mq_notify = Constant('SYS_mq_notify',266)
SYS_mq_open = Constant('SYS_mq_open',262)
SYS_mq_timedreceive = Constant('SYS_mq_timedreceive',265)
SYS_mq_timedreceive_time64 = Constant('SYS_mq_timedreceive_time64',419)
SYS_mq_timedsend = Constant('SYS_mq_timedsend',264)
SYS_mq_timedsend_time64 = Constant('SYS_mq_timedsend_time64',418)
SYS_mq_unlink = Constant('SYS_mq_unlink',263)
SYS_mremap = Constant('SYS_mremap',163)
SYS_msgctl = Constant('SYS_msgctl',402)
SYS_msgget = Constant('SYS_msgget',399)
SYS_msgrcv = Constant('SYS_msgrcv',401)
SYS_msgsnd = Constant('SYS_msgsnd',400)
SYS_msync = Constant('SYS_msync',144)
SYS_multiplexer = Constant('SYS_multiplexer',201)
SYS_munlock = Constant('SYS_munlock',151)
SYS_munlockall = Constant('SYS_munlockall',153)
SYS_munmap = Constant('SYS_munmap',91)
SYS_name_to_handle_at = Constant('SYS_name_to_handle_at',345)
SYS_nanosleep = Constant('SYS_nanosleep',162)
SYS__newselect = Constant('SYS__newselect',142)
SYS_nfsservctl = Constant('SYS_nfsservctl',168)
SYS_nice = Constant('SYS_nice',34)
SYS_oldfstat = Constant('SYS_oldfstat',28)
SYS_oldlstat = Constant('SYS_oldlstat',84)
SYS_oldolduname = Constant('SYS_oldolduname',59)
SYS_oldstat = Constant('SYS_oldstat',18)
SYS_olduname = Constant('SYS_olduname',109)
SYS_open = Constant('SYS_open',5)
SYS_openat = Constant('SYS_openat',286)
SYS_openat2 = Constant('SYS_openat2',437)
SYS_open_by_handle_at = Constant('SYS_open_by_handle_at',346)
SYS_open_tree = Constant('SYS_open_tree',428)
SYS_pause = Constant('SYS_pause',29)
SYS_pciconfig_iobase = Constant('SYS_pciconfig_iobase',200)
SYS_pciconfig_read = Constant('SYS_pciconfig_read',198)
SYS_pciconfig_write = Constant('SYS_pciconfig_write',199)
SYS_perf_event_open = Constant('SYS_perf_event_open',319)
SYS_personality = Constant('SYS_personality',136)
SYS_pidfd_getfd = Constant('SYS_pidfd_getfd',438)
SYS_pidfd_open = Constant('SYS_pidfd_open',434)
SYS_pidfd_send_signal = Constant('SYS_pidfd_send_signal',424)
SYS_pipe = Constant('SYS_pipe',42)
SYS_pipe2 = Constant('SYS_pipe2',317)
SYS_pivot_root = Constant('SYS_pivot_root',203)
SYS_pkey_alloc = Constant('SYS_pkey_alloc',384)
SYS_pkey_free = Constant('SYS_pkey_free',385)
SYS_pkey_mprotect = Constant('SYS_pkey_mprotect',386)
SYS_poll = Constant('SYS_poll',167)
SYS_ppoll = Constant('SYS_ppoll',281)
SYS_ppoll_time64 = Constant('SYS_ppoll_time64',414)
SYS_prctl = Constant('SYS_prctl',171)
SYS_pread = Constant('SYS_pread',179)
SYS_preadv = Constant('SYS_preadv',320)
SYS_preadv2 = Constant('SYS_preadv2',380)
SYS_prlimit64 = Constant('SYS_prlimit64',325)
SYS_process_vm_readv = Constant('SYS_process_vm_readv',351)
SYS_process_vm_writev = Constant('SYS_process_vm_writev',352)
SYS_prof = Constant('SYS_prof',44)
SYS_profil = Constant('SYS_profil',98)
SYS_pselect6 = Constant('SYS_pselect6',280)
SYS_pselect6_time64 = Constant('SYS_pselect6_time64',413)
SYS_ptrace = Constant('SYS_ptrace',26)
SYS_putpmsg = Constant('SYS_putpmsg',188)
SYS_pwrite = Constant('SYS_pwrite',180)
SYS_pwritev = Constant('SYS_pwritev',321)
SYS_pwritev2 = Constant('SYS_pwritev2',381)
SYS_query_module = Constant('SYS_query_module',166)
SYS_quotactl = Constant('SYS_quotactl',131)
SYS_read = Constant('SYS_read',3)
SYS_readahead = Constant('SYS_readahead',191)
SYS_readdir = Constant('SYS_readdir',89)
SYS_readlink = Constant('SYS_readlink',85)
SYS_readlinkat = Constant('SYS_readlinkat',296)
SYS_readv = Constant('SYS_readv',145)
SYS_reboot = Constant('SYS_reboot',88)
SYS_recv = Constant('SYS_recv',336)
SYS_recvfrom = Constant('SYS_recvfrom',337)
SYS_recvmmsg = Constant('SYS_recvmmsg',343)
SYS_recvmmsg_time64 = Constant('SYS_recvmmsg_time64',417)
SYS_recvmsg = Constant('SYS_recvmsg',342)
SYS_remap_file_pages = Constant('SYS_remap_file_pages',239)
SYS_removexattr = Constant('SYS_removexattr',218)
SYS_rename = Constant('SYS_rename',38)
SYS_renameat = Constant('SYS_renameat',293)
SYS_renameat2 = Constant('SYS_renameat2',357)
SYS_request_key = Constant('SYS_request_key',270)
SYS_rmdir = Constant('SYS_rmdir',40)
SYS_rseq = Constant('SYS_rseq',387)
SYS_rtas = Constant('SYS_rtas',255)
SYS_rt_sigaction = Constant('SYS_rt_sigaction',173)
SYS_rt_sigpending = Constant('SYS_rt_sigpending',175)
SYS_rt_sigprocmask = Constant('SYS_rt_sigprocmask',174)
SYS_rt_sigqueueinfo = Constant('SYS_rt_sigqueueinfo',177)
SYS_rt_sigreturn = Constant('SYS_rt_sigreturn',172)
SYS_rt_sigsuspend = Constant('SYS_rt_sigsuspend',178)
SYS_rt_sigtimedwait = Constant('SYS_rt_sigtimedwait',176)
SYS_rt_sigtimedwait_time64 = Constant('SYS_rt_sigtimedwait_time64',421)
SYS_rt_tgsigqueueinfo = Constant('SYS_rt_tgsigqueueinfo',322)
SYS_sched_getaffinity = Constant('SYS_sched_getaffinity',223)
SYS_sched_getattr = Constant('SYS_sched_getattr',356)
SYS_sched_getparam = Constant('SYS_sched_getparam',155)
SYS_sched_get_priority_max = Constant('SYS_sched_get_priority_max',159)
SYS_sched_get_priority_min = Constant('SYS_sched_get_priority_min',160)
SYS_sched_getscheduler = Constant('SYS_sched_getscheduler',157)
SYS_sched_rr_get_interval = Constant('SYS_sched_rr_get_interval',161)
SYS_sched_rr_get_interval_time64 = Constant('SYS_sched_rr_get_interval_time64',423)
SYS_sched_setaffinity = Constant('SYS_sched_setaffinity',222)
SYS_sched_setattr = Constant('SYS_sched_setattr',355)
SYS_sched_setparam = Constant('SYS_sched_setparam',154)
SYS_sched_setscheduler = Constant('SYS_sched_setscheduler',156)
SYS_sched_yield = Constant('SYS_sched_yield',158)
SYS_seccomp = Constant('SYS_seccomp',358)
SYS_select = Constant('SYS_select',82)
SYS_semctl = Constant('SYS_semctl',394)
SYS_semget = Constant('SYS_semget',393)
SYS_semtimedop_time64 = Constant('SYS_semtimedop_time64',420)
SYS_send = Constant('SYS_send',334)
SYS_sendfile = Constant('SYS_sendfile',186)
SYS_sendfile64 = Constant('SYS_sendfile64',226)
SYS_sendmmsg = Constant('SYS_sendmmsg',349)
SYS_sendmsg = Constant('SYS_sendmsg',341)
SYS_sendto = Constant('SYS_sendto',335)
SYS_setdomainname = Constant('SYS_setdomainname',121)
SYS_setfsgid = Constant('SYS_setfsgid',139)
SYS_setfsuid = Constant('SYS_setfsuid',138)
SYS_setgid = Constant('SYS_setgid',46)
SYS_setgroups = Constant('SYS_setgroups',81)
SYS_sethostname = Constant('SYS_sethostname',74)
SYS_setitimer = Constant('SYS_setitimer',104)
SYS_setns = Constant('SYS_setns',350)
SYS_setpgid = Constant('SYS_setpgid',57)
SYS_setpriority = Constant('SYS_setpriority',97)
SYS_setregid = Constant('SYS_setregid',71)
SYS_setresgid = Constant('SYS_setresgid',169)
SYS_setresuid = Constant('SYS_setresuid',164)
SYS_setreuid = Constant('SYS_setreuid',70)
SYS_setrlimit = Constant('SYS_setrlimit',75)
SYS_set_robust_list = Constant('SYS_set_robust_list',300)
SYS_setsid = Constant('SYS_setsid',66)
SYS_setsockopt = Constant('SYS_setsockopt',339)
SYS_set_tid_address = Constant('SYS_set_tid_address',232)
SYS_settimeofday = Constant('SYS_settimeofday',79)
SYS_setuid = Constant('SYS_setuid',23)
SYS_setxattr = Constant('SYS_setxattr',209)
SYS_sgetmask = Constant('SYS_sgetmask',68)
SYS_shmat = Constant('SYS_shmat',397)
SYS_shmctl = Constant('SYS_shmctl',396)
SYS_shmdt = Constant('SYS_shmdt',398)
SYS_shmget = Constant('SYS_shmget',395)
SYS_shutdown = Constant('SYS_shutdown',338)
SYS_sigaction = Constant('SYS_sigaction',67)
SYS_sigaltstack = Constant('SYS_sigaltstack',185)
SYS_signal = Constant('SYS_signal',48)
SYS_signalfd = Constant('SYS_signalfd',305)
SYS_signalfd4 = Constant('SYS_signalfd4',313)
SYS_sigpending = Constant('SYS_sigpending',73)
SYS_sigprocmask = Constant('SYS_sigprocmask',126)
SYS_sigreturn = Constant('SYS_sigreturn',119)
SYS_sigsuspend = Constant('SYS_sigsuspend',72)
SYS_socket = Constant('SYS_socket',326)
SYS_socketcall = Constant('SYS_socketcall',102)
SYS_socketpair = Constant('SYS_socketpair',333)
SYS_splice = Constant('SYS_splice',283)
SYS_spu_create = Constant('SYS_spu_create',279)
SYS_spu_run = Constant('SYS_spu_run',278)
SYS_ssetmask = Constant('SYS_ssetmask',69)
SYS_stat = Constant('SYS_stat',106)
SYS_stat64 = Constant('SYS_stat64',195)
SYS_statfs = Constant('SYS_statfs',99)
SYS_statfs64 = Constant('SYS_statfs64',252)
SYS_statx = Constant('SYS_statx',383)
SYS_stime = Constant('SYS_stime',25)
SYS_stty = Constant('SYS_stty',31)
SYS_subpage_prot = Constant('SYS_subpage_prot',310)
SYS_swapcontext = Constant('SYS_swapcontext',249)
SYS_swapoff = Constant('SYS_swapoff',115)
SYS_swapon = Constant('SYS_swapon',87)
SYS_switch_endian = Constant('SYS_switch_endian',363)
SYS_symlink = Constant('SYS_symlink',83)
SYS_symlinkat = Constant('SYS_symlinkat',295)
SYS_sync = Constant('SYS_sync',36)
SYS_sync_file_range2 = Constant('SYS_sync_file_range2',308)
SYS_syncfs = Constant('SYS_syncfs',348)
SYS__sysctl = Constant('SYS__sysctl',149)
SYS_sys_debug_setcontext = Constant('SYS_sys_debug_setcontext',256)
SYS_sysfs = Constant('SYS_sysfs',135)
SYS_sysinfo = Constant('SYS_sysinfo',116)
SYS_syslog = Constant('SYS_syslog',103)
SYS_tee = Constant('SYS_tee',284)
SYS_tgkill = Constant('SYS_tgkill',250)
SYS_time = Constant('SYS_time',13)
SYS_timer_create = Constant('SYS_timer_create',240)
SYS_timer_delete = Constant('SYS_timer_delete',244)
SYS_timerfd = Constant('SYS_timerfd',306)
SYS_timerfd_gettime = Constant('SYS_timerfd_gettime',312)
SYS_timerfd_gettime64 = Constant('SYS_timerfd_gettime64',410)
SYS_timerfd_settime = Constant('SYS_timerfd_settime',311)
SYS_timerfd_settime64 = Constant('SYS_timerfd_settime64',411)
SYS_timer_getoverrun = Constant('SYS_timer_getoverrun',243)
SYS_timer_gettime = Constant('SYS_timer_gettime',242)
SYS_timer_gettime64 = Constant('SYS_timer_gettime64',408)
SYS_timer_settime = Constant('SYS_timer_settime',241)
SYS_timer_settime64 = Constant('SYS_timer_settime64',409)
SYS_times = Constant('SYS_times',43)
SYS_tkill = Constant('SYS_tkill',208)
SYS_truncate = Constant('SYS_truncate',92)
SYS_truncate64 = Constant('SYS_truncate64',193)
SYS_tuxcall = Constant('SYS_tuxcall',225)
SYS_ugetrlimit = Constant('SYS_ugetrlimit',190)
SYS_ulimit = Constant('SYS_ulimit',58)
SYS_umask = Constant('SYS_umask',60)
SYS_umount = Constant('SYS_umount',22)
SYS_umount2 = Constant('SYS_umount2',52)
SYS_uname = Constant('SYS_uname',122)
SYS_unlink = Constant('SYS_unlink',10)
SYS_unlinkat = Constant('SYS_unlinkat',292)
SYS_unshare = Constant('SYS_unshare',282)
SYS_uselib = Constant('SYS_uselib',86)
SYS_userfaultfd = Constant('SYS_userfaultfd',364)
SYS_ustat = Constant('SYS_ustat',62)
SYS_utime = Constant('SYS_utime',30)
SYS_utimensat = Constant('SYS_utimensat',304)
SYS_utimensat_time64 = Constant('SYS_utimensat_time64',412)
SYS_utimes = Constant('SYS_utimes',251)
SYS_vfork = Constant('SYS_vfork',189)
SYS_vhangup = Constant('SYS_vhangup',111)
SYS_vm86 = Constant('SYS_vm86',113)
SYS_vmsplice = Constant('SYS_vmsplice',285)
SYS_wait4 = Constant('SYS_wait4',114)
SYS_waitid = Constant('SYS_waitid',272)
SYS_waitpid = Constant('SYS_waitpid',7)
SYS_write = Constant('SYS_write',4)
SYS_writev = Constant('SYS_writev',146)
r0 = Constant('r0',0)
r1 = Constant('r1',1)
r2 = Constant('r2',2)
r3 = Constant('r3',3)
r4 = Constant('r4',4)
r5 = Constant('r5',5)
r6 = Constant('r6',6)
r7 = Constant('r7',7)
r8 = Constant('r8',8)
r9 = Constant('r9',9)
r10 = Constant('r10',10)
r11 = Constant('r11',11)
r12 = Constant('r12',12)
r13 = Constant('r13',13)
r14 = Constant('r14',14)
r15 = Constant('r15',15)
r16 = Constant('r16',16)
r17 = Constant('r17',17)
r18 = Constant('r18',18)
r19 = Constant('r19',19)
r20 = Constant('r20',20)
r21 = Constant('r21',21)
r22 = Constant('r22',22)
r23 = Constant('r23',23)
r24 = Constant('r24',24)
r25 = Constant('r25',25)
r26 = Constant('r26',26)
r27 = Constant('r27',27)
r28 = Constant('r28',28)
r29 = Constant('r29',29)
r30 = Constant('r30',30)
r31 = Constant('r31',31)
|
the-stack_0_22986 | import pytest
import cudf
from cuxfilter import charts
from cuxfilter import DataFrame
from cuxfilter.charts.deckgl import PolygonDeckGL, TS_CODE
from bokeh.util.compiler import TypeScript
pytest
class TestDeckGL:
def test_PolygonDeckGL(self):
assert (
PolygonDeckGL.__implementation__.code == TypeScript(TS_CODE).code
)
def test_init(self):
cux_df = DataFrame.from_dataframe(
cudf.DataFrame(
{
"states": [float(i + 30) for i in range(10)],
"val": [float(i + 10) for i in range(10)],
"val_t": [float(i + 100) for i in range(10)],
}
)
)
choropleth3d_chart = charts.deckgl.choropleth3d(
x="states",
color_column="val",
elevation_column="val_t",
color_aggregate_fn="mean",
elevation_aggregation_fn="count",
data_points=57,
add_interaction=False,
elevation_factor=100000,
geoJSONSource=(
"https://raw.githubusercontent.com/loganpowell/census-geojson"
"/master/GeoJSON/5m/2018/state.json"
),
geoJSONProperty="STATEFP",
)
cux_df.dashboard([choropleth3d_chart])
assert isinstance(choropleth3d_chart, charts.deckgl.plots.Choropleth3d)
assert choropleth3d_chart.deck_spec == {
"mapboxApiAccessToken": None,
"mapStyle": "mapbox://styles/mapbox/dark-v9",
"initialViewState": {
"latitude": 28.400005999999998,
"longitude": 0.31556500000000653,
"zoom": 3,
"max_zoom": 16,
},
"controller": True,
}
assert choropleth3d_chart.layer_spec == {
"opacity": 1,
"getLineWidth": 10,
"getPolygon": "@@=coordinates",
"getElevation": "@@=val_t*100000",
"getFillColor": "@@=color",
"stroked": True,
"filled": True,
"extruded": True,
"lineWidthScale": 10,
"lineWidthMinPixels": 1,
"highlightColor": [200, 200, 200, 200],
"visible": True,
"pickable": True,
"getLineColor": [0, 188, 212],
"autoHighlight": True,
"elevationScale": 0.8,
"pickMultipleObjects": True,
}
assert isinstance(
choropleth3d_chart.chart, charts.deckgl.plots.PolygonDeckGL
)
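# Illustrative note: these assertions assume a CUDA-capable environment with
# cuDF and cuxfilter installed; the class can be run in isolation with, e.g.,
# `pytest -k TestDeckGL`.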
|
the-stack_0_22987 | import collections, json, os, sys, yaml
# Allow open to be patched for tests.
import builtins
open = builtins.open
ALWAYS_LOAD_YAML, ALWAYS_DUMP_YAML = True, True
def dumps(data, use_yaml=None, safe=True, **kwds):
"""
    Dumps data into a nicely formatted JSON or YAML string.
    :param dict data: a dictionary to dump
    :param use_yaml: emit YAML instead of JSON; defaults to ALWAYS_DUMP_YAML
    :param bool safe: if False, objects that cannot be serialized are rendered with repr()
    :param kwds: extra keywords passed to json.dumps or yaml.dump
:returns: a string with formatted data
:rtype: str
"""
if use_yaml is None:
use_yaml = ALWAYS_DUMP_YAML
if use_yaml:
dumps = yaml.safe_dump if safe else yaml.dump
else:
dumps = json.dumps
kwds.update(indent=4, sort_keys=True)
if not safe:
kwds.update(default=repr)
return dumps(data, **kwds)
def dump(data, file=sys.stdout, use_yaml=None, **kwds):
"""
    Dumps data as a nicely formatted JSON or YAML string to a file or file handle.
    :param dict data: a dictionary to dump
    :param file: a filename or file handle to write to
    :param use_yaml: emit YAML instead of JSON; defaults to ALWAYS_DUMP_YAML
    :param kwds: extra keywords passed to json.dump or yaml.safe_dump
"""
if use_yaml is None:
use_yaml = ALWAYS_DUMP_YAML
def dump(fp):
if use_yaml:
yaml.safe_dump(data, stream=fp, **kwds)
else:
json.dump(data, fp, indent=4, sort_keys=True, **kwds)
if not isinstance(file, str):
return dump(file)
if os.path.isabs(file):
parent = os.path.dirname(file)
if not os.path.exists(parent):
os.makedirs(parent, exist_ok=True)
with open(file, 'w') as fp:
return dump(fp)
def loads(s, use_yaml=None, filename=''):
if use_yaml is None:
use_yaml = ALWAYS_LOAD_YAML
if not (filename.endswith('.yml') or use_yaml):
return json.loads(s)
def fix(d):
if isinstance(d, dict):
return {str(k): fix(v) for k, v in d.items()}
if isinstance(d, list):
return [fix(i) for i in d]
if not isinstance(d, (int, float, bool, str, type(None))):
raise ValueError('Wrong type %s' % type(d))
return d
return fix(yaml.safe_load(s))
def load(file, use_yaml=None):
"""
Loads not only JSON files but also YAML files ending in .yml.
:param file: a filename or file handle to read from
:returns: the data loaded from the JSON or YAML file
:rtype: dict
"""
if isinstance(file, str):
fp = open(file)
filename = file
else:
fp = file
filename = getattr(fp, 'name', '')
try:
return loads(fp.read(), use_yaml, filename)
except Exception as e:
        e.args = ('There was an error in the data file', filename) + e.args
raise
def load_if(s):
"""Load either a filename, or a string representation of yml/json."""
is_data_file = s.endswith('.json') or s.endswith('.yml')
return load(s) if is_data_file else loads(s)
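# Minimal usage sketch (assumes this module is importable as-is; the sample
# dictionary below is made up for illustration):
if __name__ == '__main__':
    sample = {'name': 'example', 'values': [1, 2, 3]}
    as_yaml = dumps(sample)                  # YAML by default (ALWAYS_DUMP_YAML)
    as_json = dumps(sample, use_yaml=False)  # force JSON output
    # Both serializations round-trip back to the original dictionary.
    assert loads(as_yaml) == loads(as_json, use_yaml=False) == sample
    dump(sample)                             # pretty-print to stdout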
|
the-stack_0_22989 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
# pylint: disable=import-outside-toplevel
"""Paddle: PArallel Distributed Deep LEarning."""
import warnings
import numpy as np
import tvm
from tvm.ir import IRModule
from ... import nd as _nd
from .. import analysis
from .. import ty as _ty
from .. import expr as _expr
from .. import function as _function
from .. import ty as _ty
from .. import op as _op
from .common import (
autopad,
fold_constant,
get_relay_op,
infer_shape,
infer_type,
infer_value,
shape_of,
try_infer_value,
new_var,
)
__all__ = ["from_paddle"]
def _dtype_shape_promotion(inputs):
"""Promote data type and shape for list of tensors."""
dtype_order = ["bool", "int8", "int16", "int32", "int64", "float32", "float64"]
ranks = [len(infer_shape(x)) for x in inputs]
if set(ranks) == set([1, 0]):
for i, r in enumerate(ranks):
if r == 0:
inputs[i] = _op.expand_dims(inputs[i], axis=0)
dtypes = set(dtype_order.index(infer_type(x).checked_type.dtype) for x in inputs)
if len(dtypes) == 1:
return inputs
max_dtype = dtype_order[max(dtypes)]
for i, input_op in enumerate(inputs):
if infer_type(input_op).checked_type.dtype != max_dtype:
inputs[i] = input_op.astype(max_dtype)
return inputs
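# For example (illustrative): given one int32 and one float32 input, both are
# returned as float32; if the inputs mix rank-0 and rank-1 tensors, the scalars
# are first expanded to rank 1 so the shapes line up.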
def _convert_dtype_value(val):
"""Converts a Paddle type id to a string."""
convert_dtype_map = {
21: "int8",
20: "uint8",
6: "float64",
5: "float32",
4: "float16",
3: "int64",
2: "int32",
1: "int16",
0: "bool",
}
if val not in convert_dtype_map:
msg = "Paddle data type value %d is not handled yet." % (val)
raise NotImplementedError(msg)
return convert_dtype_map[val]
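# For example (illustrative): _convert_dtype_value(5) returns "float32", while
# an id that is not in the table raises NotImplementedError.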
def convert_unary_op(g, op, block):
"""Operator converter for all the unary operators."""
    # op_map stores the mapping between PaddlePaddle operator names and Relay ops
op_map = {
"isinf_v2": _op.isinf,
"isfinite_v2": _op.isfinite,
"isnan_v2": _op.isnan,
}
if op.type in op_map:
unary_func = op_map[op.type]
else:
        # otherwise the Paddle operator name matches the Relay operator name
unary_func = get_relay_op(op.type)
out = unary_func(g.get_node(op.input("X")[0]))
g.add_node(op.output("Out")[0], out)
def convert_binary_logical_op(g, op, block):
"""Operator converter for logical op."""
ipt0 = g.get_node(op.input("X")[0])
ipt1 = g.get_node(op.input("Y")[0])
op_func = get_relay_op(op.type)
out = op_func(ipt0, ipt1)
g.add_node(op.output("Out")[0], out)
def convert_addmm(g, op, block):
"""Operator converter for addmm."""
input_x = g.get_node(op.input("Input")[0])
x = g.get_node(op.input("X")[0])
y = g.get_node(op.input("Y")[0])
alpha = op.attr("Alpha")
beta = op.attr("Beta")
dtype = block.var(op.output("Out")[0]).dtype
dtype = _convert_dtype_value(dtype)
if not isinstance(alpha, _expr.Expr) and alpha != 1:
alpha = _expr.const(alpha, dtype)
x *= alpha
if not isinstance(beta, _expr.Expr) and beta != 1:
beta = _expr.const(beta, dtype)
input_x *= beta
transposed_y = _op.transpose(y, axes=[1, 0])
dense_out = _op.nn.dense(x, transposed_y)
out = dense_out + input_x
g.add_node(op.output("Out")[0], out)
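    # Note: the graph built above computes out = beta * Input + alpha * (X @ Y);
    # the matrix product is expressed as nn.dense(X, transpose(Y)) because dense
    # expects its second operand in transposed form.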
def convert_arg_max_min(g, op, block):
"""Operator converter for arg_max and arg_min."""
axis = op.attr("axis")
keepdims = op.attr("keepdims")
flatten = op.attr("flatten")
dtype = op.attr("dtype")
dtype = _convert_dtype_value(dtype)
func = _op.argmax if op.type == "arg_max" else _op.argmin
x = g.get_node(op.input("X")[0])
if axis is None or flatten:
x = _op.reshape(x, [-1])
out = func(x, axis=None, keepdims=True)
else:
out = func(x, axis=axis, keepdims=keepdims)
if dtype != infer_type(out).checked_type.dtype:
out = _op.cast(out, dtype)
g.add_node(op.output("Out")[0], out)
def convert_argsort(g, op, block):
"""Operator converter for argsort."""
x = g.get_node(op.input("X")[0])
axis = op.attr("axis")
descending = op.attr("descending")
out_indices = _op.argsort(x, axis, not descending, dtype="int64")
out = _op.gather(x, axis, out_indices)
g.add_node(op.output("Out")[0], out)
g.add_node(op.output("Indices")[0], out_indices)
def convert_assign(g, op, block):
"""Operator converter for assign."""
out = g.get_node(op.input("X")[0])
g.add_node(op.output("Out")[0], out)
def convert_assign_value(g, op, block):
"""Operator converter for assign_value."""
keys = ["bool_values", "fp32_values", "int32_values", "int64_values"]
dtypes = ["bool", "float32", "int32", "int64"]
for i, key in enumerate(keys):
dtype = dtypes[i]
value = np.array(op.attr(key)).astype(dtype)
if value is not None and value.size >= 1:
break
shape = op.attr("shape")
value = value.reshape(shape)
out = _op.const(value, dtype=dtype)
g.add_node(op.output("Out")[0], out)
def convert_batch_norm(g, op, block):
"""Operator converter for batch_norm."""
ipt_name = op.input("X")[0]
scale_name = op.input("Scale")[0]
bias_name = op.input("Bias")[0]
mean_name = op.input("Mean")[0]
variance_name = op.input("Variance")[0]
epsilon = op.attr("epsilon")
out = _op.nn.batch_norm(
g.get_node(ipt_name),
g.get_node(scale_name),
g.get_node(bias_name),
g.get_node(mean_name),
g.get_node(variance_name),
epsilon=epsilon,
)
g.add_node(op.output("Y")[0], out[0])
def convert_bmm(g, op, block):
"""Operator converter for bmm."""
x = g.get_node(op.input("X")[0])
y = g.get_node(op.input("Y")[0])
y = _op.transpose(y, [0, 2, 1])
out = _op.nn.batch_matmul(x, y)
g.add_node(op.output("Out")[0], out)
def convert_brelu(g, op, block):
"""Operator converter for brelu."""
x = g.get_node(op.input("X")[0])
t_max = op.attr("t_max")
t_min = op.attr("t_min")
out = _op.tensor.clip(x, t_min, t_max)
g.add_node(op.output("Out")[0], out)
def convert_cast(g, op, block):
"""Operator converter for cast."""
dtype = op.attr("out_dtype")
dtype = _convert_dtype_value(dtype)
x = g.get_node(op.input("X")[0])
out = _op.cast(x, dtype=dtype)
g.add_node(op.output("Out")[0], out)
def convert_clip(g, op, block):
"""Operator converter for clip."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
# if the min/max value is a tensor
min_max_is_tensor = False
if op.input("Min"):
min_value = g.get_node(op.input("Min")[0])
min_value, infered = try_infer_value(min_value, g.get_params())
if infered:
min_value = min_value.tolist()[0]
if isinstance(min_value, _expr.Expr):
min_max_is_tensor = True
else:
min_value = op.attr("min")
if op.input("Max"):
max_value = g.get_node(op.input("Max")[0])
max_value, infered = try_infer_value(max_value, g.get_params())
if infered:
max_value = max_value.tolist()[0]
if isinstance(max_value, _expr.Expr):
min_max_is_tensor = True
else:
max_value = op.attr("max")
if min_max_is_tensor:
if not isinstance(min_value, _expr.Expr):
min_value = _op.const(min_value, dtype)
if not isinstance(max_value, _expr.Expr):
max_value = _op.const(max_value, dtype)
out = _op.maximum(x, min_value)
out = _op.minimum(out, max_value)
else:
out = _op.clip(x, min_value, max_value)
g.add_node(op.output("Out")[0], out)
def convert_concat(g, op, block):
"""Operator converter for concat."""
inputs = [g.get_node(op.input("X")[i]) for i in range(len(op.input("X")))]
axis = op.attr("axis")
inputs = _dtype_shape_promotion(inputs)
out = _op.concatenate(inputs, axis=axis)
g.add_node(op.output("Out")[0], out)
def convert_conv2d(g, op, block):
"""Operator converter for conv2d."""
dilations = op.attr("dilations")
groups = op.attr("groups")
paddings = op.attr("paddings")
padding_algorithm = op.attr("padding_algorithm")
strides = op.attr("strides")
kernel = g.get_node(op.input("Filter")[0])
input_x = g.get_node(op.input("Input")[0])
out_channels, _, k_h, k_w = infer_shape(kernel)
if padding_algorithm == "VALID":
paddings = [0, 0]
elif padding_algorithm == "SAME":
        # Handle a historical quirk of PaddlePaddle: when
        # padding_algorithm == "SAME", dilations are forced to [1, 1].
dilations = [1, 1]
input_x = autopad(input_x, strides, [k_h, k_w], dilations)
paddings = [0, 0]
elif padding_algorithm == "EXPLICIT":
if len(paddings) == 2:
paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]
elif len(paddings) == 4:
paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]
else:
        msg = 'Value {} in attribute "padding" of operator Conv is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm))
out = _op.nn.conv2d(
input_x,
kernel,
strides=strides,
padding=paddings,
dilation=dilations,
groups=groups,
channels=out_channels,
kernel_size=[k_h, k_w],
)
g.add_node(op.output("Output")[0], out)
def convert_conv2d_transpose(g, op, block):
"""Operator converter for conv2d_transpose."""
dilations = op.attr("dilations")
groups = op.attr("groups")
paddings = op.attr("paddings")
padding_algorithm = op.attr("padding_algorithm")
strides = op.attr("strides")
output_padding = op.attr("output_padding") if op.attr("output_padding") else [0, 0]
kernel = g.get_node(op.input("Filter")[0])
input_x = g.get_node(op.input("Input")[0])
_, out_channels, k_h, k_w = infer_shape(kernel)
k_size = [k_h, k_w]
if padding_algorithm == "VALID":
paddings = [0, 0]
elif padding_algorithm == "SAME":
        # SAME padding for conv2d_transpose differs from conv2d's SAME padding.
        # autopad cannot be used here; only static input shapes are supported for now.
dilations = [1, 1]
input_shape = shape_of(input_x)
h_w = _op.strided_slice(input_shape, [2], [4])
try:
h_w = infer_value(h_w, g.get_params()).numpy().tolist()
except Exception as e:
msg = "The SAME padding algorithm of conv2d_transpose not support dynamic shape"
raise tvm.error.OpAttributeInvalid(msg) from e
paddings = []
for i in range(2):
if strides[i] == 1 or h_w[i] % strides[i] == 0:
pad = max(k_size[i] - strides[i], 0)
else:
pad = max(k_size[i] - (h_w[i] % strides[i]), 0)
pad_before = pad // 2
pad_after = pad - pad_before
paddings.insert(-1, pad_before)
paddings.append(pad_after)
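            # Worked example of the padding arithmetic above: for a spatial size
            # of 224 with stride 2 and kernel 4, 224 % 2 == 0, so
            # pad = max(4 - 2, 0) = 2, split into 1 before and 1 after.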
elif padding_algorithm == "EXPLICIT":
if len(paddings) == 2:
paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]
elif len(paddings) == 4:
paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]
else:
        msg = 'Value {} in attribute "padding" of operator Conv is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm))
out = _op.nn.conv2d_transpose(
input_x,
kernel,
strides=strides,
padding=paddings,
dilation=dilations,
groups=groups,
channels=out_channels * groups,
kernel_size=k_size,
output_padding=output_padding,
)
g.add_node(op.output("Output")[0], out)
def convert_cumsum(g, op, block):
"""Operator converter for cumsum."""
axis = op.attr("axis")
exclusive = op.attr("exclusive")
flatten = op.attr("flatten")
reverse = op.attr("reverse")
x = g.get_node(op.input("X")[0])
if axis is None or flatten:
x = _op.reshape(x, [-1])
if reverse:
x = _op.reverse(x, axis=axis)
out = _op.cumsum(x, axis=axis, exclusive=exclusive)
out = _op.reverse(out, axis=axis)
else:
out = _op.cumsum(x, axis=axis, exclusive=exclusive)
g.add_node(op.output("Out")[0], out)
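    # Note: the reverse=True case flips the input, takes an ordinary cumulative
    # sum, and flips the result back, which is equivalent to a right-to-left
    # cumsum along the chosen axis.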
def convert_dropout(g, op, block):
"""Operator converter for dropout."""
x = g.get_node(op.input("X")[0])
g.add_node(op.output("Out")[0], x)
def convert_dot(g, op, block):
"""Operator converter for dot."""
    # x and y should be 1D or 2D tensors; when 2D, the first dimension is the
    # batch dimension.
x = g.get_node(op.input("X")[0])
y = g.get_node(op.input("Y")[0])
out = _op.sum(_op.multiply(x, y), axis=[-1], keepdims=True)
g.add_node(op.output("Out")[0], out)
def convert_elementwise_op(g, op, block):
"""Operator converter for all the elementwise operators."""
op_map = {
"elementwise_div": "divide",
"elementwise_add": "add",
"elementwise_mul": "multiply",
"elementwise_sub": "subtract",
"elementwise_mod": "mod",
"elementwise_max": "maximum",
"elementwise_min": "minimum",
"elementwise_pow": "power",
"elementwise_floordiv": "floor_divide",
"equal": "equal",
"greater_equal": "greater_equal",
"greater_than": "greater",
"less_equal": "less_equal",
"less_than": "less",
"not_equal": "not_equal",
}
op_func = op_map[op.type]
ipt0 = g.get_node(op.input("X")[0])
ipt1 = g.get_node(op.input("Y")[0])
ipt0_shape = infer_shape(ipt0)
ipt1_shape = infer_shape(ipt1)
axis = op.attr("axis")
if len(ipt0_shape) != len(ipt1_shape):
if axis < 0:
axis = axis + len(ipt0_shape)
if axis != len(ipt0_shape) - 1:
ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))
op_func = get_relay_op(op_func)
out = op_func(ipt0, ipt1)
g.add_node(op.output("Out")[0], out)
def convert_elu(g, op, block):
"""Operator converter for elu."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
alpha = op.attr("alpha")
alpha = _expr.const(-1.0 * alpha, dtype=dtype)
out = alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(x)) + _op.nn.relu(x)
g.add_node(op.output("Out")[0], out)
def convert_expand(g, op, block):
"""Operator converter for expand."""
x = g.get_node(op.input("X")[0])
if op.input("Shape"):
sizes = g.get_node(op.input("Shape")[0])
else:
sizes = op.attr("shape")
if isinstance(sizes, _expr.Expr):
sizes = try_infer_value(sizes, parameters=g.get_params())[0]
if isinstance(sizes, np.ndarray):
sizes = sizes.tolist()
out = _op.broadcast_to(x, sizes)
g.add_node(op.output("Out")[0], out)
def convert_expand_as(g, op, block):
"""Operator converter for expand_as."""
x = g.get_node(op.input("X")[0])
target_shape = op.attr("target_shape")
out = _op.broadcast_to(x, target_shape)
g.add_node(op.output("Out")[0], out)
def convert_feed(g, op, block):
"""Converter for model input node."""
if block is not None:
ipt_name = op.output("Out")[0]
ipt_shape = block.var(ipt_name).shape
ipt_dtype = block.var(ipt_name).dtype
ipt_dtype = str(ipt_dtype).strip().split(".")[1]
else:
ipt_shape = op.shape
ipt_dtype = str(op.dtype).strip().split(".")[1]
ipt_name = op.name
if g.shape_dict is not None:
ipt_shape = g.shape_dict[ipt_name]
if isinstance(ipt_shape, tuple):
ipt_shape = list(ipt_shape)
for i, s in enumerate(ipt_shape):
if s < 0:
ipt_shape[i] = _ty.Any()
out = new_var(ipt_name, shape=ipt_shape, dtype=ipt_dtype)
g.add_node(ipt_name, out)
def convert_fill_any_like(g, op, block):
"""Operator converter for fill_any_like."""
dtype = op.attr("dtype")
dtype = _convert_dtype_value(dtype)
x = g.get_node(op.input("X")[0])
value = _expr.const(op.attr("value"), dtype=dtype)
out = _op.transform.full_like(x, value).astype(dtype)
g.add_node(op.output("Out")[0], out)
def convert_fill_constant(g, op, block):
"""Operator converter for fill_constant."""
value = op.attr("value")
shape = block.var(op.output("Out")[0]).shape
dtype = op.attr("dtype")
dtype = _convert_dtype_value(dtype)
value = _expr.const(value).astype(dtype)
if "ValueTensor" in op.input_names and op.input("ValueTensor"):
shape = g.get_node(op.input("ValueTensor")[0])
if "ShapeTensor" in op.input_names and op.input("ShapeTensor"):
shape = g.get_node(op.input("ShapeTensor")[0])
if isinstance(shape, _expr.Expr):
shape = try_infer_value(shape, parameters=g.get_params())[0]
if isinstance(shape, np.ndarray):
shape = shape.tolist()
out = _op.full(value, shape=shape, dtype=dtype)
g.add_node(op.output("Out")[0], out)
def convert_fill_constant_batch_size_like(g, op, block):
"""Operator converter for fill_constant_batch_size_like."""
x = g.get_node(op.input("Input")[0])
value = op.attr("value")
shape = op.attr("shape")
input_dim_idx = op.attr("input_dim_idx")
output_dim_idx = op.attr("output_dim_idx")
dtype = op.attr("dtype")
dtype = _convert_dtype_value(dtype)
input_shape = shape_of(x)
batch = _op.strided_slice(input_shape, begin=[input_dim_idx], end=[input_dim_idx + 1]).astype(
"int32"
)
shape_before = shape[:output_dim_idx]
shape_before = _expr.const(shape_before, dtype="int32")
shape_after = shape[output_dim_idx + 1 :]
shape_after = _expr.const(shape_after, dtype="int32")
out_shape = _op.concatenate([shape_before, batch, shape_after], axis=0)
out_shape, infered = try_infer_value(out_shape, g.get_params())
if infered:
out_shape = out_shape.tolist()
constant = _expr.const(value, dtype=dtype).astype(dtype)
out = _op.full(constant, out_shape, dtype=dtype)
g.add_node(op.output("Out")[0], out)
def convert_flatten(g, op, block):
"""Operator converter for flatten."""
x = g.get_node(op.input("X")[0])
input_shape = list(infer_shape(x))
start = op.attr("start_axis")
end = op.attr("stop_axis")
ndim = len(input_shape)
if end < 0:
end += ndim
new_shape = [0] * start
new_shape.append(-1)
squeeze_axes = []
for i in range(start + 1, end + 1):
new_shape.append(1)
squeeze_axes.append(i)
for _ in range(end + 1, ndim):
new_shape.append(0)
out = _op.reshape(x, new_shape)
if squeeze_axes:
out = _op.squeeze(out, axis=squeeze_axes)
g.add_node(op.output("Out")[0], out)
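# Note on convert_flatten: the reshape spec above uses Relay's special values
# (0 copies the input dim, -1 infers the flattened extent), inserts dummy size-1
# dims for the merged axes, and then squeezes them away. For example, with input
# shape (2, 3, 4, 5), start_axis=1 and stop_axis=2, new_shape is [0, -1, 1, 0],
# reshape gives (2, 12, 1, 5), and squeezing axis 2 yields (2, 12, 5).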
def convert_gather(g, op, block):
"""Operator converter for gather."""
x = g.get_node(op.input("X")[0])
index = g.get_node(op.input("Index")[0])
axis = op.attr("axis")
out = _op.take(x, index, axis)
g.add_node(op.output("Out")[0], out)
def convert_gather_nd(g, op, block):
"""Operator converter for gather_nd."""
x = g.get_node(op.input("X")[0])
index = g.get_node(op.input("Index")[0])
shape = infer_shape(index)
perm = list(range(0, len(shape) - 1))
perm.insert(0, len(shape) - 1)
index = _op.transpose(index, axes=perm)
out = _op.gather_nd(x, index, 0, shape[-1])
g.add_node(op.output("Out")[0], out)
def convert_gelu(g, op, block):
"""Operator converter for gelu."""
x = g.get_node(op.input("X")[0])
out = x * (
_expr.const(0.5, dtype="float32")
+ _op.erf(x * _expr.const(0.5 ** 0.5, dtype="float32")) * _expr.const(0.5, dtype="float32")
)
g.add_node(op.output("Out")[0], out)
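# The expression above is the exact (erf-based) GELU with the 0.5 factor distributed:
#   gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
# rather than the tanh approximation.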
def convert_group_norm(g, op, block):
"""Operator converter for group_norm."""
x = g.get_node(op.input("X")[0])
num_groups = op.attr("groups")
epsilon = op.attr("epsilon")
gamma = g.get_node(op.input("Scale")[0])
beta = g.get_node(op.input("Bias")[0])
out = _op.nn.group_norm(
x,
gamma=gamma,
beta=beta,
num_groups=num_groups,
axis=1,
epsilon=epsilon,
center=True,
scale=True,
)
g.add_node(op.output("Y")[0], out)
def convert_hard_shrink(g, op, block):
"""Operator converter for hard_shrink."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
threshold = op.attr("threshold")
threshold = _op.const(threshold, dtype)
out = _op.logical_or(x < _op.const(-1.0, dtype) * threshold, x > threshold)
out = _op.cast(out, dtype) * x
g.add_node(op.output("Out")[0], out)
def convert_hard_sigmoid(g, op, block):
"""Operator converter for hard_sigmoid."""
slope = op.attr("slope")
x = g.get_node(op.input("X")[0])
out = x * _expr.const(slope) + _expr.const(0.5)
out = _op.clip(out, 0, 1)
g.add_node(op.output("Out")[0], out)
def convert_hard_swish(g, op, block):
"""Operator converter for hard_swish."""
offset = op.attr("offset")
scale = op.attr("scale")
threshold = op.attr("threshold")
assert np.isclose(offset, 3.0), "Only support offset==3.0 for PaddlePaddle's hard_swish"
assert np.isclose(scale, 6.0), "Only support scale==6.0 for PaddlePaddle's hard_swish"
assert np.isclose(threshold, 6.0), "Only support threshold==6.0 for PaddlePaddle's hard_swish"
x = g.get_node(op.input("X")[0])
out = _op.clip(x, -1 * offset, offset)
out = out / _expr.const(threshold) + _expr.const(0.5)
out = x * out
g.add_node(op.output("Out")[0], out)
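# With the asserted attributes (offset=3, scale=6, threshold=6) the expression above
# is the usual formulation hard_swish(x) = x * clip(x + 3, 0, 6) / 6, because
# clip(x, -3, 3) / 6 + 0.5 == clip(x + 3, 0, 6) / 6.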
def convert_interpolate(g, op, block):
"""Operator converter for interpolate."""
def get_interpolate_mode(op):
"""Get parameters for interpolation methods."""
interp_method = op.attr("interp_method")
align_corners = op.attr("align_corners")
align_mode = op.attr("align_mode")
rounding_method = ""
if interp_method == "nearest":
interp_method = "nearest_neighbor"
coordinate_transformation_mode = "asymmetric"
rounding_method = "floor"
elif interp_method == "bilinear":
interp_method = "linear"
if not align_corners and align_mode == 0:
coordinate_transformation_mode = "half_pixel"
else:
if align_corners:
coordinate_transformation_mode = "align_corners"
else:
coordinate_transformation_mode = "asymmetric"
elif interp_method == "bicubic":
interp_method = "cubic"
if align_corners:
coordinate_transformation_mode = "align_corners"
else:
coordinate_transformation_mode = "half_pixel"
else:
msg = "interp_method {} is not supported for PaddlePaddle's interpolate"
raise tvm.error.OpAttributeInvalid(msg.format(interp_method))
return rounding_method, interp_method, coordinate_transformation_mode
layout = op.attr("data_layout")
out_h = op.attr("out_h")
out_w = op.attr("out_w")
scale = op.attr("scale")
if not isinstance(scale, (list, tuple)):
scale = [scale, scale]
x = g.get_node(op.input("X")[0])
x_shape = infer_shape(x)
assert len(x_shape) == 4, "Only 4D input tensor is supported for PaddlePaddle's interpolate"
input_out_size = op.input("OutSize")
input_size_tensor = op.input("SizeTensor")
input_scale = op.input("Scale")
rounding_method, interp_method, coordinate_transformation_mode = get_interpolate_mode(op)
if input_size_tensor:
# if out_size is given as a list of tensors
out_size = list()
for name in input_size_tensor:
size = g.get_node(name)
if len(infer_shape(size)) == 0:
size = _op.reshape(size, [-1])
out_size.append(size)
out_size = _op.concatenate(out_size, axis=0)
out_size, infered = try_infer_value(out_size, parameters=g.get_params())
if infered:
out_size = out_size.tolist()
elif input_scale:
# if out_size is not defined, but scale is defined
input_scale = g.get_node(input_scale[0])
input_shape = shape_of(x).astype("float32")
if layout.startswith("NC"):
out_size = _op.strided_slice(input_shape, begin=[2], end=[4]) * input_scale
else:
out_size = _op.strided_slice(input_shape, begin=[1], end=[3]) * input_scale
out_size = out_size.astype("int32")
out_size, infered = try_infer_value(out_size, parameters=g.get_params())
if infered:
out_size = out_size.tolist()
elif scale and scale[0] > 0 and scale[1] > 0:
# use attribute scale
input_shape = shape_of(x).astype("float32")
input_scale = _expr.const(np.array([scale[0], scale[1]]).astype("float32"))
if layout.startswith("NC"):
out_size = _op.strided_slice(input_shape, begin=[2], end=[4]) * input_scale
else:
out_size = _op.strided_slice(input_shape, begin=[1], end=[3]) * input_scale
out_size = out_size.astype("int32")
out_size, infered = try_infer_value(out_size, parameters=g.get_params())
if infered:
out_size = out_size.tolist()
elif input_out_size:
# if out_size is a tensor
out_size = g.get_node(input_out_size[0])
out_size, infered = try_infer_value(out_size, parameters=g.get_params())
if infered:
out_size = out_size.tolist()
else:
# if out_size is a constant value
out_size = [out_h, out_w]
out = _op.image.resize2d(
x,
size=out_size,
layout=layout,
method=interp_method,
coordinate_transformation_mode=coordinate_transformation_mode,
rounding_method=rounding_method,
cubic_alpha=-0.75,
)
g.add_node(op.output("Out")[0], out)
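# The target size above is resolved with the following priority:
# 1) the SizeTensor inputs, 2) the Scale input tensor, 3) the "scale" attribute,
# 4) the OutSize input tensor, and finally 5) the static out_h/out_w attributes.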
def convert_instance_norm(g, op, block):
"""Operator converter for instance_norm."""
x = g.get_node(op.input("X")[0])
gamma = g.get_node(op.input("Scale")[0])
beta = g.get_node(op.input("Bias")[0])
epsilon = op.attr("epsilon")
scale = center = True
out = _op.nn.instance_norm(x, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale)
g.add_node(op.output("Y")[0], out)
def convert_layer_norm(g, op, block):
"""Operator converter for layer_norm."""
begin_norm_axis = op.attr("begin_norm_axis")
epsilon = op.attr("epsilon")
x = g.get_node(op.input("X")[0])
bias_input = op.input("Bias")
scale_input = op.input("Scale")
x_shape = infer_shape(x)
assert begin_norm_axis in (
len(x_shape) - 1,
-1,
), "Support only normalization over last one dimension."
if bias_input:
bias = g.get_node(bias_input[0])
else:
bias = _expr.const(np.zeros(x_shape[begin_norm_axis]))
if scale_input:
scale = g.get_node(scale_input[0])
else:
scale = _expr.const(np.ones(x_shape[begin_norm_axis]))
out = _op.nn.layer_norm(
x, gamma=scale, beta=bias, axis=begin_norm_axis, epsilon=epsilon, center=True, scale=True
)
g.add_node(op.output("Y")[0], out)
def convert_leaky_relu(g, op, block):
"""Operator converter for leaky_relu."""
alpha = op.attr("alpha")
x = g.get_node(op.input("X")[0])
out = _op.nn.leaky_relu(x, alpha=alpha)
g.add_node(op.output("Out")[0], out)
def convert_log1p(g, op, block):
"""Operator converter for log1p."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
one = _expr.const(1, dtype=dtype)
out = _op.log(x + one)
g.add_node(op.output("Out")[0], out)
def convert_logical_not(g, op, block):
"""Operator converter for logical_not op."""
ipt0 = g.get_node(op.input("X")[0])
op_func = get_relay_op(op.type)
out = op_func(ipt0)
g.add_node(op.output("Out")[0], out)
def convert_logsigmoid(g, op, block):
"""Operator converter for logsigmoid."""
x = g.get_node(op.input("X")[0])
out = _op.log(_op.tensor.sigmoid(x))
g.add_node(op.output("Out")[0], out)
def convert_logsoftmax(g, op, block):
"""Operator converter for logsoftmax."""
x = g.get_node(op.input("X")[0])
axis = op.attr("axis")
ndim = len(infer_shape(x))
if axis < 0:
axis += ndim
m = _op.max(x, [axis], keepdims=True)
e = _op.exp(x - m)
s = _op.sum(e, [axis], keepdims=True)
out = x - m - _op.log(s)
g.add_node(op.output("Out")[0], out)
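# The max subtraction above is the numerically stable log-softmax:
#   log_softmax(x) = x - m - log(sum(exp(x - m))), with m = max(x) along `axis`,
# which avoids overflow in exp() for large inputs.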
def convert_logsumexp(g, op, block):
"""Operator converter for logsumexp."""
input_x = g.get_node(op.input("X")[0])
axis = op.attr("axis")
if op.attr("reduce_all"):
axis = None
keepdims = op.attr("keepdim")
out = get_relay_op("logsumexp")(input_x, axis=axis, keepdims=keepdims)
if not axis and not keepdims:
out = _op.expand_dims(out, axis=0)
g.add_node(op.output("Out")[0], out)
def convert_lookup_table(g, op, block):
"""Operator converter for lookup_table_v2."""
indices = g.get_node(op.input("Ids")[0])
padding_idx = op.attr("padding_idx")
weights = g.get_node(op.input("W")[0])
if padding_idx != -1:
if op.input("W")[0] in g.get_params():
weights = g.get_params(op.input("W")[0]).numpy()
weights[padding_idx] = 0.0
weights = _expr.const(weights)
else:
shape, infered = try_infer_value(shape_of(weights), g.get_params())
if infered:
shape = shape.tolist()
assert not isinstance(
shape, _expr.Expr
), "Shape of weight has to be fixed for PaddlePaddle's lookup_table"
filters = np.ones(shape).astype(infer_type(weights).checked_type.dtype)
filters[padding_idx] = 0.0
filters = _expr.const(filters)
weights = weights * filters
out = _op.take(weights, indices.astype("int32"), axis=0)
g.add_node(op.output("Out")[0], out)
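# padding_idx handling above: when the embedding table is a known parameter its
# padding row is zeroed directly; otherwise the same effect is achieved by
# multiplying the weights with a constant mask that is zero at padding_idx.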
def convert_matmul(g, op, block):
"""Operator converter for matmul."""
inputs = [g.get_node(op.input("X")[0]), g.get_node(op.input("Y")[0])]
a_shape = infer_shape(inputs[0])
b_shape = infer_shape(inputs[1])
if op.has_attr("trans_x"):
# for matmul_v2
trans_x = op.attr("trans_x")
trans_y = op.attr("trans_y")
else:
# for matmul
trans_x = op.attr("transpose_X")
trans_y = op.attr("transpose_Y")
if trans_x:
perm = list(range(len(a_shape)))
perm[-2] = len(a_shape) - 1
perm[-1] = len(a_shape) - 2
inputs[0] = _op.transpose(inputs[0], axes=perm)
if trans_y:
perm = list(range(len(b_shape)))
perm[-2] = len(b_shape) - 1
perm[-1] = len(b_shape) - 2
inputs[1] = _op.transpose(inputs[1], axes=perm)
# This implementation closely follows the ONNX matmul converter.
# The input shapes need to be checked, since batch matmul must be supported.
a_shape = shape_of(inputs[0], dtype="int32")
a_rank = infer_shape(a_shape)[0]
b_shape = shape_of(inputs[1], dtype="int32")
b_rank = infer_shape(b_shape)[0]
# When performing a batch matmul, we need to properly handle N-dim shapes.
if a_rank > 2 or b_rank > 2:
def flatten_to_nd(x, x_shape, nd=3):
ndims = infer_shape(x_shape)[0]
if ndims == nd:
return x
newshape = _op.concatenate(
[
_expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),
_op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),
],
0,
)
out = _op.reshape(x, fold_constant(newshape))
return out
b_type = infer_type(inputs[1])
# Convert to dense if the second matrix is 2d and non-dynamic
if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):
a = flatten_to_nd(inputs[0], a_shape, 2)
b = _op.transpose(inputs[1])
output = _op.nn.dense(a, b)
else:
# Convert a and b into 3 dimensional tensors.
a = flatten_to_nd(inputs[0], a_shape, 3)
b = flatten_to_nd(inputs[1], b_shape, 3)
# Transpose matrix dimensions of b.
b = _op.transpose(b, [0, 2, 1])
# Perform a batch matmul.
output = _op.nn.batch_matmul(a, b)
# Determine the output batch dimension.
if a_rank > b_rank:
out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])
elif a_rank < b_rank:
out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])
# If it is unclear how broadcasting should be applied, the output
# shape is determined by choosing the maximum value from each input.
else:
out_batch = _op.concatenate(
[
_op.maximum(
_op.strided_slice(a_shape, [i], [i + 1]),
_op.strided_slice(b_shape, [i], [i + 1]),
)
for i in range(a_rank - 2)
],
0,
)
# Reshape output to original dimensions.
final_shape = _op.concatenate(
[
out_batch,
_op.strided_slice(
a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]
),
_op.strided_slice(
b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]
),
],
0,
)
out = _op.reshape(output, fold_constant(final_shape))
else:
if b_rank == 1:
inputs[1] = _op.expand_dims(inputs[1], 1, 1)
# Otherwise a simple dense op will get the job done.
input_1_t = _op.transpose(inputs[1], axes=(1, 0))
out = _op.nn.dense(inputs[0], input_1_t)
if b_rank == 1:
out = _op.squeeze(out, axis=[-1])
if op.has_attr("alpha"):
alpha = op.attr("alpha")
if not np.isclose(alpha, 1.0):
out = out * _expr.const(alpha).astype("float32")
g.add_node(op.output("Out")[0], out)
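# Summary of the batch-matmul path above: when either operand has rank > 2, both are
# flattened to rank-3 tensors of shape (batch, M, K) and (batch, K, N), the second is
# transposed to the (batch, N, K) layout expected by nn.batch_matmul, and the result
# is reshaped back using the broadcasted batch dimensions of the two inputs.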
def convert_meshgrid(g, op, block):
"""Operator converter for meshgrid."""
inputs = op.input("X")
x = [g.get_node(i) for i in inputs]
outs = _op.meshgrid(x, indexing="ij")
for i, out in enumerate(outs):
g.add_node(op.output("Out")[i], out)
def convert_mul(g, op, block):
"""Operator converter for mul."""
x = g.get_node(op.input("X")[0])
y = g.get_node(op.input("Y")[0])
x_num_col_dims = op.attr("x_num_col_dims")
y_num_col_dims = op.attr("y_num_col_dims")
x_shape = shape_of(x, dtype="int32")
y_shape = shape_of(y, dtype="int32")
x_dim = infer_shape(x_shape)[0]
y_dim = infer_shape(y_shape)[0]
if x_num_col_dims < 0:
x_num_col_dims += x_dim
if y_num_col_dims < 0:
y_num_col_dims += y_dim
if x_num_col_dims == 1:
x = _op.nn.batch_flatten(x)
else:
pre_shape = _op.prod(_op.strided_slice(x_shape, [0], [x_num_col_dims], [1]), keepdims=True)
post_shape = _op.prod(
_op.strided_slice(x_shape, [x_num_col_dims], [x_dim], [1]), keepdims=True
)
new_shape = _op.concatenate([pre_shape, post_shape], axis=0)
new_shape = fold_constant(new_shape)
x = _op.reshape(x, new_shape)
if y_num_col_dims == 1:
y = _op.nn.batch_flatten(y)
else:
pre_shape = _op.prod(_op.strided_slice(y_shape, [0], [y_num_col_dims], [1]), keepdims=True)
post_shape = _op.prod(
_op.strided_slice(y_shape, [y_num_col_dims], [y_dim], [1]), keepdims=True
)
new_shape = _op.concatenate([pre_shape, post_shape], axis=0)
new_shape = fold_constant(new_shape)
y = _op.reshape(y, new_shape)
y = _op.transpose(y)
out = _op.nn.dense(x, y)
out_pre_shape = _op.strided_slice(x_shape, [0], [x_num_col_dims], [1])
out_post_shape = _op.strided_slice(y_shape, [y_num_col_dims], [y_dim], [1])
out_shape = _op.concatenate([out_pre_shape, out_post_shape], axis=0)
out_shape = fold_constant(out_shape)
out = _op.reshape(out, out_shape)
g.add_node(op.output("Out")[0], out)
def convert_mv(g, op, block):
"""Operator converter for mv."""
x = g.get_node(op.input("X")[0])
y = g.get_node(op.input("Vec")[0])
y = _op.expand_dims(y, axis=-1)
y = _op.transpose(y)
out = _op.nn.dense(x, y)
out = _op.squeeze(out, axis=[-1])
g.add_node(op.output("Out")[0], out)
def convert_padding(g, op, block):
"""Operator converter for padding."""
input_x = g.get_node(op.input("X")[0])
input_padding = op.input("Paddings")
if input_padding:
padding = g.get_node(input_padding[0])
padding = infer_value(padding, g.get_params()).numpy().tolist()
else:
padding = op.attr("paddings")
value = op.attr("value")
data_format = op.attr("data_format")
mode = op.attr("mode")
assert mode != "circular", "mode='circular' is not supported for PaddlePaddle's padding"
if mode == "replicate":
mode = "edge"
pad_len = len(padding)
new_paddings = [0] * (pad_len + 4)
for i in range(0, pad_len, 2):
index = -1 - i
if data_format[:2] != "NC":
index = -3 - i
new_paddings[index] = padding[i + 1]
new_paddings[index - 1] = padding[i]
new_paddings = [new_paddings[i : i + 2] for i in range(0, len(new_paddings), 2)]
out = _op.nn.pad(input_x, new_paddings, pad_value=value, pad_mode=mode)
g.add_node(op.output("Out")[0], out)
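# Note on the padding layout above: Paddle lists paddings per spatial dimension in
# last-dimension-first order (e.g. [left, right, top, bottom] for a 4-D NCHW input),
# while nn.pad expects one (before, after) pair per input dimension in order, so the
# pairs are reversed into new_paddings and zero pairs are kept for batch/channel axes.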
def convert_pixel_shuffle(g, op, block):
"""Operator converter for pixel_shuffle."""
x = g.get_node(op.input("X")[0])
upscale_factor = op.attr("upscale_factor")
out = _op.nn.depth_to_space(x, upscale_factor, mode="CRD")
g.add_node(op.output("Out")[0], out)
def convert_pool2d(g, op, block):
"""Operator converter for pool2d."""
adaptive = op.attr("adaptive")
ceil_mode = op.attr("ceil_mode")
global_pooling = op.attr("global_pooling")
ksize = op.attr("ksize")
paddings = op.attr("paddings")
padding_algorithm = op.attr("padding_algorithm")
pooling_type = op.attr("pooling_type")
if global_pooling:
adaptive = True
ksize = [1, 1]
input_x = g.get_node(op.input("X")[0])
_, _, in_h, in_w = infer_shape(input_x)
op_map = {
"avg": "avg_pool2d",
"max": "max_pool2d",
}
strides = op.attr("strides")
if isinstance(strides, int):
strides = [strides, strides]
if isinstance(ksize, int):
ksize = [ksize, ksize]
if isinstance(paddings, int):
paddings = [paddings] * 2
if padding_algorithm == "VALID":
paddings = [0, 0]
elif padding_algorithm == "SAME":
input_x = autopad(input_x, strides, ksize)
paddings = [0, 0]
elif padding_algorithm == "EXPLICIT":
if len(paddings) == 2:
paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]
elif len(paddings) == 4:
paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]
else:
msg = 'Value {} in attribute "padding_algorithm" of operator Pool2d is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm))
# handle the special case where the kernel is larger than the input:
# shrink the kernel size to the input size
if not isinstance(in_h, _op.Expr) and in_h < ksize[0]:
ksize[0] = in_h
if not isinstance(in_w, _op.Expr) and in_w < ksize[1]:
ksize[1] = in_w
if not adaptive:
if pooling_type == "avg":
exclusive = op.attr("exclusive")
out = _op.nn.avg_pool2d(
input_x,
pool_size=ksize,
strides=strides,
padding=paddings,
ceil_mode=ceil_mode,
count_include_pad=not exclusive,
)
else:
out = getattr(_op.nn, op_map[pooling_type])(
input_x, pool_size=ksize, strides=strides, padding=paddings, ceil_mode=ceil_mode
)
else:
out = getattr(_op.nn, "adaptive_" + op_map[pooling_type])(input_x, output_size=ksize)
g.add_node(op.output("Out")[0], out)
def convert_pow(g, op, block):
"""Operator converter for pow."""
x = g.get_node(op.input("X")[0])
dtype = block.var(op.output("Out")[0]).dtype
dtype = _convert_dtype_value(dtype)
factor = op.attr("factor")
factor = _expr.const(factor, dtype=dtype)
out = _op.power(x, factor)
g.add_node(op.output("Out")[0], out)
def convert_prelu(g, op, block):
"""Operator converter for prelu."""
x = g.get_node(op.input("X")[0])
alpha = g.get_node(op.input("Alpha")[0])
ndims = len(infer_shape(x))
axis = 0 if ndims <= 1 else 1
mode = op.attr("mode")
if mode == "all":
if ndims == 1:
shape = _op.strided_slice(shape_of(x), [0], [1])
else:
shape = _op.strided_slice(shape_of(x), [1], [2])
alpha = _op.broadcast_to(alpha, shape)
out = _op.nn.prelu(x, alpha, axis)
g.add_node(op.output("Out")[0], out)
def convert_range(g, op, block):
"""Operator converter for range."""
start = g.get_node(op.input("Start")[0])
stop = g.get_node(op.input("End")[0])
step = g.get_node(op.input("Step")[0])
dtype = infer_type(start).checked_type.dtype
params = []
for param in (start, stop, step):
param, infered = try_infer_value(param, g.get_params())
if infered:
param = param.tolist()
if isinstance(param, list):
param = param[0]
if isinstance(param, _expr.Expr):
param = _op.squeeze(param)
else:
param = _op.const(param, dtype=dtype)
params.append(param)
out = _op.transform.arange(params[0], params[1], params[2], dtype=dtype)
g.add_node(op.output("Out")[0], out)
def convert_reciprocal(g, op, block):
"""Operator converter for reciprocal."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
out = _expr.const(1.0, dtype) / x
g.add_node(op.output("Out")[0], out)
def convert_reduce(g, op, block):
"""Operator converter for series of reduce operators."""
op_map = {
"reduce_all": "all",
"reduce_any": "any",
"reduce_max": "max",
"reduce_min": "min",
"reduce_prod": "prod",
"reduce_sum": "sum",
"reduce_mean": "mean",
}
op_name = op_map[op.type]
input_x = g.get_node(op.input("X")[0])
axis = op.attr("dim")
if op.attr("reduce_all"):
axis = None
keepdims = op.attr("keep_dim")
out = get_relay_op(op_name)(input_x, axis=axis, keepdims=keepdims)
if not axis and not keepdims:
# use `expand_dims` to resolve the following mismatch:
# after a full reduction TVM produces a 0-d tensor (shape ()),
# while Paddle produces a tensor of shape [1]
out = _op.expand_dims(out, axis=0)
g.add_node(op.output("Out")[0], out)
def convert_relu6(g, op, block):
"""Operator converter for relu6."""
x = g.get_node(op.input("X")[0])
out = _op.clip(x, 0.0, 6.0)
g.add_node(op.output("Out")[0], out)
def convert_reshape(g, op, block):
"""Operator converter for reshape."""
input_shape = op.input("Shape")
input_shape_tensor = op.input("ShapeTensor")
data = g.get_node(op.input("X")[0])
if input_shape:
new_shape = g.get_node(input_shape[0])
elif input_shape_tensor:
new_shape = []
for shape_name in input_shape_tensor:
shape = g.get_node(shape_name)
if len(infer_shape(shape)) == 0:
shape = _op.reshape(shape, [-1])
new_shape.append(shape)
new_shape = _op.concatenate(new_shape, axis=0)
new_shape, infered = try_infer_value(new_shape, parameters=g.get_params())
if infered:
new_shape = new_shape.tolist()
else:
new_shape = op.attr("shape")
out = _op.reshape(data, new_shape)
g.add_node(op.output("Out")[0], out)
def convert_rnn(g, op, block):
"""Operator converter for rnn."""
def generate_lstm(
input_seqs,
hidden_state,
cell_state,
w_inp,
w_hid,
b_inp,
b_hid,
f_act,
g_act,
h_act,
backwards=False,
):
"""Implementation of LSTM cell for paddlepaddle of TVM"""
h_list = []
seq_length = len(input_seqs)
for i in range(seq_length):
step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]
step = _op.squeeze(step, axis=[0])
gates = _op.nn.dense(step, w_inp) + _op.nn.dense(hidden_state, w_hid)
if b_inp is not None:
gates += b_inp
if b_hid is not None:
gates += b_hid
i, f, c, o = _op.split(gates, 4, axis=-1)
i = f_act(i)
f = f_act(f)
c = g_act(c)
C = f * cell_state + i * c
o = f_act(o)
H = o * h_act(C)
hidden_state = H
cell_state = C
h_list.append(_op.expand_dims(H, axis=0))
if backwards:
h_list = h_list[::-1]
# Concatenate outputs and add back in direction axis.
concatenated = _op.concatenate(h_list, 0)
output = _op.expand_dims(concatenated, axis=1)
hidden_state = _op.expand_dims(hidden_state, axis=0)
cell_state = _op.expand_dims(cell_state, axis=0)
return output, hidden_state, cell_state
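# Gate layout assumed by generate_lstm: the fused dense output is split into
# (i, f, c, o) chunks, giving the standard recurrence
#   C_t = f * C_{t-1} + i * g(c),  H_t = o * h(C_t)
# with f_act=sigmoid for the gates and g_act=h_act=tanh as passed in by the caller below.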
def generate_gru(
input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, rz_act, n_act, backwards=False
):
"""Implementation of GRU cell for paddlepaddle of TVM"""
h_list = []
seq_length = len(input_seqs)
for i in range(seq_length):
step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]
step = _op.squeeze(step, axis=[0])
xwt = _op.nn.dense(step, w_inp)
hwt = _op.nn.dense(hidden_state, w_hid)
if b_inp is not None:
xwt += b_inp
if b_hid is not None:
hwt += b_hid
i_r, i_z, i_n = _op.split(xwt, 3, axis=-1)
h_r, h_z, h_n = _op.split(hwt, 3, axis=-1)
r_gate = rz_act(i_r + h_r)
z_gate = rz_act(i_z + h_z)
n_gate = n_act(i_n + r_gate * h_n)
hidden_state = (hidden_state - n_gate) * z_gate + n_gate
h_list.append(_op.expand_dims(hidden_state, axis=0))
if backwards:
h_list = h_list[::-1]
# Concatenate outputs and add back in direction axis.
concatenated = _op.concatenate(h_list, 0)
output = _op.expand_dims(concatenated, axis=1)
hidden_state = _op.expand_dims(hidden_state, axis=0)
return output, hidden_state
def generate_simplernn(
input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, n_act, backwards=False
):
"""Implementation of SimpleRNN cell for paddlepaddle of TVM"""
h_list = []
seq_length = len(input_seqs)
for i in range(seq_length):
step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]
step = _op.squeeze(step, axis=[0])
xwt = _op.nn.dense(step, w_inp)
hwt = _op.nn.dense(hidden_state, w_hid)
if b_inp is not None:
xwt += b_inp
if b_hid is not None:
hwt += b_hid
n_gate = n_act(xwt + hwt)
hidden_state = n_gate
h_list.append(_op.expand_dims(hidden_state, axis=0))
if backwards:
h_list = h_list[::-1]
# Concatenate outputs and add back in direction axis.
concatenated = _op.concatenate(h_list, 0)
output = _op.expand_dims(concatenated, axis=1)
hidden_state = _op.expand_dims(hidden_state, axis=0)
return output, hidden_state
def make_param_inputs(g, node, layer, hidden_size, num_layers):
"""Param for weight and bias."""
bidirect_len = 4 if node.attr("is_bidirec") else 2
all_layer_param_len = len(node.input("WeightList"))
weight_list = node.input("WeightList")[: all_layer_param_len // 2]
bias_list = node.input("WeightList")[all_layer_param_len // 2 :]
layer_weight_list = weight_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]
layer_bias_list = bias_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]
param_list = layer_weight_list + layer_bias_list
param_list_len = len(param_list)
input_weights = param_list[0 : param_list_len // 2 : 2]
hidden_weights = param_list[1 : param_list_len // 2 : 2]
input_bias = param_list[param_list_len // 2 : param_list_len : 2]
hidden_bias = param_list[param_list_len // 2 + 1 : param_list_len : 2]
return input_weights, hidden_weights, input_bias, hidden_bias
def make_init_param_inputs(g, node, layer):
"""Init param for inputs."""
mode = node.attr("mode")
if mode == "LSTM":
all_init_h, all_init_c = node.input("PreState")
bidirect_len = 2 if node.attr("is_bidirec") else 1
init_h = _op.strided_slice(
g.get_node(all_init_h),
[layer * bidirect_len],
[layer * bidirect_len + bidirect_len],
axes=[0],
)
init_c = _op.strided_slice(
g.get_node(all_init_c),
[layer * bidirect_len],
[layer * bidirect_len + bidirect_len],
axes=[0],
)
return init_h, init_c
all_init_h = node.input("PreState")[0]
bidirect_len = 2 if node.attr("is_bidirec") else 1
init_h = _op.strided_slice(
g.get_node(all_init_h),
[layer * bidirect_len],
[layer * bidirect_len + bidirect_len],
axes=[0],
)
return init_h
hidden_size = op.attr("hidden_size")
num_layers = op.attr("num_layers")
is_bidirec = op.attr("is_bidirec")
mode = op.attr("mode")
input_x = g.get_node(op.input("Input")[0])
num_directions = 1
if is_bidirec:
num_directions = 2
x_shape = infer_shape(input_x)
time_steps = x_shape[0]
x_steps = _op.split(input_x, indices_or_sections=time_steps, axis=0)
for layer in range(num_layers):
input_weights, hidden_weights, input_bias, hidden_bias = make_param_inputs(
g, op, layer, hidden_size, num_layers
)
if mode == "LSTM":
init_h, init_c = make_init_param_inputs(g, op, layer)
init_hs = _op.split(init_h, num_directions)
init_cs = _op.split(init_c, num_directions)
result_output = []
result_H = []
result_C = []
for i in range(num_directions):
H_t = _op.squeeze(init_hs[i], axis=[0])
C_t = _op.squeeze(init_cs[i], axis=[0])
W = g.get_node(input_weights[i])
R = g.get_node(hidden_weights[i])
WB = g.get_node(input_bias[i])
RB = g.get_node(hidden_bias[i])
output, H, C = generate_lstm(
input_seqs=x_steps,
hidden_state=H_t,
cell_state=C_t,
w_inp=W,
w_hid=R,
b_inp=WB,
b_hid=RB,
f_act=_op.sigmoid,
g_act=_op.tanh,
h_act=_op.tanh,
backwards=i == 1,
)
result_output.append(output)
result_H.append(H)
result_C.append(C)
output = _op.concatenate(result_output, axis=1)
H = _op.concatenate(result_H, axis=0)
C = _op.concatenate(result_C, axis=0)
elif mode == "GRU":
init_h = make_init_param_inputs(g, op, layer)
init_hs = _op.split(init_h, num_directions)
result_output = []
result_H = []
for i in range(num_directions):
H_t = _op.squeeze(init_hs[i], axis=[0])
W = g.get_node(input_weights[i])
R = g.get_node(hidden_weights[i])
WB = g.get_node(input_bias[i])
RB = g.get_node(hidden_bias[i])
output, H = generate_gru(
input_seqs=x_steps,
hidden_state=H_t,
w_inp=W,
w_hid=R,
b_inp=WB,
b_hid=RB,
rz_act=_op.sigmoid,
n_act=_op.tanh,
backwards=i == 1,
)
result_output.append(output)
result_H.append(H)
output = _op.concatenate(result_output, axis=1)
H = _op.concatenate(result_H, axis=0)
elif mode == "RNN_TANH":
init_h = make_init_param_inputs(g, op, layer)
init_hs = _op.split(init_h, num_directions)
result_output = []
result_H = []
for i in range(num_directions):
H_t = _op.squeeze(init_hs[i], axis=[0])
W = g.get_node(input_weights[i])
R = g.get_node(hidden_weights[i])
WB = g.get_node(input_bias[i])
RB = g.get_node(hidden_bias[i])
output, H = generate_simplernn(
input_seqs=x_steps,
hidden_state=H_t,
w_inp=W,
w_hid=R,
b_inp=WB,
b_hid=RB,
n_act=_op.tanh,
backwards=i == 1,
)
result_output.append(output)
result_H.append(H)
output = _op.concatenate(result_output, axis=1)
H = _op.concatenate(result_H, axis=0)
output = _op.transpose(output, axes=[0, 2, 1, 3])
output = _op.reshape(output, newshape=(0, 0, -1))
x_steps = _op.split(output, indices_or_sections=time_steps, axis=0)
g.add_node(op.output("Out")[0], output)
def convert_scale(g, op, block):
"""Operator converter for scale."""
scale = op.attr("scale")
bias = op.attr("bias")
bias_after_scale = op.attr("bias_after_scale")
x = g.get_node(op.input("X")[0])
if np.isclose(scale, 1.0) and np.isclose(bias, 0.0):
out = _op.copy(x)
else:
if np.isclose(bias, 0.0):
out = x * _expr.const(np.array(scale).astype("float32"))
elif np.isclose(scale, 1.0):
out = x + _expr.const(np.array(bias).astype("float32"))
else:
if bias_after_scale:
out = x * _expr.const(np.array(scale).astype("float32")) + _expr.const(
np.array(bias).astype("float32")
)
else:
out = (x + _expr.const(np.array(bias).astype("float32"))) * _expr.const(
np.array(scale).astype("float32")
)
g.add_node(op.output("Out")[0], out)
def convert_scatter(g, op, block):
"""Operator converter for scatter."""
x = g.get_node(op.input("X")[0])
index = g.get_node(op.input("Ids")[0])
updates = g.get_node(op.input("Updates")[0])
overwrite = op.attr("overwrite")
shape = infer_shape(updates)
ndims = len(shape)
index = _op.expand_dims(index, axis=-1, num_newaxis=ndims - 1)
index = _op.transform.broadcast_to(index, shape)
if overwrite:
out = _op.scatter(x, index, updates, axis=0)
else:
out = _op.scatter_add(_op.zeros_like(x), index, updates, axis=0)
out += _op.scatter(x, index, _op.zeros_like(updates), axis=0)
g.add_node(op.output("Out")[0], out)
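# For overwrite=False the result is assembled from two pieces: scatter_add onto a
# zero tensor accumulates all updates at the indexed rows, while scattering
# zero-valued updates into x clears those same rows of the original data, so their
# sum keeps untouched rows of x and replaces indexed rows with accumulated updates.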
def convert_scatter_nd_add(g, op, block):
"""Operator converter for scatter_nd_add."""
x = g.get_node(op.input("X")[0])
index = g.get_node(op.input("Index")[0])
updates = g.get_node(op.input("Updates")[0])
indices_dim = len(infer_shape(index))
axes = list(range(indices_dim))
index = _op.transpose(index, axes[-1:] + axes[:-1])
out = _op.scatter_nd(x, index, updates, mode="add")
g.add_node(op.output("Out")[0], out)
def convert_selu(g, op, block):
"""Operator converter for selu."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
alpha = _op.const(op.attr("alpha"), dtype)
scale = _op.const(op.attr("scale"), dtype)
out = (
_expr.const(-1.0, dtype=dtype)
* alpha
* _op.nn.relu(_expr.const(1.0, dtype=dtype) - _op.exp(x))
)
out = scale * (out + _op.nn.relu(x))
g.add_node(op.output("Out")[0], out)
def convert_shape(g, op, block):
"""Operator converter for shape."""
x = g.get_node(op.input("Input")[0])
out = shape_of(x, dtype="int32")
g.add_node(op.output("Out")[0], out)
def convert_size(g, op, block):
"""Operator converter for size."""
input_x = g.get_node(op.input("Input")[0])
out = _op.ndarray_size(input_x, dtype="int64")
out = _op.expand_dims(out, axis=0)
g.add_node(op.output("Out")[0], out)
def convert_slice(g, op, block):
"""Operator converter for slice."""
data = g.get_node(op.input("Input")[0])
dims = len(infer_shape(data))
axes = op.attr("axes")
indices = _expr.const(axes, dtype="int64")
decrease_axis = op.attr("decrease_axis")
if isinstance(decrease_axis, int):
decrease_axis = [decrease_axis]
if op.input("StartsTensor"):
starts = g.get_node(op.input("StartsTensor")[0])
starts, infered = try_infer_value(starts, g.get_params())
if infered:
starts = starts.tolist()
elif op.input("StartsTensorList"):
starts = []
for start_index in op.input("StartsTensorList"):
start_index = g.get_node(start_index).astype("int64")
starts.append(start_index)
starts = _op.concatenate(starts, axis=0)
starts, infered = try_infer_value(starts, g.get_params())
if infered:
starts = starts.tolist()
else:
starts = op.attr("starts")
if len(axes) < dims:
if isinstance(starts, _expr.Expr):
starts = _op.scatter(
_op.const([0] * dims, dtype=infer_type(starts).checked_type.dtype),
indices,
starts,
axis=0,
)
else:
base = [0] * dims
for i, axis in enumerate(axes):
base[axis] = starts[i]
starts = base
if op.input("EndsTensor"):
ends = g.get_node(op.input("EndsTensor")[0])
ends, infered = try_infer_value(ends, g.get_params())
if infered:
ends = ends.tolist()
elif op.input("EndsTensorList"):
ends = []
for end_index in op.input("EndsTensorList"):
end_index = g.get_node(end_index).astype("int64")
ends.append(end_index)
ends = _op.concatenate(ends, axis=0)
ends, infered = try_infer_value(ends, g.get_params())
if infered:
ends = ends.tolist()
else:
ends = op.attr("ends")
if len(axes) < dims:
if isinstance(ends, _expr.Expr):
ends = _op.scatter(
_expr.const(
np.array([np.iinfo(np.int32).max] * dims),
dtype=infer_type(ends).checked_type.dtype,
),
indices,
ends,
axis=0,
)
else:
base = [np.iinfo(np.int32).max] * dims
for i, axis in enumerate(axes):
base[axis] = ends[i]
ends = base
strides = None
if "StridesTensor" in op.input_names and op.input("StridesTensor"):
strides = g.get_node(op.input("StridesTensor")[0])
strides, infered = try_infer_value(strides, g.get_params())
if infered:
strides = strides.tolist()
elif "StridesTensorList" in op.input_names and op.input("StridesTensorList"):
strides = []
for strides_index in op.input("StridesTensorList"):
strides_index = g.get_node(strides_index).astype("int64")
strides.append(strides_index)
strides = _op.concatenate(strides, axis=0)
strides, infered = try_infer_value(strides, g.get_params())
if infered:
strides = strides.tolist()
elif op.has_attr("strides"):
strides = op.attr("strides")
if len(axes) < dims:
if isinstance(strides, _expr.Expr):
strides = _op.scatter(
_expr.const(
np.array([1] * dims),
dtype=infer_type(strides).checked_type.dtype,
),
indices,
strides,
axis=0,
)
elif strides:
base = [1] * dims
for i, axis in enumerate(axes):
base[axis] = strides[i]
strides = base
if not strides:
strides = _op.const([1] * dims, dtype="int64")
out = _op.strided_slice(data, begin=starts, end=ends, strides=strides)
if decrease_axis:
out = _op.squeeze(out, axis=decrease_axis)
g.add_node(op.output("Out")[0], out)
def convert_softmax(g, op, block):
"""Operator converter for softmax."""
axis = op.attr("axis")
input_shape = block.var(op.input("X")[0]).shape
if axis < 0:
axis = len(input_shape) + axis
x = g.get_node(op.input("X")[0])
m = _op.max(x, axis, keepdims=True)
e = _op.exp(x - m)
out = e / _op.sum(e, axis, keepdims=True)
g.add_node(op.output("Out")[0], out)
def convert_softplus(g, op, block):
"""Operator converter for softplus."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
beta = op.attr("beta")
beta = _expr.const(beta, dtype=dtype)
out = _op.log(_op.exp(x * beta) + _expr.const(1.0, dtype=dtype)) / beta
g.add_node(op.output("Out")[0], out)
def convert_softsign(g, op, block):
"""Operator converter for softsign."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
out = x / (_op.const(1.0, dtype) + _op.abs(x))
g.add_node(op.output("Out")[0], out)
def convert_square(g, op, block):
"""Operator converter for square."""
x = g.get_node(op.input("X")[0])
dtype = block.var(op.output("Out")[0]).dtype
dtype = _convert_dtype_value(dtype)
out = _op.power(x, _expr.const(2, dtype))
g.add_node(op.output("Out")[0], out)
def convert_squeeze(g, op, block):
"""Operator converter for squeeze2."""
x = g.get_node(op.input("X")[0])
axes = op.attr("axes")
if not axes:
axes = None
x = _op.squeeze(x, axis=axes)
g.add_node(op.output("Out")[0], x)
def convert_swish(g, op, block):
"""Operator converter for swish."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
out = x / (_op.const(1.0, dtype) + _op.exp(_op.const(-1.0, dtype) * x))
g.add_node(op.output("Out")[0], out)
def convert_transpose(g, op, block):
"""Operator converter for transpose."""
perm = op.attr("axis")
out = _op.transpose(g.get_node(op.input("X")[0]), axes=perm)
g.add_node(op.output("Out")[0], out)
def convert_unsqueeze(g, op, block):
"""Operator converter for unsqueeze."""
x = g.get_node(op.input("X")[0])
axes = sorted(op.attr("axes"))
for axis in axes:
x = _op.expand_dims(x, axis=axis, num_newaxis=1)
g.add_node(op.output("Out")[0], x)
_convert_map = {
"abs": convert_unary_op,
"acos": convert_unary_op,
"addmm": convert_addmm,
"arg_max": convert_arg_max_min,
"arg_min": convert_arg_max_min,
"argsort": convert_argsort,
"asin": convert_unary_op,
"assign": convert_assign,
"assign_value": convert_assign_value,
"atan": convert_unary_op,
"batch_norm": convert_batch_norm,
"bicubic_interp_v2": convert_interpolate,
"bilinear_interp_v2": convert_interpolate,
"bmm": convert_bmm,
"brelu": convert_brelu,
"cast": convert_cast,
"ceil": convert_unary_op,
"clip": convert_clip,
"concat": convert_concat,
"conv2d": convert_conv2d,
"conv2d_transpose": convert_conv2d_transpose,
"cos": convert_unary_op,
"cosh": convert_unary_op,
"cumsum": convert_cumsum,
"depthwise_conv2d": convert_conv2d,
"dot": convert_dot,
"dropout": convert_dropout,
"elementwise_add": convert_elementwise_op,
"elementwise_div": convert_elementwise_op,
"elementwise_floordiv": convert_elementwise_op,
"elementwise_max": convert_elementwise_op,
"elementwise_min": convert_elementwise_op,
"elementwise_mod": convert_elementwise_op,
"elementwise_mul": convert_elementwise_op,
"elementwise_pow": convert_elementwise_op,
"elementwise_prod": convert_elementwise_op,
"elementwise_sub": convert_elementwise_op,
"elu": convert_elu,
"equal": convert_elementwise_op,
"erf": convert_unary_op,
"exp": convert_unary_op,
"expand_v2": convert_expand,
"expand_as_v2": convert_expand_as,
"feed": convert_feed,
"fill_any_like": convert_fill_any_like,
"fill_constant": convert_fill_constant,
"fill_constant_batch_size_like": convert_fill_constant_batch_size_like,
"flatten_contiguous_range": convert_flatten,
"floor": convert_unary_op,
"floor_mod": convert_elementwise_op,
"gather": convert_gather,
"gather_nd": convert_gather_nd,
"gelu": convert_gelu,
"greater_equal": convert_elementwise_op,
"greater_than": convert_elementwise_op,
"group_norm": convert_group_norm,
"hard_shrink": convert_hard_shrink,
"hard_sigmoid": convert_hard_sigmoid,
"hard_swish": convert_hard_swish,
"instance_norm": convert_instance_norm,
"isfinite_v2": convert_unary_op,
"isinf_v2": convert_unary_op,
"isnan_v2": convert_unary_op,
"layer_norm": convert_layer_norm,
"leaky_relu": convert_leaky_relu,
"less_equal": convert_elementwise_op,
"less_than": convert_elementwise_op,
"log": convert_unary_op,
"log2": convert_unary_op,
"log10": convert_unary_op,
"log1p": convert_log1p,
"logical_and": convert_binary_logical_op,
"logical_not": convert_logical_not,
"logical_or": convert_binary_logical_op,
"logical_xor": convert_binary_logical_op,
"logsigmoid": convert_logsigmoid,
"log_softmax": convert_logsoftmax,
"logsumexp": convert_logsumexp,
"lookup_table_v2": convert_lookup_table,
"matmul": convert_matmul,
"matmul_v2": convert_matmul,
"meshgrid": convert_meshgrid,
"mul": convert_mul,
"mv": convert_mv,
"nearest_interp_v2": convert_interpolate,
"not_equal": convert_elementwise_op,
"pad1d": convert_padding,
"pad2d": convert_padding,
"pad3d": convert_padding,
"pixel_shuffle": convert_pixel_shuffle,
"pool2d": convert_pool2d,
"pow": convert_pow,
"prelu": convert_prelu,
"range": convert_range,
"relu": convert_unary_op,
"relu6": convert_relu6,
"reshape2": convert_reshape,
"round": convert_unary_op,
"reciprocal": convert_reciprocal,
"reduce_all": convert_reduce,
"reduce_any": convert_reduce,
"reduce_max": convert_reduce,
"reduce_min": convert_reduce,
"reduce_prod": convert_reduce,
"reduce_sum": convert_reduce,
"reduce_mean": convert_reduce,
"rnn": convert_rnn,
"rsqrt": convert_unary_op,
"scale": convert_scale,
"scatter": convert_scatter,
"scatter_nd_add": convert_scatter_nd_add,
"selu": convert_selu,
"shape": convert_shape,
"sigmoid": convert_unary_op,
"sign": convert_unary_op,
"sin": convert_unary_op,
"sinh": convert_unary_op,
"size": convert_size,
"slice": convert_slice,
"softmax": convert_softmax,
"softplus": convert_softplus,
"softsign": convert_softsign,
"strided_slice": convert_slice,
"sqrt": convert_unary_op,
"square": convert_square,
"squeeze2": convert_squeeze,
"swish": convert_swish,
"tan": convert_unary_op,
"tanh": convert_unary_op,
"transpose2": convert_transpose,
"unsqueeze2": convert_unsqueeze,
}
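# _convert_map maps PaddlePaddle op types (op.type) to converter functions.
# GraphProto.check_unsupported_ops below rejects any op type missing from this
# table, and ops_to_relay dispatches each op through it.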
class GraphProto:
"""A helper class for handling relay functions from PaddlePaddle model."""
def __init__(self):
self.nodes = {}
self.params = {}
self.shape_dict = None
def get_node(self, name):
"""get node from graph"""
assert name in self.nodes
return self.nodes[name]
def add_node(self, name, node):
"""add a node to graph"""
self.nodes[name] = fold_constant(node)
def get_params(self, name=None):
"""Get params from graph."""
if name is None:
return self.params
assert name in self.params
return self.params[name]
def extract_parameters(self, program, scope=None):
"""Extract all the weights from PaddlePaddle program."""
self.params = {}
variables = program.global_block().vars
for name in variables:
var = program.global_block().var(name)
if name.endswith("feed") or name.endswith("fetch"):
continue
if not var.persistable:
continue
if isinstance(scope, dict):
self.params[name] = _nd.array(scope[name])
else:
self.params[name] = _nd.array(np.array(scope.var(name).get_tensor()))
shape = self.params[name].shape
dtype = self.params[name].dtype
self.nodes[name] = new_var(name, shape=shape, dtype=dtype)
def check_input_shape(self, op, block):
"""Check the shape information of model's inputs, fixed shape is recommended."""
ipt_name = op.input(op.input_names[0])
ipt_shape = block.var(ipt_name).shape
for i in ipt_shape:
if i < 0:
warning_msg = "Input {}(shape={}) has unkown dimension shapes. \
Specifying static values may improve performance".format(
ipt_name, ipt_shape
)
warnings.warn(warning_msg)
def check_unsupported_ops(self, program):
"""Check whether all the operators are supported."""
unsupported_ops = set()
for block in program.blocks:
for op in block.ops:
if op.type == "fetch":
continue
if op.type not in _convert_map:
unsupported_ops.add(op.type)
if len(unsupported_ops) > 0:
msg = "The following operators are not supported for frontend Paddle: "
msg += ", ".join(unsupported_ops)
raise tvm.error.OpNotImplemented(msg)
def ops_to_relay(self, program, input_specs=None):
"""Convert PaddlePaddle operators to TVM relay functions."""
if input_specs is not None:
for input_spec in input_specs:
convert_feed(self, input_spec, None)
for block in program.blocks:
for op in block.ops:
if op.type == "fetch":
continue
convert_func = _convert_map[op.type]
convert_func(self, op, block)
def from_program(self, program, shape_dict, scope):
"""Construct the TVM relay expression from PaddlePaddle program."""
self.shape_dict = shape_dict
if scope is None:
import paddle
scope = paddle.fluid.global_scope()
self.check_unsupported_ops(program)
self.extract_parameters(program, scope)
self.ops_to_relay(program)
output_names = list()
for block in program.blocks:
for op in block.ops:
if op.type == "fetch":
output_names.append(op.input("X")[0])
outputs = [self.nodes[name] for name in output_names]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
free_vars = analysis.free_vars(outputs)
func = _function.Function(free_vars, outputs)
mod = IRModule.from_expr(func)
return mod, self.params
def from_translated_layer(self, layer, shape_dict):
"""Construct the TVM relay expression from PaddlePaddle TranslatedLayer."""
self.shape_dict = shape_dict
program = layer.program()
parameters = dict()
for param in layer.parameters():
parameters[param.name] = np.array(param.value().get_tensor())
self.check_unsupported_ops(program)
self.extract_parameters(program, parameters)
input_specs = layer._input_spec()
self.ops_to_relay(program, input_specs)
output_names = [x.name for x in layer._output_spec()]
outputs = [self.nodes[name] for name in output_names]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
free_vars = analysis.free_vars(outputs)
func = _function.Function(free_vars, outputs)
mod = IRModule.from_expr(func)
# remove unused parameters
final_params = dict()
for var in free_vars:
if var.name_hint in self.params:
final_params[var.name_hint] = self.params[var.name_hint]
self.params = final_params
return mod, self.params
def from_paddle(program_or_layer, shape_dict=None, scope=None):
"""Convert a PaddlePaddle model into an equivalent Relay Function.
A PaddlePaddle Program/TranslatedLayer represents the computation graph of a
PaddlePaddle model, and the PaddlePaddle scope stores all of its weights.
Parameters
----------
program_or_layer : object of `paddle.static.Program` or `paddle.jit.TranslatedLayer`
Loaded model by `paddle.static.load_inference_model` or `paddle.jit.load`
shape_dict : dict of str to tuple/list, optional
The input shapes of the model, keyed by input name
scope : object of `paddle.static.Scope`, optional
The scope that stores all the weights of the model; `paddle.static.global_scope` is used by default
Returns
-------
mod : tvm.IRModule
The relay module for compilation
params : dict of str to tvm.nd.NDArray
The parameters of the converted model
"""
import paddle
# disable system signal capturing in paddle framework
# the signal capturing may cause conflict while running autotvm with paddle frontend
paddle.disable_signal_handler()
g = GraphProto()
if isinstance(program_or_layer, paddle.jit.TranslatedLayer):
# model is loaded by `paddle.jit.load`
mod, params = g.from_translated_layer(program_or_layer, shape_dict)
elif isinstance(program_or_layer, paddle.static.Program):
# model is loaded by `paddle.static.load_inference_model`
mod, params = g.from_program(program_or_layer, shape_dict, scope)
else:
raise Exception("Only PaddlePaddle's Program and TranslatedLayer are supported.")
return mod, params
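# A minimal usage sketch (assumes a model saved with paddle.jit.save under
# "./inference/model" and a single input named "x"; adjust paths, names and shapes
# to your own model):
#
#   import paddle
#   import tvm
#   from tvm import relay
#
#   layer = paddle.jit.load("./inference/model")
#   mod, params = relay.frontend.from_paddle(layer, shape_dict={"x": [1, 3, 224, 224]})
#   with tvm.transform.PassContext(opt_level=3):
#       lib = relay.build(mod, target="llvm", params=params)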
|
the-stack_0_22991 | # -*- coding: utf-8 -*-
from openprocurement.api.utils import (
context_unpack,
json_view,
set_ownership,
get_now,
raise_operation_error
)
from openprocurement.tender.core.validation import (
validate_complaint_data,
validate_submit_complaint_time,
validate_patch_complaint_data,
validate_complaint_operation_not_in_active_tendering,
validate_update_complaint_not_in_allowed_complaint_status
)
from openprocurement.tender.core.utils import (
save_tender,
apply_patch,
optendersresource,
calculate_business_date
)
from openprocurement.tender.belowthreshold.utils import (
check_tender_status
)
from openprocurement.tender.openuadefense.constants import (
CLAIM_SUBMIT_TIME, COMPLAINT_SUBMIT_TIME
)
from openprocurement.tender.openua.views.complaint import (
TenderUaComplaintResource as TenderComplaintResource
)
from openprocurement.tender.openuadefense.validation import validate_submit_claim_time
@optendersresource(name='aboveThresholdUA.defense:Tender Complaints',
collection_path='/tenders/{tender_id}/complaints',
path='/tenders/{tender_id}/complaints/{complaint_id}',
procurementMethodType='aboveThresholdUA.defense',
description="Tender complaints")
class TenderUaComplaintResource(TenderComplaintResource):
@json_view(content_type="application/json", validators=(validate_complaint_data, validate_complaint_operation_not_in_active_tendering), permission='create_complaint')
def collection_post(self):
"""Post a complaint
"""
tender = self.context
complaint = self.request.validated['complaint']
if complaint.status == 'claim':
validate_submit_claim_time(self.request)
elif complaint.status == 'pending':
validate_submit_complaint_time(self.request)
complaint.dateSubmitted = get_now()
complaint.type = 'complaint'
else:
complaint.status = 'draft'
complaint.complaintID = '{}.{}{}'.format(tender.tenderID, self.server_id, self.complaints_len(tender) + 1)
set_ownership(complaint, self.request)
tender.complaints.append(complaint)
if save_tender(self.request):
self.LOGGER.info('Created tender complaint {}'.format(complaint.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_complaint_create'}, {'complaint_id': complaint.id}))
self.request.response.status = 201
self.request.response.headers['Location'] = self.request.route_url('{}:Tender Complaints'.format(tender.procurementMethodType), tender_id=tender.id, complaint_id=complaint.id)
return {
'data': complaint.serialize(tender.status),
'access': {
'token': complaint.owner_token
}
}
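# Note: claims and complaints are only accepted inside the submission windows defined
# by CLAIM_SUBMIT_TIME and COMPLAINT_SUBMIT_TIME (counted back from tenderPeriod.endDate);
# the validators called above enforce this on creation, and the patch handler below
# re-checks the same windows when a draft claim is escalated to a complaint.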
@json_view(content_type="application/json", validators=(validate_patch_complaint_data, validate_complaint_operation_not_in_active_tendering, validate_update_complaint_not_in_allowed_complaint_status), permission='edit_complaint')
def patch(self):
"""Post a complaint resolution
"""
tender = self.request.validated['tender']
data = self.request.validated['data']
# complaint_owner
if self.request.authenticated_role == 'complaint_owner' and self.context.status in ['draft', 'claim', 'answered'] and data.get('status', self.context.status) == 'cancelled':
apply_patch(self.request, save=False, src=self.context.serialize())
self.context.dateCanceled = get_now()
elif self.request.authenticated_role == 'complaint_owner' and self.context.status in ['pending', 'accepted'] and data.get('status', self.context.status) == 'stopping':
apply_patch(self.request, save=False, src=self.context.serialize())
self.context.dateCanceled = get_now()
elif self.request.authenticated_role == 'complaint_owner' and tender.status == 'active.tendering' and self.context.status == 'draft' and data.get('status', self.context.status) == self.context.status:
apply_patch(self.request, save=False, src=self.context.serialize())
elif self.request.authenticated_role == 'complaint_owner' and tender.status == 'active.tendering' and self.context.status == 'draft' and data.get('status', self.context.status) == 'claim':
if get_now() > calculate_business_date(tender.tenderPeriod.endDate, -CLAIM_SUBMIT_TIME, tender, True):
raise_operation_error(self.request, 'Can submit claim not later than {0.days} days before tenderPeriod end'.format(CLAIM_SUBMIT_TIME))
apply_patch(self.request, save=False, src=self.context.serialize())
self.context.dateSubmitted = get_now()
elif self.request.authenticated_role == 'complaint_owner' and tender.status == 'active.tendering' and self.context.status in ['draft', 'claim'] and data.get('status', self.context.status) == 'pending':
if get_now() > tender.complaintPeriod.endDate:
raise_operation_error(self.request, 'Can submit complaint not later than {0.days} days before tenderPeriod end'.format(COMPLAINT_SUBMIT_TIME))
apply_patch(self.request, save=False, src=self.context.serialize())
self.context.type = 'complaint'
self.context.dateSubmitted = get_now()
elif self.request.authenticated_role == 'complaint_owner' and self.context.status == 'answered' and data.get('status', self.context.status) == self.context.status:
apply_patch(self.request, save=False, src=self.context.serialize())
elif self.request.authenticated_role == 'complaint_owner' and self.context.status == 'answered' and data.get('satisfied', self.context.satisfied) is True and data.get('status', self.context.status) == 'resolved':
apply_patch(self.request, save=False, src=self.context.serialize())
elif self.request.authenticated_role == 'complaint_owner' and self.context.status == 'answered' and data.get('satisfied', self.context.satisfied) is False and data.get('status', self.context.status) == 'pending':
if get_now() > tender.complaintPeriod.endDate:
raise_operation_error(self.request, 'Can submit complaint not later than {0.days} days before tenderPeriod end'.format(COMPLAINT_SUBMIT_TIME))
apply_patch(self.request, save=False, src=self.context.serialize())
self.context.type = 'complaint'
self.context.dateEscalated = get_now()
# tender_owner
elif self.request.authenticated_role == 'tender_owner' and self.context.status == 'claim' and data.get('status', self.context.status) == self.context.status:
now = get_now()
if now > tender.enquiryPeriod.clarificationsUntil:
raise_operation_error(self.request, 'Can update claim only before enquiryPeriod.clarificationsUntil')
apply_patch(self.request, save=False, src=self.context.serialize())
elif self.request.authenticated_role == 'tender_owner' and self.context.status == 'satisfied' and data.get('status', self.context.status) == self.context.status:
apply_patch(self.request, save=False, src=self.context.serialize())
elif self.request.authenticated_role == 'tender_owner' and self.context.status == 'claim' and data.get('resolution', self.context.resolution) and data.get('resolutionType', self.context.resolutionType) and data.get('status', self.context.status) == 'answered':
now = get_now()
if now > tender.enquiryPeriod.clarificationsUntil:
raise_operation_error(self.request, 'Can update claim only before enquiryPeriod.clarificationsUntil')
if len(data.get('resolution', self.context.resolution)) < 20:
raise_operation_error(self.request, 'Can\'t update complaint: resolution too short')
apply_patch(self.request, save=False, src=self.context.serialize())
self.context.dateAnswered = get_now()
elif self.request.authenticated_role == 'tender_owner' and self.context.status in ['pending', 'accepted']:
apply_patch(self.request, save=False, src=self.context.serialize())
elif self.request.authenticated_role == 'tender_owner' and self.context.status == 'satisfied' and data.get('tendererAction', self.context.tendererAction) and data.get('status', self.context.status) == 'resolved':
apply_patch(self.request, save=False, src=self.context.serialize())
# aboveThresholdReviewers
elif self.request.authenticated_role == 'aboveThresholdReviewers' and self.context.status in ['pending', 'accepted', 'stopping'] and data.get('status', self.context.status) == self.context.status:
apply_patch(self.request, save=False, src=self.context.serialize())
elif self.request.authenticated_role == 'aboveThresholdReviewers' and self.context.status in ['pending', 'stopping'] and data.get('status', self.context.status) in ['invalid', 'mistaken']:
apply_patch(self.request, save=False, src=self.context.serialize())
self.context.dateDecision = get_now()
self.context.acceptance = False
elif self.request.authenticated_role == 'aboveThresholdReviewers' and self.context.status == 'pending' and data.get('status', self.context.status) == 'accepted':
apply_patch(self.request, save=False, src=self.context.serialize())
self.context.dateAccepted = get_now()
self.context.acceptance = True
elif self.request.authenticated_role == 'aboveThresholdReviewers' and self.context.status == 'accepted' and data.get('status', self.context.status) in ['declined', 'satisfied']:
apply_patch(self.request, save=False, src=self.context.serialize())
self.context.dateDecision = get_now()
elif self.request.authenticated_role == 'aboveThresholdReviewers' and self.context.status == 'stopping' and data.get('status', self.context.status) == 'declined':
apply_patch(self.request, save=False, src=self.context.serialize())
self.context.dateDecision = get_now()
elif self.request.authenticated_role == 'aboveThresholdReviewers' and self.context.status in ['accepted', 'stopping'] and data.get('status', self.context.status) == 'stopped':
apply_patch(self.request, save=False, src=self.context.serialize())
self.context.dateDecision = get_now()
self.context.dateCanceled = self.context.dateCanceled or get_now()
else:
raise_operation_error(self.request, 'Can\'t update complaint')
if self.context.tendererAction and not self.context.tendererActionDate:
self.context.tendererActionDate = get_now()
if self.context.status not in ['draft', 'claim', 'answered', 'pending', 'accepted', 'stopping'] and tender.status in ['active.qualification', 'active.awarded']:
check_tender_status(self.request)
if save_tender(self.request):
self.LOGGER.info('Updated tender complaint {}'.format(self.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_complaint_patch'}))
return {'data': self.context.serialize("view")}
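# Rough map of the status transitions implemented in patch() above (added
# summary only -- the if/elif chain is the authoritative source):
#   complaint_owner:         draft/claim/answered -> cancelled,
#                            draft -> claim, draft/claim -> pending,
#                            answered -> resolved (satisfied) or pending (not satisfied),
#                            pending/accepted -> stopping
#   tender_owner:            claim -> answered (resolution of 20+ characters),
#                            satisfied -> resolved (requires tendererAction)
#   aboveThresholdReviewers: pending/stopping -> invalid/mistaken, pending -> accepted,
#                            accepted -> declined/satisfied, stopping -> declined,
#                            accepted/stopping -> stopped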
|
the-stack_0_22992 | """
Copyright 2016-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from . import flip
import click
import sys
@click.command()
@click.option("--input", "-i", "in_format", type=click.Choice(["json", "yaml"]), help="Specify the input format. Overrides -j and -y flags.")
@click.option("--output", "-o", "out_format", type=click.Choice(["json", "yaml"]), help="Specify the output format. Overrides -j, -y, and -n flags.")
@click.option("--json", "-j", "out_flag", flag_value="json", help="Convert to JSON. Assume the input is YAML.")
@click.option("--yaml", "-y", "out_flag", flag_value="yaml", help="Convert to YAML. Assume the input is JSON.")
@click.option("--clean", "-c", is_flag=True, help="Performs some opinionated cleanup on your template.")
@click.option("--long", "-l", is_flag=True, help="Use long-form syntax for functions when converting to YAML.")
@click.option("--no-flip", "-n", is_flag=True, help="Perform other operations but do not flip the output format.")
@click.argument("input", type=click.File("r"), default=sys.stdin)
@click.argument("output", type=click.File("w"), default=sys.stdout)
@click.version_option(message='AWS CloudFormation Template Flip, Version %(version)s')
@click.pass_context
def main(ctx, **kwargs):
"""
AWS CloudFormation Template Flip is a tool that converts
AWS CloudFormation templates between JSON and YAML formats,
making use of the YAML format's short function syntax where possible.
"""
in_format = kwargs.pop('in_format')
out_format = kwargs.pop('out_format') or kwargs.pop('out_flag')
no_flip = kwargs.pop('no_flip')
clean = kwargs.pop('clean')
long_form = kwargs.pop('long')
input_file = kwargs.pop('input')
output_file = kwargs.pop('output')
if not in_format:
if input_file.name.endswith(".json"):
in_format = "json"
elif input_file.name.endswith(".yaml") or input_file.name.endswith(".yml"):
in_format = "yaml"
if input_file.name == "<stdin>" and sys.stdin.isatty():
click.echo(ctx.get_help())
ctx.exit()
try:
output_file.write(flip(
input_file.read(),
in_format=in_format,
out_format=out_format,
clean_up=clean,
no_flip=no_flip,
long_form=long_form
))
except Exception as e:
raise click.ClickException("{}".format(e))
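# Illustrative invocations (assuming the package exposes this entry point as
# the `cfn-flip` console script, which is how it is normally installed):
#
#   cfn-flip template.json template.yaml        # format inferred from extensions
#   cfn-flip -c -y template.json                # clean up and force YAML output
#   cat template.yaml | cfn-flip -j > out.json  # stdin/stdout, force JSON output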
|
the-stack_0_22994 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple transfer learning with an Inception v3 architecture model which
displays summaries in TensorBoard.
This example shows how to take an Inception v3 architecture model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector for each image. We
train a softmax layer on top of this representation. Assuming the softmax layer
contains N labels, this corresponds to learning N + 2048*N model parameters
corresponding to the learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
bazel build third_party/tensorflow/examples/image_retraining:retrain && \
bazel-bin/third_party/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import glob
import hashlib
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
import struct
FLAGS = tf.app.flags.FLAGS
# Input and output file flags.
tf.app.flags.DEFINE_string('image_dir', '',
"""Path to folders of labeled images.""")
tf.app.flags.DEFINE_string('output_graph', '/tmp/output_graph.pb',
"""Where to save the trained graph.""")
tf.app.flags.DEFINE_string('output_labels', '/tmp/output_labels.txt',
"""Where to save the trained graph's labels.""")
tf.app.flags.DEFINE_string('summaries_dir', '/tmp/retrain_logs',
"""Where to save summary logs for TensorBoard.""")
# Details of the training configuration.
tf.app.flags.DEFINE_integer('how_many_training_steps', 4000,
"""How many training steps to run before ending.""")
tf.app.flags.DEFINE_float('learning_rate', 0.0001,
"""How large a learning rate to use when training.""")
tf.app.flags.DEFINE_integer(
'testing_percentage', 10,
"""What percentage of images to use as a test set.""")
tf.app.flags.DEFINE_integer(
'validation_percentage', 10,
"""What percentage of images to use as a validation set.""")
tf.app.flags.DEFINE_integer('eval_step_interval', 10,
"""How often to evaluate the training results.""")
tf.app.flags.DEFINE_integer('train_batch_size', 100,
"""How many images to train on at a time.""")
tf.app.flags.DEFINE_integer('test_batch_size', 500,
"""How many images to test on at a time. This"""
""" test set is only used infrequently to verify"""
""" the overall accuracy of the model.""")
tf.app.flags.DEFINE_integer(
'validation_batch_size', 100,
"""How many images to use in an evaluation batch. This validation set is"""
""" used much more often than the test set, and is an early indicator of"""
""" how accurate the model is during training.""")
# File-system cache locations.
tf.app.flags.DEFINE_string('model_dir', '/tmp/imagenet',
"""Path to classify_image_graph_def.pb, """
"""imagenet_synset_to_human_label_map.txt, and """
"""imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string(
'bottleneck_dir', '/tmp/bottleneck',
"""Path to cache bottleneck layer values as files.""")
tf.app.flags.DEFINE_string('final_tensor_name', 'final_result',
"""The name of the output classification layer in"""
""" the retrained graph.""")
# Controls the distortions used during training.
tf.app.flags.DEFINE_boolean(
'flip_left_right', False,
"""Whether to randomly flip half of the training images horizontally.""")
tf.app.flags.DEFINE_integer(
'random_crop', 0,
"""A percentage determining how much of a margin to randomly crop off the"""
""" training images.""")
tf.app.flags.DEFINE_integer(
'random_scale', 0,
"""A percentage determining how much to randomly scale up the size of the"""
""" training images by.""")
tf.app.flags.DEFINE_integer(
'random_brightness', 0,
"""A percentage determining how much to randomly multiply the training"""
""" image input pixels up or down by.""")
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
# Directory containing files with correct image labels for each image.
IMAGE_LABELS_DIR = 'image_labels_dir'
# Contains cached ground_truth vectors to prevent calculating them again and again
CACHED_GROUND_TRUTH_VECTORS = {}
# Contains list of all labels, each label is on a separate line, just like in image_label files
ALL_LABELS_FILE = "tmp/labels.txt"
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
A dictionary containing an entry for each label subfolder, with images split
into training, testing, and validation sets within each label.
"""
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
return None
result = {}
sub_dirs = [x[0] for x in os.walk(image_dir)]
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
print("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(glob.glob(file_glob))
if not file_list:
print('No files found')
continue
if len(file_list) < 20:
print('WARNING: Folder has fewer than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
print('WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put an image in, so that the data set creator has a
# way of grouping photos that are close variations of each other. For
# example, this is used in the plant disease data set to group multiple
# pictures of the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
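# Illustrative sketch (not part of the original flow): the split above is a pure
# function of the file name, so the same image always lands in the same set even
# when new images are added later. In isolation, for a hypothetical file name:
#
#   hashed = hashlib.sha1(compat.as_bytes('rose/photo1.jpg')).hexdigest()
#   percentage = ((int(hashed, 16) % (MAX_NUM_IMAGES_PER_CLASS + 1))
#                 * (100.0 / MAX_NUM_IMAGES_PER_CLASS))
#   # percentage < validation_percentage                      -> validation set
#   # percentage < validation_percentage + testing_percentage -> testing set
#   # otherwise                                                -> training set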
def get_image_labels_path(image_lists, label_name, index, image_labels_dir, category):
""""Returns a path to a file containing correct image labels.
This is just slightly edited get_image_path() method.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_labels_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
full_path = os.path.join(image_labels_dir, base_name)
full_path += '.txt'
return full_path
def get_image_path(image_lists, label_name, index, image_dir, category):
""""Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category):
""""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '.txt'
def create_inception_graph():
""""Creates a graph from saved GraphDef file and returns a Graph object.
Returns:
Graph holding the trained Inception network, and various tensors we'll be
manipulating.
"""
with tf.Session() as sess:
model_filename = os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb')
with gfile.FastGFile(model_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
tf.import_graph_def(graph_def, name='', return_elements=[
BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
RESIZED_INPUT_TENSOR_NAME]))
return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
bottleneck_values = sess.run(
bottleneck_tensor,
{image_data_tensor: image_data})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def maybe_download_and_extract():
"""Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
"""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL,
filepath,
_progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def write_list_of_floats_to_file(list_of_floats , file_path):
"""Writes a given list of floats to a binary file.
Args:
list_of_floats: List of floats we want to write to a file.
file_path: Path to a file where list of floats will be stored.
"""
s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)
with open(file_path, 'wb') as f:
f.write(s)
def read_list_of_floats_from_file(file_path):
"""Reads list of floats from a given file.
Args:
file_path: Path to a file where list of floats was stored.
Returns:
Array of bottleneck values (list of floats).
"""
with open(file_path, 'rb') as f:
s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())
return list(s)
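# Round-trip sketch for the two helpers above (hypothetical path; this pair is
# not used by the rest of the script):
#
#   values = [0.0] * BOTTLENECK_TENSOR_SIZE
#   write_list_of_floats_to_file(values, '/tmp/example_bottleneck.bin')
#   assert read_list_of_floats_from_file('/tmp/example_bottleneck.bin') == values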
bottleneck_path_2_bottleneck_values = {}
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
bottleneck_tensor: The output tensor for the bottleneck values.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category)
if not os.path.exists(bottleneck_path):
print('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
bottleneck_values = run_bottleneck_on_image(sess, image_data,
jpeg_data_tensor,
bottleneck_tensor)
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
bottleneck_tensor: The penultimate output layer of the graph.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(sess, image_lists, label_name, index,
image_dir, category, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
print(str(how_many_bottlenecks) + ' bottleneck files created.')
def get_ground_truth(labels_file, labels, class_count):
if labels_file in CACHED_GROUND_TRUTH_VECTORS.keys():
ground_truth = CACHED_GROUND_TRUTH_VECTORS[labels_file]
else:
with open(labels_file) as f:
true_labels = f.read().splitlines()
ground_truth = np.zeros(class_count, dtype=np.float32)
idx = 0
for label in labels:
if label in true_labels:
ground_truth[idx] = 1.0
idx += 1
CACHED_GROUND_TRUTH_VECTORS[labels_file] = ground_truth
return ground_truth
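# Worked example (hypothetical labels, added for clarity): with
# labels = ['cat', 'dog', 'tree'] read from labels.txt and an image_labels file
# containing only the line 'dog', get_ground_truth() returns the multi-hot
# vector [0., 1., 0.] -- one entry per label, in labels.txt order.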
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
bottleneck_tensor, labels):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The number of bottleneck values to return.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
labels: All possible labels loaded from file labels.txt.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
# class_count = len(image_lists.keys())
class_count = len(labels)
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
# label_index = random.randrange(class_count)
label_index = 0 # there is only one folder with images = 'multi-label'
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
labels_file = get_image_labels_path(image_lists, label_name, image_index, IMAGE_LABELS_DIR, category)
ground_truth = get_ground_truth(labels_file, labels, class_count)
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor, labels):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
labels: All possible labels loaded from file labels.txt.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(labels)
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = 0 # there is only one folder with images = 'multi-label'
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = gfile.FastGFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
# running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck = run_bottleneck_on_image(sess, distorted_image_data,
resized_input_tensor,
bottleneck_tensor)
labels_file = get_image_labels_path(image_lists, label_name, image_index, IMAGE_LABELS_DIR, category)
ground_truth = get_ground_truth(labels_file, labels, class_count)
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
graph.
Returns:
The jpeg input layer and the distorted result tensor.
"""
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)
precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,
MODEL_INPUT_DEPTH])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(tensor_shape.scalar(),
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
return jpeg_data, distort_result
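# Usage sketch (mirrors how main() wires this up when distortions are enabled;
# the percentages here are made-up example values):
#
#   jpeg_in, distorted = add_input_distortions(flip_left_right=True,
#                                              random_crop=10,
#                                              random_scale=10,
#                                              random_brightness=10)
#   # feed raw JPEG bytes to jpeg_in and fetch `distorted` to obtain a randomly
#   # cropped/scaled/flipped/brightened 1 x 299 x 299 x 3 float image batch.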
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev/' + name, stddev)
tf.summary.scalar('max/' + name, tf.reduce_max(var))
tf.summary.scalar('min/' + name, tf.reduce_min(var))
tf.summary.histogram(name, var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
"""Adds a new softmax and fully-connected layer for training.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.float32,
[None, class_count],
name='GroundTruthInput')
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
layer_weights = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001), name='final_weights')
variable_summaries(layer_weights, layer_name + '/weights')
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases, layer_name + '/biases')
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram(layer_name + '/pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.summary.histogram(final_tensor_name + '/activations', final_tensor)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=ground_truth_input)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.summary.scalar('cross entropy', cross_entropy_mean)
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
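# Shape sketch for the layer added above: bottleneck_input is
# [batch, BOTTLENECK_TENSOR_SIZE] = [batch, 2048], layer_weights is
# [2048, class_count], so logits and final_tensor are [batch, class_count];
# only these 2048*class_count weights and class_count biases are trained.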
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Nothing.
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
# tf.argmax(result_tensor, 1) = return index of maximal value (= 1 in a 1-of-N encoding vector) in each row (axis = 1)
# But we have more ones (indicating multiple labels) in one row of result_tensor due to the multi-label classification
# correct_prediction = tf.equal(tf.argmax(result_tensor, 1), \
# tf.argmax(ground_truth_tensor, 1))
# ground_truth is not a binary tensor, it contains the probabilities of each label = we need to tf.round() it
# to acquire a binary tensor allowing comparison by tf.equal()
# See: http://stackoverflow.com/questions/39219414/in-tensorflow-how-can-i-get-nonzero-values-and-their-indices-from-a-tensor-with
correct_prediction = tf.equal(tf.round(result_tensor), ground_truth_tensor)
with tf.name_scope('accuracy'):
# Mean accuracy over all labels:
# http://stackoverflow.com/questions/37746670/tensorflow-multi-label-accuracy-calculation
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step
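# Worked example of the multi-label accuracy above: for one image where
# tf.round(result_tensor) gives [1, 0, 0] and the ground truth row is
# [1, 0, 1], the element-wise comparison is [True, True, False] and the
# contribution to the mean accuracy is 2/3.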
def main(_):
# Setup the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
# Set up the pre-trained graph.
maybe_download_and_extract()
graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
create_inception_graph())
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
if len(image_lists.keys()) == 0:
print('Folder containing training images has not been found inside {} directory. \n'
'Put all the training images into '
'one folder inside {} directory and delete everything else inside the {} directory.'
.format(FLAGS.image_dir, FLAGS.image_dir, FLAGS.image_dir))
return -1
if len(image_lists.keys()) > 1:
print('More than one folder found inside {} directory. \n'
'In order to prevent validation issues, put all the training images into '
'one folder inside {} directory and delete everything else inside the {} directory.'
.format(FLAGS.image_dir, FLAGS.image_dir, FLAGS.image_dir))
return -1
if not os.path.isfile(ALL_LABELS_FILE):
print('File {} containing all possible labels (= classes) does not exist.\n'
'Create it in project root and put each possible label on new line, '
'it is exactly the same as creating an image_label file for image '
'that is in all the possible classes.'.format(ALL_LABELS_FILE))
return -1
with open(ALL_LABELS_FILE) as f:
labels = f.read().splitlines()
class_count = len(labels)
if class_count == 0:
print('No valid labels inside file {} that should contain all possible labels (= classes).'.format(ALL_LABELS_FILE))
return -1
if class_count == 1:
print('Only one valid label found inside {} - multiple classes are needed for classification.'.format(ALL_LABELS_FILE))
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
sess = tf.Session()
if do_distort_images:
# We will be applying distortions, so setup the operations we'll need.
distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(class_count,
FLAGS.final_tensor_name,
bottleneck_tensor)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step = add_evaluation_step(final_tensor, ground_truth_input)
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.global_variables_initializer()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every time
# with distortions applied, or from the cache stored on disk.
if do_distort_images:
train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor, labels)
else:
train_bottlenecks, train_ground_truth = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor, labels)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run([merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,
train_accuracy * 100))
print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,
cross_entropy_value))
validation_bottlenecks, validation_ground_truth = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor, labels))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
print('%s: Step %d: Validation accuracy = %.1f%%' %
(datetime.now(), i, validation_accuracy * 100))
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.test_batch_size, 'testing',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor, labels)
test_accuracy = sess.run(
evaluation_step,
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
# Write out the trained graph and labels with the weights stored as constants.
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:
f.write(output_graph_def.SerializeToString())
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
tf.app.run()
|
the-stack_0_22996 | """
Neighbors
"""
from sage.modules.free_module_element import vector
from sage.rings.integer_ring import ZZ
from sage.rings.all import GF, QQ
from copy import deepcopy
from sage.matrix.constructor import matrix
# ############################################################################
# Routines used for understanding p-neighbors and computing classes in a genus
# ############################################################################
def find_primitive_p_divisible_vector__random(self, p):
"""
Find a random `p`-primitive vector in `L/pL` whose value is `p`-divisible.
.. note::
Since there are about `p^{(n-2)}` of these lines, we have a `1/p`
chance of randomly finding an appropriate vector.
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 2, [10,1,4])
sage: v = Q.find_primitive_p_divisible_vector__random(5)
sage: tuple(v) in ((1, 0), (1, 1), (2, 0), (2, 2), (3, 0), (3, 3), (4, 0), (4, 4))
True
sage: 5.divides(Q(v))
True
sage: Q = QuadraticForm(QQ,matrix.diagonal([1,1,1,1]))
sage: v = Q.find_primitive_p_divisible_vector__random(2)
sage: Q(v)
2
"""
n = self.dim()
v = vector([ZZ.random_element(p) for _ in range(n)])
# Repeatedly choose random vectors, and evaluate until the value
# is p-divisible.
k = 0
while k < 1000:
k = k + 1
a = self(v)
if a in ZZ and (a % p == 0) and (v != 0):
return v
else:
v[ZZ.random_element(n)] = ZZ.random_element(p)
# Replace a random entry and try again.
raise RuntimeError("unable to find a p divisible vector")
def find_primitive_p_divisible_vector__next(self, p, v=None):
"""
Find the next `p`-primitive vector (up to scaling) in `L/pL` whose
value is `p`-divisible, where the last vector returned was `v`. For
an initial call, no `v` needs to be passed.
Returns vectors whose last non-zero entry is normalized to 0 or 1 (so no
lines are counted repeatedly). The ordering is by increasing the
first non-normalized entry. If we have tested all (lines of)
vectors, then return None.
OUTPUT:
vector or None
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 2, [10,1,4])
sage: v = Q.find_primitive_p_divisible_vector__next(5); v
(1, 1)
sage: v = Q.find_primitive_p_divisible_vector__next(5, v); v
(1, 0)
sage: v = Q.find_primitive_p_divisible_vector__next(5, v); v
sage: Q = QuadraticForm(QQ,matrix.diagonal([1,1,1,1]))
sage: v = Q.find_primitive_p_divisible_vector__next(2)
sage: Q(v)
2
"""
# Initialize
n = self.dim()
if v is None:
w = vector(ZZ, [0] * (n - 1) + [1])
else:
w = deepcopy(v)
# Handle n = 1 separately.
if n <= 1:
raise NotImplementedError("Sorry -- Not implemented yet!")
# Look for the last non-zero entry (which must be 1)
nz = n - 1
while w[nz] == 0:
nz += -1
# Test that the last non-zero entry is 1 (to detect tampering).
if w[nz] != 1:
print("Warning: The input vector to QuadraticForm.find_primitive_p_divisible_vector__next() is not normalized properly.")
# Look for the next vector, until w == 0
while True:
# Look for the first non-maximal (non-normalized) entry
ind = 0
while (ind < nz) and (w[ind] == p - 1):
ind += 1
# Increment
if ind < nz:
w[ind] += 1
for j in range(ind):
w[j] = 0
else:
for j in range(ind + 1): # Clear all entries
w[j] = 0
if nz != 0:
# Move the non-zero normalized index over by one, or
# return the zero vector
w[nz - 1] = 1
nz += -1
# Test for zero vector
if w == 0:
return None
# Test for p-divisibility
a = self(w)
if a in ZZ and (a % p == 0):
return w
def find_p_neighbor_from_vec(self, p, y):
r"""
Return the `p`-neighbor of ``self`` defined by ``y``.
Let `(L,q)` be a lattice with `b(L,L) \subseteq \ZZ` which is maximal at `p`.
Let `y \in L` with `b(y,y) \in p^2\ZZ` then the `p`-neighbor of
`L` at `y` is given by
`\ZZ y/p + L_y` where `L_y = \{x \in L | b(x,y) \in p \ZZ \}`
and `b(x,y) = q(x+y)-q(x)-q(y)` is the bilinear form associated to `q`.
INPUT:
- ``p`` -- a prime number
- ``y`` -- a vector with `q(y) \in p \ZZ`.
- ``odd`` -- (default=``False``) if `p=2` return also odd neighbors
EXAMPLES::
sage: Q = DiagonalQuadraticForm(ZZ,[1,1,1,1])
sage: v = vector([0,2,1,1])
sage: X = Q.find_p_neighbor_from_vec(3,v); X
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 1 0 0 0 ]
[ * 1 4 4 ]
[ * * 5 12 ]
[ * * * 9 ]
Since the base ring and the domain are not yet separate,
for rational, half integral forms we just pretend
the base ring is `ZZ`::
sage: Q = QuadraticForm(QQ,matrix.diagonal([1,1,1,1]))
sage: v = vector([1,1,1,1])
sage: Q.find_p_neighbor_from_vec(2,v)
Quadratic form in 4 variables over Rational Field with coefficients:
[ 1/2 1 1 1 ]
[ * 1 1 2 ]
[ * * 1 2 ]
[ * * * 2 ]
"""
p = ZZ(p)
if not p.divides(self(y)):
raise ValueError("y=%s must be of square divisible by p=%s"%(y,p))
if self.base_ring() not in [ZZ, QQ]:
raise NotImplementedError("the base ring of this form must be the integers or the rationals")
n = self.dim()
G = self.Hessian_matrix()
R = self.base_ring()
odd = False
if R is QQ:
odd = True
if G.denominator() != 1:
raise ValueError("the associated bilinear form q(x+y)-q(x)-q(y) must be integral.")
b = y*G*y
if not b % p == 0:
raise ValueError("y^2 must be divisible by p=%s"%p)
y_dual = y*G
if p != 2 and b % p**2 != 0:
for k in range(n):
if y_dual[k] % p != 0:
z = (ZZ**n).gen(k)
break
else:
raise ValueError("either y is not primitive or self is not maximal at %s"%p)
z *= (2*y*G*z).inverse_mod(p)
y = y - b*z
# assert y*G*y % p^2 == 0
if p == 2:
val = b.valuation(p)
if val <= 1:
raise ValueError("y=%s must be of square divisible by 2"%y)
if val == 2 and not odd:
# modify it to have square 4
for k in range(n):
if y_dual[k] % p != 0:
z = (ZZ**n).gen(k)
break
else:
raise ValueError("either y is not primitive or self is not even, maximal at 2")
y += 2*z
# assert y*G*y % 8 == 0
y_dual = G*y
Ly = y_dual.change_ring(GF(p)).column().kernel().matrix().lift()
B = Ly.stack(p * matrix.identity(n))
# the rows of B now generate L_y = { x in L | (x,y)=0 mod p}
B = y.row().stack(p*B)
B = B.hermite_form()[:n, :] / p
# the rows of B generate ZZ * y/p + L_y
# by definition this is the p-neighbor of L at y
# assert B.det().abs() == 1
QF = self.parent()
Gnew = (B*G*B.T).change_ring(R)
return QF(Gnew)
def neighbor_iteration(seeds, p, mass=None, max_classes=ZZ(10)**3,
algorithm=None, max_neighbors=1000, verbose=False):
r"""
Return all classes in the `p`-neighbor graph of ``self``.
Starting from the given seeds, this function successively
finds p-neighbors until no new quadratic form (class) is obtained.
INPUT:
- ``seeds`` -- a list of quadratic forms in the same genus
- ``p`` -- a prime number
- ``mass`` -- (optional) a rational number; the mass of this genus
- ``max_classes`` -- (default: ``1000``) break the computation when ``max_classes`` are found
- ``algorithm`` -- (optional) one of 'orbits', 'random', 'exaustion'
- ``max_neighbors`` -- (default: ``1000``) the maximum number of neighbors
computed for a single lattice
OUTPUT:
- a list of quadratic forms
EXAMPLES::
sage: from sage.quadratic_forms.quadratic_form__neighbors import neighbor_iteration
sage: Q = QuadraticForm(ZZ, 3, [1, 0, 0, 2, 1, 3])
sage: Q.det()
46
sage: mass = Q.conway_mass()
sage: g1 = neighbor_iteration([Q],3, mass=mass, algorithm = 'random') # long time
sage: g2 = neighbor_iteration([Q],3, algorithm = 'exaustion') # long time
sage: g3 = neighbor_iteration([Q],3, algorithm = 'orbits')
sage: mass == sum(1/q.number_of_automorphisms() for q in g1) # long time
True
sage: mass == sum(1/q.number_of_automorphisms() for q in g2) # long time
True
sage: mass == sum(1/q.number_of_automorphisms() for q in g3)
True
TESTS::
sage: from sage.quadratic_forms.quadratic_form__neighbors import neighbor_iteration
sage: Q = QuadraticForm(ZZ, 3, [1, 0, 0, 2, 1, 3])
sage: g = neighbor_iteration([Q],3,mass=Q.conway_mass(),max_classes=2)
...
UserWarning: reached the maximum number of isometry classes=2. Increase the optional argument max_classes to obtain more.
Warning: not all classes in the genus were found
sage: neighbor_iteration([Q], 3, mass=Q.conway_mass(), max_neighbors=0, algorithm='random')
Warning: not all classes in the genus were found
[]
"""
p = ZZ(p)
from sage.quadratic_forms.quadratic_form import QuadraticForm
from warnings import warn
if not all(isinstance(s, QuadraticForm) for s in seeds):
raise ValueError("seeds must be a list of quadratic forms")
if algorithm is None:
n = seeds[0].dim()
if p**n > ZZ(2)**18:
# too many lines to compute the orbits fast
algorithm = 'random'
else:
algorithm = 'orbits'
if algorithm == 'orbits':
def p_divisible_vectors(Q, max_neighbors):
yield from iter(v.lift() for v in Q.orbits_lines_mod_p(p)
if v != 0 and Q(v.lift()).valuation(p) > 0)
return
elif algorithm == 'exaustion':
def p_divisible_vectors(Q, max_neighbors):
k = 0
v = Q.find_primitive_p_divisible_vector__next(p)
while k < max_neighbors:
k = k + 1
v = Q.find_primitive_p_divisible_vector__next(p, v)
if v is not None:
yield v
elif algorithm == 'random':
def p_divisible_vectors(Q, max_neighbors):
k = 0
while k < max_neighbors:
k = k + 1
v = Q.find_primitive_p_divisible_vector__random(p)
yield v
else:
raise ValueError("unknown algorithm")
waiting_list = list(seeds)
isom_classes = []
mass_count = QQ(0)
n_isom_classes = ZZ(0)
while len(waiting_list) > 0 and mass != mass_count and n_isom_classes < max_classes:
# find all p-neighbors of Q
Q = waiting_list.pop()
for v in p_divisible_vectors(Q, max_neighbors):
Q_neighbor = Q.find_p_neighbor_from_vec(p, v)
if not any(Q_neighbor.is_globally_equivalent_to(S) for S in isom_classes):
Q_neighbor = Q_neighbor.lll()
isom_classes.append(Q_neighbor)
waiting_list.append(Q_neighbor)
n_isom_classes += 1
mass_count += Q_neighbor.number_of_automorphisms()**(-1)
if verbose:
print(max_neighbors)
print(len(waiting_list))
if mass_count == mass or n_isom_classes >= max_classes:
break
if len(isom_classes) >= max_classes:
warn("reached the maximum number of isometry classes=%s. Increase the optional argument max_classes to obtain more." %max_classes)
if mass is not None:
assert mass_count <= mass
if mass_count < mass:
print("Warning: not all classes in the genus were found")
return isom_classes
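# Note (added): the mass bookkeeping above uses the Smith-Minkowski-Siegel mass
# formula -- summing 1/|Aut(Q)| over one representative per isometry class of
# the genus gives the mass -- so once mass_count equals the supplied mass every
# class has been found and the loop can stop early.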
def orbits_lines_mod_p(self, p):
r"""
Let `(L, q)` be a lattice. This returns representatives of the
orbits of lines in `L/pL` under the orthogonal group of `q`.
INPUT:
- ``p`` -- a prime number
OUTPUT:
- a list of vectors over ``GF(p)``
EXAMPLES::
sage: from sage.quadratic_forms.quadratic_form__neighbors import orbits_lines_mod_p
sage: Q = QuadraticForm(ZZ, 3, [1, 0, 0, 2, 1, 3])
sage: Q.orbits_lines_mod_p(2)
[(0, 0, 1),
(0, 1, 0),
(0, 1, 1),
(1, 0, 0),
(1, 0, 1),
(1, 1, 0),
(1, 1, 1)]
"""
from sage.libs.gap.libgap import libgap
# careful the self.automorphism_group() acts from the left
# but in gap we act from the right!! --> transpose
gens = self.automorphism_group().gens()
gens = [g.matrix().transpose().change_ring(GF(p)) for g in gens]
orbs = libgap.function_factory(
"""function(gens, p)
local one, G, reps, V, n, orb;
one:= One(GF(p));
G:=Group(List(gens, g -> g*one));
n:= Size(gens[1]);
V:= GF(p)^n;
orb:= OrbitsDomain(G, V, OnLines);
reps:= List(orb, g->g[1]);
return reps;
end;""")
orbs_reps = orbs(gens, p)
M = GF(p)**self.dim()
return [M(m.sage()) for m in orbs_reps if not m.IsZero()]
|
the-stack_0_22997 | from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User
from core import models
from core.views.general import PermissionDenied
class IndexView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
def test_auth(self):
response = self.client.get(reverse('index'))
redirect_url = reverse('login') + '?next=/'
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/index.html')
def test_context(self):
for x in ['in_progress', 'completed', 'stopped', 'fail', 'wait']:
models.Task.objects.create(
playbook='/home/',
status=x,
user=self.user,
)
self.client.force_login(user=self.user)
response = self.client.get(reverse('index'))
self.assertEqual(len(response.context['tasks']), 5)
class PermissionDeniedView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
def test_auth(self):
response = self.client.get(reverse('permission_denied'))
redirect_url = reverse('login') + '?next=' + reverse('permission_denied')
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('permission_denied'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/user/permission_denied.html')
def test_context(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('permission_denied'))
self.assertEqual(response.context['breadcrumbs'][0], ('Home', reverse('index')))
self.assertEqual(response.context['breadcrumbs'][1], (PermissionDenied.title, ''))
|
the-stack_0_22998 | #FK chain system LVO - Jan 2022
import maya.cmds as mc
import maya.mel as mel
def rig_fk_chain(arg):
# List old joint chain
mc.select (hi=True)
originJoints = mc.ls(selection=True, type='joint')
# Snap Joints for function
mc.select(originJoints[0])
def copyJointChain(prefix = 'FK',joints=[]):
if not joints:
mc.select(hi = True)
joints = mc.ls(selection=True, type='joint')
mc.select(clear=True)
counter=1
for joint in joints:
pos=mc.xform(joint, query=True, translation=True, worldSpace=True)
jointOrientation = mc.joint(joint, query=True, orientation=True)
newJoint = mc.joint(name=prefix+"_"+str(counter)+"_JNT", p=pos, o=jointOrientation)
counter += 1
copyJointChain(mc.textFieldGrp(jntNameField, q=True, text=True))
# Hide old joint chain
mc.hide(originJoints[0])
"""__________________WINDOW_______________"""
windowName = "FKChainSystemValentinaOrozco"
windowTitle ='FK Chain System Valentina Orozco'
windowWidth = 400
windowHeight = 175
def UI():
if mc.window(windowName, exists=True):
mc.deleteUI(windowName)
# Window
myWindow = mc.window(windowName, title=windowTitle, w=windowWidth, h=windowHeight)
mc.window(windowName, edit=True, w=windowWidth, h=windowHeight)
# Main Layout
mainLayout = mc.columnLayout('mainLayout', w=windowWidth, rowSpacing=5)
mc.separator(h=10)
mc.text('Select first joint chain', w=windowWidth, align='center', backgroundColor=(0.4,0.4,0.4), h=35)
mc.separator(h=3)
# TextFields
global jntNameField
jntNameField = mc.textFieldGrp(l="Joints' Name", editable=True, pht='fk_JNT')
mc.separator(h=3)
# Button
mc.button('Button1',label='Create Joints', w=windowWidth, h= 35, backgroundColor=(0.1,0.7,0.85), command= rig_fk_chain)
mc.separator(h=5)
mc.text('Select the joints you want to rig', w=windowWidth, align='center', backgroundColor=(0.4,0.4,0.4), h=35)
mc.separator(h=3)
global ctrlSizeSlider
ctrlSizeSlider = mc.intSliderGrp(l="Controls' Size", min=1, max=15, field=True, v=1)
mc.separator(h=3)
# Button
mc.button('Button2',label='Rig it!', w=windowWidth, h= 35, backgroundColor=(0.1,0.7,0.85), command= rig_fk_chain)
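# Note: both buttons are wired to rig_fk_chain, so 'Rig it!' currently re-runs the joint
# duplication step and the ctrlSizeSlider value is not read inside rig_fk_chain yet.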
mc.separator(h=7)
mc.showWindow(myWindow)
UI() |
the-stack_0_23002 | """
XML parser that calls punct_split to correct spacing issues (words incorrectly concatenated together in the PDF->XML
process via Adobe Acrobat).
"""
import sys
import xml.etree.ElementTree as ET
from punct_split import punct_split
# node trackers for progress print statement
current_element_num = 0
total_num_elements = 0
verbose = False
def clean_recursively(root):
"""
At current node, reintroduce spaces into the text. Then proceed to child nodes.
"""
if root.text is not None:
root.text = punct_split(root.text)
global current_element_num
current_element_num += 1
if verbose and current_element_num % 150 == 0:
print("\rProcessed %.2f%% of all XML nodes." % (current_element_num * 100 / total_num_elements), end='')
for child in root:
clean_recursively(child)
def reintroduce_spaces(in_file_path, out_file_path=None):
"""
Read XML file with spacing problems, correct its spaces, and write to a new XML file
:param in_file_path: source XML with spacing problems
:param out_file_path: destination XML with corrected spaces
"""
# if no name for outfile given, use source name + "_spaced"
if out_file_path is None:
out_file_path = in_file_path[:in_file_path.rfind(".")] + "_spaced" + in_file_path[in_file_path.rfind("."):]
# read xml file as a tree
tree = ET.parse(in_file_path, ET.XMLParser(encoding="utf-8"))
root = tree.getroot()
# count how many total children and subchildren have information in their text field
global total_num_elements
total_num_elements = sum(1 if el.text else 0 for el in root.iter("*"))
# reintroduce spaces at every node
clean_recursively(root)
if verbose and total_num_elements >= 150:
print("\rProcessed 100.00% of all XML nodes.")
tree.write(out_file_path, encoding="utf8")
return out_file_path
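# Illustrative usage: reintroduce_spaces("../fullPDFs/Carlow.xml") writes
# "../fullPDFs/Carlow_spaced.xml" and returns that path.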
def main(argv):
filename = "../fullPDFs/Carlow.xml"
# if user specified another file as input
if len(argv) > 1:
filename = argv[1]
reintroduce_spaces(filename)
if __name__ == "__main__":
main(sys.argv)
|
the-stack_0_23007 | from Core import Commands,Worker
from Diagnostics.Debugging import Console
import socket
import threading
class Server:
def __init__(self, port):
self.PORT = port # The port the socket will be running on
self.ADDR = socket.gethostbyname(socket.gethostname()) # The address
self.Sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a socket
# AF_INET basically tells that we will be dealing with IP v4 addresses
# SOCK_STREAM means data will be streamed from both sides and the order will be maintained
self.Sock.bind((self.ADDR, self.PORT)) # Bind our socket to our address
self.Clients = {} # {addr : {'talker': conn, 'listener': conn, 'games': []}}
Console.serverLog("Initialized")
def AcceptConnections(self):
Console.serverLog(f"Listening on {self.ADDR}:{self.PORT}")
self.Sock.listen()
while 1: # Our socket runs forever (until we close it)
conn, addr = self.Sock.accept() # Accept connections and keep the connection object and the address
threading.Thread(
target=self.HandleClient, # The target of our thread
daemon=True, # This basically makes it so that if the main program ends before this thread, the thread will be destroyed along with the main program
args = (conn, addr)
).start()
Console.info(f"Total active threads : {threading.active_count()}")
def HandleClient(self, conn, addr):
'''Called initially when the connection is made to initialize the connection'''
Console.clientLog(addr, "Connected to server")
while 1:
msg = conn.recv(2048)
print(msg)
if msg == b'':break
conn.close()
def Broadcast(self, msg, src = None):
for i in self.Clients:
if i == src or self.Clients[i]['listener'] is None:continue
try:Worker.SendMessage(self.Clients[i]['listener'], msg)
except:pass
def ServeClient(self, addr):
'''Serves a client that has been initialized'''
try:
while 1:
msg = Worker.GetMessage(self.Clients[addr]['talker'], cancel = lambda:not (addr in self.Clients))
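# GetMessage is expected to return either the string "!Error" or a dict of the form
# {'command': Commands.<X>, ...} with extra keys such as 'game' or 'addr' depending on
# the command, as handled by the branches below.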
if msg == "!Error":
break
elif msg['command'] == Commands.DISCONNECT:
break
elif msg['command'] == Commands.CreateGame:
self.Clients[addr]['games'].append(msg['game'])
self.Broadcast({
'command' : Commands.ShowGames,
'games' : [msg['game']]
}, src = addr)
elif msg['command'] == Commands.CancelGame:
self.Broadcast({
'command' : Commands.HideGames,
'games' : [msg['game']]
}, src = addr)
elif msg['command'] == Commands.AcceptGame:
otherAddr = tuple(msg['addr'])
gamesToDelete = self.Clients[addr]['games'] + self.Clients.get(otherAddr, {'games':[]})['games']
try:
Worker.SendMessage(self.Clients[otherAddr]['listener'], {
'command':Commands.BeginGame,
'game' : msg['game'],
'addr' : addr
})
Worker.SendMessage(self.Clients[addr]['listener'],{
'command':Commands.BeginGame,
'game' : msg['game'],
'addr' : otherAddr
})
del self.Clients[addr]
del self.Clients[otherAddr]
except:
pass
finally:
self.Broadcast({
'command' : Commands.HideGames,
'games' : gamesToDelete
})
elif msg['command'] == Commands.GetGames:
allGames = []
for i in self.Clients:allGames.extend(self.Clients[i]['games'])
Worker.SendMessage(self.Clients[addr]['listener'], {
'command' : Commands.ShowGames,
'games' : allGames
})
allGames.clear()
except Exception as e:
print(e)
finally:
Console.clientLog(addr, "Disconnecting")
Console.info(f"Total active threads : {threading.active_count() - 1}")
if not self.Clients.get(addr):return
self.Broadcast({
'command' : Commands.HideGames,
'games' : self.Clients[addr]['games']
}, src=addr)
try:self.Clients[addr]['talker'].close()
except:pass
try:self.Clients[addr]['listener'].close()
except:pass
del self.Clients[addr]
MyServer = Server(8000) # Create my server
MyServer.AcceptConnections() # Accept connections from outside |
the-stack_0_23008 | #!/usr/bin/env python
#Author: Cianciaruso Cataldo
import requests
import os
import sys
import time
def run(image_path):
header={'modes':'all'}
files = {'image': ('test.jpg', open(image_path, 'rb'), 'image/jpg')}
start_time = time.time()
r=requests.post("http://localhost:8080", files=files, headers=header)
print("Response :\n")
print(r.text)
elapsed_time = str(time.time() - start_time)
print("\nElapsed time: "+elapsed_time)
if __name__ == "__main__":
if(len(sys.argv)>1):
if os.path.isfile(sys.argv[1]):
run(sys.argv[1])
else:
print("Error, the file "+sys.argv[1]+" is not a valid image file.")
else:
print("You must specify an image file path to send request to server.")
|
the-stack_0_23011 | ###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from .....lib import *
from ...util import *
from ..decoder import DecoderTest
spec = load_test_spec("hevc", "decode", "10bit")
class default(DecoderTest):
def before(self):
# default metric
self.metric = dict(type = "ssim", miny = 1.0, minu = 1.0, minv = 1.0)
self.caps = platform.get_caps("decode", "hevc_10")
super(default, self).before()
@slash.requires(*platform.have_caps("decode", "hevc_10"))
@slash.requires(*have_gst_element("msdkh265dec"))
@slash.parametrize(("case"), sorted(spec.keys()))
def test(self, case):
vars(self).update(spec[case].copy())
vars(self).update(
case = case,
gstdecoder = "h265parse ! msdkh265dec",
)
self.decode()
|
the-stack_0_23012 | #!/usr/bin/env python3
import yaml
from utils import validate_file_path
def set_bungee_mode(file_path: str):
with open(file_path, 'r') as cfg:
spig_yml = yaml.safe_load(cfg)
# except yaml.YAMLError as exc:
spig_yml['settings']['bungeecord'] = True
with open(file_path, 'w') as cfg:
yaml.dump(spig_yml, cfg)
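# Note: yaml.dump rewrites spigot.yml from the parsed data only, so comments are lost
# and keys may be re-ordered.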
if __name__ == '__main__':
path = validate_file_path('spigot.yml')
set_bungee_mode(path)
|
the-stack_0_23013 | #importing necessary files
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
#importing test and train file
train = pd.read_csv('Train.csv')
test = pd.read_csv('Test.csv')
#Linear Regression
from sklearn.linear_model import LinearRegression
lreg = LinearRegression()
#Splitting into Training and CV for Cross Validation
X = train.loc[:,['Outlet_Establishment_Year', 'Item_MRP']]
x_train, x_cv, y_train, y_cv = train_test_split(X, train.Item_Outlet_Sales)
#Training the Model
lreg.fit(x_train,y_train)
#Predicting on the Cross validation set
pred = lreg.predict(x_cv)
#Calculating the Mean Square Error
mse = np.mean((pred - y_cv)**2)
print('Mean Square Error is: ', mse)
#Calculation of coefficients
coeff = DataFrame(x_train.columns)
coeff['Coefficient Estimate'] = Series(lreg.coef_)
print(coeff)
x_plot = plt.scatter(pred, (pred - y_cv), c='b')
plt.hlines(y=0, xmin=-1000, xmax=5000)
plt.title('Residual Plot')
plt.show()
|
the-stack_0_23015 | from .contact import Contact
from .contact_field import ContactField
class Emarsys:
"""
Make authenticated calls to Emarsys' API through its attributes.
Usage example:
>>> connection = SyncConnection(<user>, <password>)
>>> client = Emarsys(connection)
>>> client.contacts.create({'3': '[email protected]'})
{'data': {'id': 123456789}, 'replyCode': 0, 'replyText': 'OK'}
"""
def __init__(self, connection):
self.connection = connection
self.contacts = Contact(self.connection)
self.contact_fields = ContactField(self.connection)
|
the-stack_0_23016 | # Copyright 2013 Big Switch Networks
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import argparse
from neutronclient.i18n import _
from neutronclient.neutron import v2_0 as neutronv20
def _format_firewall_rules(firewall_policy):
try:
output = '[' + ',\n '.join([rule for rule in
firewall_policy['firewall_rules']]) + ']'
return output
except (TypeError, KeyError):
return ''
def common_add_known_arguments(parser):
parser.add_argument(
'--firewall-rules', type=lambda x: x.split(),
help=_('Ordered list of whitespace-delimited firewall rule '
'names or IDs; e.g., --firewall-rules \"rule1 rule2\"'))
def common_args2body(client, parsed_args):
if parsed_args.firewall_rules:
_firewall_rules = []
for f in parsed_args.firewall_rules:
_firewall_rules.append(
neutronv20.find_resourceid_by_name_or_id(
client, 'firewall_rule', f))
body = {'firewall_rules': _firewall_rules}
else:
body = {}
neutronv20.update_dict(parsed_args, body,
['name', 'description', 'shared',
'audited', 'tenant_id'])
return {'firewall_policy': body}
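# e.g. {'firewall_policy': {'name': ..., 'description': ..., 'firewall_rules': [<rule-id>, ...]}}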
class ListFirewallPolicy(neutronv20.ListCommand):
"""List firewall policies that belong to a given tenant."""
resource = 'firewall_policy'
list_columns = ['id', 'name', 'firewall_rules']
_formatters = {'firewall_rules': _format_firewall_rules,
}
pagination_support = True
sorting_support = True
class ShowFirewallPolicy(neutronv20.ShowCommand):
"""Show information of a given firewall policy."""
resource = 'firewall_policy'
class CreateFirewallPolicy(neutronv20.CreateCommand):
"""Create a firewall policy."""
resource = 'firewall_policy'
def add_known_arguments(self, parser):
parser.add_argument(
'name',
metavar='NAME',
help=_('Name for the firewall policy.'))
parser.add_argument(
'--description',
help=_('Description for the firewall policy.'))
parser.add_argument(
'--shared',
dest='shared',
action='store_true',
help=_('Create a shared policy.'),
default=argparse.SUPPRESS)
common_add_known_arguments(parser)
parser.add_argument(
'--audited',
action='store_true',
help=_('Sets audited to True.'),
default=argparse.SUPPRESS)
def args2body(self, parsed_args):
return common_args2body(self.get_client(), parsed_args)
class UpdateFirewallPolicy(neutronv20.UpdateCommand):
"""Update a given firewall policy."""
resource = 'firewall_policy'
def add_known_arguments(self, parser):
common_add_known_arguments(parser)
def args2body(self, parsed_args):
return common_args2body(self.get_client(), parsed_args)
class DeleteFirewallPolicy(neutronv20.DeleteCommand):
"""Delete a given firewall policy."""
resource = 'firewall_policy'
class FirewallPolicyInsertRule(neutronv20.UpdateCommand):
"""Insert a rule into a given firewall policy."""
resource = 'firewall_policy'
def call_api(self, neutron_client, firewall_policy_id, body):
return neutron_client.firewall_policy_insert_rule(firewall_policy_id,
body)
def args2body(self, parsed_args):
_rule = ''
if parsed_args.firewall_rule_id:
_rule = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'firewall_rule',
parsed_args.firewall_rule_id)
_insert_before = ''
if 'insert_before' in parsed_args:
if parsed_args.insert_before:
_insert_before = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'firewall_rule',
parsed_args.insert_before)
_insert_after = ''
if 'insert_after' in parsed_args:
if parsed_args.insert_after:
_insert_after = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'firewall_rule',
parsed_args.insert_after)
body = {'firewall_rule_id': _rule,
'insert_before': _insert_before,
'insert_after': _insert_after}
return body
def get_parser(self, prog_name):
parser = super(FirewallPolicyInsertRule, self).get_parser(prog_name)
parser.add_argument(
'--insert-before',
metavar='FIREWALL_RULE',
help=_('Insert before this rule.'))
parser.add_argument(
'--insert-after',
metavar='FIREWALL_RULE',
help=_('Insert after this rule.'))
parser.add_argument(
'firewall_rule_id',
metavar='FIREWALL_RULE',
help=_('New rule to insert.'))
self.add_known_arguments(parser)
return parser
def run(self, parsed_args):
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
body = self.args2body(parsed_args)
_id = neutronv20.find_resourceid_by_name_or_id(neutron_client,
self.resource,
parsed_args.id)
self.call_api(neutron_client, _id, body)
print((_('Inserted firewall rule in firewall policy %(id)s') %
{'id': parsed_args.id}), file=self.app.stdout)
class FirewallPolicyRemoveRule(neutronv20.UpdateCommand):
"""Remove a rule from a given firewall policy."""
resource = 'firewall_policy'
def call_api(self, neutron_client, firewall_policy_id, body):
return neutron_client.firewall_policy_remove_rule(firewall_policy_id,
body)
def args2body(self, parsed_args):
_rule = ''
if parsed_args.firewall_rule_id:
_rule = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'firewall_rule',
parsed_args.firewall_rule_id)
body = {'firewall_rule_id': _rule}
return body
def get_parser(self, prog_name):
parser = super(FirewallPolicyRemoveRule, self).get_parser(prog_name)
parser.add_argument(
'firewall_rule_id',
metavar='FIREWALL_RULE',
help=_('Firewall rule to remove from policy.'))
self.add_known_arguments(parser)
return parser
def run(self, parsed_args):
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
body = self.args2body(parsed_args)
_id = neutronv20.find_resourceid_by_name_or_id(neutron_client,
self.resource,
parsed_args.id)
self.call_api(neutron_client, _id, body)
print((_('Removed firewall rule from firewall policy %(id)s') %
{'id': parsed_args.id}), file=self.app.stdout)
|
the-stack_0_23017 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Variational Quantum Eigensolver algorithm.
See https://arxiv.org/abs/1304.3061
"""
from typing import Optional, List, Callable, Union, Dict
import logging
from time import time
import numpy as np
from qiskit import ClassicalRegister, QuantumCircuit
from qiskit.circuit import Parameter
from qiskit.circuit.library import RealAmplitudes
from qiskit.providers import BaseBackend
from qiskit.providers import Backend
from qiskit.opflow import (
OperatorBase,
ExpectationBase,
ExpectationFactory,
StateFn,
CircuitStateFn,
ListOp,
I,
CircuitSampler,
)
from qiskit.opflow.gradients import GradientBase
from qiskit.utils.validation import validate_min
from qiskit.utils.backend_utils import is_aer_provider
from qiskit.utils.quantum_instance import QuantumInstance
from ..optimizers import Optimizer, SLSQP
from ..variational_algorithm import VariationalAlgorithm, VariationalResult
from .minimum_eigen_solver import MinimumEigensolver, MinimumEigensolverResult
from ..exceptions import AlgorithmError
logger = logging.getLogger(__name__)
# disable check for ansatzes, optimizer setter because of pylint bug
# pylint: disable=no-member
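# A minimal usage sketch of the VQE class below (``backend`` and ``operator`` are assumed
# to be defined elsewhere):
#
#     from qiskit.algorithms.optimizers import SLSQP
#     vqe = VQE(ansatz=RealAmplitudes(2), optimizer=SLSQP(),
#               quantum_instance=QuantumInstance(backend))
#     result = vqe.compute_minimum_eigenvalue(operator)
#     print(result.eigenvalue)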
class VQE(VariationalAlgorithm, MinimumEigensolver):
r"""The Variational Quantum Eigensolver algorithm.
`VQE <https://arxiv.org/abs/1304.3061>`__ is a quantum algorithm that uses a
variational technique to find
the minimum eigenvalue of the Hamiltonian :math:`H` of a given system.
An instance of VQE requires defining two algorithmic sub-components:
a trial state (a.k.a. ansatz) which is a :class:`QuantumCircuit`, and one of the classical
:mod:`~qiskit.algorithms.optimizers`. The ansatz is varied, via its set of parameters, by the
optimizer, such that it works towards a state, as determined by the parameters applied to the
ansatz, that will result in the minimum expectation value being measured of the input operator
(Hamiltonian).
An optional array of parameter values, via the *initial_point*, may be provided as the
starting point for the search of the minimum eigenvalue. This feature is particularly useful
when there are reasons to believe that the solution point is close to a particular
point. As an example, when building the dissociation profile of a molecule,
it is likely that using the previously computed optimal solution as the starting
initial point for the next interatomic distance is going to reduce the number of iterations
necessary for the variational algorithm to converge. There is an
`initial point tutorial <https://github.com/Qiskit/qiskit-tutorials-community/blob/master
/chemistry/h2_vqe_initial_point.ipynb>`__ detailing this use case.
The length of the *initial_point* list value must match the number of the parameters
expected by the ansatz being used. If the *initial_point* is left at the default
of ``None``, then VQE will look to the ansatz for a preferred value, based on its
given initial state. If the ansatz returns ``None``,
then a random point will be generated within the parameter bounds set, as per above.
If the ansatz provides ``None`` as the lower bound, then VQE
will default it to :math:`-2\pi`; similarly, if the ansatz returns ``None``
as the upper bound, the default value will be :math:`2\pi`.
.. note::
The VQE stores the parameters of ``ansatz`` sorted by name to map the values
provided by the optimizer to the circuit. This is done to ensure reproducible results,
for example such that running the optimization twice with same random seeds yields the
same result. Also, the ``optimal_point`` of the result object can be used as initial
point of another VQE run by passing it as ``initial_point`` to the initializer.
"""
def __init__(
self,
ansatz: Optional[QuantumCircuit] = None,
optimizer: Optional[Optimizer] = None,
initial_point: Optional[np.ndarray] = None,
gradient: Optional[Union[GradientBase, Callable]] = None,
expectation: Optional[ExpectationBase] = None,
include_custom: bool = False,
max_evals_grouped: int = 1,
callback: Optional[Callable[[int, np.ndarray, float, float], None]] = None,
quantum_instance: Optional[Union[QuantumInstance, BaseBackend, Backend]] = None,
) -> None:
"""
Args:
ansatz: A parameterized circuit used as Ansatz for the wave function.
optimizer: A classical optimizer.
initial_point: An optional initial point (i.e. initial parameter values)
for the optimizer. If ``None`` then VQE will look to the ansatz for a preferred
point and if not will simply compute a random one.
gradient: An optional gradient function or operator for optimizer.
expectation: The Expectation converter for taking the average value of the
Observable over the ansatz state function. When ``None`` (the default) an
:class:`~qiskit.opflow.expectations.ExpectationFactory` is used to select
an appropriate expectation based on the operator and backend. When using Aer
qasm_simulator backend, with paulis, it is however much faster to leverage custom
Aer function for the computation but, although VQE performs much faster
with it, the outcome is ideal, with no shot noise, like using a state vector
simulator. If you are just looking for the quickest performance when choosing Aer
qasm_simulator and the lack of shot noise is not an issue then set `include_custom`
parameter here to ``True`` (defaults to ``False``).
include_custom: When `expectation` parameter here is None setting this to ``True`` will
allow the factory to include the custom Aer pauli expectation.
max_evals_grouped: Max number of evaluations performed simultaneously. Signals the
given optimizer that more than one set of parameters can be supplied so that
potentially the expectation values can be computed in parallel. Typically this is
possible when a finite difference gradient is used by the optimizer such that
multiple points to compute the gradient can be passed and if computed in parallel
improve overall execution time. Deprecated if a gradient operator or function is
given.
callback: a callback that can access the intermediate data during the optimization.
Four parameter values are passed to the callback as follows during each evaluation
by the optimizer for its current set of parameters as it works towards the minimum.
These are: the evaluation count, the optimizer parameters for the
ansatz, the evaluated mean and the evaluated standard deviation.
quantum_instance: Quantum Instance or Backend
"""
validate_min("max_evals_grouped", max_evals_grouped, 1)
if ansatz is None:
ansatz = RealAmplitudes()
if optimizer is None:
optimizer = SLSQP()
# set the initial point to the preferred parameters of the ansatz
if initial_point is None and hasattr(ansatz, "preferred_init_points"):
initial_point = ansatz.preferred_init_points
self._max_evals_grouped = max_evals_grouped
self._circuit_sampler = None # type: Optional[CircuitSampler]
self._expectation = expectation
self._user_valid_expectation = self._expectation is not None
self._include_custom = include_custom
self._expect_op = None
super().__init__(
ansatz=ansatz,
optimizer=optimizer,
cost_fn=self._energy_evaluation,
gradient=gradient,
initial_point=initial_point,
quantum_instance=quantum_instance,
)
self._ret = VQEResult()
self._eval_time = None
self._optimizer.set_max_evals_grouped(max_evals_grouped)
self._callback = callback
self._eval_count = 0
logger.info(self.print_settings())
def _try_set_expectation_value_from_factory(self, operator: OperatorBase) -> None:
if operator is not None and self.quantum_instance is not None:
self._set_expectation(
ExpectationFactory.build(
operator=operator,
backend=self.quantum_instance,
include_custom=self._include_custom,
)
)
def _set_expectation(self, exp: ExpectationBase) -> None:
self._expectation = exp
self._user_valid_expectation = False
self._expect_op = None
@VariationalAlgorithm.quantum_instance.setter
def quantum_instance(
self, quantum_instance: Union[QuantumInstance, BaseBackend, Backend]
) -> None:
"""set quantum_instance"""
super(VQE, self.__class__).quantum_instance.__set__(self, quantum_instance)
self._circuit_sampler = CircuitSampler(
self._quantum_instance, param_qobj=is_aer_provider(self._quantum_instance.backend)
)
@property
def expectation(self) -> ExpectationBase:
"""The expectation value algorithm used to construct the expectation measurement from
the observable."""
return self._expectation
@expectation.setter
def expectation(self, exp: ExpectationBase) -> None:
self._set_expectation(exp)
self._user_valid_expectation = self._expectation is not None
def _check_operator_varform(self, operator: OperatorBase):
"""Check that the number of qubits of operator and ansatz match."""
if operator is not None and self.ansatz is not None:
if operator.num_qubits != self.ansatz.num_qubits:
# try to set the number of qubits on the ansatz, if possible
try:
self.ansatz.num_qubits = operator.num_qubits
self._ansatz_params = sorted(self.ansatz.parameters, key=lambda p: p.name)
except AttributeError as ex:
raise AlgorithmError(
"The number of qubits of the ansatz does not match the "
"operator, and the ansatz does not allow setting the "
"number of qubits using `num_qubits`."
) from ex
@VariationalAlgorithm.optimizer.setter # type: ignore
def optimizer(self, optimizer: Optimizer):
"""Sets optimizer"""
super(VQE, self.__class__).optimizer.__set__(self, optimizer) # type: ignore
if optimizer is not None:
optimizer.set_max_evals_grouped(self._max_evals_grouped)
@property
def setting(self):
"""Prepare the setting of VQE as a string."""
ret = "Algorithm: {}\n".format(self.__class__.__name__)
params = ""
for key, value in self.__dict__.items():
if key[0] == "_":
if "initial_point" in key and value is None:
params += "-- {}: {}\n".format(key[1:], "Random seed")
else:
params += "-- {}: {}\n".format(key[1:], value)
ret += "{}".format(params)
return ret
def print_settings(self):
"""
Preparing the setting of VQE into a string.
Returns:
str: the formatted setting of VQE
"""
ret = "\n"
ret += "==================== Setting of {} ============================\n".format(
self.__class__.__name__
)
ret += "{}".format(self.setting)
ret += "===============================================================\n"
if hasattr(self._ansatz, "setting"):
ret += "{}".format(self._ansatz.setting)
elif hasattr(self._ansatz, "print_settings"):
ret += "{}".format(self._ansatz.print_settings())
elif isinstance(self._ansatz, QuantumCircuit):
ret += "ansatz is a custom circuit"
else:
ret += "ansatz has not been set"
ret += "===============================================================\n"
ret += "{}".format(self._optimizer.setting)
ret += "===============================================================\n"
return ret
def construct_expectation(
self,
parameter: Union[List[float], List[Parameter], np.ndarray],
operator: OperatorBase,
) -> OperatorBase:
r"""
Generate the ansatz circuit and expectation value measurement, and return their
runnable composition.
Args:
parameter: Parameters for the ansatz circuit.
operator: Qubit operator of the Observable
Returns:
The Operator equalling the measurement of the ansatz :class:`StateFn` by the
Observable's expectation :class:`StateFn`.
Raises:
AlgorithmError: If no operator has been provided.
"""
if operator is None:
raise AlgorithmError("The operator was never provided.")
operator = self._check_operator(operator)
if isinstance(self.ansatz, QuantumCircuit):
param_dict = dict(zip(self._ansatz_params, parameter)) # type: Dict
wave_function = self.ansatz.assign_parameters(param_dict)
else:
wave_function = self.ansatz.construct_circuit(parameter)
# Expectation was never created , try to create one
if self._expectation is None:
self._try_set_expectation_value_from_factory(operator)
# If setting the expectation failed, raise an Error:
if self._expectation is None:
raise AlgorithmError(
"No expectation set and could not automatically set one, please "
"try explicitly setting an expectation or specify a backend so it "
"can be chosen automatically."
)
observable_meas = self.expectation.convert(StateFn(operator, is_measurement=True))
ansatz_circuit_op = CircuitStateFn(wave_function)
return observable_meas.compose(ansatz_circuit_op).reduce()
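# Composing the measurement StateFn of ``operator`` with the bound ansatz state yields an
# operator whose eval() gives the expectation value <psi(theta)|O|psi(theta)>.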
def construct_circuit(
self,
parameter: Union[List[float], List[Parameter], np.ndarray],
operator: OperatorBase,
) -> List[QuantumCircuit]:
"""Return the circuits used to compute the expectation value.
Args:
parameter: Parameters for the ansatz circuit.
operator: Qubit operator of the Observable
Returns:
A list of the circuits used to compute the expectation value.
"""
expect_op = self.construct_expectation(parameter, operator).to_circuit_op()
circuits = []
# recursively extract circuits
def extract_circuits(op):
if isinstance(op, CircuitStateFn):
circuits.append(op.primitive)
elif isinstance(op, ListOp):
for op_i in op.oplist:
extract_circuits(op_i)
extract_circuits(expect_op)
return circuits
@classmethod
def supports_aux_operators(cls) -> bool:
return True
def _eval_aux_ops(self, aux_operators: List[OperatorBase], threshold: float = 1e-12) -> None:
# Create new CircuitSampler to avoid breaking existing one's caches.
sampler = CircuitSampler(self.quantum_instance)
aux_op_meas = self.expectation.convert(StateFn(ListOp(aux_operators), is_measurement=True))
aux_op_expect = aux_op_meas.compose(CircuitStateFn(self.get_optimal_circuit()))
values = np.real(sampler.convert(aux_op_expect).eval())
# Discard values below threshold
aux_op_results = values * (np.abs(values) > threshold)
# Deal with the aux_op behavior where there can be Nones or Zero qubit Paulis in the list
_aux_op_nones = [op is None for op in aux_operators]
self._ret.aux_operator_eigenvalues = [
None if is_none else [result]
for (is_none, result) in zip(_aux_op_nones, aux_op_results)
]
# As this has mixed types, since it can included None, it needs to explicitly pass object
# data type to avoid numpy 1.19 warning message about implicit conversion being deprecated
self._ret.aux_operator_eigenvalues = np.array(
[self._ret.aux_operator_eigenvalues], dtype=object
)
def _check_operator(self, operator: OperatorBase) -> OperatorBase:
"""set operator"""
self._expect_op = None
self._check_operator_varform(operator)
# Expectation was not passed by user, try to create one
if not self._user_valid_expectation:
self._try_set_expectation_value_from_factory(operator)
return operator
def compute_minimum_eigenvalue(
self, operator: OperatorBase, aux_operators: Optional[List[Optional[OperatorBase]]] = None
) -> MinimumEigensolverResult:
super().compute_minimum_eigenvalue(operator, aux_operators)
if self.quantum_instance is None:
raise AlgorithmError(
"A QuantumInstance or Backend " "must be supplied to run the quantum algorithm."
)
if operator is None:
raise AlgorithmError("The operator was never provided.")
operator = self._check_operator(operator)
# We need to handle the array entries being Optional i.e. having value None
if aux_operators:
zero_op = I.tensorpower(operator.num_qubits) * 0.0
converted = []
for op in aux_operators:
if op is None:
converted.append(zero_op)
else:
converted.append(op)
# For some reason Chemistry passes aux_ops with 0 qubits and paulis sometimes.
aux_operators = [zero_op if op == 0 else op for op in converted]
else:
aux_operators = None
self._quantum_instance.circuit_summary = True
self._eval_count = 0
# Convert the gradient operator into a callable function that is compatible with the
# optimization routine.
if self._gradient:
if isinstance(self._gradient, GradientBase):
self._gradient = self._gradient.gradient_wrapper(
~StateFn(operator) @ StateFn(self._ansatz),
bind_params=self._ansatz_params,
backend=self._quantum_instance,
)
if not self._expect_op:
self._expect_op = self.construct_expectation(self._ansatz_params, operator)
vqresult = self.find_minimum(
initial_point=self.initial_point,
ansatz=self.ansatz,
cost_fn=self._energy_evaluation,
gradient_fn=self._gradient,
optimizer=self.optimizer,
)
self._ret = VQEResult()
self._ret.combine(vqresult)
if vqresult.optimizer_evals is not None and self._eval_count >= vqresult.optimizer_evals:
self._eval_count = vqresult.optimizer_evals
self._eval_time = vqresult.optimizer_time
logger.info(
"Optimization complete in %s seconds.\nFound opt_params %s in %s evals",
self._eval_time,
vqresult.optimal_point,
self._eval_count,
)
self._ret.eigenvalue = vqresult.optimal_value + 0j
self._ret.eigenstate = self.get_optimal_vector()
self._ret.eigenvalue = self.get_optimal_cost()
if aux_operators:
self._eval_aux_ops(aux_operators)
self._ret.aux_operator_eigenvalues = self._ret.aux_operator_eigenvalues[0]
self._ret.cost_function_evals = self._eval_count
return self._ret
def _energy_evaluation(
self, parameters: Union[List[float], np.ndarray]
) -> Union[float, List[float]]:
"""Evaluate energy at given parameters for the ansatz.
This is the objective function to be passed to the optimizer that is used for evaluation.
Args:
parameters: The parameters for the ansatz.
Returns:
Energy of the hamiltonian of each parameter.
Raises:
RuntimeError: If the ansatz has no parameters.
"""
num_parameters = self.ansatz.num_parameters
if self._ansatz.num_parameters == 0:
raise RuntimeError("The ansatz cannot have 0 parameters.")
parameter_sets = np.reshape(parameters, (-1, num_parameters))
# Create dict associating each parameter with the lists of parameterization values for it
param_bindings = dict(
zip(self._ansatz_params, parameter_sets.transpose().tolist())
) # type: Dict
start_time = time()
sampled_expect_op = self._circuit_sampler.convert(self._expect_op, params=param_bindings)
means = np.real(sampled_expect_op.eval())
if self._callback is not None:
variance = np.real(self._expectation.compute_variance(sampled_expect_op))
estimator_error = np.sqrt(variance / self.quantum_instance.run_config.shots)
for i, param_set in enumerate(parameter_sets):
self._eval_count += 1
self._callback(self._eval_count, param_set, means[i], estimator_error[i])
else:
self._eval_count += len(means)
end_time = time()
logger.info(
"Energy evaluation returned %s - %.5f (ms), eval count: %s",
means,
(end_time - start_time) * 1000,
self._eval_count,
)
return means if len(means) > 1 else means[0]
def get_optimal_cost(self) -> float:
"""Get the minimal cost or energy found by the VQE."""
if self._ret.optimal_point is None:
raise AlgorithmError(
"Cannot return optimal cost before running the " "algorithm to find optimal params."
)
return self._ret.optimal_value
def get_optimal_circuit(self) -> QuantumCircuit:
"""Get the circuit with the optimal parameters."""
if self._ret.optimal_point is None:
raise AlgorithmError(
"Cannot find optimal circuit before running the "
"algorithm to find optimal params."
)
return self.ansatz.assign_parameters(self._ret.optimal_parameters)
def get_optimal_vector(self) -> Union[List[float], Dict[str, int]]:
"""Get the simulation outcome of the optimal circuit."""
from qiskit.utils.run_circuits import find_regs_by_name
if self._ret.optimal_point is None:
raise AlgorithmError(
"Cannot find optimal vector before running the " "algorithm to find optimal params."
)
qc = self.get_optimal_circuit()
min_vector = {}
if self._quantum_instance.is_statevector:
ret = self._quantum_instance.execute(qc)
min_vector = ret.get_statevector(qc)
else:
c = ClassicalRegister(qc.width(), name="c")
q = find_regs_by_name(qc, "q")
qc.add_register(c)
qc.barrier(q)
qc.measure(q, c)
ret = self._quantum_instance.execute(qc)
counts = ret.get_counts(qc)
# normalize, just as done in CircuitSampler.sample_circuits
shots = self._quantum_instance._run_config.shots
min_vector = {b: (v / shots) ** 0.5 for (b, v) in counts.items()}
return min_vector
@property
def optimal_params(self) -> List[float]:
"""The optimal parameters for the ansatz."""
if self._ret.optimal_point is None:
raise AlgorithmError("Cannot find optimal params before running the algorithm.")
return self._ret.optimal_point
class VQEResult(VariationalResult, MinimumEigensolverResult):
"""VQE Result."""
def __init__(self) -> None:
super().__init__()
self._cost_function_evals = None
@property
def cost_function_evals(self) -> Optional[int]:
"""Returns number of cost optimizer evaluations"""
return self._cost_function_evals
@cost_function_evals.setter
def cost_function_evals(self, value: int) -> None:
"""Sets number of cost function evaluations"""
self._cost_function_evals = value
|
the-stack_0_23018 | '''base config for ce2p'''
# config for dataset
DATASET_CFG = {
'train': {
'type': '',
'set': 'train',
'rootdir': '',
'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
('RandomCrop', {'crop_size': (512, 512), 'one_category_max_ratio': 0.75}),
('RandomFlip', {'flip_prob': 0.5}),
('PhotoMetricDistortion', {}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),
('Padding', {'output_size': (512, 512), 'data_type': 'tensor'}),]
},
'test': {
'type': '',
'set': 'val',
'rootdir': '',
'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': None}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),]
}
}
# config for dataloader
DATALOADER_CFG = {
'train': {
'type': ['nondistributed', 'distributed'][1],
'batch_size': 16,
'num_workers': 16,
'shuffle': True,
'pin_memory': True,
'drop_last': True,
},
'test': {
'type': ['nondistributed', 'distributed'][1],
'batch_size': 1,
'num_workers': 16,
'shuffle': False,
'pin_memory': True,
'drop_last': False,
}
}
# config for optimizer
OPTIMIZER_CFG = {
'type': 'sgd',
'sgd': {
'learning_rate': 0.01,
'momentum': 0.9,
'weight_decay': 5e-4,
},
'max_epochs': 0,
'params_rules': {},
'policy': {
'type': 'poly',
'opts': {'power': 0.9, 'max_iters': None, 'num_iters': None, 'num_epochs': None}
},
'adjust_period': ['iteration', 'epoch'][0],
}
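# Sketch of how the 'poly' policy above is typically applied (max_iters is filled in at
# runtime): lr = learning_rate * (1 - cur_iter / max_iters) ** power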
# config for losses
LOSSES_CFG = {
'loss_cls_stage1': {
'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
},
'loss_cls_stage2': {
'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
},
'loss_edge': {
'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
}
}
# config for model
MODEL_CFG = {
'type': 'ce2p',
'benchmark': True,
'num_classes': -1,
'align_corners': False,
'is_multi_gpus': True,
'distributed': {'is_on': True, 'backend': 'nccl'},
'norm_cfg': {'type': 'syncbatchnorm', 'opts': {}},
'act_cfg': {'type': 'leakyrelu', 'opts': {'negative_slope': 0.01, 'inplace': True}},
'backbone': {
'type': 'resnet101',
'series': 'resnet',
'pretrained': True,
'outstride': 16,
'use_stem': True,
'selected_indices': (0, 1, 2, 3),
},
'ppm': {
'in_channels': 2048,
'out_channels': 512,
'pool_scales': [1, 2, 3, 6],
},
'epm': {
'in_channels_list': [256, 512, 1024],
'hidden_channels': 256,
'out_channels': 2
},
'shortcut': {
'in_channels': 256,
'out_channels': 48,
},
'decoder':{
'stage1': {
'in_channels': 560,
'out_channels': 512,
'dropout': 0,
},
'stage2': {
'in_channels': 1280,
'out_channels': 512,
'dropout': 0.1
},
},
}
# config for inference
INFERENCE_CFG = {
'mode': 'whole',
'opts': {},
'tricks': {
'multiscale': [1],
'flip': False,
'use_probs_before_resize': False
}
}
# config for common
COMMON_CFG = {
'train': {
'backupdir': '',
'logfilepath': '',
'loginterval': 50,
'saveinterval': 1
},
'test': {
'backupdir': '',
'logfilepath': '',
'resultsavepath': ''
}
} |
the-stack_0_23020 | from django.shortcuts import get_object_or_404
from rest_framework import viewsets, permissions
from rest_framework.response import Response
from .models import Domain, LinkedAddress
from .serializers import LinkedAddressSerializer, DomainSerializer, DetailedDomainSerializer
from .get_DNS_address import get_DNS_address
class LinkedAddressViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows linkedAddress to be viewed or edited (by authenticated users).
"""
queryset = LinkedAddress.objects.all()
serializer_class = LinkedAddressSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
class DomainViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows Domains holding crypto addresses to be viewed.
To add/check a domain, simply go to thinkingzoo.com/domain/yourdomain.com/
where yourdomain.com is the domain you want to add or check.
It must be in the format *****.*****, with no additional www, subdomains, https, etc.
"""
queryset = Domain.objects.all()
serializer_class = DomainSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
lookup_value_regex = '[^/]+' # this line replaces the regex that excludes "."
#lookup_field = 'domain'
def get_serializer_class(self):
if self.action == 'retrieve':
return DetailedDomainSerializer
return DomainSerializer
def retrieve(self, request, *args, **kwargs):
# do your customization here
instance = self.get_object()
serializer = self.get_serializer(instance)
temp = serializer.data.get('domain', None)
return Response(serializer.data)
def get_object(self):
'''
double check the domain for crypto data
update the model
then continue with remainder of normal get_object flow
'''
# getdata from DNS server
# save data to database, if it exists
queryset = self.filter_queryset(self.get_queryset())
# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
# Inserted code starts here
# attempt to delete any existing enteries
domain = Domain.objects.all().filter(domain=self.kwargs[lookup_url_kwarg])
if len(domain) > 0:
#print(domain[0])
domain[0].delete()
# get address DNS info
DNS_address = get_DNS_address()
DNS_address.get_DNS_address(self.kwargs[lookup_url_kwarg])
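# parsed_response is assumed to be a list of dicts of the form
# {'address': <crypto address>, 'type': <address type>}, as consumed by the loop below.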
if len(DNS_address.parsed_response) >= 1:
# We have results on this domain
# Add the Domain
new_domain = Domain(domain=self.kwargs[lookup_url_kwarg])
new_domain.save()
# Loop through and add each linked account
for line in DNS_address.parsed_response:
new_address = LinkedAddress(
address = line['address'],
address_type = line['type'],
domain = new_domain,
)
new_address.save()
# Inserted code ends here
assert lookup_url_kwarg in self.kwargs, (
'Expected view %s to be called with a URL keyword argument '
'named "%s". Fix your URL conf, or set the `.lookup_field` '
'attribute on the view correctly.' %
(self.__class__.__name__, lookup_url_kwarg)
)
filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
# this raises 404 if object doens't exist
obj = get_object_or_404(queryset, **filter_kwargs)
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj |
the-stack_0_23021 | #!/usr/bin/env python3
import numpy as np
import csv
import sys
################################# Parameters ##################################
#sys.argv = ["SynData_PPMTF.py", "SmplA1"]
if len(sys.argv) < 2:
print("Usage:",sys.argv[0],"[ParamA]" )
sys.exit(0)
# City
City = "TK"
#City = "OS"
# Training user index file (input)
TUserIndexFile = "data/tuserindex_XX.csv"
# POI index file (input)
POIIndexFile = "data/POIindex_XX.csv"
# Training transition tensor file (input)
TrainTransTensorFile = "data/traintranstensor_XX.csv"
# Training visit tensor file (input)
TrainVisitTensorFile = "data/trainvisittensor_XX.csv"
# Prefix of the model parameter file (input)
ModelParameterFile = "data/models_syntraces_XX/modelparameter"
# Prefix of the synthesized trace file (output)
SynTraceFile = "data/models_syntraces_XX/syntraces"
# Name of the model parameter A
ParamA = sys.argv[1]
#ParamA = "SmplA"
# Number of time periods
T = 12
# Number of columns in model parameters (A, B, C)
K = 32
#K = 16
# Number of iterations in Gibbs sampling
ItrNum = 100
#ItrNum = 10
# Threshold for a visit count
#VisThr = 0.5
VisThr = 0
# Minimum value of a visit count
VisitDelta = 0.00000001
# Minimum value of a transition count
TransDelta = 0.00000001
# Read trans from TrainTransTensorFile (1:yes, 0:no)
ReadTrans = 1
#ReadTrans = 0
# Read visits from TrainVisitTensorFile (1:yes, 0:no)
ReadVisit = 1
#ReadVisit = 0
# Number of traces per user
#TraceNum = 1
TraceNum = 60
# Number of time instants per time period
#TimInsNum = 1
TimInsNum = 12
# Increase visit-counts (normalized to [0,1]) for a specific location (home) at 6-7h & 19-24h (time_poi_dist) by Gamma
Gamma = 20
# Number of synthesized users
SynN = 2000
########################### Read model parameters #############################
# [output1]: A (N x K matrix)
# [output2]: B (M x K matrix)
# [output3]: C (M x K matrix)
# [output4]: D (T x K matrix)
def ReadModelParameters():
# Read model parameter A
infile = ModelParameterFile + "_K" + str(K) + "_Itr" + str(ItrNum) + "_" + ParamA + ".csv"
f = open(infile, "r")
A = np.loadtxt(infile, delimiter=",")
f.close()
# Read model parameter B
infile = ModelParameterFile + "_K" + str(K) + "_Itr" + str(ItrNum) + "_B.csv"
f = open(infile, "r")
B = np.loadtxt(infile, delimiter=",")
f.close()
# Read model parameter C
infile = ModelParameterFile + "_K" + str(K) + "_Itr" + str(ItrNum) + "_C.csv"
f = open(infile, "r")
C = np.loadtxt(infile, delimiter=",")
f.close()
# Read model parameter D
infile = ModelParameterFile + "_K" + str(K) + "_Itr" + str(ItrNum) + "_D.csv"
f = open(infile, "r")
D = np.loadtxt(infile, delimiter=",")
f.close()
return A, B, C, D
############################## Synthesize traces ##############################
# [input1]: A (N x K matrix)
# [input2]: B (M x K matrix)
# [input3]: D (T x K matrix)
# [input4]: N -- Number of users
# [input5]: M -- Number of POIs
# [input6]: T -- Number of time periods
# [input7]: poi_dic ({poi_index: category})
def SynTraces(A, B, D, N, M, T, poi_dic):
# Initialization
ab = np.zeros(M)
ad = np.zeros(M)
# Output header information
outfile = SynTraceFile + "_K" + str(K) + "_Itr" + str(ItrNum) + "_" + ParamA + ".csv"
f = open(outfile, "w")
print("user,trace_no,time_period,time_instant,poi_index,category", file=f)
writer = csv.writer(f, lineterminator="\n")
# Read transitions from TrainTransTensorFile --> trans
if ReadTrans == 1:
trans = np.zeros((M, M))
g = open(TrainTransTensorFile, "r")
reader = csv.reader(g)
next(reader)
for lst in reader:
poi_index_from = int(lst[1])
poi_index_to = int(lst[2])
trans[poi_index_from,poi_index_to] = 1
g.close()
# Read visits from TrainVisitTensorFile --> visit
if ReadVisit == 1:
visit = np.zeros((T, M))
g = open(TrainVisitTensorFile, "r")
reader = csv.reader(g)
next(reader)
for lst in reader:
poi_index_from = int(lst[1])
time_id = int(lst[2])
visit[time_id,poi_index_from] = 1
g.close()
HomeFileName = "home_" + ParamA + ".csv"
g = open(HomeFileName, "w")
# For each user
for n in range(SynN):
# Initialization
time_poi_dist = np.zeros((T, M))
time_poi_dist_sum = np.zeros(T)
prop_mat = np.zeros((M, M))
trans_vec = np.zeros(M)
################### Calculate the POI distributions ###################
for t in range(T):
ad = A[n, :] * D[t, :]
for i in range(M):
# Elements in a sampled visit tensor --> time_poi_dist
time_poi_dist[t,i] = np.sum(ad * B[i, :])
# Assign VisitDelta for an element whose value is less than VisThr
if time_poi_dist[t,i] < VisThr:
time_poi_dist[t,i] = VisitDelta
# Assign VisitDelta if there is no visits for time t & user i
if ReadVisit == 1 and visit[t,i] == 0:
time_poi_dist[t,i] = VisitDelta
# Normalize time_poi_dist (this is necessary for randomly sampling home_loc)
for t in range(T):
time_poi_dist_sum[t] = np.sum(time_poi_dist[t])
if time_poi_dist_sum[t] > 0:
time_poi_dist[t] /= time_poi_dist_sum[t]
else:
print("Error: All probabilities are 0 for user", n, "and time", t)
sys.exit(-1)
# Randomly sample home from the POI distribution at 6h --> home_loc
rnd = np.random.rand()
prob_sum = 0
for i in range(M):
prob_sum += time_poi_dist[0,i]
if prob_sum >= rnd:
break
home_loc = i
# print(home_loc)
print(home_loc, file=g)
# Increase visit-counts for home_loc at 6-7h & 18-21h (time_poi_dist) by Gamma
for t in range(2):
time_poi_dist[t,home_loc] += Gamma
for t in range(T-2,T):
time_poi_dist[t,home_loc] += Gamma
# Normalize time_poi_dist at 6-7h & 18-21h (again)
for t in range(2):
time_poi_dist_sum[t] = np.sum(time_poi_dist[t])
if time_poi_dist_sum[t] > 0:
time_poi_dist[t] /= time_poi_dist_sum[t]
else:
print("Error: All probabilities are 0 for user", n, "and time", t)
sys.exit(-1)
for t in range(T-3,T):
time_poi_dist_sum[t] = np.sum(time_poi_dist[t])
if time_poi_dist_sum[t] > 0:
time_poi_dist[t] /= time_poi_dist_sum[t]
else:
print("Error: All probabilities are 0 for user", n, "and time", t)
sys.exit(-1)
#################### Calculate the proposal matrix ####################
for i in range(M):
ab = A[n, :] * B[i, :]
# Elements in a sampled transition tensor (assign TransDelta for a small transition count) --> prop_mat
for j in range(M):
prop_mat[i,j] = max(np.sum(ab * C[j, :]), TransDelta)
# Assign TransDelta if there is no transitions between i and j
if ReadTrans == 1 and trans[i,j] == 0:
prop_mat[i,j] = TransDelta
# Normalize prop_mat
row_sum = np.sum(prop_mat[i])
prop_mat[i] /= row_sum
########################## Synthesize traces ##########################
poi_index_pre = 0
# For each trace
for trace_no in range(TraceNum):
# For each time period
for t in range(T):
# For each time instant
for ins in range(TimInsNum):
# Initial time period and initial event
if t == 0 and ins == 0:
# Randomly sample POI from the POI distribution
rnd = np.random.rand()
prob_sum = 0
for i in range(M):
prob_sum += time_poi_dist[t,i]
if prob_sum >= rnd:
break
poi_index = i
else:
##### Transform poi_index_pre into poi_index via MH (Metropolis-Hastings) ######
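# Standard MH step: a move from i = poi_index_pre to j proposed by prop_mat[i, :] is
# accepted with probability min(1, alpha), alpha = (pi_t(j)*q(j->i)) / (pi_t(i)*q(i->j)),
# so the chain leaves the target distribution time_poi_dist[t] invariant.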
# Calculate the transition vector --> trans_vec
trans_vec[poi_index_pre] = 0
for j in range(M):
if poi_index_pre != j:
alpha = (time_poi_dist[t][j] * prop_mat[j,poi_index_pre]) / (time_poi_dist[t][poi_index_pre] * prop_mat[poi_index_pre,j])
trans_vec[j] = prop_mat[poi_index_pre,j] * min(1, alpha)
row_sum = np.sum(trans_vec)
trans_vec[poi_index_pre] = 1 - row_sum
# Transform poi_index_pre into poi_index via trans_vec
rnd = np.random.rand()
prob_sum = 0
for j in range(M):
prob_sum += trans_vec[j]
if prob_sum >= rnd:
break
poi_index = j
# Output an initial location ([user, trace_no, time_period, time_instant, poi_index, category])
s = [n, trace_no, t, ins, poi_index, poi_dic[poi_index]]
writer.writerow(s)
# Save the previous poi_index
poi_index_pre = poi_index
f.close()
g.close()
#################################### Main #####################################
# Fix a seed
#np.random.seed(1)
# Fix a seed using a random number in [0,2^32-1]
#np.random.seed(819081307) # Preliminary
np.random.seed(538173108) # Final (TK)
# Replace XX with City
TUserIndexFile = TUserIndexFile.replace("XX", City)
POIIndexFile = POIIndexFile.replace("XX", City)
TrainTransTensorFile = TrainTransTensorFile.replace("XX", City)
TrainVisitTensorFile = TrainVisitTensorFile.replace("XX", City)
ModelParameterFile = ModelParameterFile.replace("XX", City)
SynTraceFile = SynTraceFile.replace("XX", City)
# Number of training users --> N
N = len(open(TUserIndexFile).readlines()) - 1
# Number of POIs --> M
M = len(open(POIIndexFile).readlines()) - 1
# Read the POI index file --> poi_dic ({poi_index: category})
poi_dic = {}
f = open(POIIndexFile, "r")
reader = csv.reader(f)
next(reader)
for lst in reader:
poi_dic[int(lst[1])] = lst[2]
# Read model parameters
A, B, C, D = ReadModelParameters()
# Synthesize traces
SynTraces(A, B, D, N, M, T, poi_dic)
|
the-stack_0_23022 |
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from math import radians, cos, sin, asin, sqrt
import sys
import os
from tqdm import tqdm_notebook as tqdm
try:
sys.path.remove('/sanssauvegarde/homes/vnguye04/Codes/DAPPER')
except:
pass
sys.path.append("..")
import utils
import pickle
import matplotlib.pyplot as plt
import copy
from datetime import datetime
import time
from io import StringIO
from tqdm import tqdm
import argparse
# In[2]:
def getConfig(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description="Parses command.")
# ROI
parser.add_argument("--lat_min", type=float, default=9.0,
help="Lat min.")
parser.add_argument("--lat_max", type=float, default=14.0,
help="Lat max.")
parser.add_argument("--lon_min", type=float, default=-71.0,
help="Lon min.")
parser.add_argument("--lon_max", type=float, default=-66.0,
help="Lon max.")
# File paths
parser.add_argument("--dataset_dir", type=str,
default="./",
help="Dir to dataset.")
parser.add_argument("--l_input_filepath", type=str, nargs='+',
default=["ct_aruba_20172020_summer_valid_track.pkl"],
help="List of path to input files.")
parser.add_argument("--output_filepath", type=str,
default="./ct_aruba_20172020_summer/ct_aruba_20172020_summer_valid.pkl",
help="Path to output file.")
parser.add_argument("-v", "--verbose",dest='verbose',action='store_true', help="Verbose mode.")
config = parser.parse_args(args)
return config
config = getConfig(sys.argv[1:])
## Bretagne dataset
# LAT_MIN = 46.5
# LAT_MAX = 50.5
# LON_MIN = -8.0
# LON_MAX = -3.0
"""
LAT_MIN = 45.5
LAT_MAX = 50.5
LON_MIN = -10.0
LON_MAX = -1.0
FIG_W = 960
FIG_H = 533 #768
config.l_input_filepath = ["ct_bretagneB_20170103_10_20_valid_track.pkl"]
config.output_filepath = "./ct_bretagneB_20170103_10_20/ct_bretagneB_20170103_10_20_valid.pkl"
t_min = time.mktime(time.strptime("01/01/2017 00:00:00", "%d/%m/%Y %H:%M:%S"))
t_max = time.mktime(time.strptime("31/03/2017 23:59:59", "%d/%m/%Y %H:%M:%S"))
"""
# ## Aruba
"""
LAT_MIN = 9.0
LAT_MAX = 14.0
LON_MIN = -71.0
LON_MAX = -66.0
FIG_W = 960
FIG_H = int(960*5/5) #533 #768
config.l_input_filepath = ["ct_aruba_20172020_summer_valid_track.pkl"]
config.output_filepath = "./ct_aruba_20172020_summer/ct_aruba_20172020_summer_valid.pkl"
# t_min = time.mktime(time.strptime("01/11/2017 00:00:00", "%d/%m/%Y %H:%M:%S"))
# t_max = time.mktime(time.strptime("31/01/2020 23:59:59", "%d/%m/%Y %H:%M:%S"))
"""
"""
LAT_MIN = 9.0
LAT_MAX = 14.0
LON_MIN = -71.0
LON_MAX = -66.0
config.l_input_filepath = ["ct_aruba_2019_summer_valid_track.pkl"]
config.output_filepath = "./ct_aruba_2019_summer/ct_aruba_2019_summer_valid.pkl"
t_min = time.mktime(time.strptime("01/05/2019 00:00:00", "%d/%m/%Y %H:%M:%S"))
t_max = time.mktime(time.strptime("30/06/2019 23:59:59", "%d/%m/%Y %H:%M:%S"))
"""
#=====================================================================
LAT_MIN,LAT_MAX,LON_MIN,LON_MAX = config.lat_min,config.lat_max,config.lon_min,config.lon_max
LAT_RANGE = LAT_MAX - LAT_MIN
LON_RANGE = LON_MAX - LON_MIN
SPEED_MAX = 30.0 # knots
DURATION_MAX = 24 #h
EPOCH = datetime(1970, 1, 1)
LAT, LON, SOG, COG, HEADING, ROT, NAV_STT, TIMESTAMP, MMSI = list(range(9))
FIG_W = 960
FIG_H = int(960*LAT_RANGE/LON_RANGE) #533 #768
dict_list = []
for filename in config.l_input_filepath:
with open(os.path.join(config.dataset_dir,filename),"rb") as f:
temp = pickle.load(f)
dict_list.append(temp)
# In[3]:
print(" Remove erroneous timestamps and erroneous speeds...")
Vs = dict()
for Vi,filename in zip(dict_list, config.l_input_filepath):
print(filename)
for mmsi in list(Vi.keys()):
# Boundary
lat_idx = np.logical_or((Vi[mmsi][:,LAT] > LAT_MAX),
(Vi[mmsi][:,LAT] < LAT_MIN))
Vi[mmsi] = Vi[mmsi][np.logical_not(lat_idx)]
lon_idx = np.logical_or((Vi[mmsi][:,LON] > LON_MAX),
(Vi[mmsi][:,LON] < LON_MIN))
Vi[mmsi] = Vi[mmsi][np.logical_not(lon_idx)]
# # Abnormal timestamps
# abnormal_timestamp_idx = np.logical_or((Vi[mmsi][:,TIMESTAMP] > t_max),
# (Vi[mmsi][:,TIMESTAMP] < t_min))
# Vi[mmsi] = Vi[mmsi][np.logical_not(abnormal_timestamp_idx)]
# Abnormal speeds
abnormal_speed_idx = Vi[mmsi][:,SOG] > SPEED_MAX
Vi[mmsi] = Vi[mmsi][np.logical_not(abnormal_speed_idx)]
# Deleting empty keys
if len(Vi[mmsi]) == 0:
del Vi[mmsi]
continue
if mmsi not in list(Vs.keys()):
Vs[mmsi] = Vi[mmsi]
del Vi[mmsi]
else:
Vs[mmsi] = np.concatenate((Vs[mmsi],Vi[mmsi]),axis = 0)
del Vi[mmsi]
del dict_list, Vi, abnormal_speed_idx
# In[4]:
print(len(Vs))
# In[5]:
## STEP 2: VOYAGES SPLITTING
#======================================
# Cutting discontiguous voyages into contiguous ones
print("Cutting discontiguous voyages into contiguous ones...")
count = 0
voyages = dict()
INTERVAL_MAX = 2*3600 # 2h
for mmsi in list(Vs.keys()):
v = Vs[mmsi]
# Intervals between successive messages in a track
intervals = v[1:,TIMESTAMP] - v[:-1,TIMESTAMP]
idx = np.where(intervals > INTERVAL_MAX)[0]
if len(idx) == 0:
voyages[count] = v
count += 1
else:
tmp = np.split(v,idx+1)
for t in tmp:
voyages[count] = t
count += 1
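# Example of the splitting rule (illustrative values): if the gaps between consecutive
# messages are [600, 9000, 300] seconds and INTERVAL_MAX = 7200, then idx = [1] and
# np.split(v, idx + 1) yields the two contiguous sub-tracks v[:2] and v[2:].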
# In[6]:
print(len(Vs))
# In[7]:
# STEP 3: REMOVING SHORT VOYAGES
#======================================
# Removing AIS tracks whose length is smaller than 20 or that last less than 4h
print("Removing AIS tracks whose length is smaller than 20 or that last less than 4h...")
for k in list(voyages.keys()):
duration = voyages[k][-1,TIMESTAMP] - voyages[k][0,TIMESTAMP]
if (len(voyages[k]) < 20) or (duration < 4*3600):
voyages.pop(k, None)
# In[8]:
print(len(voyages))
# In[9]:
# STEP 4: REMOVING OUTLIERS
#======================================
print("Removing anomalous messages...")
error_count = 0
tick = time.time()
for k in tqdm(list(voyages.keys())):
track = voyages[k][:,[TIMESTAMP,LAT,LON,SOG]] # [Timestamp, Lat, Lon, Speed]
try:
o_report, o_calcul = utils.detectOutlier(track, speed_max = 30)
if o_report.all() or o_calcul.all():
voyages.pop(k, None)
else:
voyages[k] = voyages[k][np.invert(o_report)]
voyages[k] = voyages[k][np.invert(o_calcul)]
except:
voyages.pop(k,None)
error_count += 1
tok = time.time()
print("STEP 4: duration = ",(tok - tick)/60) # 139.685766101 mins
# In[10]:
print(len(voyages))
# In[13]:
## STEP 6: SAMPLING
#======================================
# Sampling, resolution = 5 min
print('Sampling...')
Vs = dict()
count = 0
for k in tqdm(list(voyages.keys())):
v = voyages[k]
sampling_track = np.empty((0, 9))
for t in range(int(v[0,TIMESTAMP]), int(v[-1,TIMESTAMP]), 300): # 5 min
tmp = utils.interpolate(t,v)
if tmp is not None:
sampling_track = np.vstack([sampling_track, tmp])
else:
sampling_track = None
break
if sampling_track is not None:
Vs[count] = sampling_track
count += 1
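# Note: with a 300 s (5 min) resampling step there are 12 samples per hour, which is why
# the re-splitting step below uses 12*DURATION_MAX samples per day-long segment and
# 12*4 samples as the 4-hour minimum length.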
# In[11]:
## STEP 8: RE-SPLITTING
#======================================
print('Re-Splitting...')
Data = dict()
count = 0
for k in tqdm(list(Vs.keys())):
v = Vs[k]
# Split AIS track into small tracks whose duration <= 1 day
idx = np.arange(0, len(v), 12*DURATION_MAX)[1:]
tmp = np.split(v,idx)
for subtrack in tmp:
# only use tracks whose duration >= 4 hours
if len(subtrack) >= 12*4:
Data[count] = subtrack
count += 1
print(len(Data))
# ## STEP 5: REMOVING 'MOORED' OR 'AT ANCHOR' VOYAGES
# #======================================
# # Removing 'moored' or 'at anchor' voyages
# print("Removing 'moored' or 'at anchor' voyages...")
# for mmsi in tqdm(list(voyages.keys())):
# d_L = float(len(voyages[mmsi]))
# if np.count_nonzero(voyages[mmsi][:,NAV_STT] == 1)/d_L > 0.7 or np.count_nonzero(voyages[mmsi][:,NAV_STT] == 5)/d_L > 0.7:
# voyages.pop(mmsi,None)
# continue
# sog_max = np.max(voyages[mmsi][:,SOG])
# if sog_max < 1.0:
# voyages.pop(mmsi,None)
## STEP 5: REMOVING 'MOORED' OR 'AT ANCHOR' VOYAGES
#======================================
# Removing 'moored' or 'at anchor' voyages
print("Removing 'moored' or 'at anchor' voyages...")
for k in tqdm(list(Data.keys())):
d_L = float(len(Data[k]))
if np.count_nonzero(Data[k][:,NAV_STT] == 1)/d_L > 0.7 \
or np.count_nonzero(Data[k][:,NAV_STT] == 5)/d_L > 0.7:
Data.pop(k,None)
continue
sog_max = np.max(Data[k][:,SOG])
if sog_max < 1.0:
Data.pop(k,None)
print(len(Data))
# In[12]:
# In[15]:
## STEP 6: REMOVING LOW SPEED TRACKS
#======================================
print("Removing 'low speed' tracks...")
for k in tqdm(list(Data.keys())):
d_L = float(len(Data[k]))
if np.count_nonzero(Data[k][:,SOG] < 2)/d_L > 0.8:
Data.pop(k,None)
print(len(Data))
# In[21]:
## STEP 9: NORMALISATION
#======================================
print('Normalisation...')
for k in tqdm(list(Data.keys())):
v = Data[k]
v[:,LAT] = (v[:,LAT] - LAT_MIN)/(LAT_MAX-LAT_MIN)
v[:,LON] = (v[:,LON] - LON_MIN)/(LON_MAX-LON_MIN)
v[:,SOG][v[:,SOG] > SPEED_MAX] = SPEED_MAX
v[:,SOG] = v[:,SOG]/SPEED_MAX
v[:,COG] = v[:,COG]/360.0
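# For instance (illustrative numbers): with LAT_MIN = 9.0 and LAT_MAX = 14.0, a latitude of
# 11.5 maps to (11.5 - 9.0) / (14.0 - 9.0) = 0.5, and a SOG of 15 knots is first clipped at
# SPEED_MAX = 30 (a no-op here) and then scaled to 15 / 30 = 0.5.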
# In[22]:
print(config.output_filepath)
# In[23]:
# plt.plot(Data[0][:,LON],Data[0][:,LAT])
# In[24]:
print(len(Data))
# In[25]:
print(os.path.dirname(config.output_filepath))
# In[26]:
os.path.exists(os.path.dirname(config.output_filepath))
# In[27]:
if not os.path.exists(os.path.dirname(config.output_filepath)):
os.makedirs(os.path.dirname(config.output_filepath))
# In[28]:
## STEP 10: WRITING TO DISK
#======================================
with open(config.output_filepath,"wb") as f:
pickle.dump(Data,f)
# In[29]:
# print(debug)
# In[30]:
print(len(Data))
# In[31]:
minlen = 1000
for k in list(Data.keys()):
v = Data[k]
if len(v) < minlen:
minlen = len(v)
print("min len: ",minlen)
# In[32]:
# len(Data[0])
# In[33]:
# print(debug)
# In[34]:
## Loading coastline polygon.
# For visualisation purposes; delete this part if you do not have the coastline
# shapefile
coastline_filename = "./streetmap_coastline_Bretagne.pkl"
try:
with open(coastline_filename, 'rb') as f:
l_coastline_poly = pickle.load(f)
except:
with open(coastline_filename, 'rb') as f:
l_coastline_poly = pickle.load(f, encoding='latin1')
# In[35]:
config.output_filepath
# In[36]:
Vs = Data
FIG_DPI = 150
plt.figure(figsize=(FIG_W/FIG_DPI, FIG_H/FIG_DPI), dpi=FIG_DPI)
cmap = plt.cm.get_cmap('Blues')
l_keys = list(Vs.keys())
N = len(Vs)
for d_i in range(N):
key = l_keys[d_i]
c = cmap(float(d_i)/(N-1))
tmp = Vs[key]
v_lat = tmp[:,0]*LAT_RANGE + LAT_MIN
v_lon = tmp[:,1]*LON_RANGE + LON_MIN
# plt.plot(v_lon,v_lat,linewidth=0.8)
plt.plot(v_lon,v_lat,color=c,linewidth=0.8)
## Coastlines
if "bretagne" in config.output_filepath:
for point in l_coastline_poly:
poly = np.array(point)
plt.plot(poly[:,0],poly[:,1],color="k",linewidth=0.8)
plt.xlim([LON_MIN,LON_MAX])
plt.ylim([LAT_MIN,LAT_MAX])
plt.xlabel("Longitude")
plt.ylabel("Latitude")
plt.tight_layout()
plt.savefig(config.output_filepath.replace(".pkl",".png"))
|
the-stack_0_23023 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
dependencies = [
('centinela', '0003_auto_20150924_1950'),
]
operations = [
migrations.AddField(
model_name='widgets',
name='place',
field=models.CharField(default='lateral', verbose_name='Place', max_length=30, choices=[('top', 'Top'), ('lateral', 'Lateral'), ('after_post', 'After Post'), ('before_post', 'Before Post'), ('footer', 'Footer')]),
),
migrations.AlterField(
model_name='slider',
name='until_date',
field=models.DateTimeField(default=datetime.datetime(2015, 11, 6, 23, 23, 11, 195557, tzinfo=utc), verbose_name='until'),
),
migrations.AlterField(
model_name='theme',
name='status',
field=models.BooleanField(default=False, verbose_name='active'),
),
migrations.AlterField(
model_name='widgets',
name='until_date',
field=models.DateTimeField(default=datetime.datetime(2015, 11, 6, 23, 23, 11, 196898, tzinfo=utc), verbose_name='until'),
),
]
|
the-stack_0_23024 | import functools
import inspect
import types
from typing import Callable
from multimethod import (
signature,
subtype,
get_type,
groupby,
DispatchError,
get_types,
overload,
multimethod,
)
class multiple_dispatch(multimethod):
"""A callable directed acyclic graph of methods."""
pending = None # type: set
def __new__(cls, func):
namespace = inspect.currentframe().f_back.f_locals
self = functools.update_wrapper(dict.__new__(cls), func)
self.pending = set()
self.get_type = type # default type checker
return namespace.get(func.__name__, self)
def __init__(self, func: Callable):
try:
self[get_types(func)] = func
except NameError:
self.pending.add(func)
def register(self, *args):
"""Decorator for registering a function.
Optionally call with types to return a decorator for unannotated
functions.
"""
if len(args) == 1 and hasattr(args[0], '__annotations__'):
return overload.register(self, *args)
return lambda func: self.__setitem__(args, func) or func
def __get__(self, instance, owner):
return self if instance is None else types.MethodType(self, instance)
def parents(self, types: tuple) -> set:
"""Find immediate parents of potential key."""
parents = {
key for key in self if isinstance(key, signature) and key < types
}
return parents - {
ancestor for parent in parents for ancestor in parent.parents
}
def clean(self):
"""Empty the cache."""
for key in list(self):
if not isinstance(key, signature):
super().__delitem__(key)
def __setitem__(self, types: tuple, func: Callable):
self.clean()
types = signature(types)
parents = types.parents = self.parents(types)
for key in self:
if types < key and (not parents or parents & key.parents):
key.parents -= parents
key.parents.add(types)
if any(map(subtype.subcheck, types)):
self.get_type = get_type # switch to slower generic type checker
super().__setitem__(types, func)
self.__doc__ = self.docstring
def __delitem__(self, types: tuple):
self.clean()
super().__delitem__(types)
for key in self:
if types in key.parents:
key.parents = self.parents(key)
self.__doc__ = self.docstring
def __missing__(self, types: tuple) -> Callable:
"""Find and cache the next applicable method of given types."""
self.evaluate()
if types in self:
return self[types]
groups = groupby(signature(types).__sub__, self.parents(types))
keys = groups[min(groups)] if groups else []
funcs = {self[key] for key in keys}
if len(funcs) == 1:
return self.setdefault(types, *funcs)
msg = f"{self.__name__}: {len(keys)} methods found" # type: ignore
raise DispatchError(msg, types, keys)
def __call__(self, *args, **kwargs):
"""Resolve and dispatch to best method."""
return self[tuple(map(self.get_type, args))](*args, **kwargs)
def evaluate(self):
"""Evaluate any pending forward references.
This can be called explicitly when using forward references,
otherwise cache misses will evaluate.
"""
while self.pending:
func = self.pending.pop()
self[get_types(func)] = func
@property
def docstring(self):
"""A descriptive docstring of all registered functions."""
docs = []
for func in set(self.values()):
try:
sig = inspect.signature(func)
except ValueError:
sig = ''
doc = func.__doc__ or ''
docs.append(f'{func.__name__}{sig}\n {doc}')
return '\n\n'.join(docs)
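# Usage sketch (Circle and Rectangle are hypothetical types, not part of this module):
#
#   @multiple_dispatch
#   def area(shape: Circle):
#       return 3.14159 * shape.radius ** 2
#
#   @multiple_dispatch
#   def area(shape: Rectangle):
#       return shape.width * shape.height
#
# Both definitions are collected into one dispatcher (see __new__), and a call such as
# area(some_shape) is routed by the runtime types of its positional arguments.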
|
the-stack_0_23027 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
project = "bottle-tools"
copyright = "2019, Arjoonn Sharma"
author = "Arjoonn Sharma"
version = "2019.12.22"
# The short X.Y version
version = version
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinxarg.ext"]
# Add any paths that contain templates here, relative to this directory.
templates_path = [".templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
"github_user": "theSage21",
"github_repo": "bottle-tools",
"github_banner": True,
"show_related": True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [".static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "bottle-toolsdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"bottle-tools.tex",
"bottle-tools Documentation",
"Arjoonn Sharma",
"manual",
)
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "bottle-tools", "bottle-tools Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"bottle-tools",
"bottle-tools Documentation",
author,
"bottle-tools",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
|
the-stack_0_23028 | # -*- coding: UTF-8 -*-
# Copyright (c) 2018, Dirk Gütlin & Thomas Hartmann
# All rights reserved.
#
# This file is part of the pymatreader Project, see:
# https://gitlab.com/obob/pymatreader
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import types
import sys
import numpy
import scipy.io
if sys.version_info < (3,):
chr = unichr # noqa This is needed for python 2 and 3 compatibility
def _import_h5py():
try:
import h5py
except Exception as exc:
raise ImportError('h5py is required to read MATLAB files >= v7.3 '
'(%s)' % (exc,))
return h5py
def _hdf5todict(hdf5_object, variable_names=None, ignore_fields=None):
"""
Recursively converts a hdf5 object to a python dictionary,
converting all types as well.
Parameters
----------
hdf5_object: Union[h5py.Group, h5py.Dataset]
Object to convert. Can be a h5py File, Group or Dataset
variable_names: iterable, optional
Tuple or list of variables to include. If set to none, all
variable are read.
ignore_fields: iterable, optional
Tuple or list of fields to ignore. If set to none, all fields will
be read.
Returns
-------
dict
Python dictionary
"""
h5py = _import_h5py()
if isinstance(hdf5_object, h5py.Group):
return _handle_hdf5_group(hdf5_object, variable_names=variable_names,
ignore_fields=ignore_fields)
elif isinstance(hdf5_object, h5py.Dataset):
return _handle_hdf5_dataset(hdf5_object)
elif isinstance(hdf5_object, (list, types.GeneratorType)):
return [_hdf5todict(item) for item in hdf5_object]
raise TypeError('Unknown type in hdf5 file')
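# Minimal usage sketch (assumes a MATLAB >= v7.3 file called 'data.mat'; the file name and
# the variable name 'results' are placeholders, not part of this module):
#
#   import h5py
#   with h5py.File('data.mat', 'r') as mat_file:
#       contents = _hdf5todict(mat_file, variable_names=('results',))
#
# 'contents' is then a plain dict of numpy arrays, lists and scalars.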
def _handle_hdf5_group(hdf5_object, variable_names=None, ignore_fields=None):
all_keys = set(hdf5_object.keys())
if ignore_fields:
all_keys = all_keys - set(ignore_fields)
if variable_names:
all_keys = all_keys & set(variable_names)
return_dict = dict()
for key in all_keys:
return_dict[key] = _hdf5todict(hdf5_object[key],
variable_names=None,
ignore_fields=ignore_fields)
return return_dict
def _handle_hdf5_dataset(hdf5_object):
if 'MATLAB_empty' in hdf5_object.attrs.keys():
data = numpy.empty((0,))
else:
data = hdf5_object.value
if isinstance(data, numpy.ndarray) and \
data.dtype == numpy.dtype('object'):
data = [hdf5_object.file[cur_data] for cur_data in data.flatten()]
if len(data) == 1 and hdf5_object.attrs['MATLAB_class'] == b'cell':
data = data[0]
data = data.value
return _assign_types(data)
data = _hdf5todict(data)
return _assign_types(data)
def _convert_string_hdf5(values):
if values.size > 1:
assigned_values = u''.join(chr(c) for c in values.flatten())
else:
assigned_values = chr(values)
return assigned_values
def _assign_types(values):
"""private function, which assigns correct types to h5py extracted values
from _browse_dataset()"""
if type(values) == numpy.ndarray:
assigned_values = _handle_ndarray(values)
elif type(values) == numpy.float64:
assigned_values = float(values)
else:
assigned_values = values
return assigned_values
def _handle_ndarray(values):
"""Handle conversion of ndarrays."""
values = numpy.squeeze(values).T
if values.dtype in ("uint8", "uint16", "uint32", "uint64"):
values = _handle_hdf5_strings(values)
if isinstance(values, numpy.ndarray) and \
values.size == 1:
values = values.item()
return values
def _handle_hdf5_strings(values):
if values.ndim in (0, 1):
values = _convert_string_hdf5(values)
elif values.ndim == 2:
values = [_convert_string_hdf5(cur_val)
for cur_val in values]
else:
raise RuntimeError('String arrays with more than 2 dimensions'
'are not supported at the moment.')
return values
def _check_for_scipy_mat_struct(data):
"""
Private function to check all entries of data for occurrences of
scipy.io.matlab.mio5_params.mat_struct and convert them.
Parameters
==========
data: any
data to be checked
Returns
=========
object
checked and converted data
"""
if isinstance(data, dict):
for key in data:
data[key] = _check_for_scipy_mat_struct(data[key])
if isinstance(data, numpy.ndarray):
data = _handle_scipy_ndarray(data)
return data
def _handle_scipy_ndarray(data):
if data.dtype == numpy.dtype('object') and not \
isinstance(data, scipy.io.matlab.mio5.MatlabFunction):
as_list = []
for element in data:
as_list.append(_check_for_scipy_mat_struct(element))
data = as_list
elif isinstance(data.dtype.names, tuple):
data = _todict_from_np_struct(data)
data = _check_for_scipy_mat_struct(data)
if isinstance(data, numpy.ndarray):
data = numpy.array(data)
return data
def _todict_from_np_struct(data):
data_dict = dict()
for cur_field_name in data.dtype.names:
try:
n_items = len(data[cur_field_name])
cur_list = list()
for idx in numpy.arange(n_items):
cur_value = data[cur_field_name].item(idx)
cur_value = _check_for_scipy_mat_struct(cur_value)
cur_list.append(cur_value)
data_dict[cur_field_name] = cur_list
except TypeError:
cur_value = data[cur_field_name].item(0)
cur_value = _check_for_scipy_mat_struct(cur_value)
data_dict[cur_field_name] = cur_value
return data_dict
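# Minimal usage sketch for the scipy (pre-7.3 .mat) path; 'data.mat' is a placeholder:
#
#   raw = scipy.io.loadmat('data.mat')
#   clean = _check_for_scipy_mat_struct(raw)
#
# which converts numpy record arrays and mat_struct entries into nested dicts and lists.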
|
the-stack_0_23034 | # Copyright 2021 MosaicML. All Rights Reserved.
"""Core MixUp classes and functions."""
from __future__ import annotations
import logging
from typing import Optional, Tuple
import numpy as np
import torch
from torch.nn import functional as F
from composer.core.types import Algorithm, Event, Logger, State, Tensor
from composer.models.loss import check_for_index_targets
log = logging.getLogger(__name__)
__all__ = ["MixUp", "mixup_batch"]
def mixup_batch(x: Tensor,
y: Tensor,
n_classes: int,
interpolation_lambda: Optional[float] = None,
alpha: float = 0.2,
indices: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Create new samples using convex combinations of pairs of samples.
This is done by taking a convex combination of x with a randomly
permuted copy of x. The interpolation parameter lambda should be chosen from
a ``Beta(alpha, alpha)`` distribution for some parameter alpha > 0.
Note that the same lambda is used for all examples within the batch.
Both the original and shuffled labels are returned. This is done because
for many loss functions (such as cross entropy) the targets are given as
indices, so interpolation must be handled separately.
Example:
.. testcode::
from composer.algorithms.mixup import mixup_batch
new_inputs, new_targets, perm = mixup_batch(
x=X_example,
y=y_example,
n_classes=1000,
alpha=0.2
)
Args:
x: input tensor of shape (B, d1, d2, ..., dn), B is batch size, d1-dn
are feature dimensions.
y: target tensor of shape (B, f1, f2, ..., fm), B is batch size, f1-fn
are possible target dimensions.
interpolation_lambda: coefficient used to interpolate between the
two examples. If provided, must be in ``[0, 1]``. If ``None``,
value is drawn from a ``Beta(alpha, alpha)`` distribution.
alpha: parameter for the beta distribution over the
``interpolation_lambda``. Only used if ``interpolation_lambda``
is not provided.
n_classes: total number of classes.
indices: Permutation of the batch indices `1..B`. Used
for permuting without randomness.
Returns:
x_mix: batch of inputs after mixup has been applied
y_mix: labels after mixup has been applied
perm: the permutation used
"""
if interpolation_lambda is None:
interpolation_lambda = _gen_interpolation_lambda(alpha)
# Create shuffled versions of x and y in preparation for interpolation
# Use given indices if there are any.
if indices is None:
shuffled_idx = torch.randperm(x.shape[0])
else:
shuffled_idx = indices
x_shuffled = x[shuffled_idx]
y_shuffled = y[shuffled_idx]
# Interpolate between the inputs
x_mix = (1 - interpolation_lambda) * x + interpolation_lambda * x_shuffled
# First check if labels are indices. If so, convert them to onehots.
# This is under the assumption that the loss expects torch.LongTensor, which is true for pytorch cross_entropy
if check_for_index_targets(y):
y_onehot = F.one_hot(y, num_classes=n_classes)
y_shuffled_onehot = F.one_hot(y_shuffled, num_classes=n_classes)
y_mix = ((1. - interpolation_lambda) * y_onehot + interpolation_lambda * y_shuffled_onehot)
else:
y_mix = ((1. - interpolation_lambda) * y + interpolation_lambda * y_shuffled)
return x_mix, y_mix, shuffled_idx
class MixUp(Algorithm):
"""`MixUp <https://arxiv.org/abs/1710.09412>`_ trains the network on convex combinations of pairs of examples and
targets rather than individual examples and targets.
This is done by taking a convex combination of a given batch X with a
randomly permuted copy of X. The mixing coefficient is drawn from a
Beta(``alpha``, ``alpha``) distribution.
Training in this fashion sometimes reduces generalization error.
Example:
.. testcode::
from composer.algorithms import MixUp
from composer.trainer import Trainer
mixup_algorithm = MixUp(num_classes=1000, alpha=0.2)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[mixup_algorithm],
optimizers=[optimizer]
)
Args:
num_classes (int): the number of classes in the task labels.
alpha (float): the psuedocount for the Beta distribution used to sample
interpolation parameters. As ``alpha`` grows, the two samples
in each pair tend to be weighted more equally. As ``alpha``
approaches 0 from above, the combination approaches only using
one element of the pair.
"""
def __init__(self, num_classes: int, alpha: float = 0.2):
self.num_classes = num_classes
self.alpha = alpha
self._interpolation_lambda = 0.0
self._indices = torch.Tensor()
def match(self, event: Event, state: State) -> bool:
"""Runs on Event.INIT and Event.AFTER_DATALOADER.
Args:
event (:class:`Event`): The current event.
state (:class:`State`): The current state.
Returns:
bool: True if this algorithm should run now.
"""
return event == Event.AFTER_DATALOADER
@property
def interpolation_lambda(self) -> float:
return self._interpolation_lambda
@interpolation_lambda.setter
def interpolation_lambda(self, new_int_lamb: float) -> None:
self._interpolation_lambda = new_int_lamb
@property
def indices(self) -> Tensor:
return self._indices
@indices.setter
def indices(self, new_indices: Tensor) -> None:
self._indices = new_indices
def apply(self, event: Event, state: State, logger: Logger) -> None:
"""Applies MixUp augmentation on State input.
Args:
event (Event): the current event
state (State): the current trainer state
logger (Logger): the training logger
"""
input, target = state.batch_pair
assert isinstance(input, Tensor) and isinstance(target, Tensor), \
"Multiple tensors for inputs or targets not supported yet."
self.interpolation_lambda = _gen_interpolation_lambda(self.alpha)
new_input, new_target, self.indices = mixup_batch(
x=input,
y=target,
interpolation_lambda=self.interpolation_lambda,
n_classes=self.num_classes,
)
state.batch = (new_input, new_target)
def _gen_interpolation_lambda(alpha: float) -> float:
"""Generates ``Beta(alpha, alpha)`` distribution."""
# First check if alpha is positive.
assert alpha >= 0
# Draw the interpolation parameter from a beta distribution.
# Check here is needed because beta distribution requires alpha > 0
# but alpha = 0 is fine for mixup.
if alpha == 0:
interpolation_lambda = 0
else:
interpolation_lambda = np.random.beta(alpha, alpha)
# for symmetric beta distribution, can always use 0 <= lambda <= .5;
# this way the "main" label is always the original one, which keeps
# the training accuracy meaningful
return max(interpolation_lambda, 1. - interpolation_lambda)
|
the-stack_0_23036 | """ 1
a = int(input('Primeiro valor: '))
b = int(input('Segundo valor: '))
c = int(input('Terceiro valor: '))
if a > b and a > c:
print('Maior valor é {}'.format(a))
elif b > a and b > c:
print('Maior valor é {}'.format(b))
else:
print('Maior valor é {}'.format(c))
print('Finish')
"""
""" 2
num = int(input('Digite um valor para saber se ele é PAR ou IMPAR: '))
resto = num % 2
if resto == 0:
print('Este numero é PAR')
else:
print('Este número é IMPAR')
"""
nota1 = int(input('Digite a primeira nota: '))
if nota1 > 10:
nota1 = int(input('Por favor, digite uma nota abaixo de 10: '))
nota2 = int(input('Digite a segunda nota: '))
if nota2 > 10:
    nota2 = int(input('Você digitou uma nota errada. Informe a nota menor de 10. '))
media = (nota1 + nota2) / 2
print(f'A média entre as notas {nota1} e {nota2} é {media}')
|
the-stack_0_23038 | from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import (
BooleanField, CharField, ChoiceField, DateField, DateTimeField,
DecimalField, EmailField, FileField, FloatField, Form,
GenericIPAddressField, IntegerField, ModelChoiceField,
ModelMultipleChoiceField, MultipleChoiceField, RegexField,
SplitDateTimeField, TimeField, URLField, utils,
)
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase
from django.utils.safestring import mark_safe
from ..models import ChoiceModel
class AssertFormErrorsMixin:
def assertFormErrors(self, expected, the_callable, *args, **kwargs):
with self.assertRaises(ValidationError) as cm:
the_callable(*args, **kwargs)
self.assertEqual(cm.exception.messages, expected)
class FormsErrorMessagesTestCase(SimpleTestCase, AssertFormErrorsMixin):
def test_charfield(self):
e = {
'required': 'REQUIRED',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = CharField(min_length=5, max_length=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')
def test_integerfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
}
f = IntegerField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
def test_floatfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
}
f = FloatField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
def test_decimalfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
'max_digits': 'MAX DIGITS IS %(max)s',
'max_decimal_places': 'MAX DP IS %(max)s',
'max_whole_digits': 'MAX DIGITS BEFORE DP IS %(max)s',
}
f = DecimalField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
f2 = DecimalField(max_digits=4, decimal_places=2, error_messages=e)
self.assertFormErrors(['MAX DIGITS IS 4'], f2.clean, '123.45')
self.assertFormErrors(['MAX DP IS 2'], f2.clean, '1.234')
self.assertFormErrors(['MAX DIGITS BEFORE DP IS 2'], f2.clean, '123.4')
def test_datefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = DateField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
def test_timefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = TimeField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
def test_datetimefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = DateTimeField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
def test_regexfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = RegexField(r'^[0-9]+$', min_length=5, max_length=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abcde')
self.assertFormErrors(['LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')
def test_emailfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = EmailField(min_length=8, max_length=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abcdefgh')
self.assertFormErrors(['LENGTH 7, MIN LENGTH 8'], f.clean, '[email protected]')
self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, '[email protected]')
def test_filefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'missing': 'MISSING',
'empty': 'EMPTY FILE',
}
f = FileField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['EMPTY FILE'], f.clean, SimpleUploadedFile('name', None))
self.assertFormErrors(['EMPTY FILE'], f.clean, SimpleUploadedFile('name', ''))
def test_urlfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'max_length': '"%(value)s" has more than %(limit_value)d characters.',
}
f = URLField(error_messages=e, max_length=17)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc.c')
self.assertFormErrors(
['"http://djangoproject.com" has more than 17 characters.'],
f.clean,
'djangoproject.com'
)
def test_booleanfield(self):
e = {
'required': 'REQUIRED',
}
f = BooleanField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
def test_choicefield(self):
e = {
'required': 'REQUIRED',
'invalid_choice': '%(value)s IS INVALID CHOICE',
}
f = ChoiceField(choices=[('a', 'aye')], error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['b IS INVALID CHOICE'], f.clean, 'b')
def test_multiplechoicefield(self):
e = {
'required': 'REQUIRED',
'invalid_choice': '%(value)s IS INVALID CHOICE',
'invalid_list': 'NOT A LIST',
}
f = MultipleChoiceField(choices=[('a', 'aye')], error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['NOT A LIST'], f.clean, 'b')
self.assertFormErrors(['b IS INVALID CHOICE'], f.clean, ['b'])
def test_splitdatetimefield(self):
e = {
'required': 'REQUIRED',
'invalid_date': 'INVALID DATE',
'invalid_time': 'INVALID TIME',
}
f = SplitDateTimeField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID DATE', 'INVALID TIME'], f.clean, ['a', 'b'])
def test_generic_ipaddressfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID IP ADDRESS',
}
f = GenericIPAddressField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID IP ADDRESS'], f.clean, '127.0.0')
def test_subclassing_errorlist(self):
class TestForm(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
def clean(self):
raise ValidationError("I like to be awkward.")
class CustomErrorList(utils.ErrorList):
def __str__(self):
return self.as_divs()
def as_divs(self):
if not self:
return ''
return mark_safe('<div class="error">%s</div>' % ''.join('<p>%s</p>' % e for e in self))
# This form should print errors the default way.
form1 = TestForm({'first_name': 'John'})
self.assertHTMLEqual(
str(form1['last_name'].errors),
'<ul class="errorlist"><li>This field is required.</li></ul>'
)
self.assertHTMLEqual(
str(form1.errors['__all__']),
'<ul class="errorlist nonfield"><li>I like to be awkward.</li></ul>'
)
# This one should wrap error groups in the customized way.
form2 = TestForm({'first_name': 'John'}, error_class=CustomErrorList)
self.assertHTMLEqual(str(form2['last_name'].errors), '<div class="error"><p>This field is required.</p></div>')
self.assertHTMLEqual(str(form2.errors['__all__']), '<div class="error"><p>I like to be awkward.</p></div>')
def test_error_messages_escaping(self):
# The forms layer doesn't escape input values directly because error
# messages might be presented in non-HTML contexts. Instead, the
# message is marked for escaping by the template engine, so a template
# is needed to trigger the escaping.
t = Template('{{ form.errors }}')
class SomeForm(Form):
field = ChoiceField(choices=[('one', 'One')])
f = SomeForm({'field': '<script>'})
self.assertHTMLEqual(
t.render(Context({'form': f})),
'<ul class="errorlist"><li>field<ul class="errorlist">'
'<li>Select a valid choice. <script> is not one of the '
'available choices.</li></ul></li></ul>'
)
class SomeForm(Form):
field = MultipleChoiceField(choices=[('one', 'One')])
f = SomeForm({'field': ['<script>']})
self.assertHTMLEqual(
t.render(Context({'form': f})),
'<ul class="errorlist"><li>field<ul class="errorlist">'
'<li>Select a valid choice. <script> is not one of the '
'available choices.</li></ul></li></ul>'
)
class SomeForm(Form):
field = ModelMultipleChoiceField(ChoiceModel.objects.all())
f = SomeForm({'field': ['<script>']})
self.assertHTMLEqual(
t.render(Context({'form': f})),
'<ul class="errorlist"><li>field<ul class="errorlist">'
'<li>“<script>” is not a valid value.</li>'
'</ul></li></ul>'
)
class ModelChoiceFieldErrorMessagesTestCase(TestCase, AssertFormErrorsMixin):
def test_modelchoicefield(self):
# Create choices for the model choice field tests below.
ChoiceModel.objects.create(pk=1, name='a')
ChoiceModel.objects.create(pk=2, name='b')
ChoiceModel.objects.create(pk=3, name='c')
# ModelChoiceField
e = {
'required': 'REQUIRED',
'invalid_choice': 'INVALID CHOICE',
}
f = ModelChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID CHOICE'], f.clean, '4')
# ModelMultipleChoiceField
e = {
'required': 'REQUIRED',
'invalid_choice': '%(value)s IS INVALID CHOICE',
'invalid_list': 'NOT A LIST OF VALUES',
}
f = ModelMultipleChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['NOT A LIST OF VALUES'], f.clean, '3')
self.assertFormErrors(['4 IS INVALID CHOICE'], f.clean, ['4'])
|
the-stack_0_23040 | #!/usr/bin/env python
import os
import re
def iter_files(root_dir):
outputfile = []
for root,dirs,files in os.walk(root_dir):
for file in files:
file_name = os.path.join(root,file)
outfile_extension = ".data"
path,tmpfilename = os.path.split(file_name)
filename,extension = os.path.splitext(tmpfilename)
if extension == outfile_extension:
outputfile.append(file_name)
return outputfile
def confirm_generate(filepath):
generate = False
calc_p_list = ["1e4", "1e5", "2e2", "2e4", "4e4", "5e3", "6e4", "8e4"]
for i in calc_p_list:
avp_path = filepath + os.sep + "Adsorption/" + i + os.sep + "Output/System_0/"
result_path = filepath + os.sep + "Adsorption/" + i + os.sep + "Output"
try:
outfile = avp_path + "".join(os.listdir(avp_path))
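            # Note: joining os.listdir() output assumes exactly one file in System_0;
            # with several files the concatenated path would not exist on disk.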
except FileNotFoundError:
generate = False
else:
generate = True
return generate
def check_exists_files(filepath):
calc = False
with open(filepath,"r") as f:
data = f.read()
#start_check = re.compile(r'Starting simulation')
start_check = 'Starting simulation'
#end_check = re.compile(r'Simulation finished,')
end_check = 'Simulation finished,'
#if ( start_check.findall(data) and end_check.findall(data) ) is not None:
if end_check in data:
calc = True
else:
calc = False
return calc
if __name__ == '__main__':
import sys
args = sys.argv
mofdir = args[1]
mof_result = os.listdir(mofdir)
nonf,noncalc = [],[]
for mof in mof_result:
mofpath = mofdir + os.sep + mof
mofoutfile = iter_files(mofpath)
generate = confirm_generate(mofpath)
if generate:
for outfile in mofoutfile:
calc = check_exists_files(outfile)
if calc:
continue
else:
noncalc.append(mof)
print("Calculation Not Finished ", mof)
with open("./nonf","a+") as fnfile:
fnfile.writelines(mof + "\n")
else:
nonf.append(mof)
print("No Outfile found ", mof)
with open("./noncalc","a+") as fncfile:
fncfile.writelines(mof + "\n")
print("Calc Not finished ",len(noncalc))
print("Not calc ",len(nonf))
|
the-stack_0_23043 | # -*- coding:utf-8 -*-
# twactor.cache - Cache framework for twactor.
import operator
import time
try:
import threading
except:
import dummy_threading as threading
from twactor import connection, function_sync, propertyfix
class CachedMetaclass(type):
"""Metaclass for subclasses of ``CachedObject``."""
def __new__(cls, name, bases, attrs):
# Fix _update_cache
update_cache = attrs.get('_update_cache', lambda *args, **kwargs: None)
def fixed_update_cache(self, *args, **kwargs):
val = update_cache(self, *args, **kwargs)
if hasattr(bases[-1], '_update_cache'):
bases[-1]._update_cache(self, *args, **kwargs)
return val
attrs['_update_cache'] = function_sync(update_cache, fixed_update_cache)
# Fix __init__
init = attrs.get('__init__', lambda *args, **kwargs: None)
def fixed_init(self, *args, **kwargs):
if hasattr(bases[-1], '__init__') and bases[-1] is not object:
bases[-1].__init__(self, *args, **kwargs)
init(self, *args, **kwargs)
attrs['__init__'] = function_sync(init, fixed_init)
return type.__new__(cls, name, bases, attrs)
class CachedObject(object):
"""Superclass for cached objects."""
__metaclass__ = CachedMetaclass
_connection_broker = connection.DEFAULT_CB
def __init__(self, *args, **kwargs):
self._cache = kwargs.pop('cache', {})
self._updated = kwargs.pop('_updated', {'__count': 0, '__time': 0})
def _update_cache(self, *args, **kwargs):
self._updated['__count'] = self._updated.get('__count', 0) + 1
self._updated['__time'] = time.time()
def _with_connection_broker(self, cb):
copy = self._copy()
copy._connection_broker = cb
return copy
def _copy(self):
return type(self)(self._cache.get('id', None), cache=self._cache.copy(),
updated=self._updated.copy())
class CachedMirror(object):
"""Superclass for objects which rely on another object's cache."""
def __init__(self, mirrored_object):
setattr(self, self._mirrored_attribute, mirrored_object)
self._mirrored = mirrored_object
def mirror_attribute(attribute):
"""Shortcut for mirroring an attribute on another object."""
def attr_methods():
def fget(self):
return reduce(getattr, attribute.split('.'), self)
def fset(self, value):
                setattr(reduce(getattr, attribute.split('.')[:-1], self),
                        attribute.split('.')[-1], value)
def fdel(self):
                delattr(reduce(getattr, attribute.split('.')[:-1], self),
                        attribute.split('.')[-1])
return {'fget': fget, 'fset': fset, 'fdel': fdel}
return property(**attr_methods())
_cache = mirror_attribute('_mirrored._cache')
_update_cache = mirror_attribute('_mirrored._update_cache')
_updated = mirror_attribute('_mirrored._updated')
del mirror_attribute
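    # Sketch of the mirroring behaviour (hypothetical subclass, not part of this module):
    # a subclass that sets _mirrored_attribute = 'user' and is constructed as
    # UserStatus(user) forwards every read/write of its _cache, _update_cache and
    # _updated attributes to the wrapped 'user' object.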
class CachedListMetaclass(type):
def __new__(cls, name, bases, attrs):
# Fix __init__
init = attrs.get('__init__', lambda *args, **kwargs: None)
def fixed_init(self, *args, **kwargs):
for base in reversed(bases):
if base is object:
break
base.__init__(self, *args, **kwargs)
init(self, *args, **kwargs)
attrs['__init__'] = function_sync(init, fixed_init)
# Fix _update_cache
update_cache = attrs.get('_update_cache', None)
if not update_cache:
for base in reversed(bases):
if hasattr(base, '_update_cache'):
update_cache = base._update_cache
break
if update_cache:
def fixed_update_cache(self, *args, **kwargs):
data = update_cache(self, *args, **kwargs)
for base in reversed(bases):
if hasattr(base, '_insert_into_cache'):
base._insert_into_cache(self, data)
break
attrs['_update_cache'] = function_sync(update_cache,
fixed_update_cache)
return type.__new__(cls, name, bases, attrs)
class CachedList(object):
__metaclass__ = CachedListMetaclass
_connection_broker = connection.DEFAULT_CB
_sort_attrs = ('created', 'id')
_reverse_class = None
OBJ_CLASS = lambda cache: cache
UPDATE_INTERVAL = 60 * 3 # Three-minute update interval by default.
def __init__(self, *args, **kwargs):
self._cache = kwargs.pop('cache', [])
self._object_cache = kwargs.pop('object_cache', {})
self._updated = kwargs.pop('updated', {'__count': 0, '__time': 0})
self.update_monitor = CachedListUpdateMonitorThread(self)
def __getitem__(self, pos_or_slice):
if isinstance(pos_or_slice, (int, long)):
return self._cache_to_obj(
self._cache[self._resolve_cache_index(pos_or_slice)])
start, stop, step = [getattr(pos_or_slice, attr)
for attr in ('start', 'stop', 'step')]
start = self._resolve_cache_index(start, start=True)
stop = self._resolve_cache_index(stop, start=False)
new_cache = map(self._cache.__getitem__, range(start, stop, step or 1))
new_updated = {'__count': self._updated['__count'],
'__time': self._updated['__time']}
for item in new_cache:
count_key = '%s__count' % (item.get('id', repr(item)))
time_key = '%s__time' % (item.get('id', repr(item)))
new_updated[count_key] = self._updated.get(count_key, None)
new_updated[time_key] = self._updated.get(time_key, None)
return type(self)(
cache=new_cache, updated=new_updated)._with_connection_broker(
self._connection_broker)
def __delitem__(self, pos_or_slice):
raise NotImplementedError
def __iter__(self):
for item in self._cache:
yield self._cache_to_obj(item)
def __reversed__(self):
raise NotImplementedError
def __contains__(self, obj):
if not isinstance(obj, self.OBJ_CLASS):
return False
return obj.id in (obj2.id for obj2 in self._objects)
def __len__(self):
raise NotImplementedError
def _cache_to_obj(self, cache_item):
if 'id' in cache_item and cache_item['id'] in self._object_cache:
obj = self._object_cache[cache_item['id']]
elif 'id' in cache_item and cache_item['id'] not in self._object_cache:
obj = self.OBJ_CLASS(cache_item['id'], cache=cache_item)
self._object_cache[cache_item['id']] = obj
else:
obj = self.OBJ_CLASS(None, cache=cache_item)
self._object_cache[repr(obj)] = obj
if hasattr(obj, '_with_connection_broker'):
return obj._with_connection_broker(self._connection_broker)
return obj
def _clean_object_cache(self):
obj_cache_ids = self._object_cache.keys()
data_cache_ids = map(operator.attrgetter('id'), self._objects)
for obj_id in obj_cache_ids:
if obj_id not in data_cache_ids:
                del self._object_cache[obj_id]
def _copy(self):
copy = type(self)(cache=self._cache[:],
updated=self._updated.copy())
copy._connection_broker = self._connection_broker
return copy
@property
def _objects(self):
return map(self._cache_to_obj, self._cache)
def _resolve_cache_index(self, index, start=True):
if index < 0:
old_length, length = None, len(self._cache)
while (old_length != length):
old_length = length
self._update_cache()
length = len(self._cache)
if abs(index) <= length:
return length + index
raise IndexError('list index out of range')
elif (not index) and (index != 0):
return 0 if start else (len(self._cache) - 1)
elif index < len(self._cache):
return index
old_length, length = None, len(self._cache)
while (index >= length) and (old_length != length):
old_length = length
self._update_cache()
length = len(self._cache)
if old_length == length:
raise IndexError('list index out of range')
return index
def _sort_key(self, item):
return operator.attrgetter(*self._sort_attrs)(item)
def _with_connection_broker(self, connection_broker):
copy = self._copy()
copy._connection_broker = connection_broker
return copy
class CachedListUpdateMonitorThread(threading.Thread):
def __init__(self, object, *args, **kwargs):
super(CachedListUpdateMonitorThread, self).__init__(
*args, **kwargs)
self.object = object
self.kill_flag = False
def run(self):
while not self.kill_flag:
self.object._update_cache()
time.sleep(self.object.UPDATE_INTERVAL)
self.kill_flag = False
def stop(self):
self.kill_flag = True
class ForwardCachedList(CachedList):
def _insert_into_cache(self, fetched_data):
if not fetched_data:
self._updated['__count'] = self._updated.get('__count', 0) + 1
self._updated['__time'] = time.time()
return
fetched_objects = zip(fetched_data,
map(self._cache_to_obj, fetched_data))
sorted_objects = sorted(fetched_objects,
key=lambda pair: self._sort_key(pair[1]))
timestamp = time.time()
if not self._cache:
for data, object in sorted_objects:
count_key = '%s__count' % (getattr(object, 'id', repr(object)),)
time_key = '%s__time' % (getattr(object, 'id', repr(object)),)
self._updated[count_key] = self._updated.get(count_key, 0) + 1
self._updated[time_key] = timestamp
self._cache.extend(pair[0] for pair in sorted_objects)
else:
latest_key = self._sort_key(self._cache_to_obj(self._cache[-1]))
add_to_cache = self._sort_key(sorted_objects[0][1]) > latest_key
for data, object in sorted_objects:
count_key = '%s__count' % (getattr(object, 'id', repr(object)),)
time_key = '%s__time' % (getattr(object, 'id', repr(object)),)
self._updated[count_key] = self._updated.get(count_key, 0) + 1
self._updated[time_key] = timestamp
if add_to_cache or (self._sort_key(object) > latest_key):
self._cache.append(data)
if self._sort_key(object) >= latest_key:
add_to_cache = True
self._updated['__count'] = self._updated.get('__count', 0) + 1
self._updated['__time'] = time.time()
self._clean_object_cache()
class ReverseCachedList(CachedList):
def _insert_into_cache(self, fetched_data):
if not fetched_data:
self._updated['__count'] = self._updated.get('__count', 0) + 1
self._updated['__time'] = time.time()
return
fetched_objects = zip(fetched_data,
map(self._cache_to_obj, fetched_data))
sorted_objects = sorted(fetched_objects, reverse=True,
key=lambda pair: self._sort_key(pair[1]))
timestamp = time.time()
if not self._cache:
for data, object in sorted_objects:
count_key = '%s__count' % (getattr(object, 'id', repr(object)),)
time_key = '%s__time' % (getattr(object, 'id', repr(object)),)
self._updated[count_key] = self._updated.get(count_key, 0) + 1
self._updated[time_key] = timestamp
self._cache.extend(pair[0] for pair in sorted_objects)
else:
latest_key = self._sort_key(self._cache_to_obj(self._cache[-1]))
add_to_cache = self._sort_key(sorted_objects[0][1]) < latest_key
for data, object in sorted_objects:
count_key = '%s__count' % (getattr(object, 'id', repr(object)),)
time_key = '%s__time' % (getattr(object, 'id', repr(object)),)
self._updated[count_key] = self._updated.get(count_key, 0) + 1
self._updated[time_key] = timestamp
if add_to_cache or (self._sort_key(object) < latest_key):
self._cache.append(data)
if self._sort_key(object) <= latest_key:
add_to_cache = True
self._updated['__count'] = self._updated.get('__count', 0) + 1
self._updated['__time'] = time.time()
self._clean_object_cache()
def update_once(method):
"""
Make sure the cache has been updated at least once before calling a method.
This should be used as a decorator, and it wraps a method on a cached object
to make sure that the object's cache has been updated at least once before
the method is called. This allows you to implement lazy evaluation, which
is especially useful when fetching data over the network.
"""
def wrapper(self, *args, **kwargs):
if not self._updated.get('__count', 0):
self._update_cache()
self._updated['__count'] = self._updated.get('__count', 0) + 1
return method(self, *args, **kwargs)
return function_sync(method, wrapper)
def update_on_key(key, always=False):
"""
Make sure the cache has a particular key present before calling a method.
This decorator accepts a key which it will look up in the cache before
calling the wrapped method. If the cache doesn't have the key, it will
perform an update before calling the method. Note that it does not keep
updating the cache until the key is present - this may result in a
non-terminating loop.
You may also pass the decorator an additional keyword, ``always``, which
will tell it whether or not to keep checking for the key every time the
method is called. By default, this is ``False``, which means that the key
will be checked only the first time the method is called. If set to true,
the key will be checked *every* time the method is called.
"""
def wrapper_deco(method):
def wrapper(self, *args, **kwargs):
if always:
if key not in self._cache:
self._update_cache()
return method(self, *args, **kwargs)
elif (key not in self._cache and
(not self._updated.get('key__' + key, False))):
self._update_cache()
self._updated['key__' + key] = True
return method(self, *args, **kwargs)
return function_sync(method, wrapper)
return wrapper_deco
def update_on_time(length):
"""
Update the cache if an amount of time has passed before calling a method.
This decorator accepts a length of time in seconds, and will wrap a method
with a cache-checker. Every time the method is called, the wrapper will
check to see that a certain amount of time has passed. If the time that has
passed is greater than or equal to the specified length, the cache is
updated. Finally, the method is called.
"""
def wrapper_deco(method):
def wrapper(self, *args, **kwargs):
if (time.time() - self._updated.get('__time', 0)) >= length:
self._update_cache()
self._updated['__time'] = time.time()
return method(self, *args, **kwargs)
return function_sync(method, wrapper)
return wrapper_deco
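# Typical use of the update_* decorators (sketch; 'followers_count' is a hypothetical
# cache key, not defined in this module):
#
#   class User(CachedObject):
#       @property
#       @update_on_time(300)   # refresh the cache at most every 5 minutes
#       def followers_count(self):
#           return self._cache['followers_count']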
def update_on_count(num):
"""
Update the cache if a method has been called a certain number of times.
This decorator accepts a number, and keeps track of how many times the
method it is wrapping has been called. When the number of calls reaches this
number, the cache is updated.
"""
def wrapper_deco(method):
def wrapper(self, *args, **kwargs):
if self._updated.get('count__' + method.__name__, num) == num:
self._update_cache()
self._updated['count__' + method.__name__] = 1
else:
self._updated['count__' + method.__name__] = self._updated.get(
'count__' + method.__name__, 0) + 1
return method(self, *args, **kwargs)
return function_sync(method, wrapper)
return wrapper_deco
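# A minimal usage sketch (hypothetical names): refresh the cache on the first
# call and then again every 10th call to the wrapped method.
#
#     class Feed(CachedObject):
#         @update_on_count(10)
#         def entries(self):
#             return self._cache.get('entries', [])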
def simple_map(key):
"""
Shortcut for a typical cacheing use-case.
This is a shortcut for the following pattern::
class SomeCachedObject(CachedObject):
@property
@update_on_key(key_name)
def attrname(self):
return self._cache[key_name]
Instead you can do this::
class SomeCachedObject(CachedObject):
attrname = simple_map(key_name)
"""
return property(update_on_key(key)(lambda self: self._cache[key])) |
the-stack_0_23047 | """
This script defines the order models using the ORM approach
Hasan Özdemir 02-09-2022
"""
# path : root/2_order_service/order_application/models.py
# from __init__ import db
from . import db
# imports for relationships management
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import ForeignKey, Enum
# column type related libraries
from datetime import datetime
import enum
# base class constructing
Base = declarative_base()
class OrderStatus(enum.Enum):
"""
FOR THE FULL LIST OF ORDER STATUSES, SEE THE LINK BELOW; MOST ARE OUT OF SCOPE FOR NOW
https://support.bigcommerce.com/s/article/Order-Statuses
"""
OK = "ACCEPTED" # selection and payment successfully done
ONGOING = "AWAITING" # order is preparing
R_FAIL = "R-CANCELLED" # restaurant is cancelled
C_FAIL = "C-CANCELLED" # customer is cancelled
FINISH = "COMPLETED" # order completed
class OrdersOrm(db.Model, Base):
# table name initialization
__tablename__ = "orders"
# primary key initialization
o_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
# relationship initialization between OrdersOrm & OrderDetailsOrm (One-to-Many)
o_parent = relationship(
"OrderDetailsOrm", uselist=False, back_populates="o_d_children"
)
# fields initialization continues
c_id = db.Column(db.Integer, nullable=False)
o_address = db.Column(db.String(100), nullable=False)
o_city = db.Column(db.String(25), nullable=False)
o_postal_code = db.Column(db.Integer, nullable=True)
o_phone_number = db.Column(db.String(15), nullable=False)
o_status = db.Column(Enum(OrderStatus), default=OrderStatus.OK)
o_date = db.Column(db.Date, nullable=False, default=datetime.utcnow)
class OrderDetailsOrm(db.Model, Base):
# table name initialization
__tablename__ = "order_details"
# primary key initialization
o_d_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
# relationship initialization between OrdersOrm & OrderDetailsOrm (One-to-Many)
o_d_children = relationship("OrdersOrm", uselist=False, back_populates="o_parent")
# foreign key initialization between OrdersOrm & OrderDetailsOrm (One-to-Many)
o_id = db.Column(db.Integer, ForeignKey(column="orders.o_id"), nullable=False)
# fields initialization continues
p_id = db.Column(db.Integer, nullable=False)
order_quantity = db.Column(db.Integer, nullable=False)
order_price = db.Column(db.Integer, nullable=False)
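# A minimal usage sketch (hypothetical session and placeholder values) showing
# how an order and one detail row might be created with these models:
#
#     order = OrdersOrm(c_id=1, o_address="Some Street 1", o_city="Helsinki",
#                       o_postal_code=100, o_phone_number="+358000000000")
#     db.session.add(order)
#     db.session.flush()  # populates order.o_id for the foreign key below
#     detail = OrderDetailsOrm(o_id=order.o_id, p_id=42,
#                              order_quantity=2, order_price=20)
#     db.session.add(detail)
#     db.session.commit()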
|
the-stack_0_23053 | # Copyright 2017 Joachim van der Herten, Nicolas Knudde
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .acquisition import Acquisition
from ..design import RandomDesign
from gpflow import settings
from gpflow.param import DataHolder
from gpflow.model import Model
import numpy as np
from scipy.stats import norm
from scipy.optimize import bisect
import tensorflow as tf
float_type = settings.dtypes.float_type
stability = settings.numerics.jitter_level
np_float_type = np.float32 if float_type is tf.float32 else np.float64
class MinValueEntropySearch(Acquisition):
"""
Max-value entropy search acquisition function for single-objective global optimization.
Introduced by (Wang et al., 2017).
Key reference:
::
@InProceedings{Wang:2017,
title = {Max-value Entropy Search for Efficient {B}ayesian Optimization},
author = {Zi Wang and Stefanie Jegelka},
booktitle = {Proceedings of the 34th International Conference on Machine Learning},
pages = {3627--3635},
year = {2017},
editor = {Doina Precup and Yee Whye Teh},
volume = {70},
series = {Proceedings of Machine Learning Research},
address = {International Convention Centre, Sydney, Australia},
month = {06--11 Aug},
publisher = {PMLR},
}
"""
def __init__(self, model, domain, gridsize=10000, num_samples=10):
assert isinstance(model, Model)
super(MinValueEntropySearch, self).__init__(model)
assert self.data[1].shape[1] == 1
self.gridsize = gridsize
self.num_samples = num_samples
self.samples = DataHolder(np.zeros(num_samples, dtype=np_float_type))
self._domain = domain
def _setup(self):
super(MinValueEntropySearch, self)._setup()
# Apply Gumbel sampling
m = self.models[0]
valid = self.feasible_data_index()
# Work with feasible data
X = self.data[0][valid, :]
N = np.shape(X)[0]
Xrand = RandomDesign(self.gridsize, self._domain).generate()
fmean, fvar = m.predict_f(np.vstack((X, Xrand)))
idx = np.argmin(fmean[:N])
right = fmean[idx].flatten()# + 2*np.sqrt(fvar[idx]).flatten()
left = right
probf = lambda x: np.exp(np.sum(norm.logcdf(-(x - fmean) / np.sqrt(fvar)), axis=0))
i = 0
while probf(left) < 0.75:
left = 2. ** i * np.min(fmean - 5. * np.sqrt(fvar)) + (1. - 2. ** i) * right
i += 1
# Binary search for 3 percentiles
q1, med, q2 = map(lambda val: bisect(lambda x: probf(x) - val, left, right, maxiter=10000, xtol=0.01),
[0.25, 0.5, 0.75])
beta = (q1 - q2) / (np.log(np.log(4. / 3.)) - np.log(np.log(4.)))
alpha = med + beta * np.log(np.log(2.))
# obtain samples from y*
mins = -np.log(-np.log(np.random.rand(self.num_samples).astype(np_float_type))) * beta + alpha
self.samples.set_data(mins)
def build_acquisition(self, Xcand):
fmean, fvar = self.models[0].build_predict(Xcand)
norm = tf.contrib.distributions.Normal(tf.constant(0.0, dtype=float_type), tf.constant(1.0, dtype=float_type))
gamma = (fmean - tf.expand_dims(self.samples, axis=0)) / tf.sqrt(fvar)
return tf.reduce_sum(gamma * norm.prob(gamma) / (2. * norm.cdf(gamma)) - norm.log_cdf(gamma),
axis=1, keep_dims=True) / self.num_samples
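# A rough usage sketch (module paths assumed from GPflowOpt's public API;
# ``model`` and ``objective_fn`` are placeholders for a GPflow model and an
# objective function, not part of this module):
#
#     from gpflowopt.domain import ContinuousParameter
#     from gpflowopt.optim import SciPyOptimizer
#     from gpflowopt import BayesianOptimizer
#
#     domain = ContinuousParameter('x0', 0., 1.) + ContinuousParameter('x1', 0., 1.)
#     acq = MinValueEntropySearch(model, domain)
#     optimizer = BayesianOptimizer(domain, acq, optimizer=SciPyOptimizer(domain))
#     result = optimizer.optimize(objective_fn, n_iter=10)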
|
the-stack_0_23054 | from pygame import *
from random import *
from time import time as timer
clock = time.Clock()
FPS = 60
# window
win_width = 700
win_height = 600
window = display.set_mode((win_width, win_height))
display.set_caption('Shooter game')
background = transform.scale(image.load('backgr.png'),(win_width, win_height))
font.init()
font2 = font.Font(None, 36)
font1 = font.Font(None, 80)
class GameSprite(sprite.Sprite):
def __init__(self, player_image, player_x, player_y, size_x, size_y, player_speed):
sprite.Sprite.__init__(self)
self.image = transform.scale(image.load(player_image), (size_x, size_y))
self.speed = player_speed
self.rect = self.image.get_rect()
self.rect.x = player_x
self.rect.y = player_y
def reset(self):
window.blit(self.image, (self.rect.x, self.rect.y))
class Player(GameSprite):
def update_l(self):
keys = key.get_pressed()
if keys[K_w] and self.rect.y > 5:
self.rect.y -= self.speed
if keys[K_s] and self.rect.y < win_height - 100:
self.rect.y += self.speed
def update_r(self):
keys = key.get_pressed()
if keys[K_UP] and self.rect.y > 5:
self.rect.y -= self.speed
if keys[K_DOWN] and self.rect.y < win_height - 100:
self.rect.y += self.speed
racket1 = Player("racket.png", 30, 250, 25, 90, 30)
racket2 = Player("racket.png", 650, 250, 25, 90, 30)
game = True
while game:
for e in event.get():
if e.type == QUIT:
game = False
window.blit(background, (0,0))
racket1.update_l()
racket2.update_r()
racket1.reset()
racket2.reset()
display.update()
clock.tick(FPS) |
the-stack_0_23057 | #!/usr/bin/env python3
#
##################################################################################
#
# Title : dsglobus.py
# Author : Thomas Cram, [email protected]
# Date : 02/17/2017
# Purpose : Python module to create and manage shared endpoints to facilitate
# Globus data transfers from the RDA.
#
# Work File : $DSSHOME/lib/python/dsglobus.py*
# Test File : $DSSHOME/lib/python/dsglobus_test.py*
# Github : https://github.com/NCAR/rda-globus/python/dsglobus.py
#
##################################################################################
import os, sys
import subprocess
try:
assert sys.version_info >= (3,0)
except AssertionError:
print ("Error: Python version 3.0+ required.")
raise
path1 = "/glade/u/home/rdadata/lib/python"
path2 = "/glade/u/home/tcram/lib/python"
if (path1 not in sys.path):
sys.path.append(path1)
if (path2 not in sys.path):
sys.path.append(path2)
import argparse
import logging
import logging.handlers
import json
import select
import textwrap
import six
from datetime import datetime
from time import strftime
from phpserialize import unserialize
try:
from urllib.parse import urlencode
except:
from urllib import urlencode
from MyLOG import show_usage
from PyDBI import myget, myupdt, myadd, mymget
from MyGlobus import MyGlobus, MyEndpoints
from globus_sdk import (TransferClient, TransferAPIError,
TransferData, DeleteData, RefreshTokenAuthorizer, AuthClient,
GlobusError, GlobusAPIError, NetworkError)
from globus_utils import load_app_client, load_rda_native_client
#=========================================================================================
def main(json_input = None):
if json_input:
result = do_action(json_input)
else:
opts = parse_input()
result = do_action(opts)
return result
#=========================================================================================
def get_transfer_client(client_id):
""" Instantiate a Globus Transfer client """
if client_id is None:
my_logger.error("[get_transfer_client] Missing client_id from input.")
sys.exit(1)
client = load_client(client_id)
tokens = get_tokens(client_id)
transfer_refresh_token = tokens['transfer_rt']
tc_authorizer = RefreshTokenAuthorizer(transfer_refresh_token, client)
transfer_client = TransferClient(authorizer=tc_authorizer)
return transfer_client
#=========================================================================================
def get_auth_client(client_id):
""" Instantiate a Globus Auth client """
if client_id is None:
msg = "[get_transfer_client] Missing client_id."
my_logger.error(msg)
sys.exit(1)
client = load_client(client_id)
tokens = get_tokens(client_id)
auth_refresh_token = tokens['auth_rt']
ac_authorizer = RefreshTokenAuthorizer(auth_refresh_token, client)
auth_client = AuthClient(authorizer=ac_authorizer)
return auth_client
#=========================================================================================
def get_client_id(action):
""" Get valid client ID based on command-line or JSON input action """
client_map = {
"ap": "client_id",
"rp": "client_id",
"st": "client_id",
"ls": "rda_quasar_client_id",
"transfer": "rda_quasar_client_id",
"tb": "rda_quasar_client_id",
"dr": "rda_quasar_client_id",
"tb-quasar" : "rda_quasar_client_id",
"dr-quasar" : "rda_quasar_client_id",
"gt": "rda_quasar_client_id",
"tl": "rda_quasar_client_id",
"delete": "rda_quasar_client_id",
"mkdir": "rda_quasar_client_id",
"rename": "rda_quasar_client_id",
"cancel": "rda_quasar_client_id"
}
if action is None:
msg = "[get_client_id] Missing action in input argument."
my_logger.error(msg)
sys.exit(1)
if action in client_map:
client_id = MyGlobus[client_map[action]]
else:
msg = "[get_client_id] Unknown action: {}. Cannot map to valid client ID.".format(action)
my_logger.error(msg)
sys.exit(1)
return client_id
#=========================================================================================
def load_client(client_id):
""" Load the correct Globus client based on client ID """
if client_id is None:
my_logger.error("[load_client] Missing client_id from input.")
sys.exit(1)
if client_id == MyGlobus['client_id']:
client = load_app_client()
else:
client = load_rda_native_client(client_id)
return client
#=========================================================================================
def get_tokens(client_id):
if client_id is None:
my_logger.error("[get_tokens] Missing client_id from input.")
sys.exit(1)
if client_id == MyGlobus['rda_quasar_client_id']:
transfer_rt = MyGlobus['transfer_rt_quasar']
auth_rt = MyGlobus['auth_rt_quasar']
elif client_id == MyGlobus['client_id']:
transfer_rt = MyGlobus['transfer_refresh_token']
auth_rt = MyGlobus['auth_refresh_token']
else:
my_logger.error("[get_tokens] Unknown client ID")
sys.exit(1)
tokens = {'transfer_rt': transfer_rt,
'auth_rt': auth_rt}
return tokens
#=========================================================================================
def do_action(data):
""" Run operations based on command line or JSON input """
try:
command = data['action']
except KeyError:
msg = "[do_action] 'action' missing from JSON or command-line input. Run dsglobus -h for usage instructions."
my_logger.error(msg)
sys.exit(1)
dispatch = {
"ap": add_endpoint_acl_rule,
"rp": delete_endpoint_acl_rule,
"st": submit_dsrqst_transfer,
"ls": list_endpoint_files,
"transfer": submit_rda_transfer,
"tb": submit_rda_transfer,
"dr": submit_rda_transfer,
"tb-quasar" : submit_rda_transfer,
"dr-quasar" : submit_rda_transfer,
"gt": get_task_info,
"tl": task_list,
"delete": submit_rda_delete,
"mkdir": make_directory,
"rename": rename_multiple_filedir,
"cancel": task_cancel
}
""" Get client ID and add it to data dict """
data.update({'client_id': get_client_id(command)})
if command in dispatch:
command = dispatch[command]
else:
msg = "[do_action] command {} not found.".format(command)
my_logger.error(msg)
sys.exit(1)
return command(data)
#=========================================================================================
def get_endpoint_by_name(endpoint_name):
try:
endpoint_id = MyEndpoints[endpoint_name]
except KeyError:
msg = "[get_endpoint_id] Unknown endpoint name: {}".format(endpoint_name)
my_logger.error(msg)
sys.exit(1)
return endpoint_id
#=========================================================================================
def add_endpoint_acl_rule(data):
""" Create a new endpoint access rule. 'type' must be defined in the input dict:
type = 'dsrqst': dsrqst share
= 'dataset': standard dataset share
"""
if 'print' in data:
print_stdout = data['print']
else:
print_stdout = False
try:
type = data['type']
except KeyError:
msg = "[add_endpoint_acl_rule] 'type' not defined in input dict."
my_logger.error(msg)
sys.exit(1)
if (type == 'dsrqst'):
try:
endpoint_id = MyGlobus['data_request_ep']
ridx = data['ridx']
cond = " WHERE rindex='{0}'".format(ridx)
myrqst = myget('dsrqst', ['*'], cond)
if (len(myrqst) == 0):
msg = "[add_endpoint_acl_rule] Request index not on file"
my_logger.warning(msg)
if 'print' in data and data['print']:
sys.exit("Error: {0}".format(msg))
return {'Error': msg}
rqstid = myrqst['rqstid']
email = myrqst['email']
dsid = myrqst['dsid']
if myrqst['globus_rid']:
msg = "[add_endpoint_acl_rule] Globus ACL rule has already been created for request {0}.".format(ridx)
my_logger.info(msg)
if 'print' in data and data['print']:
sys.exit(msg)
return {'access_id': myrqst['globus_rid'], 'share_url': myrqst['globus_url']}
share_data = {'ridx': ridx, 'dsid': dsid, 'email': email}
path = construct_share_path(type, share_data)
except KeyError as err:
return handle_error(err, name="[add_endpoint_acl_rule]", print_stdout=print_stdout)
elif (type == 'dataset'):
try:
dsid = data['dsid']
email = data['email']
loc = get_dataset_location(dsid)
if loc == 'O':
endpoint_id = MyGlobus['rda_stratus_endpoint']
legacy_name = MyGlobus['datashare_stratus']
else:
endpoint_id = MyGlobus['datashare_ep']
legacy_name = MyGlobus['datashare_legacy']
cond = " WHERE email='{0}' AND dsid='{1}' AND source_endpoint='{2}' AND status='ACTIVE'".format(email, dsid, legacy_name)
myshare = myget('goshare', ['*'], cond)
if (len(myshare) > 0 and myshare['globus_rid']):
msg = "[add_endpoint_acl_rule] Globus ACL rule has already been created for user {0} and dataset {1}. ACL rule {2}".format(email, dsid, myshare['globus_rid'])
my_logger.info(msg)
if 'print' in data and data['print']:
sys.exit(msg)
return {'access_id': myshare['globus_rid'], 'share_url': myshare['globus_url']}
share_data = {'dsid': dsid}
path = construct_share_path(type, share_data)
share_data.update({'email': email})
except KeyError as err:
return handle_error(err, name="[add_endpoint_acl_rule]", print_stdout=print_stdout)
rda_identity = "{0}@rda.ucar.edu".format(email)
identity_id = get_user_id(rda_identity)
share_data.update({'identity': identity_id})
rule_data = {
"DATA_TYPE": "access",
"principal_type": "identity",
"principal": identity_id,
"path": path,
"permissions": "r"
}
if 'notify' in data:
rule_data.update({"notify_email": email})
tc_authorizer = RefreshTokenAuthorizer(MyGlobus['transfer_refresh_token'], load_app_client())
tc = TransferClient(authorizer=tc_authorizer)
try:
result = tc.add_endpoint_acl_rule(endpoint_id, rule_data)
except GlobusAPIError as e:
msg = ("[add_endpoint_acl_rule] Globus API Error\n"
"HTTP status: {}\n"
"Error code: {}\n"
"Error message: {}").format(e.http_status, e.code, e.message)
my_logger.error(msg)
raise e
except NetworkError:
my_logger.error(("[add_endpoint_acl_rule] Network Failure. "
"Possibly a firewall or connectivity issue"))
raise
except GlobusError:
logging.exception("[add_endpoint_acl_rule] Totally unexpected GlobusError!")
raise
msg = "{0}\nResource: {1}\nRequest ID: {2}\nAccess ID: {3}".format(result['message'], result['resource'], result['request_id'], result['access_id'])
if 'print' in data and data['print']:
print (msg)
my_logger.info("[add_endpoint_acl_rule] {0}".format(msg))
my_logger.info("[add_endpoint_acl_rule] User email: {0}".format(email))
if 'print' in data and data['print']:
share_data.update({'print': True})
url = construct_share_url(type, share_data)
share_data.update({'globus_rid': result['access_id'],'globus_url': url})
update_share_record(type, share_data)
return {'access_id': result["access_id"], 'share_url': url}
#=========================================================================================
def delete_endpoint_acl_rule(data):
""" Delete a specific endpoint access rule. 'type' must be defined in input dict:
type = 'dsrqst': dsrqst share
= 'dataset': standard dataset share
"""
if 'print' in data:
print_stdout = data['print']
else:
print_stdout = False
try:
type = data['type']
except KeyError:
msg = "[delete_endpoint_acl_rule] 'type' not defined in input dict."
my_logger.error(msg)
sys.exit(1)
tc = get_transfer_client(MyGlobus['client_id'])
if (type == 'dsrqst'):
try:
endpoint_id = MyGlobus['data_request_ep']
ridx = data['ridx']
except KeyError as err:
return handle_error(err, name="[delete_endpoint_acl_rule]", print_stdout=print_stdout)
else:
rqst_cond = " WHERE rindex='{0}'".format(ridx)
""" Try the dsrqst record first, then try dspurge """
myrqst = myget('dsrqst', ['*'], rqst_cond)
mypurge = myget('dspurge', ['*'], rqst_cond)
rqst_rid = None
purge_rid = None
try:
rqst_rid = myrqst['globus_rid']
except KeyError:
try:
purge_rid = mypurge['globus_rid']
except KeyError:
msg = "[delete_endpoint_acl_rule] Request record not found in dsrqst or dspurge (request index {0}).".format(ridx)
my_logger.warning(msg)
if 'print' in data and data['print']:
sys.exit("Error: {0}".format(msg))
return {'Error': msg}
rule_id = rqst_rid if rqst_rid else purge_rid
if not rule_id:
msg = "[delete_endpoint_acl_rule] Globus ACL rule not found in request record (request index {0}).".format(ridx)
my_logger.warning(msg)
if 'print' in data and data['print']:
sys.exit("Error: {0}".format(msg))
return {'Error': msg}
else:
record = {'globus_rid': None,
'globus_url': None}
if rqst_rid:
myupdt('dsrqst', record, rqst_cond)
else:
myupdt('dspurge', record, rqst_cond)
share_cond = " WHERE rindex='{0}' AND status='ACTIVE'".format(ridx)
myshare = myget('goshare', ['*'], share_cond)
if (len(myshare) > 0):
share_record = {'delete_date': datetime.now().strftime("%Y-%m-%d"),
'status': 'DELETED'}
myupdt('goshare', share_record, share_cond)
try:
result = tc.delete_endpoint_acl_rule(endpoint_id, rule_id)
except GlobusAPIError as e:
my_logger.error(("[delete_endpoint_acl_rule] Globus API Error\n"
"HTTP status: {}\n"
"Error code: {}\n"
"Error message: {}").format(e.http_status, e.code, e.message))
raise e
except NetworkError:
my_logger.error(("[delete_endpoint_acl_rule] Network Failure. "
"Possibly a firewall or connectivity issue"))
raise
except GlobusError:
logging.exception("[delete_endpoint_acl_rule] Totally unexpected GlobusError!")
raise
msg = "{0}\nResource: {1}\nRequest ID: {2}".format(result['message'], result['resource'], result['request_id'])
if 'print' in data and data['print']:
print (msg)
my_logger.info("[delete_endpoint_acl_rule] {0}".format(msg))
elif (type == 'dataset'):
try:
email = data['email']
dsid = data['dsid']
except KeyError as err:
return handle_error(err, name="[delete_endpoint_acl_rule]", print_stdout=print_stdout)
else:
cond = " WHERE email='{0}' AND dsid='{1}' AND status='ACTIVE'".format(email, dsid)
myshares = mymget('goshare', ['*'], cond)
if (len(myshares) == 0):
msg = "[delete_endpoint_acl_rule] Globus share record not found for e-mail = {0} and dsid = {1}.".format(email, dsid)
my_logger.warning(msg)
if 'print' in data and data['print']:
sys.exit("Error: {0}".format(msg))
return {'Error': msg}
for i in range(len(myshares)):
try:
endpoint_id = MyEndpoints[myshares[i]['source_endpoint']]
rule_id = myshares[i]['globus_rid']
record = {'delete_date': datetime.now().strftime("%Y-%m-%d"),
'status': 'DELETED'}
myupdt('goshare', record, cond)
except KeyError:
msg = "[delete_endpoint_acl_rule] Globus ACL rule not found in Globus share record (e-mail: {0}, dsid: {1}).".format(email, dsid)
my_logger.warning(msg)
if 'print' in data and data['print']:
print(msg)
continue
try:
result = tc.delete_endpoint_acl_rule(endpoint_id, rule_id)
except GlobusAPIError as e:
my_logger.error(("[delete_endpoint_acl_rule] Globus API Error\n"
"HTTP status: {}\n"
"Error code: {}\n"
"Error message: {}").format(e.http_status, e.code, e.message))
raise e
except NetworkError:
my_logger.error(("[delete_endpoint_acl_rule] Network Failure. "
"Possibly a firewall or connectivity issue"))
raise
except GlobusError:
logging.exception("[delete_endpoint_acl_rule] Totally unexpected GlobusError!")
raise
msg = "{0}\nResource: {1}\nRequest ID: {2}".format(result['message'], result['resource'], result['request_id'])
if 'print' in data and data['print']:
print (msg)
my_logger.info("[delete_endpoint_acl_rule] {0}".format(msg))
return
#=========================================================================================
def submit_dsrqst_transfer(data):
""" Submit a Globus transfer on behalf of the user. For dsrqst 'push' transfers. """
""" Get session ID from dsrqst record """
ridx = data['ridx']
cond = " WHERE rindex={0}".format(ridx)
myrqst = myget('dsrqst', ['tarcount', 'tarflag', 'session_id'], cond)
if (len(myrqst) == 0):
msg = "[submit_dsrqst_transfer] Request index not found in DB"
my_logger.warning(msg)
sys.exit(1)
session = get_session(myrqst['session_id'])
email = session['email']
dsid = session['dsid']
type = 'dsrqst'
""" Define source endpoint ID and paths """
host_endpoint = MyGlobus['host_endpoint_id']
source_endpoint_id = MyGlobus['data_request_ep']
source_endpoint_legacy_name = MyGlobus['data_request_legacy']
destination_endpoint_id = session['endpoint_id']
""" Check if user has a share set up for this endpoint & path """
share_data = {'ridx': ridx, 'notify': True, 'source_endpoint': source_endpoint_legacy_name}
if not query_acl_rule(type, share_data):
share_data.update({'type': type})
acl_data = add_endpoint_acl_rule(share_data)
directory = construct_share_path(type, share_data)
""" Instantiate the Globus SDK transfer client """
refresh_token = session['transfer.api.globus.org']['refresh_token']
tc_authorizer = RefreshTokenAuthorizer(refresh_token, load_app_client())
transfer = TransferClient(authorizer=tc_authorizer)
""" Instantiate TransferData object """
transfer_data = TransferData(transfer_client=transfer,
source_endpoint=source_endpoint_id,
destination_endpoint=destination_endpoint_id,
label=session['label'])
""" Check for tar file output and add to items to be transferred.
Note that source_path is relative to the source endpoint base path. """
ep_base_path = MyGlobus['data_request_ep_base'].rstrip("/")
if (myrqst['tarflag'] == 'Y' and myrqst['tarcount'] > 0):
tar_dir = 'TarFiles'
if os.path.exists(ep_base_path + directory + tar_dir):
source_path = directory + tar_dir
dest_path = session['dest_path'] + tar_dir
transfer_data.add_item(source_path, dest_path, recursive=True)
""" Get individual request files from wfrqst and add to items to be transferred """
files = mymget('wfrqst', ['wfile'], "{} ORDER BY disp_order, wfile".format(cond))
if (len(files) > 0):
for i in range(len(files)):
file = files[i]['wfile']
if os.path.isfile(ep_base_path + directory + file):
source_path = directory + file
dest_path = session['dest_path'] + file
transfer_data.add_item(source_path, dest_path)
if (len(transfer_data['DATA']) == 0):
my_logger.warning("[submit_dsrqst_transfer] No request files found to transfer for request index {}".format(ridx))
return None
transfer.endpoint_autoactivate(source_endpoint_id)
transfer.endpoint_autoactivate(destination_endpoint_id)
task_id = transfer.submit_transfer(transfer_data)['task_id']
""" Store task_id in request record """
record = {'task_id': task_id}
myupdt('dsrqst', record, cond)
msg = "[submit_dsrqst_transfer] Transfer submitted successfully. Task ID: {0}. Files transferred: {1}. Request index: {2}".format(task_id, len(transfer_data['DATA']), ridx)
my_logger.info(msg)
if 'print' in data and data['print']:
print ("{}".format(task_id))
""" Create share record in goshare """
return task_id
#=========================================================================================
def construct_share_path(type, data):
""" Construct the path to the shared data. Path is relative to the
shared endpoint base path.
type = 'dsrqst': dsrqst share
= 'dataset': standard dataset share
"""
if 'print' in data:
print_stdout = data['print']
else:
print_stdout = False
if (type == 'dsrqst'):
try:
ridx = data['ridx']
cond = " WHERE rindex='{0}'".format(ridx)
myrqst = myget('dsrqst', ['rqstid','location'], cond)
if (len(myrqst) > 0):
if myrqst['location']:
base_path = MyGlobus['data_request_ep_base']
loc = myrqst['location']
if (loc.find(base_path) != -1):
path_len = len(base_path)
path = "/{0}/".format(loc[path_len:])
else:
path = None
else:
path = "/download.auto/{0}/".format(myrqst['rqstid'])
else:
msg = "[construct_share_path] Request index {0} not found or request ID not defined".format(ridx)
my_logger.error(msg)
if 'print' in data and data['print']:
sys.exit("Error: {0}".format(msg))
return {'Error': msg}
except KeyError as err:
return handle_error(err, name="[construct_share_path]", print_stdout=print_stdout)
elif (type == 'dataset'):
try:
path = "/{0}/".format(data['dsid'])
except KeyError as err:
return handle_error(err, name="[construct_share_path]", print_stdout=print_stdout)
my_logger.info("[construct_share_path] Path to shared data: {0}".format(path))
return path
#=========================================================================================
def construct_share_url(type, data):
""" Construct the URL to the shared data on the Globus web app
type = 'dsrqst': dsrqst shares
= 'dataset': standard dataset share
"""
if 'print' in data:
print_stdout = data['print']
else:
print_stdout = False
if (type == 'dsrqst'):
try:
ridx = data['ridx']
cond = ' WHERE rindex={0}'.format(ridx)
myrqst = myget('dsrqst', ['*'], cond)
if (len(myrqst) > 0):
origin_id = MyGlobus['data_request_ep']
origin_path = construct_share_path(type, {'ridx': ridx})
else:
msg = "[construct_share_url] Request {0} not found in RDADB".format(ridx)
my_logger.warning(msg)
if 'print' in data and data['print']:
sys.exit("Error: {0}".format(msg))
return {'Error': msg}
except KeyError as err:
return handle_error(err, name="[construct_share_url]", print_stdout=print_stdout)
if (type == 'dataset'):
try:
dsid = data['dsid']
if get_dataset_location(dsid) == 'O':
origin_id = MyGlobus['rda_stratus_endpoint']
else:
origin_id = MyGlobus['datashare_ep']
origin_path = construct_share_path(type, {'dsid': data['dsid']})
except KeyError as err:
return handle_error(err, name="[construct_share_url]", print_stdout=print_stdout)
params = {'origin_id': origin_id, 'origin_path': origin_path}
if 'identity' in data:
params.update({'add_identity': data['identity']})
url = '{0}transfer?{1}'.format(MyGlobus['globusURL'], urlencode(params))
my_logger.info("[construct_share_url] Globus share URL created: {0}".format(url))
return url
#=========================================================================================
def get_user_id(identity):
""" Get the UUID assigned by Globus Auth. Input argument 'identity' can be one of
the following:
GlobusID (Globus primary identity): in the form of [email protected]
NCAR RDA identity : in the form of [email protected]@rda.ucar.edu, where [email protected] is the user's RDA e-mail login
E-mail identity : in the form of [email protected]
"""
try:
ac_authorizer = RefreshTokenAuthorizer(MyGlobus['auth_refresh_token'], load_app_client())
ac = AuthClient(authorizer=ac_authorizer)
result = ac.get_identities(usernames=identity, provision=True)
uuid = result.data['identities'][0]['id']
except GlobusAPIError as e:
my_logger.error(("[get_user_id] Globus API Error\n"
"HTTP status: {}\n"
"Error code: {}\n"
"Error message: {}").format(e.http_status, e.code, e.message))
raise e
except NetworkError:
my_logger.error(("[get_user_id] Network Failure. "
"Possibly a firewall or connectivity issue"))
raise
except GlobusError:
logging.exception("[get_user_id] Totally unexpected GlobusError!")
raise
return uuid
#=========================================================================================
def query_acl_rule(type, data):
""" Check if an active ACL rule exists for a given RDA user
type = 'dsrqst': dsrqst share
= 'dataset': standard dataset share
"""
if 'print' in data:
print_stdout = data['print']
else:
print_stdout = False
if (type == 'dsrqst'):
""" dsrqst shares """
cond = " WHERE rindex='{0}'".format(data['ridx'])
myrule = myget('dsrqst', ['*'], cond)
elif (type == 'dataset'):
""" standard dataset shares """
cond = " WHERE email='{0}' AND dsid='{1}' AND source_endpoint='{2}' AND status='ACTIVE'".format(data['email'], data['dsid'], data['source_endpoint'])
myrule = myget('goshare', ['*'], cond)
try:
rule_id = myrule['globus_rid']
except KeyError:
rule_id = None
if rule_id:
return {'acl_rule': rule_id}
else:
return None
#=========================================================================================
def update_share_record(type, data):
""" Update the user's Globus share in RDADB
type = 'dsrqst': dsrqst share
= 'dataset': standard dataset share
"""
if ('print' in data):
print_stdout = data['print']
else:
print_stdout = False
try:
globus_rid = data['globus_rid']
globus_url = data['globus_url']
dsid = data['dsid']
email = data['email']
cond = " WHERE email='{0}' AND end_date IS NULL".format(email)
myuser = myget('ruser', ['id'], cond)
if 'id' not in myuser:
msg = "[update_share_record] email {0} not in RDADB table ruser".format(email)
my_logger.warning(msg)
return {'Error': msg}
except KeyError as err:
return handle_error(err, name="[update_share_record]", print_stdout=print_stdout)
share_record = {'globus_rid': '{0}'.format(globus_rid),
'globus_url': '{0}'.format(globus_url),
'email': '{0}'.format(email),
'user_id': '{0}'.format(myuser['id']),
'username': None,
'request_date': datetime.now().strftime("%Y-%m-%d"),
'dsid': '{0}'.format(dsid),
'status': 'ACTIVE'}
if (type == 'dsrqst'):
try:
ridx = data['ridx']
cond = " WHERE rindex='{0}'".format(ridx)
rqst_record = {'globus_rid': data['globus_rid'],
'globus_url': data['globus_url']
}
myupdt('dsrqst', rqst_record, cond)
my_logger.info("[update_share_record] dsrqst record updated. Request index: {0}. ACL rule ID: {1}.".format(ridx, globus_rid))
path = construct_share_path(type, {'ridx': ridx})
share_record.update({'source_endpoint': '{0}'.format(MyGlobus['data_request_legacy']),
'acl_path': '{0}'.format(path),
'rindex': '{0}'.format(ridx)
})
myadd('goshare', share_record)
my_logger.info("[update_share_record] Record added to goshare. Request index: {0}, ACL rule ID: {1}.".format(ridx, globus_rid))
except KeyError as err:
return handle_error(err, name="[update_share_record]", print_stdout=print_stdout)
elif (type == 'dataset'):
try:
path = construct_share_path(type, {'dsid': dsid})
if get_dataset_location(dsid) == 'O':
legacy_endpoint = MyGlobus['datashare_stratus']
else:
legacy_endpoint = MyGlobus['datashare_legacy']
share_record.update({'source_endpoint': '{0}'.format(legacy_endpoint),
'acl_path': '{0}'.format(path)
})
myadd('goshare', share_record)
my_logger.info("[update_share_record] Record added to goshare. Email: {0}, dsid: {1}, ACL rule ID: {2}.".format(email, dsid, globus_rid))
except KeyError as err:
return handle_error(err, name="[update_share_record]", print_stdout=print_stdout)
return
#=========================================================================================
def get_session(sid):
""" Retrieve session data from RDADB """
keys = ['id','access','data']
condition = " WHERE {0} = '{1}'".format("id", sid)
myrec = myget('sessions', keys, condition)
if (len(myrec) == 0):
msg = "[get_session] Session ID not found in DB"
my_logger.warning(msg)
sys.exit(1)
return unserialize(myrec['data'])
#=========================================================================================
def get_dataset_location(dsid):
""" Get the RDA dataset location (Glade, stratus, or other) """
try:
cond = " WHERE dsid='{}'".format(dsid)
myrec = myget('dataset', ['locflag'], cond)
except:
msg = "[get_dataset_location] Error getting location flag for dataset {}.".format(dsid)
my_logger.error(msg)
print(msg)
raise
return myrec['locflag']
#=========================================================================================
def submit_rda_transfer(data):
""" General data transfer to RDA endpoints. Input should be JSON formatted input
if transferring multiple files. """
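# A minimal input sketch (endpoint names and paths are placeholders); note
# that do_action() adds 'client_id' before dispatching here:
#
#     data = {
#         "action": "transfer",
#         "source_endpoint": "rda-glade",
#         "destination_endpoint": "rda-quasar",
#         "label": "Example transfer",
#         "files": [
#             {"source_file": "/data/ds999.9/file1.tar",
#              "destination_file": "/ds999.9/file1.tar"}
#         ]
#     }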
try:
source_endpoint = get_endpoint_by_name(data['source_endpoint'])
destination_endpoint = get_endpoint_by_name(data['destination_endpoint'])
except KeyError:
my_logger.error("[submit_rda_transfer] source_endpoint and/or destination_endpoint missing from input.")
sys.exit(1)
try:
label = data['label']
except KeyError:
label=''
if 'verify_checksum' in data:
verify_checksum = data['verify_checksum']
else:
verify_checksum = False
try:
files = data['files']
except KeyError:
my_logger.error("[submit_rda_transfer] Files missing from JSON or command-line input")
sys.exit(1)
try:
tc = get_transfer_client(data['client_id'])
except KeyError:
my_logger.error("[submit_rda_transfer] client_id is missing from input.")
sys.exit(1)
transfer_data = TransferData(transfer_client=tc,
source_endpoint=source_endpoint,
destination_endpoint=destination_endpoint,
label=label,
verify_checksum=verify_checksum)
for i in range(len(files)):
source_file = files[i]['source_file']
dest_file = files[i]['destination_file']
# Verify source file exists and meets minimum size requirements (> 200 MB, 1 GB preferred)
transfer_data.add_item(source_file, dest_file)
try:
transfer_result = tc.submit_transfer(transfer_data)
task_id = transfer_result['task_id']
except GlobusAPIError as e:
msg = ("[submit_rda_transfer] Globus API Error\n"
"HTTP status: {}\n"
"Error code: {}\n"
"Error message: {}").format(e.http_status, e.code, e.message)
my_logger.error(msg)
raise e
except NetworkError:
my_logger.error(("[submit_rda_transfer] Network Failure. "
"Possibly a firewall or connectivity issue"))
raise
except GlobusError:
logging.exception("[submit_rda_transfer] Totally unexpected GlobusError!")
raise
msg = "{0}\nTask ID: {1}".format(transfer_result['message'], task_id)
my_logger.info(msg)
print(msg)
return transfer_result
#=========================================================================================
def submit_rda_delete(data):
""" Delete files and/or directories from RDA endpoints. Input should be JSON formatted input
if transferring multiple files. Command line input can be used if deleting a
single file/directory with the action --delete. """
try:
target_endpoint = get_endpoint_by_name(data['endpoint'])
except KeyError:
my_logger.error("[submit_rda_delete] Endpoint name/ID missing from input.")
raise
try:
label = data['label']
except KeyError:
label=''
try:
files = data['files']
except KeyError:
my_logger.error("[submit_rda_delete] File(s) missing from JSON or command-line input")
raise
try:
tc = get_transfer_client(data['client_id'])
except KeyError:
my_logger.error("[submit_rda_delete] client_id is missing from input.")
raise
delete_data = DeleteData(tc, target_endpoint, label=label)
for i in range(len(files)):
target_file = files[i]
delete_data.add_item(target_file)
try:
delete_result = tc.submit_delete(delete_data)
task_id = delete_result['task_id']
except GlobusAPIError as e:
msg = ("[submit_rda_delete] Globus API Error\n"
"HTTP status: {}\n"
"Error code: {}\n"
"Error message: {}").format(e.http_status, e.code, e.message)
my_logger.error(msg)
raise e
except NetworkError:
my_logger.error(("[submit_rda_delete] Network Failure. "
"Possibly a firewall or connectivity issue"))
raise
except GlobusError:
logging.exception("[submit_rda_delete] Totally unexpected GlobusError!")
raise
msg = "{0}\nTask ID: {1}".format(delete_result['message'], task_id)
my_logger.info(msg)
print(msg)
return delete_result
#=========================================================================================
def rename_multiple_filedir(data):
""" Renames files and/or directories on an endpoint. This function takes
multiple file name pairs as input, where the input key 'files' is a list
specifying individual dicts of 'old_path' and 'new_path'. Example:
files = [
{
"old_path": "/path/to/old/file/file_1_old.txt",
"new_path": "/path/to/old/file/file_1_new.txt"
}
{
"old_path": "/path/to/old/file/file_2_old.txt",
"new_path": "/path/to/old/file/file_2_new.txt"
}
{
"old_path": "/path/to/old/file/file_3_old.txt",
"new_path": "/path/to/old/file/file_3_new.txt"
}
]
"""
try:
endpoint = get_endpoint_by_name(data['endpoint'])
files = data['files']
except KeyError:
msg = "[rename_filedir] Endpoint name or file(s) missing from JSON or command-line input"
my_logger.error(msg)
sys.exit(1)
try:
tc = get_transfer_client(data['client_id'])
except KeyError:
my_logger.error("[rename_multiple_filedir] client_id is missing from input.")
raise
responses = []
for i in range(len(files)):
old_path = files[i]['old_path']
new_path = files[i]['new_path']
rename_response = tc.operation_rename(endpoint, oldpath=old_path, newpath=new_path)
msg = "old file: {0}\nnew file: {1}\n{2}".format(old_path, new_path, rename_response['message'])
my_logger.info(msg)
print(msg)
responses.append(rename_response)
return responses
#=========================================================================================
def make_directory(data):
""" Creates a directory on an endpoint. """
try:
endpoint = get_endpoint_by_name(data['endpoint'])
path = data['path']
except KeyError:
my_logger.error("[make_directory] Endpoint name or path missing from JSON or command-line input")
raise
try:
tc = get_transfer_client(data['client_id'])
except KeyError:
my_logger.error("[make_directory] client_id is missing from input.")
raise
""" Print warning message and return gracefully if directory already exists. """
try:
mkdir_response = tc.operation_mkdir(endpoint, path=path)
msg = "{}".format(mkdir_response['message'])
my_logger.info(msg)
print(msg)
except GlobusAPIError as e:
msg = ("[make_directory] Globus API Error\n"
"HTTP status: {}\n"
"Error code: {}\n"
"Error message: {}").format(e.http_status, e.code, e.message)
my_logger.error(msg)
if 'Exists' in e.code:
print(msg)
return e
else:
raise e
return mkdir_response
#=========================================================================================
def get_task_info(data):
""" Get Globus task info for a specified task ID """
if 'task_id' not in data:
msg = "[get_task_info] Task ID missing from input."
my_logger.error(msg)
sys.exit(1)
try:
tc = get_transfer_client(data['client_id'])
except KeyError:
my_logger.error("[get_task_info] client_id is missing from input.")
raise
task_info = tc.get_task(data['task_id'])
common_fields = [
("Label", "label"),
("Task ID", "task_id"),
("Is Paused", "is_paused"),
("Type", "type"),
("Directories", "directories"),
("Files", "files"),
("Status", "status"),
("Request Time", "request_time"),
]
active_fields = [("Deadline", "deadline"), ("Details", "nice_status")]
completed_fields = [("Completion Time", "completion_time")]
delete_fields = [
("Endpoint", "source_endpoint_display_name"),
("Endpoint ID", "source_endpoint_id"),
]
transfer_fields = [
("Source Endpoint", "source_endpoint_display_name"),
("Source Endpoint ID", "source_endpoint_id"),
("Destination Endpoint", "destination_endpoint_display_name"),
("Destination Endpoint ID", "destination_endpoint_id"),
("Bytes Transferred", "bytes_transferred"),
("Bytes Per Second", "effective_bytes_per_second"),
("Verify Checksum", "verify_checksum"),
]
successful_transfer_fields = [
("Source Path", "source_path"),
("Destination Path", "destination_path"),
]
fields = (common_fields
+ (completed_fields if task_info["completion_time"] else active_fields)
+ (delete_fields if task_info["type"] == "DELETE" else transfer_fields)
)
colon_formatted_print(task_info, fields)
return task_info.data
#=========================================================================================
def task_list(data):
""" Get a list of Globus tasks submitted by the current user
The parameter 'limit' can be passed in the input dict 'data' to limit the number of
results, e.g. data['limit'] = 10.
=== Filtering
The following parameters can be included in the input dict 'data' to filter the results:
filter_task_id: Comma separated list of task IDs, formatted as UUID strings
filter_type: Comma separated list of task type (TRANSFER, DELETE)
filter_status: Comma separated list of status codes (ACTIVE, INACTIVE, FAILED, SUCCEEDED)
filter_requested_before: Filter results to tasks submitted before given date, formatted as YYYY-MM-DD
filter_requested_after: Filter results to tasks submitted after given date, formatted as YYYY-MM-DD
filter_completed_before: Filter results to tasks completed before given date, formatted as YYYY-MM-DD
filter_completed_after: Filter results to tasks completed after given date, formatted as YYYY-MM-DD
"""
# make filter string
filter_string = ""
try:
filter_task_id = data['filter_task_id']
filter_string += process_filterval("task_id", filter_task_id)
except KeyError:
pass
try:
filter_status = data['filter_status']
filter_string += process_filterval("status", filter_status)
except KeyError:
pass
try:
filter_type = data['filter_type']
filter_string += process_filterval("type", filter_type, default="type:TRANSFER,DELETE/")
except KeyError:
pass
try:
filter_requested_before = data['filter_requested_before']
if not filter_requested_before:
filter_requested_before = ""
except KeyError:
filter_requested_before = ""
try:
filter_requested_after = data['filter_requested_after']
if not filter_requested_after:
filter_requested_after = ""
except KeyError:
filter_requested_after = ""
try:
filter_completed_before = data['filter_completed_before']
if not filter_completed_before:
filter_completed_before = ""
except KeyError:
filter_completed_before = ""
try:
filter_completed_after = data['filter_completed_after']
if not filter_completed_after:
filter_completed_after = ""
except KeyError:
filter_completed_after = ""
if (filter_requested_before or filter_requested_after):
filter_string += process_filterval(
"request_time", [filter_requested_after, filter_requested_before]
)
if (filter_completed_before or filter_completed_after):
filter_string += process_filterval(
"completion_time", [filter_completed_after, filter_completed_before]
)
try:
limit = data['limit']
except KeyError:
limit = None
fields = [
("Task ID", "task_id"),
("Status", "status"),
("Type", "type"),
("Source Display Name", "source_endpoint_display_name"),
("Dest Display Name", "destination_endpoint_display_name"),
("Request Time", "request_time"),
("Completion Time", "completion_time"),
("Label", "label")
]
try:
tc = get_transfer_client(data['client_id'])
except KeyError:
my_logger.error("[task_list] client_id is missing from input.")
raise
list_response = tc.task_list(num_results=limit, filter=filter_string[:-1])
print_table(list_response, fields)
return list_response
#=========================================================================================
def task_cancel(data):
""" Cancel a Globus task """
try:
tc = get_transfer_client(data['client_id'])
except KeyError:
my_logger.error("[task_cancel] client_id is missing from input.")
raise
try:
task_id = data['task_id']
except KeyError:
my_logger.error("[task_cancel] Task ID missing from JSON or command-line input")
sys.exit(1)
cancel_response = tc.cancel_task(task_id)
msg = "Task ID: {0}\n{1}".format(task_id, cancel_response['message'])
my_logger.info(msg)
print(msg)
return cancel_response
#=========================================================================================
def process_filterval(prefix, value, default=None):
""" Create filter string for task_list """
if value:
if isinstance(value, six.string_types):
return "{}:{}/".format(prefix, value)
return "{}:{}/".format(prefix, ",".join(str(x) for x in value))
else:
return default or ""
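# Examples, derived from the logic above:
#     process_filterval("status", "ACTIVE") -> "status:ACTIVE/"
#     process_filterval("type", ["TRANSFER", "DELETE"]) -> "type:TRANSFER,DELETE/"
#     process_filterval("type", None, default="type:TRANSFER,DELETE/") -> "type:TRANSFER,DELETE/"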
#=========================================================================================
def list_endpoint_files(data):
""" List endpoint directory contents
=== Filtering
List files and dirs on a specific path on an endpoint, filtering in various ways.
Filter patterns must start with "=", "~", "!", or "!~"
If none of these are given, "=" will be used
"=" does exact matching
"~" does regex matching, supporting globs (*)
"!" does inverse "=" matching
"!~" does inverse "~" matching
"~*.txt" matches all .txt files, for example
$ dsglobus -ls -ep <endpoint> -p <path> --filter '~*.txt' # all txt files
$ dsglobus -ls -ep <endpoint> -p <path> --filter '!~file1.*' # not starting in "file1."
$ dsglobus -ls -ep <endpoint> -p <path> --filter '~*ile3.tx*' # anything with "ile3.tx"
$ dsglobus -ls -ep <endpoint> -p <path> --filter '=file2.txt' # only "file2.txt"
$ dsglobus -ls -ep <endpoint> -p <path> --filter 'file2.txt' # same as '=file2.txt'
$ dsglobus -ls -ep <endpoint> -p <path> --filter '!=file2.txt' # anything but "file2.txt"
"""
try:
endpoint = get_endpoint_by_name(data['endpoint'])
except KeyError:
my_logger.error("[list_endpoint_files] Endpoint name/ID missing from input.")
raise
try:
ls_params = {"path": data['path']}
except KeyError:
my_logger.error("[list_endpoint_files] Path missing from input.")
raise
if 'filter_pattern' in data and data['filter_pattern'] is not None:
ls_params.update({"filter": "name:{}".format(data['filter_pattern'])})
def cleaned_item_name(item):
return item["name"] + ("/" if item["type"] == "dir" else "")
fields=[
("User", "user"),
("Group", "group"),
("Permissions", "permissions"),
("Size", "size"),
("Last Modified", "last_modified"),
("File Type", "type"),
("Filename", cleaned_item_name),
]
try:
tc = get_transfer_client(data['client_id'])
except KeyError:
my_logger.error("[list_endpoint_files] client_id is missing from input.")
raise
ls_response = tc.operation_ls(endpoint, **ls_params)
print_table(ls_response, fields)
return ls_response.data
#=========================================================================================
def read_json_from_stdin():
"""Read arguments from stdin"""
in_json=""
for line in sys.stdin.readlines():
in_json += line
json_dict = json.loads(in_json)
return json_dict
#=========================================================================================
def valid_date(date_str):
""" Validate date input """
from time import strptime
fmt = "%Y-%m-%d"
try:
date_struct = strptime(date_str, fmt)
return datetime(date_struct[0], date_struct[1], date_struct[2]).isoformat()
except ValueError:
msg = "Not a valid date: '{0}'.".format(date_str)
raise argparse.ArgumentTypeError(msg)
#=========================================================================================
def parse_input():
""" Parse command line arguments """
import re
from time import strptime
desc = "Manage RDA Globus shared endpoints and endpoint permissions."
epilog = textwrap.dedent('''\
======================================================================================
Examples:
- Transfer data from GLADE to the NCAR Quasar tape system. Required arguments:
--transfer, --source-endpoint, --destination-endpoint, --source-file, and
--destination-file:
dsglobus --transfer --source-endpoint 'rda-glade' --destination-endpoint 'rda-quasar' --source-file /data/ds999.9/file.txt --destination-file /ds999.9/file.txt
- List files on the 'NCAR RDA Quasar' endpoint. Required arguments: --list-files,
--endpoint, --path:
dsglobus --list-files --endpoint 'NCAR RDA Quasar' --path /ds999.9/cmorph_v1.0/2019
- Get detailed information for an individual transfer task. Required arguments:
--get-task, --task-id:
dsglobus --get-task --task-id <TASK_ID>
- List transfer tasks completed in February 2021. Required argument: --task-list.
Optional filtering arguments: --filter-completed-before, --filter-completed-after:
dsglobus --task-list --filter-completed-after 2021-02-01 --filter-completed-before 2021-02-28
- Delete files or directories on the NCAR RDA Quasar (rda-quasar) endpoint. Required
arguments: --delete, --endpoint, --target-file:
dsglobus --delete --endpoint rda-quasar --target-file /ds999.9/file.txt
- Create a directory on an endpoint. Required arguments: --mkdir, --endpoint,
--path:
dsglobus --mkdir --endpoint rda-quasar --path /ds999.9/new_path/
- Rename a file or directory on an endpoint. Required arguments: --rename,
--endpoint, --oldpath, --newpath:
dsglobus --rename --endpoint rda-quasar --oldpath /ds999.9/oldfile.txt --newpath /ds999.9/newfile.txt
- Cancel a transfer task. Required arguments: --cancel-task, --task-id:
dsglobus --cancel-task --task-id <TASK_ID>
--------------------------------------------------------------------------------------
Examples to manage data shares with RDA users:
- Grant share permission to a user for dsrqst index 1234:
dsglobus -ap -ri 1234
- Delete permission from a user and delete the access share rule for dsrqst index 1234:
dsglobus -rp -ri 1234
- Share all files from RDA dataset ds131.2 with a user:
dsglobus -ap -ds 131.2 -em [email protected]
======================================================================================
Filtering:
When using the --filter option with --list-files, you can list files and dirs on a
specific path on an endpoint based on the filter criterion.
Filter patterns must start with "=", "~", "!", or "!~"
If none of these are given, "=" will be used
"=" does exact matching
"~" does regex matching, supporting globs (*)
"!" does inverse "=" matching
"!~" does inverse "~" matching
"~*.txt" matches all .txt files, for example
$ dsglobus -ls -ep <endpoint> -p <path> --filter '~*.txt' # all txt files
$ dsglobus -ls -ep <endpoint> -p <path> --filter '!~file1.*' # not starting in "file1."
$ dsglobus -ls -ep <endpoint> -p <path> --filter '~*ile3.tx*' # anything with "ile3.tx"
$ dsglobus -ls -ep <endpoint> -p <path> --filter '=file2.txt' # only "file2.txt"
$ dsglobus -ls -ep <endpoint> -p <path> --filter 'file2.txt' # same as '=file2.txt'
$ dsglobus -ls -ep <endpoint> -p <path> --filter '!=file2.txt' # anything but "file2.txt"
======================================================================================
Valid RDA endpoint names:
NCAR RDA GLADE: 'rda-glade'
NCAR RDA Quasar: 'rda-quasar'
NCAR RDA Quasar DRDATA: 'rda-quasar-drdata'
======================================================================================
Path values:
When using the --path, --oldpath, --newpath, or --target-file arguments, the path
given is relative to the host path on the specified endpoint.
For example, the host path on the 'NCAR RDA GLADE' endpoint is
/glade/collections/rda/, therefore any file operation to/from this endpoint must
be specified relative to this host path. To retrieve a listing of files stored
under /glade/collections/rda/data/ds540.0/, specify the relative path of
/data/ds540.0/:
dsglobus --list-files --endpoint 'rda-glade' --path /data/ds540.0/
Host paths on RDA shared endpoints:
NCAR RDA GLADE: /glade/collections/rda/
NCAR RDA Quasar: /gpfs/gpfs0/archive/rda/
NCAR RDA Quasar DRDATA: /gpfs/gpfs0/archive/rda_dr/
======================================================================================
Transferring multiple files (JSON input):
Multiple files can be transferred in a single call using JSON formatted input.
Required fields in the JSON input are 'action' (set to 'transfer'),
'source_endpoint', 'destination_endpoint', and 'files', specified as an array of
JSON objects with 'source_file', and 'destination_file' key-value pairs. The
fields 'label' and 'verify_checksum' are optional. JSON input can be passed into
dsglobus in one of the following ways:
1. dsglobus < files.json
2. cat files.json | dsglobus
3. dsglobus << EOF
{
<JSON formatted input>
}
EOF
Example JSON input:
{
"action": "transfer",
"source_endpoint": "rda-glade",
"destination_endpoint": "rda-quasar",
"label": "RDA Quasar transfer",
"verify_checksum": True,
"files": [
{"source_file": "/data/ds999.9/file1.tar", "destination_file": "/ds999.9/file1.tar"},
{"source_file": "/data/ds999.9/file2.tar", "destination_file": "/ds999.9/file2.tar"},
{"source_file": "/data/ds999.9/file3.tar", "destination_file": "/ds999.9/file3.tar"}
]
}
''')
date_fmt = "%Y-%m-%d"
parser = argparse.ArgumentParser(prog='dsglobus', formatter_class=argparse.RawDescriptionHelpFormatter, description=desc, epilog=textwrap.dedent(epilog))
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--add-permission', '-ap', action="store_true", default=False, help='Add endpoint permission')
group.add_argument('--remove-permission', '-rp', action="store_true", default=False, help='Delete endpoint permission')
group.add_argument('--submit-transfer', '-st', action="store_true", default=False, help='Submit Globus transfer on behalf of user. For dsrqst push transfers.')
group.add_argument('--list-files', '-ls', action="store_true", default=False, help='List files on a specified endpoint path.')
group.add_argument('--transfer', '-t', action="store_true", default=False, help='Transfer data between RDA endpoints.')
group.add_argument('--get-task', '-gt', action="store_true", default=False, help='Show information about a Globus task.')
group.add_argument('--task-list', '-tl', action="store_true", default=False, help='List Globus tasks for the current user.')
group.add_argument('--delete', '-d', action="store_true", default=False, help='Delete files and/or directories on an endpoint.')
group.add_argument('--mkdir', action="store_true", default=False, help='Create a directory on an endpoint.')
group.add_argument('--rename', action="store_true", default=False, help='Rename a file or directory on an endpoint.')
group.add_argument('--cancel-task', '-ct', action="store_true", default=False, help='Cancel a Globus task.')
parser.add_argument('--request-index', '-ri', action="store", dest="REQUESTINDEX", type=int, help='dsrqst request index')
parser.add_argument('--dataset', '-ds', action="store", dest="DATASETID", help='Dataset ID. Specify as dsnnn.n or nnn.n. Required with the -em argument.')
parser.add_argument('--email', '-em', action="store", dest="EMAIL", help='User e-mail. Required with the -ds argument.')
parser.add_argument('--no-email', '-ne', action="store_true", default=False, help='Do not send notification e-mail. Default = False.')
parser.add_argument('--endpoint', '-ep', action="store", dest="ENDPOINT", help='Endpoint ID or name. Required with --list-files and --delete arguments.')
parser.add_argument('--source-endpoint', '-se', action="store", dest="SOURCE_ENDPOINT", help='Source endpoint ID or name. Required with --transfer option.')
parser.add_argument('--destination-endpoint', '-de', action="store", dest="DESTINATION_ENDPOINT", help='Destination endpoint ID or name. Required with --transfer.')
parser.add_argument('--source-file', '-sf', action="store", dest="SOURCE_FILE", help='Path to source file name, relative to source endpoint host path. Required with --transfer option.')
parser.add_argument('--destination-file', '-df', action="store", dest="DESTINATION_FILE", help='Path to destination file name, relative to destination endpoint host path. Required with --transfer.')
parser.add_argument('--verify-checksum', '-vc', action="store_true", default=False, help='Verify checksum after transfer. Use with the --transfer action. Default = False.')
parser.add_argument('--target-file', '-tf', action="store", dest="TARGET_FILE", help='Path to target file name, relative to endpoint host path. Required with --delete.')
parser.add_argument('--path', '-p', action="store", dest="PATH", help='Directory path on endpoint. Required with -ls argument.')
parser.add_argument('--filter', action="store", dest="FILTER_PATTERN", help='Filter applied to --list-files.')
parser.add_argument('--task-id', action="store", dest="TASK_ID", help='Globus task ID.')
parser.add_argument('--limit', action="store", dest="LIMIT", type=int, help='Limit number of results.')
    parser.add_argument('--filter-task-id', action="store", dest="FILTER_TASK_ID", help='Task UUID to filter results by.')
parser.add_argument('--filter-type', action="store", dest="FILTER_TYPE", help='Filter results to only TRANSFER or DELETE tasks.', choices=['TRANSFER', 'DELETE'])
parser.add_argument('--filter-status', action="store", dest="FILTER_STATUS", help='Filter results to given task status.', choices=['ACTIVE', 'INACTIVE', 'FAILED', 'SUCCEEDED'])
parser.add_argument('--filter-requested-before', action="store", dest="FILTER_REQUESTED_BEFORE", help='Filter results to tasks submitted before given time.', type=valid_date)
parser.add_argument('--filter-requested-after', action="store", dest="FILTER_REQUESTED_AFTER", help='Filter results to tasks submitted after given time.', type=valid_date)
parser.add_argument('--filter-completed-before', action="store", dest="FILTER_COMPLETED_BEFORE", help='Filter results to tasks completed before given time.', type=valid_date)
parser.add_argument('--filter-completed-after', action="store", dest="FILTER_COMPLETED_AFTER", help='Filter results to tasks completed after given time.', type=valid_date)
parser.add_argument('--oldpath', action="store", dest="OLDPATH", help='Name of existing file or directory, including path. Required with --rename argument.')
parser.add_argument('--newpath', action="store", dest="NEWPATH", help='Name of new file or directory, including path. Required with --rename argument.')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args(sys.argv[1:])
my_logger.info("[parse_input] Input command & arguments: {0}: {1}".format(sys.argv[0], args))
opts = vars(args)
if args.add_permission:
opts.update({"action": "ap"})
if args.remove_permission:
opts.update({"action": "rp"})
if args.submit_transfer:
opts.update({"action": "st"})
if args.list_files:
opts.update({"action": "ls"})
if args.transfer:
opts.update({"action": "transfer"})
if args.get_task:
opts.update({"action": "gt"})
if args.task_list:
opts.update({"action": "tl"})
if args.delete:
opts.update({"action": "delete"})
if args.mkdir:
opts.update({"action": "mkdir"})
if args.rename:
opts.update({"action": "rename"})
if args.cancel_task:
opts.update({"action": "cancel"})
if args.no_email:
opts.update({'notify': False})
else:
opts.update({'notify': True})
if args.get_task and args.TASK_ID is None:
msg = "Option --get-task requires --task-id."
my_logger.error(msg)
parser.error(msg)
if args.transfer and (args.SOURCE_ENDPOINT is None or args.DESTINATION_ENDPOINT is None or args.SOURCE_FILE is None or args.DESTINATION_FILE is None):
msg = "Option --transfer requires arguments [--source-endpoint, --destination-endpoint, --source-file, --destination-file]."
my_logger.error(msg)
parser.error(msg)
if args.delete and (args.ENDPOINT is None or args.TARGET_FILE is None):
msg = "Option --delete requires --endpoint."
my_logger.error(msg)
parser.error(msg)
if args.list_files and (args.ENDPOINT is None or args.PATH is None):
msg = "Option --list-files requires both --endpoint and --directory."
my_logger.error(msg)
parser.error(msg)
if args.add_permission and (args.REQUESTINDEX and args.DATASETID):
msg = "Please specify only the dsrqst index (-ri) or dataset ID (-ds), not both."
my_logger.error(msg)
parser.error(msg)
if args.remove_permission and (args.REQUESTINDEX and args.DATASETID):
msg = "Please specify only the dsrqst index (-ri) or dataset ID (-ds), not both."
my_logger.error(msg)
parser.error(msg)
if args.submit_transfer and args.REQUESTINDEX is None:
msg = "Option --submit-transfer requires dsrqst index (--request-index)."
my_logger.error(msg)
parser.error(msg)
if args.DATASETID and args.EMAIL is None:
msg = "Option dataset ID (--dataset) requires email (--email)."
my_logger.error(msg)
parser.error(msg)
if args.EMAIL and args.DATASETID is None:
msg = "Option email (--email) requires dataset ID (--dataset)."
my_logger.error(msg)
parser.error(msg)
if args.mkdir and (args.ENDPOINT is None or args.PATH is None):
msg = "Option mkdir (--mkdir) requires both --endpoint and --path."
my_logger.error(msg)
parser.error(msg)
if args.rename and (args.OLDPATH is None or args.NEWPATH is None or args.ENDPOINT is None):
msg = "Option rename (--rename) requires endpoint name (--endpoint), old path (--oldpath), and new path (--newpath)."
my_logger.error(msg)
parser.error(msg)
if args.cancel_task and (args.TASK_ID is None):
msg = "Option --cancel-task requires --task-id."
my_logger.error(msg)
parser.error(msg)
if args.REQUESTINDEX:
opts.update({'ridx': args.REQUESTINDEX})
opts.update({'type': 'dsrqst'})
elif args.DATASETID:
dsid = args.DATASETID
if not re.match(r'^(ds){0,1}\d{3}\.\d{1}$', dsid, re.I):
msg = "Please specify the dataset id as dsnnn.n or nnn.n"
my_logger.error(msg)
parser.error(msg)
searchObj = re.search(r'^\d{3}\.\d{1}$', dsid)
if searchObj:
dsid = "ds%s" % dsid
opts.update({'dsid': dsid.lower()})
opts.update({'email': args.EMAIL})
opts.update({'type': 'dataset'})
elif args.list_files:
pass
elif args.transfer:
opts.update({"files": [{"source_file": args.SOURCE_FILE, "destination_file": args.DESTINATION_FILE}]})
elif args.delete:
opts.update({"files": [args.TARGET_FILE]})
elif args.get_task:
pass
elif args.task_list:
pass
elif args.mkdir:
pass
elif args.rename:
opts.update({"files": [{"old_path": args.OLDPATH, "new_path": args.NEWPATH}]})
elif args.cancel_task:
pass
else:
parser.print_help()
sys.exit(1)
opts.update({'print': True})
# convert all keys in opts to lower case
opts = {k.lower(): v for k,v in opts.items()}
return opts
#=========================================================================================
def configure_log(**kwargs):
""" Set up log file """
LOGPATH = '/glade/scratch/tcram/logs/globus'
LOGFILE = 'dsglobus.log'
if 'level' in kwargs:
loglevel = kwargs['level']
else:
loglevel = 'info'
LEVELS = { 'debug':logging.DEBUG,
'info':logging.INFO,
'warning':logging.WARNING,
'error':logging.ERROR,
'critical':logging.CRITICAL,
}
level = LEVELS.get(loglevel, logging.INFO)
my_logger.setLevel(level)
    handler = logging.handlers.RotatingFileHandler(LOGPATH + '/' + LOGFILE, maxBytes=10000000, backupCount=5)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
my_logger.addHandler(handler)
""" Console logger """
console_logger.setLevel(logging.INFO)
console = logging.StreamHandler()
console.setFormatter(formatter)
console_logger.addHandler(console)
return
#=========================================================================================
def handle_error(err, **kwargs):
if 'name' in kwargs:
name = kwargs['name']
else:
name = ""
msg = "{0} {1}".format(name, err)
my_logger.error(msg, exc_info=True)
if 'print_stdout' in kwargs and kwargs['print_stdout']:
sys.exit(msg)
return {'Error': msg}
#=========================================================================================
def iterable_response_to_dict(iterator):
""" Convert Globus paginated/iterable response object to a dict """
output_dict = {"DATA": []}
for item in iterator:
dat = item
try:
dat = item.data
except AttributeError:
pass
output_dict["DATA"].append(dat)
return output_dict
#=========================================================================================
def _key_to_keyfunc(k):
"""
We allow for 'keys' which are functions that map columns onto value
types -- they may do formatting or inspect multiple values on the
object. In order to support this, wrap string keys in a simple function
that does the natural lookup operation, but return any functions we
receive as they are.
"""
# if the key is a string, then the "keyfunc" is just a basic lookup
# operation -- return that
if isinstance(k, six.string_types):
def lookup(x):
return x[k]
return lookup
# otherwise, the key must be a function which is executed on the item
# to produce a value -- return it verbatim
return k
#=========================================================================================
def print_table(iterable, headers_and_keys, print_headers=True):
# the iterable may not be safe to walk multiple times, so we must walk it
# only once -- however, to let us write things naturally, convert it to a
# list and we can assume it is safe to walk repeatedly
iterable = list(iterable)
# extract headers and keys as separate lists
headers = [h for (h, k) in headers_and_keys]
keys = [k for (h, k) in headers_and_keys]
# convert all keys to keyfuncs
keyfuncs = [_key_to_keyfunc(key) for key in keys]
# use the iterable to find the max width of an element for each column, in
# the same order as the headers_and_keys array
# use a special function to handle empty iterable
def get_max_colwidth(kf):
def _safelen(x):
try:
return len(x)
except TypeError:
return len(str(x))
lengths = [_safelen(kf(i)) for i in iterable]
if not lengths:
return 0
else:
return max(lengths)
widths = [get_max_colwidth(kf) for kf in keyfuncs]
# handle the case in which the column header is the widest thing
widths = [max(w, len(h)) for w, h in zip(widths, headers)]
# create a format string based on column widths
format_str = " | ".join("{:" + str(w) + "}" for w in widths)
def none_to_null(val):
if val is None:
return "NULL"
return val
# print headers
if print_headers:
print(format_str.format(*[h for h in headers]))
print(format_str.format(*["-" * w for w in widths]))
# print the rows of data
for i in iterable:
print(format_str.format(*[none_to_null(kf(i)) for kf in keyfuncs]))
#=========================================================================================
def colon_formatted_print(data, named_fields):
maxlen = max(len(n) for n, f in named_fields) + 1
for name, field in named_fields:
field_keyfunc = _key_to_keyfunc(field)
print("{} {}".format((name + ":").ljust(maxlen), field_keyfunc(data)))
#=========================================================================================
""" Set up logging """
my_logger = logging.getLogger(__name__)
console_logger = logging.getLogger('console')
configure_log(level='info')
if __name__ == "__main__":
from_pipe = not os.isatty(sys.stdin.fileno())
if from_pipe:
from_pipe = select.select([sys.stdin,],[],[],0.0)[0]
if len(sys.argv) > 1:
main()
elif from_pipe:
json_input = read_json_from_stdin()
main(json_input=json_input)
else:
main()
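# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): the multi-file
# transfer documented in the epilog can also be driven programmatically,
# assuming main() accepts an already-parsed dict via json_input, as the
# piped-input branch above suggests. Endpoint names and file paths are the
# placeholder values from the epilog example.
#
#   transfer_request = {
#       "action": "transfer",
#       "source_endpoint": "rda-glade",
#       "destination_endpoint": "rda-quasar",
#       "verify_checksum": True,
#       "files": [
#           {"source_file": "/data/ds999.9/file1.tar",
#            "destination_file": "/ds999.9/file1.tar"},
#       ],
#   }
#   main(json_input=transfer_request)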
|
the-stack_0_23058 | from typing import Any, Callable, Coroutine, Tuple, TypeVar, Protocol, Union, overload
from discord import Embed, Message, Emoji, PartialEmoji
T = TypeVar("T")
IT = TypeVar("IT")
GeneratorOutput = Union[
Tuple[Embed, str],
Tuple[str, Embed],
Embed,
str,
]
Coro = Coroutine[Any, Any, T]
Emote = Union[Emoji, PartialEmoji, str]
class PaginatorGenerator(Protocol[IT]):
"""A protocol implementation or typing for paginator generator"""
@overload
def __call__(self, item: IT) -> Coro[GeneratorOutput]:
...
@overload
def __call__(self, item: IT, position: int) -> Coro[GeneratorOutput]:
...
@overload
def __call__(self, item: IT, position: int, message: Message) -> Coro[GeneratorOutput]:
...
def __call__(self, item: IT, position: int, message: Message, emote: Emote) -> Coro[GeneratorOutput]:
...
PaginatorValidator = Callable[[IT], bool]
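# Hedged illustration (not from the original module): a coroutine that
# satisfies PaginatorGenerator for IT=str, plus a matching PaginatorValidator.
# The page title and description below are made-up placeholders.
async def example_page_generator(item: str, position: int, message: Message, emote: Emote) -> GeneratorOutput:
    # Returning (Embed, str) is one of the allowed GeneratorOutput shapes;
    # a bare Embed or a bare str would also type-check.
    return Embed(title=f"Page {position}", description=item), item


def example_validator(item: str) -> bool:
    # Only paginate non-empty items.
    return bool(item)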
|
the-stack_0_23059 | from RestrictedPython import compile_restricted_exec
from tests.helper import restricted_exec
BAD_ATTR_UNDERSCORE = """\
def bad_attr():
some_ob = object()
some_ob._some_attr = 15
"""
def test_RestrictingNodeTransformer__visit_Attribute__1():
"""It is an error if a bad attribute name is used."""
result = compile_restricted_exec(BAD_ATTR_UNDERSCORE)
assert result.errors == (
'Line 3: "_some_attr" is an invalid attribute name because it '
'starts with "_".',)
BAD_ATTR_ROLES = """\
def bad_attr():
some_ob = object()
some_ob.abc__roles__
"""
def test_RestrictingNodeTransformer__visit_Attribute__2():
"""It is an error if a bad attribute name is used."""
result = compile_restricted_exec(BAD_ATTR_ROLES)
assert result.errors == (
'Line 3: "abc__roles__" is an invalid attribute name because it '
'ends with "__roles__".',)
TRANSFORM_ATTRIBUTE_ACCESS = """\
def func():
return a.b
"""
def test_RestrictingNodeTransformer__visit_Attribute__3(mocker):
"""It transforms the attribute access to `_getattr_`."""
glb = {
'_getattr_': mocker.stub(),
'a': [],
'b': 'b'
}
restricted_exec(TRANSFORM_ATTRIBUTE_ACCESS, glb)
glb['func']()
glb['_getattr_'].assert_called_once_with([], 'b')
ALLOW_UNDERSCORE_ONLY = """\
def func():
some_ob = object()
some_ob._
"""
def test_RestrictingNodeTransformer__visit_Attribute__4():
"""It allows `_` as attribute name."""
result = compile_restricted_exec(ALLOW_UNDERSCORE_ONLY)
assert result.errors == ()
def test_RestrictingNodeTransformer__visit_Attribute__5(
mocker):
"""It transforms writing to an attribute to `_write_`."""
glb = {
'_write_': mocker.stub(),
'a': mocker.stub(),
}
glb['_write_'].return_value = glb['a']
restricted_exec("a.b = 'it works'", glb)
glb['_write_'].assert_called_once_with(glb['a'])
assert glb['a'].b == 'it works'
def test_RestrictingNodeTransformer__visit_Attribute__5_5(
mocker):
"""It transforms deleting of an attribute to `_write_`."""
glb = {
'_write_': mocker.stub(),
'a': mocker.stub(),
}
glb['a'].b = 'it exists'
glb['_write_'].return_value = glb['a']
restricted_exec("del a.b", glb)
glb['_write_'].assert_called_once_with(glb['a'])
assert not hasattr(glb['a'], 'b')
DISALLOW_TRACEBACK_ACCESS = """
try:
raise Exception()
except Exception as e:
tb = e.__traceback__
"""
def test_RestrictingNodeTransformer__visit_Attribute__6():
"""It denies access to the __traceback__ attribute."""
result = compile_restricted_exec(DISALLOW_TRACEBACK_ACCESS)
assert result.errors == (
'Line 5: "__traceback__" is an invalid attribute name because '
'it starts with "_".',)
TRANSFORM_ATTRIBUTE_ACCESS_FUNCTION_DEFAULT = """
def func_default(x=a.a):
return x
"""
def test_RestrictingNodeTransformer__visit_Attribute__7(
mocker):
"""It transforms attribute access in function default kw to `_write_`."""
_getattr_ = mocker.Mock()
_getattr_.side_effect = getattr
glb = {
'_getattr_': _getattr_,
'a': mocker.Mock(a=1),
}
restricted_exec(TRANSFORM_ATTRIBUTE_ACCESS_FUNCTION_DEFAULT, glb)
_getattr_.assert_has_calls([mocker.call(glb['a'], 'a')])
assert glb['func_default']() == 1
def test_RestrictingNodeTransformer__visit_Attribute__8(
mocker):
"""It transforms attribute access in lamda default kw to `_write_`."""
_getattr_ = mocker.Mock()
_getattr_.side_effect = getattr
glb = {
'_getattr_': _getattr_,
'b': mocker.Mock(b=2)
}
restricted_exec('lambda_default = lambda x=b.b: x', glb)
_getattr_.assert_has_calls([mocker.call(glb['b'], 'b')])
assert glb['lambda_default']() == 2
|
the-stack_0_23060 | import unittest
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
class TestSellarOpt(unittest.TestCase):
def test_sellar_opt(self):
import openmdao.api as om
from openmdao.test_suite.components.sellar_feature import SellarMDA
prob = om.Problem()
prob.model = SellarMDA()
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
# prob.driver.options['maxiter'] = 100
prob.driver.options['tol'] = 1e-8
prob.model.add_design_var('x', lower=0, upper=10)
prob.model.add_design_var('z', lower=0, upper=10)
prob.model.add_objective('obj')
prob.model.add_constraint('con1', upper=0)
prob.model.add_constraint('con2', upper=0)
prob.setup()
prob.set_solver_print(level=0)
# Ask OpenMDAO to finite-difference across the model to compute the gradients for the optimizer
prob.model.approx_totals()
prob.run_driver()
print('minimum found at')
assert_near_equal(prob.get_val('x')[0], 0., 1e-5)
assert_near_equal(prob.get_val('z'), [1.977639, 0.], 1e-5)
        print('minimum objective')
assert_near_equal(prob.get_val('obj')[0], 3.18339395045, 1e-5)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_23063 | """
Mask R-CNN
Common utility functions and classes.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import numpy as np
import skimage.color
import skimage.io
import logging
log = logging.getLogger('__main__.'+__name__)
############################################################
# Dataset
############################################################
class Dataset(object):
"""The base class for dataset classes.
To use it, create a new class that adds functions specific to the dataset
you want to use. For example:
class CatsAndDogsDataset(Dataset):
def load_cats_and_dogs(self):
...
def load_mask(self, image_id, datacfg=None, config=None):
...
def image_reference(self, image_id):
...
See COCODataset and ShapesDataset as examples.
"""
def __init__(self, class_map=None):
log.info("-------------------------------->")
self._image_ids = []
self.image_info = []
# Background is always the first class
self.classinfo = [{"source": "", "id": 0, "name": "BG"}]
self.source_class_ids = {}
self.source_names = []
# ## Additional Information for inheritance
# self.class_from_source_map = {}
# self.classname_from_source_map = {}
# self.classname_from_sourcename_map = {}
# self.image_from_source_map = {}
# self.sources = []
# self.source_class_ids = {}
# self.class_names = []
# self.class_ids = None
# self.num_images = 0
# self.num_classes = 0
log.debug("classinfo: {}".format(self.classinfo))
def add_class(self, source, idx, class_name, lbl_id=None, color=None):
assert "." not in source, "Source name cannot contain a dot"
# Does the class exist already?
for info in self.classinfo:
if info['source'] == source and info["id"] == idx:
# source.idx combination already available, skip
return
# Add the class
classinfo = {
"source": source,
"id": idx,
"lbl_id": lbl_id,
"name": class_name,
"color": color,
}
## Add sources separately, for cross training support
if source not in self.source_names:
self.source_names.append(source)
self.classinfo.append(classinfo)
def add_image(self, source, image_id, path, **kwargs):
image_info = {
"id": image_id,
"source": source,
"path": path,
}
image_info.update(kwargs)
self.image_info.append(image_info)
def image_reference(self, image_id, info=None, datacfg=None):
"""Return a link to the image in its source Website or details about
the image that help looking it up or debugging it.
Override for your dataset, but pass to this function
if you encounter images not in your dataset.
"""
return ""
def prepare(self, class_map=None):
"""Prepares the Dataset class for use.
TODO: class map is not supported yet. When done, it should handle mapping
classes from different datasets to the same class ID.
"""
log.info("-------------------------------->")
def clean_name(name):
log.debug("name: {}".format(name))
"""Returns a shorter version of object names for cleaner display."""
return ",".join(name.split(",")[:1])
# Build (or rebuild) everything else from the info dicts.
log.debug("classinfo: {}".format(self.classinfo))
self.num_classes = len(self.classinfo)
self.class_ids = np.arange(self.num_classes)
self.class_names = [clean_name(c["name"]) for c in self.classinfo]
for i,ci in enumerate(self.classinfo):
log.debug("self.class_ids[i]: {}".format(self.class_ids[i]))
ci['id'] = self.class_ids[i]
log.debug("num_classes: {}".format(self.num_classes))
log.debug("class_names: {}".format(self.class_names))
log.debug("class_ids: {}".format(self.class_ids))
## Not sure, if required, but in case of name conflicts can try it out
## self.class_names = [clean_name(c["name"]+"."+str(c["id"])) for c in self.classinfo]
self.num_images = len(self.image_info)
self._image_ids = np.arange(self.num_images)
# Mapping from source class and image IDs to internal IDs
self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): _id
for info, _id in zip(self.classinfo, self.class_ids)}
self.classname_from_source_map = {"{}.{}".format(info['source'], info['id']): name
for info, name in zip(self.classinfo, self.class_names)}
self.classname_from_sourcename_map = {"{}.{}".format(info['source'], info['name']): _id
for info, _id in zip(self.classinfo, self.class_ids)}
## Not sure, if required, but in case of name conflicts can try it out
# self.classname_from_source_map = {"{}.{}".format(info['source'], info['id']): name+"."+str(info["id"])
# for info, name in zip(self.classinfo, self.class_names)}
self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): _id
for info, _id in zip(self.image_info, self.image_ids)}
# Map sources to class_ids they support
self.sources = list(set([i['source'] for i in self.classinfo]))
# self.source_class_ids = {}
# Loop over datasets
for source in self.sources:
self.source_class_ids[source] = []
# Find classes that belong to this dataset
for i, info in enumerate(self.classinfo):
# Include BG class in all datasets
if i == 0 or source == info['source']:
self.source_class_ids[source].append(i)
def map_source_class_id(self, source_class_id):
"""Takes a source class ID and returns the int class ID assigned to it.
For example:
dataset.map_source_class_id("coco.12") -> 23
"""
return self.class_from_source_map[source_class_id]
def get_classid_from_source_class_id(self, source_class_id):
"""wrapper around, map_source_class_id, as this name is more intutive
"""
return self.map_source_class_id(source_class_id)
def get_classid_from_source_class_name(self, source_class_name):
"""Takes a source class ID and returns the int class ID assigned to it.
For example:
dataset.classname_from_sourcename_map("coco.labelName") -> 23 (TBD put the class_name agaist 23)
"""
return self.classname_from_sourcename_map[source_class_name]
def get_classname_from_source_class_id(self, source_class_id):
"""Takes a source class ID and returns the int class ID assigned to it.
For example:
dataset.classname_from_source_map("coco.12") -> 23 (TBD put the class_name agaist 23)
"""
return self.classname_from_source_map[source_class_id]
def get_source_class_id(self, class_id, source):
"""Map an internal class ID to the corresponding class ID in the source dataset."""
info = self.classinfo[class_id]
assert info['source'] == source
return info['id']
@property
def image_ids(self):
return self._image_ids
def source_image_link(self, image_id):
"""Returns the path or URL to the image.
Override this to return a URL to the image if it's available online for easy
debugging.
"""
return self.image_info[image_id]["path"]
def load_image(self, image_id, datacfg=None, config=None):
"""Load the specified image and return a [H,W,3] Numpy array.
"""
image = None
try:
# Load image
image_path = self.image_info[image_id]['path']
image = skimage.io.imread(image_path)
# log.debug("load_image::image_path, image.ndim: {},{}".format(image_path, image.ndim))
# If grayscale. Convert to RGB for consistency.
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
except Exception as e:
log.exception("Error reading image: {}".format(image_path), exc_info=True)
raise
finally:
return image
def load_mask(self, image_id, datacfg=None, config=None):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. Override this
        method to load instance masks and return them in the form of an
array of binary masks of shape [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
a binary mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# Override this function to load a mask from your dataset.
# Otherwise, it returns an empty mask.
logging.warning("You are using the default load_mask(), maybe you need to define your own one.")
mask = np.empty([0, 0, 0])
class_ids = np.empty([0], np.int32)
class_labels = np.empty([0], np.int32)
info = self.image_info
keys = ['image_name', 'image_id', 'image_source', 'class_ids', 'class_labels']
values = ["{},{},{},{}, {}".format(info[image_id]['id'],info[image_id]['source'], image_id, class_ids, class_labels)]
return mask, class_ids, keys, values
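# Hedged usage sketch (not part of the original module): how a minimal subclass
# is expected to register classes and images before prepare() builds the lookup
# maps. The source name, class names and image path are placeholders.
class _ToyDataset(Dataset):
    def load_toy(self):
        # idx 0 is reserved for the implicit "BG" class added in __init__.
        self.add_class("toy", 1, "widget")
        self.add_class("toy", 2, "gadget")
        self.add_image("toy", image_id=0, path="/tmp/toy/img0.png")

# ds = _ToyDataset()
# ds.load_toy()
# ds.prepare()       # builds class_ids, class_names and the source maps
# ds.num_classes     # -> 3 (BG + the two classes added above)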
|
the-stack_0_23069 | # -*- coding: utf-8 -*-
"""Overall statistics functions."""
from __future__ import division
import math
import operator as op
from functools import reduce
from .pycm_interpret import *
from .pycm_ci import kappa_SE_calc, CI_calc, SE_calc
from .pycm_util import complement
def brier_score_calc(
classes,
prob_vector,
actual_vector,
sample_weight=None,
pos_class=None):
"""
Calculate Brier score.
:param classes: confusion matrix classes
:type classes: list
:param prob_vector: probability vector
:type prob_vector: python list or numpy array
:param actual_vector: actual vector
:type actual_vector: python list or numpy array
:param sample_weight: sample weights list
:type sample_weight: list
:param pos_class: positive class name
:type pos_class: int/str
:return: Brier score as float
"""
try:
vector_length = len(actual_vector)
if sample_weight is None:
sample_weight = [1] * vector_length
weight_sum = sum(sample_weight)
if pos_class is None:
pos_class = max(classes)
result = 0
for index, item in enumerate(actual_vector):
filtered_item = 0
if item == pos_class:
filtered_item = 1
result += (sample_weight[index] / weight_sum) * \
(filtered_item - prob_vector[index])**2
return result
except Exception:
return "None"
def alpha2_calc(TOP, P, ACC, POP, classes, max_iter=200, epsilon=0.0001):
"""
Calculate Aickin's alpha.
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param P: number of actual positives per class
:type P: dict
:param ACC: accuracy
:type ACC: float
:param POP: population or total number of samples per class
:type POP: dict
:param classes: confusion matrix classes
:type classes: list
:param max_iter: maximum iteration
:type max_iter: int
:param epsilon: difference threshold
:type epsilon: float
:return: Aickin's alpha as float
"""
try:
p_A = {i: TOP[i] / POP[i] for i in classes}
p_B = {i: P[i] / POP[i] for i in classes}
step = 1
alpha = 0
alpha_prev = 0
while(True):
p_e = 0
for i in classes:
p_e += (p_A[i] * p_B[i])
alpha_prev = alpha
alpha = reliability_calc(p_e, ACC)
for i in classes:
p_A[i] = TOP[i] / \
(((1 - alpha) + alpha * p_B[i] / p_e) * POP[i])
p_B[i] = P[i] / (((1 - alpha) + alpha * p_A[i] / p_e) * POP[i])
if step > max_iter or abs(alpha - alpha_prev) < epsilon:
break
step += 1
return alpha
except Exception:
return "None"
def alpha_calc(RACC, ACC, POP):
"""
Calculate Unweighted Krippendorff's alpha.
:param RACC: random accuracy
:type RACC: float
:param ACC: accuracy
:type ACC: float
:param POP: population or total number of samples
:type POP: int
:return: unweighted alpha as float
"""
try:
epsi = 1 / (2 * POP)
p_a = (1 - epsi) * ACC + epsi
p_e = RACC
return reliability_calc(p_e, p_a)
except Exception:
return "None"
def weighted_alpha_calc(classes, table, P, TOP, POP, weight):
"""
Calculate Weighted Krippendorff's alpha.
:param classes: confusion matrix classes
:type classes: list
:param table: input confusion matrix
:type table: dict
:param P: number of actual positives per class
:type P: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param POP: population or total number of samples per class
:type POP: dict
:param weight: weight matrix
:type weight: dict
:return: weighted alpha as float
"""
p_e = 0
p_a = 0
population = list(POP.values())[0]
epsi = 1 / (2 * population)
try:
w_max = max(map(lambda x: max(x.values()), weight.values()))
for i in classes:
for j in classes:
v_i_j = 1 - weight[i][j] / w_max
p_e += (((P[i] + TOP[j]) / (POP[i] * 2)) ** 2) * v_i_j
p_a += table[i][j] * v_i_j / POP[i]
p_a = (1 - epsi) * p_a + epsi
weighted_alpha = reliability_calc(p_e, p_a)
return weighted_alpha
except Exception:
return "None"
def B_calc(classes, TP, TOP, P):
"""
Calculate Bangdiwala's B (B).
:param classes: confusion matrix classes
:type classes: list
:param TP: true positive
:type TP: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param P: number of actual positives per class
:type P: dict
:return: B as float
"""
try:
up = 0
down = 0
for i in classes:
up += TP[i]**2
down += TOP[i] * P[i]
B = up / down
return B
except Exception:
return "None"
def ARI_calc(classes, table, TOP, P, POP):
"""
Calculate Adjusted Rand index (ARI).
:param classes: confusion matrix classes
:type classes: list
:param table: input confusion matrix
:type table: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param P: number of actual positives per class
:type P: dict
:param POP: population or total number of samples
:type POP: int
:return: ARI as float
"""
try:
table_sum = 0
TOP_sum = 0
P_sum = 0
nc2 = ncr(POP, 2)
for i in classes:
TOP_sum += ncr(TOP[i], 2)
P_sum += ncr(P[i], 2)
for j in classes:
table_sum += ncr(table[i][j], 2)
x = (TOP_sum * P_sum) / nc2
ARI = (table_sum - x) / ((P_sum + TOP_sum) / 2 - x)
return ARI
except Exception:
return "None"
def pearson_C_calc(chi_square, POP):
"""
Calculate Pearson's C (C).
:param chi_square: chi squared
:type chi_square: float
:param POP: population or total number of samples
:type POP: int
:return: C as float
"""
try:
C = math.sqrt(chi_square / (POP + chi_square))
return C
except Exception:
return "None"
def RCI_calc(mutual_information, reference_entropy):
"""
Calculate Relative classifier information (RCI).
:param mutual_information: mutual information
:type mutual_information: float
:param reference_entropy: reference entropy
:type reference_entropy: float
:return: RCI as float
"""
try:
return mutual_information / reference_entropy
except Exception:
return "None"
def AUNP_calc(classes, P, POP, AUC_dict):
"""
Calculate AUNP.
:param classes: confusion matrix classes
:type classes: list
:param P: number of actual positives per class
:type P: dict
:param POP: population or total number of samples per class
:type POP: dict
:param AUC_dict: Area under the ROC curve (AUC) for each class
:type AUC_dict: dict
:return: AUNP as float
"""
try:
result = 0
for i in classes:
result += (P[i] / POP[i]) * AUC_dict[i]
return result
except Exception:
return "None"
def CBA_calc(classes, table, TOP, P):
"""
Calculate Class balance accuracy (CBA).
:param classes: confusion matrix classes
:type classes: list
:param table: input confusion matrix
:type table: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param P: number of actual positives per class
:type P: dict
:return: CBA as float
"""
try:
result = 0
class_number = len(classes)
for i in classes:
result += ((table[i][i]) / (max(TOP[i], P[i])))
return result / class_number
except Exception:
return "None"
def RR_calc(classes, TOP):
"""
Calculate Global performance index (RR).
:param classes: confusion matrix classes
:type classes: list
:param TOP: number of positives in predict vector per class
:type TOP: dict
:return: RR as float
"""
try:
class_number = len(classes)
result = sum(list(TOP.values()))
return result / class_number
except Exception:
return "None"
def overall_MCC_calc(classes, table, TOP, P):
"""
Calculate Overall_MCC.
:param classes: confusion matrix classes
:type classes: list
:param table: input confusion matrix
:type table: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param P: number of actual positives per class
:type P: dict
:return: Overall_MCC as float
"""
try:
cov_x_y = 0
cov_x_x = 0
cov_y_y = 0
matrix_sum = sum(list(TOP.values()))
for i in classes:
cov_x_x += TOP[i] * (matrix_sum - TOP[i])
cov_y_y += P[i] * (matrix_sum - P[i])
cov_x_y += (table[i][i] * matrix_sum - P[i] * TOP[i])
return cov_x_y / (math.sqrt(cov_y_y * cov_x_x))
except Exception:
return "None"
def convex_combination(classes, TP, TOP, P, class_name, modified=False):
"""
    Calculate the convex combination coefficient used in Overall_CEN/Overall_MCEN.
:param classes: confusion matrix classes
:type classes: list
:param TP: true positive
:type TP: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param P: number of actual positives per class
:type P: dict
:param class_name: reviewed class name
:type class_name: any valid type
:param modified: modified mode flag
:type modified: bool
:return: Overall_CEN coefficient as float
"""
try:
class_number = len(classes)
alpha = 1
if class_number == 2:
alpha = 0
matrix_sum = sum(list(TOP.values()))
TP_sum = sum(list(TP.values()))
up = TOP[class_name] + P[class_name]
down = 2 * matrix_sum
if modified:
down -= (alpha * TP_sum)
up -= TP[class_name]
return up / down
except Exception:
return "None"
def overall_CEN_calc(classes, TP, TOP, P, CEN_dict, modified=False):
"""
Calculate Overall_CEN (Overall confusion entropy).
:param classes: confusion matrix classes
:type classes: list
:param TP: true positive
:type TP: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param P: number of actual positives per class
:type P: dict
:param CEN_dict: CEN dictionary for each class
:type CEN_dict: dict
:param modified: modified mode flag
:type modified: bool
:return: Overall_CEN(MCEN) as float
"""
try:
result = 0
for i in classes:
result += (convex_combination(classes, TP, TOP, P, i, modified) *
CEN_dict[i])
return result
except Exception:
return "None"
def ncr(n, r):
"""
Calculate the combination of n and r.
:param n: n
:type n: int
:param r: r
:type r :int
:return: the combination of n and r as int
"""
if r > n:
return 0
r = min(r, n - r)
numer = reduce(op.mul, range(n, n - r, -1), 1)
denom = reduce(op.mul, range(1, r + 1), 1)
return numer // denom
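# Examples: ncr(5, 2) == 10; ncr(2, 5) == 0 because r > n is handled explicitly.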
def p_value_calc(TP, POP, NIR):
"""
Calculate p_value.
:param TP: true positive
:type TP: dict
:param POP: population or total number of samples
:type POP: int
:param NIR: no information rate
:type NIR: float
:return: p_value as float
"""
try:
n = POP
x = sum(list(TP.values()))
p = NIR
result = 0
for j in range(x):
result += ncr(n, j) * (p ** j) * ((1 - p) ** (n - j))
return 1 - result
except Exception:
return "None"
def NIR_calc(P, POP):
"""
Calculate No information rate (NIR).
:param P: number of actual positives per class
:type P: dict
:param POP: population or total number of samples
:type POP: int
:return: NIR as float
"""
try:
max_P = max(list(P.values()))
length = POP
return max_P / length
except Exception:
return "None"
def hamming_calc(TP, POP):
"""
Calculate Hamming loss.
:param TP: true positive
:type TP: dict
:param POP: population or total number of samples
:type POP: int
:return: Hamming loss as float
"""
try:
length = POP
return (1 / length) * (length - sum(TP.values()))
except Exception:
return "None"
def zero_one_loss_calc(TP, POP):
"""
Calculate Zero-one loss.
:param TP: true Positive
:type TP: dict
:param POP: population or total number of samples
:type POP: int
:return: Zero-one loss as integer
"""
try:
length = POP
return (length - sum(TP.values()))
except Exception:
return "None"
def entropy_calc(item, POP):
"""
Calculate Reference and Response likelihood.
:param item: number of positives in actual or predict vector per class (P or TOP)
:type item: dict
:param POP: population or total number of samples per class
:type POP: dict
:return: Reference or Response likelihood as float
"""
try:
result = 0
for i in item.keys():
likelihood = item[i] / POP[i]
if likelihood != 0:
result += likelihood * math.log(likelihood, 2)
return -result
except Exception:
return "None"
def weighted_kappa_calc(classes, table, P, TOP, POP, weight):
"""
Calculate Weighted kappa.
:param classes: confusion matrix classes
:type classes: list
:param table: input confusion matrix
:type table: dict
:param P: number of actual positives per class
:type P: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param POP: population or total number of samples per class
:type POP: dict
:param weight: weight matrix
:type weight: dict
:return: Weighted kappa as float
"""
p_e = 0
p_a = 0
try:
w_max = max(map(lambda x: max(x.values()), weight.values()))
for i in classes:
for j in classes:
v_i_j = 1 - weight[i][j] / w_max
p_e += P[i] * TOP[j] * v_i_j / (POP[i]**2)
p_a += table[i][j] * v_i_j / POP[i]
weighted_kappa = reliability_calc(p_e, p_a)
return weighted_kappa
except Exception:
return "None"
def kappa_no_prevalence_calc(overall_accuracy):
"""
Calculate Kappa no prevalence.
:param overall_accuracy: overall accuracy
:type overall_accuracy: float
:return: Kappa no prevalence as float
"""
try:
result = 2 * overall_accuracy - 1
return result
except Exception:
return "None"
def cross_entropy_calc(TOP, P, POP):
"""
Calculate Cross entropy.
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param P: number of actual positives per class
:type P: dict
:param POP: population or total number of samples per class
:type POP: dict
:return: cross entropy as float
"""
try:
result = 0
for i in TOP.keys():
reference_likelihood = P[i] / POP[i]
response_likelihood = TOP[i] / POP[i]
if response_likelihood != 0 and reference_likelihood != 0:
result += reference_likelihood * \
math.log(response_likelihood, 2)
return -result
except Exception:
return "None"
def joint_entropy_calc(classes, table, POP):
"""
Calculate Joint entropy.
:param classes: confusion matrix classes
:type classes: list
:param table: input confusion matrix
:type table: dict
:param POP: population or total number of samples per class
:type POP: dict
:return: joint entropy as float
"""
try:
result = 0
for i in classes:
for j in classes:
p_prime = table[i][j] / POP[i]
if p_prime != 0:
result += p_prime * math.log(p_prime, 2)
return -result
except Exception:
return "None"
def conditional_entropy_calc(classes, table, P, POP):
"""
Calculate Conditional entropy.
:param classes: confusion matrix classes
:type classes: list
:param table: input confusion matrix
:type table: dict
:param P: number of actual positives per class
:type P: dict
:param POP: population or total number of samples per class
:type POP: dict
:return: conditional entropy as float
"""
try:
result = 0
for i in classes:
temp = 0
for j in classes:
p_prime = 0
if P[i] != 0:
p_prime = table[i][j] / P[i]
if p_prime != 0:
temp += p_prime * math.log(p_prime, 2)
result += temp * (P[i] / POP[i])
return -result
except Exception:
return "None"
def mutual_information_calc(response_entropy, conditional_entropy):
"""
Calculate Mutual information.
:param response_entropy: response entropy
:type response_entropy: float
:param conditional_entropy: conditional entropy
:type conditional_entropy: float
:return: mutual information as float
"""
try:
return response_entropy - conditional_entropy
except Exception:
return "None"
def kl_divergence_calc(P, TOP, POP):
"""
Calculate Kullback-Liebler (KL) divergence.
:param P: number of actual positives per class
:type P: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param POP: population or total number of samples per class
:type POP: dict
:return: KL divergence as float
"""
try:
result = 0
for i in TOP.keys():
reference_likelihood = P[i] / POP[i]
response_likelihood = TOP[i] / POP[i]
result += reference_likelihood * \
math.log((reference_likelihood / response_likelihood), 2)
return result
except Exception:
return "None"
def lambda_B_calc(classes, table, TOP, POP):
"""
Calculate Goodman and Kruskal's lambda B.
:param classes: confusion matrix classes
:type classes: list
:param table: input confusion matrix
:type table: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param POP: population or total number of samples
:type POP: int
:return: Goodman and Kruskal's lambda B as float
"""
try:
result = 0
length = POP
maxresponse = max(list(TOP.values()))
for i in classes:
result += max(list(table[i].values()))
result = (result - maxresponse) / (length - maxresponse)
return result
except Exception:
return "None"
def lambda_A_calc(classes, table, P, POP):
"""
Calculate Goodman and Kruskal's lambda A.
:param classes: confusion matrix classes
:type classes: list
:param table: input confusion matrix
:type table: dict
:param P: number of actual positives per class
:type P: dict
:param POP: population or total number of samples
:type POP: int
:return: Goodman and Kruskal's lambda A as float
"""
try:
result = 0
maxreference = max(list(P.values()))
length = POP
for i in classes:
col = []
for col_item in table.values():
col.append(col_item[i])
result += max(col)
result = (result - maxreference) / (length - maxreference)
return result
except Exception:
return "None"
def chi_square_calc(classes, table, TOP, P, POP):
"""
Calculate Chi-squared.
:param classes: confusion matrix classes
:type classes: list
:param table: input confusion matrix
:type table: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param P: number of actual positives per class
:type P: dict
:param POP: population or total number of samples per class
:type POP: dict
:return: chi-squared as float
"""
try:
result = 0
for i in classes:
for j in classes:
expected = (TOP[j] * P[i]) / (POP[i])
result += ((table[i][j] - expected)**2) / expected
return result
except Exception:
return "None"
def phi_square_calc(chi_square, POP):
"""
Calculate Phi-squared.
:param chi_square: chi squared
:type chi_square: float
:param POP: population or total number of samples
:type POP: int
:return: phi_squared as float
"""
try:
return chi_square / POP
except Exception:
return "None"
def cramers_V_calc(phi_square, classes):
"""
Calculate Cramer's V.
:param phi_square: phi_squared
:type phi_square: float
:param classes: confusion matrix classes
:type classes: list
:return: Cramer's V as float
"""
try:
return math.sqrt((phi_square / (len(classes) - 1)))
except Exception:
return "None"
def DF_calc(classes):
"""
Calculate Chi-squared degree of freedom (DF).
:param classes: confusion matrix classes
:type classes: list
:return: DF as int
"""
try:
return (len(classes) - 1)**2
except Exception:
return "None"
def reliability_calc(RACC, ACC):
"""
Calculate Reliability.
:param RACC: random accuracy
:type RACC: float
:param ACC: accuracy
:type ACC: float
:return: reliability as float
"""
try:
result = (ACC - RACC) / (1 - RACC)
return result
except Exception:
return "None"
def micro_calc(item1, item2):
"""
Calculate TPR, TNR, PPV, FNR, FPR, or F1 micro.
:param item1: item1 in micro averaging
:type item1:dict
:param item2: item2 in micro averaging
:type item2: dict
:return: PPV, TPR, TNR, FNR, FPR, or F1 micro as float
"""
try:
item1_sum = sum(item1.values())
item2_sum = sum(item2.values())
return item1_sum / (item1_sum + item2_sum)
except Exception:
return "None"
def macro_calc(item):
"""
    Calculate macro-averaged statistics (e.g. PPV_Macro and TPR_Macro).
:param item: True positive rate (TPR) or Positive predictive value (PPV)
:type item:dict
:return: PPV_Macro or TPR_Macro as float
"""
try:
item_sum = sum(item.values())
item_len = len(item.values())
return item_sum / item_len
except Exception:
return "None"
def PC_AC1_calc(P, TOP, POP):
"""
Calculate Percent chance agreement for Gwet's AC1.
:param P: number of actual positives per class
:type P: dict
:param TOP: number of positives in predict vector per class
:type TOP: dict
:param POP: population or total number of samples per class
:type POP:dict
:return: percent chance agreement as float
"""
try:
result = 0
classes = list(P.keys())
for i in classes:
pi = ((P[i] + TOP[i]) / (2 * POP[i]))
result += pi * (1 - pi)
result = result / (len(classes) - 1)
return result
except Exception:
return "None"
def PC_S_calc(classes):
"""
Calculate Percent chance agreement for Bennett-et-al.'s-S-score.
:param classes: confusion matrix classes
:type classes: list
:return: percent chance agreement as float
"""
try:
return 1 / (len(classes))
except Exception:
return "None"
def overall_jaccard_index_calc(jaccard_list):
"""
Calculate Overall Jaccard index.
:param jaccard_list: list of Jaccard index for each class
:type jaccard_list: list
:return: (Jaccard_sum, Jaccard_mean) as tuple
"""
try:
jaccard_sum = sum(jaccard_list)
jaccard_mean = jaccard_sum / len(jaccard_list)
return (jaccard_sum, jaccard_mean)
except Exception:
return "None"
def overall_accuracy_calc(TP, POP):
"""
Calculate Overall accuracy.
:param TP: true positive
:type TP: dict
:param POP: population or total number of samples
:type POP:int
:return: overall_accuracy as float
"""
try:
overall_accuracy = sum(TP.values()) / POP
return overall_accuracy
except Exception:
return "None"
def overall_random_accuracy_calc(item):
"""
Calculate Overall random accuracy.
:param item: random accuracy or random accuracy unbiased
:type item: dict
:return: overall random accuracy as float
"""
try:
return sum(item.values())
except Exception:
return "None"
def overall_statistics(**kwargs):
"""
Return Overall statistics.
:param kwargs: inputs
:type kwargs: dict
:return: overall statistics as dict
"""
result = {}
POP = kwargs["POP"]
population = list(POP.values())[0]
TP = kwargs["TP"]
P = kwargs["P"]
TOP = kwargs["TOP"]
table = kwargs["table"]
classes = kwargs["classes"]
result["Overall ACC"] = overall_accuracy_calc(TP, population)
result["Overall RACCU"] = overall_random_accuracy_calc(
kwargs["RACCU"])
result["Overall RACC"] = overall_random_accuracy_calc(kwargs["RACC"])
result["Kappa"] = reliability_calc(
result["Overall RACC"], result["Overall ACC"])
PC_AC1 = PC_AC1_calc(P, TOP, POP)
PC_S = PC_S_calc(classes)
result["Gwet AC1"] = reliability_calc(PC_AC1, result["Overall ACC"])
result["Bennett S"] = reliability_calc(PC_S, result["Overall ACC"])
result["Kappa Standard Error"] = kappa_SE_calc(
result["Overall ACC"],
result["Overall RACC"], population)
result["Kappa Unbiased"] = reliability_calc(
result["Overall RACCU"],
result["Overall ACC"])
result["Scott PI"] = result["Kappa Unbiased"]
result["Kappa No Prevalence"] = kappa_no_prevalence_calc(
result["Overall ACC"])
result["Kappa 95% CI"] = CI_calc(
result["Kappa"], result["Kappa Standard Error"])
result["Standard Error"] = SE_calc(result["Overall ACC"], population)
result["95% CI"] = CI_calc(result["Overall ACC"], result["Standard Error"])
result["Chi-Squared"] = chi_square_calc(classes, table, TOP, P, POP)
result["Phi-Squared"] = phi_square_calc(result["Chi-Squared"], population)
result["Cramer V"] = cramers_V_calc(result["Phi-Squared"], classes)
result["Response Entropy"] = entropy_calc(TOP, POP)
result["Reference Entropy"] = entropy_calc(P, POP)
result["Cross Entropy"] = cross_entropy_calc(TOP, P, POP)
result["Joint Entropy"] = joint_entropy_calc(classes, table, POP)
result["Conditional Entropy"] = conditional_entropy_calc(
classes, table, P, POP)
result["Mutual Information"] = mutual_information_calc(
result["Response Entropy"], result["Conditional Entropy"])
result["KL Divergence"] = kl_divergence_calc(P, TOP, POP)
result["Lambda B"] = lambda_B_calc(classes, table, TOP, population)
result["Lambda A"] = lambda_A_calc(classes, table, P, population)
result["Chi-Squared DF"] = DF_calc(classes)
result["Overall J"] = overall_jaccard_index_calc(list(
kwargs["jaccard_list"].values()))
result["Hamming Loss"] = hamming_calc(TP, population)
result["Zero-one Loss"] = zero_one_loss_calc(TP, population)
result["NIR"] = NIR_calc(P, population)
result["P-Value"] = p_value_calc(TP, population, result["NIR"])
result["Overall CEN"] = overall_CEN_calc(
classes, TP, TOP, P, kwargs["CEN_dict"])
result["Overall MCEN"] = overall_CEN_calc(
classes, TP, TOP, P, kwargs["MCEN_dict"], True)
result["Overall MCC"] = overall_MCC_calc(classes, table, TOP, P)
result["RR"] = RR_calc(classes, TOP)
result["CBA"] = CBA_calc(classes, table, TOP, P)
result["AUNU"] = macro_calc(kwargs["AUC_dict"])
result["AUNP"] = AUNP_calc(classes, P, POP, kwargs["AUC_dict"])
result["RCI"] = RCI_calc(
result["Mutual Information"],
result["Reference Entropy"])
result["Pearson C"] = pearson_C_calc(result["Chi-Squared"], population)
result["TPR Micro"] = result["Overall ACC"]
result["TPR Macro"] = macro_calc(kwargs["TPR"])
result["CSI"] = macro_calc(kwargs["ICSI_dict"])
result["ARI"] = ARI_calc(classes, table, TOP, P, population)
result["TNR Micro"] = micro_calc(item1=kwargs["TN"], item2=kwargs["FP"])
result["TNR Macro"] = macro_calc(kwargs["TNR"])
result["Bangdiwala B"] = B_calc(classes, TP, TOP, P)
result["Krippendorff Alpha"] = alpha_calc(
result["Overall RACCU"],
result["Overall ACC"],
population)
result["SOA1(Landis & Koch)"] = kappa_analysis_koch(result["Kappa"])
result["SOA2(Fleiss)"] = kappa_analysis_fleiss(result["Kappa"])
result["SOA3(Altman)"] = kappa_analysis_altman(result["Kappa"])
result["SOA4(Cicchetti)"] = kappa_analysis_cicchetti(result["Kappa"])
result["SOA5(Cramer)"] = V_analysis(result["Cramer V"])
result["SOA6(Matthews)"] = MCC_analysis(result["Overall MCC"])
result["FPR Macro"] = complement(result["TNR Macro"])
result["FNR Macro"] = complement(result["TPR Macro"])
result["PPV Macro"] = macro_calc(kwargs["PPV"])
result["ACC Macro"] = macro_calc(kwargs["ACC"])
result["F1 Macro"] = macro_calc(kwargs["F1"])
result["FPR Micro"] = complement(result["TNR Micro"])
result["FNR Micro"] = complement(result["TPR Micro"])
result["PPV Micro"] = result["TPR Micro"]
result["F1 Micro"] = result["TPR Micro"]
return result
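# ----------------------------------------------------------------------------
# Hedged demo (appended for illustration; the numbers are made up and do not
# come from a real confusion matrix). The helpers above are plain functions,
# so they can be exercised directly:
if __name__ == "__main__":
    # Chance-corrected agreement: (ACC - RACC) / (1 - RACC) = (0.8 - 0.5) / 0.5
    print(reliability_calc(0.5, 0.8))              # ~0.6
    # No-information rate: share of the majority class in the population
    print(NIR_calc({"a": 60, "b": 40}, 100))       # 0.6
    # Hamming loss: fraction of samples that are not true positives
    print(hamming_calc({"a": 50, "b": 30}, 100))   # ~0.2
    # Kappa no prevalence: 2 * overall accuracy - 1
    print(kappa_no_prevalence_calc(0.8))           # ~0.6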
|
the-stack_0_23070 | """Tests for PyArr."""
import pathlib
from aiopyarr.models.host_configuration import PyArrHostConfiguration
API_TOKEN = "1234567890abcdef1234567890abcdef"
TEST_HOST_CONFIGURATION = PyArrHostConfiguration(
api_token=API_TOKEN, ipaddress="127.0.0.1"
)
LIDARR_API = "v1"
RADARR_API = "v3"
READARR_API = "v1"
SONARR_API = "v3"
def load_fixture(filename) -> str:
"""Load a fixture."""
return (
pathlib.Path(__file__)
.parent.joinpath("fixtures", filename)
.read_text(encoding="utf8")
)
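# Hedged example (the fixture file name below is hypothetical): tests read raw
# JSON text relative to tests/fixtures and pair it with TEST_HOST_CONFIGURATION,
# e.g. payload = load_fixture("system-status.json").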
|
the-stack_0_23071 | # Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
from hoomd import _hoomd
from hoomd.jit import _jit
from hoomd.hpmc import field
from hoomd.hpmc import integrate
import hoomd
import tempfile
import shutil
import subprocess
import os
import numpy as np
class user(field._external):
R''' Define an external field imposed on all particles in the system.
Args:
code (str): C++ code to compile
        llvm_ir_file (str): File name of the LLVM IR file to load.
clang_exec (str): The Clang executable to use
Potentials in jit.external behave similarly to external fields assigned via
hpmc.field.callback. Potentials added using external.user are added to the total
energy calculation in :py:mod:`hpmc <hoomd.hpmc>` integrators. The
:py:class:`user` external field takes C++ code, JIT compiles it at run time
    and executes the code natively in the MC loop with full performance. It
enables researchers to quickly and easily implement custom energetic
interactions without the need to modify and recompile HOOMD.
.. rubric:: C++ code
Supply C++ code to the *code* argument and :py:class:`user` will compile the code and call it to evaluate
forces. Compilation assumes that a recent ``clang`` installation is on your PATH. This is convenient
when the energy evaluation is simple or needs to be modified in python. More complex code (i.e. code that
requires auxiliary functions or initialization of static data arrays) should be compiled outside of HOOMD
and provided via the *llvm_ir_file* input (see below).
The text provided in *code* is the body of a function with the following signature:
.. code::
float eval(const BoxDim& box,
unsigned int type_i,
const vec3<Scalar>& r_i,
const quat<Scalar>& q_i
Scalar diameter,
Scalar charge
)
    * ``vec3`` and ``quat`` are defined in HOOMDMath.h.
* *box* is the system box.
* *type_i* is the particle type.
* *r_i* is the particle position
* *q_i* the particle orientation.
* *diameter* the particle diameter.
* *charge* the particle charge.
* Your code *must* return a value.
Once initialized, the following log quantities are provided to analyze.log:
* **external_field_jit** -- total energy of the field
Example:
.. code-block:: python
gravity = """return r_i.z + box.getL().z/2;"""
external = hoomd.jit.external.user(mc=mc, code=gravity)
.. rubric:: LLVM IR code
    You can compile outside of HOOMD and provide the path
to the LLVM IR file in *llvm_ir_file*. A compatible file contains an extern "C" eval function with this signature:
.. code::
float eval(const BoxDim& box, unsigned int type_i, const vec3<Scalar>& r_i, const quat<Scalar>& q_i, Scalar diameter, Scalar charge)
    ``vec3`` and ``quat`` are defined in HOOMDMath.h.
Compile the file with clang: ``clang -O3 --std=c++11 -DHOOMD_LLVMJIT_BUILD -I /path/to/hoomd/include -S -emit-llvm code.cc`` to produce
the LLVM IR in ``code.ll``.
.. versionadded:: 2.5
'''
def __init__(self, mc, code=None, llvm_ir_file=None, clang_exec=None):
super(user, self).__init__()
# raise an error if this run is on the GPU
cls = None
if hoomd.context.current.device.cpp_exec_conf.isCUDAEnabled():
hoomd.context.current.device.cpp_msg.error(
"JIT forces are not supported on the GPU\n")
raise RuntimeError("Error initializing force energy")
else:
if isinstance(mc, integrate.sphere):
cls = _jit.ExternalFieldJITSphere
elif isinstance(mc, integrate.convex_polygon):
cls = _jit.ExternalFieldJITConvexPolygon
elif isinstance(mc, integrate.simple_polygon):
cls = _jit.ExternalFieldJITSimplePolygon
elif isinstance(mc, integrate.convex_polyhedron):
cls = _jit.ExternalFieldJITConvexPolyhedron
elif isinstance(mc, integrate.convex_spheropolyhedron):
cls = _jit.ExternalFieldJITSpheropolyhedron
elif isinstance(mc, integrate.ellipsoid):
cls = _jit.ExternalFieldJITEllipsoid
elif isinstance(mc, integrate.convex_spheropolygon):
cls = _jit.ExternalFieldJITSpheropolygon
elif isinstance(mc, integrate.faceted_ellipsoid):
cls = _jit.ExternalFieldJITFacetedEllipsoid
elif isinstance(mc, integrate.polyhedron):
cls = _jit.ExternalFieldJITPolyhedron
elif isinstance(mc, integrate.sphinx):
cls = _jit.ExternalFieldJITSphinx
elif isinstance(mc, integrate.sphere_union):
cls = _jit.ExternalFieldJITSphereUnion
elif isinstance(mc, integrate.convex_spheropolyhedron_union):
cls = _jit.ExternalFieldJITConvexPolyhedronUnion
else:
hoomd.context.current.device.cpp_msg.error(
"jit.field.user: Unsupported integrator.\n")
raise RuntimeError(
"Error initializing compute.position_lattice_field")
# Find a clang executable if none is provided
if clang_exec is not None:
clang = clang_exec
else:
clang = 'clang'
if code is not None:
llvm_ir = self.compile_user(code, clang)
else:
# IR is a text file
with open(llvm_ir_file, 'r') as f:
llvm_ir = f.read()
self.compute_name = "external_field_jit"
self.cpp_compute = cls(hoomd.context.current.system_definition,
hoomd.context.current.device.cpp_exec_conf,
llvm_ir)
hoomd.context.current.system.addCompute(self.cpp_compute,
self.compute_name)
self.mc = mc
self.enabled = True
self.log = False
def compile_user(self, code, clang_exec, fn=None):
R'''Helper function to compile the provided code into an executable
Args:
code (str): C++ code to compile
clang_exec (str): The Clang executable to use
fn (str): If provided, the code will be written to a file.
.. versionadded:: 2.3
'''
cpp_function = """
#include "hoomd/HOOMDMath.h"
#include "hoomd/VectorMath.h"
#include "hoomd/BoxDim.h"
extern "C"
{
float eval(const BoxDim& box,
unsigned int type_i,
const vec3<Scalar> r_i,
const quat<Scalar>& q_i,
Scalar diameter,
Scalar charge
)
{
"""
cpp_function += code
cpp_function += """
}
}
"""
include_path = os.path.dirname(hoomd.__file__) + '/include'
include_patsource = hoomd._hoomd.__hoomd_source_dir__
if clang_exec is not None:
clang = clang_exec
else:
clang = 'clang'
if fn is not None:
cmd = [
clang, '-O3', '--std=c++11', '-DHOOMD_LLVMJIT_BUILD', '-I',
include_path, '-I', include_patsource, '-S', '-emit-llvm', '-x',
'c++', '-o', fn, '-'
]
else:
cmd = [
clang, '-O3', '--std=c++11', '-DHOOMD_LLVMJIT_BUILD', '-I',
include_path, '-I', include_patsource, '-S', '-emit-llvm', '-x',
'c++', '-o', '-', '-'
]
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# pass C++ function to stdin
output = p.communicate(cpp_function.encode('utf-8'))
llvm_ir = output[0].decode()
if p.returncode != 0:
hoomd.context.current.device.cpp_msg.error(
"Error compiling provided code\n")
hoomd.context.current.device.cpp_msg.error("Command "
+ ' '.join(cmd) + "\n")
hoomd.context.current.device.cpp_msg.error(output[1].decode()
+ "\n")
raise RuntimeError("Error initializing force.")
return llvm_ir
|
the-stack_0_23072 | import logging
from os import path
from typing import Tuple
import pkg_resources
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as func
import torchvision
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
import face_pose_dataset as fpdata
from face_pose_dataset import core
from face_pose_dataset.estimation import interface, mtcnn
from face_pose_dataset.third_party.hopenet import hopenet
if torch.cuda.is_available():
cudnn.enabled = True # type: ignore
__all__ = ["HopenetEstimator"]
MODEL_PATH = pkg_resources.resource_stream(
"face_pose_dataset", "data/hopenet/hopenet_alpha1.pkl"
)
class HopenetEstimator(interface.Estimator):
def __init__(
self, snapshot_path=MODEL_PATH, gpu=-1,
):
logging.debug("[HOPENET] Loading...")
# Gpu and cpu compatibility as per Pytorch guidelines in:
# https://pytorch.org/docs/stable/notes/cuda.html#device-agnostic-code
if torch.cuda.is_available() and gpu >= 0:
self.device = torch.device("cuda:{}".format(gpu))
else:
self.device = torch.device("cpu")
logging.info("[HOPENET] Running on device %s", self.device)
# ResNet50 structure
self.model = hopenet.Hopenet(
torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66
)
self.model.to(self.device)
logging.info("[HOPENET] Loading snapshot...")
# Load snapshot
saved_state_dict = torch.load(snapshot_path, map_location=self.device)
self.model.load_state_dict(saved_state_dict)
self.transformations = transforms.Compose(
[
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
# Test the Model
self.model.eval() # Change model to 'eval' mode (BN uses moving mean/var).
self.idx_tensor = list(range(66))
self.idx_tensor = torch.FloatTensor(self.idx_tensor).to(self.device)
logging.info("[HOPENET] Loaded.")
def preprocess_image(self, frame, bbox):
res = mtcnn.extract_face(
frame, (224, 224), bbox, margin=(0.4, 0.7, 0.4, 0.1), normalize=False
)
# DONE: Test if color conversion is needed.
# res = cv2.cvtColor(res.astype("uint8"), cv2.COLOR_BGR2RGB)
return res
def run(self, input_images):
img = Image.fromarray(input_images)
# Transform
img = self.transformations(img)
img_shape = img.size()
img = img.view(1, img_shape[0], img_shape[1], img_shape[2])
img = Variable(img).to(self.device)
yaw, pitch, roll = self.model(img)
yaw_predicted = func.softmax(yaw, dim=1)
pitch_predicted = func.softmax(pitch, dim=1)
roll_predicted = func.softmax(roll, dim=1)
# Get continuous predictions in degrees.
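# The model predicts 66 bins of 3 degrees each; taking the expectation over bin
# indices and applying *3 - 99 maps the softmax output back to degrees in
# roughly [-99, 99].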
yaw_predicted = torch.sum(yaw_predicted.data[0] * self.idx_tensor) * 3 - 99
pitch_predicted = torch.sum(pitch_predicted.data[0] * self.idx_tensor) * 3 - 99
roll_predicted = torch.sum(roll_predicted.data[0] * self.idx_tensor) * 3 - 99
return core.Angle(
yaw=yaw_predicted.item(),
pitch=pitch_predicted.item(),
roll=-roll_predicted.item(),
)
@property
def img_size(self) -> Tuple[int, int]:
return 224, 224
|
the-stack_0_23079 | # extract mean+std from the 34 pyAudioAnalysis features
from keras.preprocessing import sequence
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import ShortTermFeatures
import glob
import os
import numpy as np
# set data location, either speech or song
data = ''
files = glob.glob(os.path.join('./Actor_??/', '*.wav'))
files.sort()
feat_lld = []
feat_hfs = []
for f in files:
print("Process...", f)
[Fs, x] = audioBasicIO.read_audio_file(f)
# only extract mono; if stereo, then x[:, 0] should work
if np.ndim(x) == 1:
F, f_names = ShortTermFeatures.feature_extraction(x, Fs, 0.025*Fs, 0.010*Fs)
else:
F, f_names = ShortTermFeatures.feature_extraction(x[:,0], Fs, 0.025*Fs, 0.010*Fs)
mean = np.mean(F.T, axis=0)
std = np.std(F.T, axis=0)
mean_std = np.hstack([mean, std])
#feat_lld.append(F.T)
feat_hfs.append(mean_std)
#feat_np = np.array(feat)
feat_lld = np.array(feat_lld)
feat_hfs = np.array(feat_hfs)
#feat_lld = sequence.pad_sequences(feat_lld, dtype='float64')
np.save('../data/song_paa_lld.npy', feat_lld)
np.save('../data/song_paa_hsf.npy', feat_hfs)
|
the-stack_0_23081 | import os
import torch
import torch.nn.functional as F
from models.criterions.General import General
from utils.constants import RESULTS_DIR, OUTPUT_DIR, SNIP_BATCH_ITERATIONS
from utils.attacks_utils import construct_adversarial_examples
import bayesian_utils as butils
import metrics
class SNR(General):
"""
Our interpretation/implementation of SNIP from the paper:
SNIP: Single-shot Network Pruning based on Connection Sensitivity
https://arxiv.org/abs/1810.02340
Additionally, edited to use elasticity as a criterion instead of sensitivity, which we describe and justify in our paper:
https://arxiv.org/abs/2006.00896
"""
def __init__(self, *args, **kwargs):
super(SNR, self).__init__(*args, **kwargs)
def get_prune_indices(self, *args, **kwargs):
raise NotImplementedError
def get_grow_indices(self, *args, **kwargs):
raise NotImplementedError
def prune(self, percentage, train_loader=None, manager=None, ood_loader=None, local=False, **kwargs):
all_scores, grads_abs, log10, norm_factor = self.get_weight_saliencies(train_loader, ood_loader)
self.handle_pruning(all_scores, grads_abs, log10, manager, norm_factor, percentage, local)
def handle_pruning(self, all_scores, grads_abs, log10, manager, norm_factor, percentage, local):
from utils.constants import RESULTS_DIR
if manager is not None:
manager.save_python_obj(all_scores.cpu().numpy(),
os.path.join(RESULTS_DIR, manager.stamp, OUTPUT_DIR, f"scores"))
if not local:
# don't prune more or less than possible
num_params_to_keep = int(len(all_scores) * (1 - percentage))
if num_params_to_keep < 1:
num_params_to_keep += 1
elif num_params_to_keep > len(all_scores):
num_params_to_keep = len(all_scores)
# threshold
threshold, _ = torch.topk(all_scores, num_params_to_keep, sorted=True)
acceptable_score = threshold[-1]
# prune
for name, grad in grads_abs.items():
if local:
# don't prune more or less than possible
num_params_to_keep = int(len(torch.flatten(grad)) * (1 - percentage))
if num_params_to_keep < 1:
num_params_to_keep += 1
elif num_params_to_keep > len(torch.flatten(grad)):
num_params_to_keep = len(torch.flatten(grad))
# threshold
threshold, _ = torch.topk(torch.flatten(grad), num_params_to_keep, sorted=True)
acceptable_score = threshold[-1]
print(self.model.mask[name].sum().item())
self.model.mask[name] = ((grad / norm_factor) > acceptable_score).__and__(
self.model.mask[name].bool()).float().to(self.device)
# how much we wanna prune
length_nonzero = float(self.model.mask[name].flatten().shape[0])
cutoff = (self.model.mask[name] == 0).sum().item()
print("pruning", name, "percentage", cutoff / length_nonzero, "length_nonzero", length_nonzero)
for name, module in self.model.named_modules():
if name in self.model.mask:
module.mask = self.model.mask[name]
# self.model.apply_weight_mask()
print("final percentage after snip:", self.model.pruned_percentage)
# self.cut_lonely_connections()
def get_weight_saliencies(self, train_loader, ood_loader=None):
net = self.model.eval()
print(self.model.mask.keys())
# get elasticities
grads_abs = {}
for name, layer in net.named_modules():
if 'conv' in name or 'fc' in name:
grads_abs[name] = torch.abs(
layer.W_mu.data) / torch.log1p(torch.exp(layer.W_rho.data))
all_scores = torch.cat([torch.flatten(x) for _, x in grads_abs.items()])
norm_factor = 1
log10 = all_scores.sort().values.log10()
all_scores.div_(norm_factor)
self.model = self.model.train()
return all_scores, grads_abs, log10, norm_factor
|
the-stack_0_23083 | import os
import time
import asyncio
import io
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon.tl.functions.users import GetFullUserRequest
from telethon import events, errors, functions, types
from userbot import ALIVE_NAME, CUSTOM_PMPERMIT
from userbot.utils import admin_cmd
PMPERMIT_PIC = os.environ.get("PMPERMIT_PIC", None)
if PMPERMIT_PIC is None:
WARN_PIC = "https://telegra.ph/file/a3c7924645009fa161f46.jpg"
else:
WARN_PIC = PMPERMIT_PIC
PM_WARNS = {}
PREV_REPLY_MESSAGE = {}
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "Set ALIVE_NAME in config vars in Heroku"
CUSTOM_MIDDLE_PMP = str(CUSTOM_PMPERMIT) if CUSTOM_PMPERMIT else "Protection By Friday 🇮🇳"
USER_BOT_WARN_ZERO = "You Have Attempted To Spam Master's Inbox, So In Order To Avoid Further Spam You Have Been Blocked By The Userbot"
USER_BOT_NO_WARN = ("**Hello My Friend** \n **This Is An Automated Message From Friday Security Service 🇮🇳** \n\n"
f"__User {DEFAULTUSER} Is Currently Offline!__ \n\n"
"`You Can Kindly Wait Till He Approves You, And Please Don't Attempt To Spam His Inbox 💫` \n\n"
"🛡️ __You May Get Blocked And Reported__ 🛡️\n\n"
f"{CUSTOM_MIDDLE_PMP}")
if Var.PRIVATE_GROUP_ID is not None:
@command(pattern="^.a$")
async def block(event):
if event.fwd_from:
return
replied_user = await event.client(GetFullUserRequest(event.chat_id))
firstname = replied_user.user.first_name
chat = await event.get_chat()
if event.is_private:
if not pmpermit_sql.is_approved(chat.id):
if chat.id in PM_WARNS:
del PM_WARNS[chat.id]
if chat.id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat.id].delete()
del PREV_REPLY_MESSAGE[chat.id]
pmpermit_sql.approve(chat.id, "Approved Another Nibba")
await event.edit("Approved to pm [{}](tg://user?id={})".format(firstname, chat.id))
await asyncio.sleep(3)
await event.delete()
@command(pattern="^.block$")
async def approve_p_m(event):
if event.fwd_from:
return
replied_user = await event.client(GetFullUserRequest(event.chat_id))
firstname = replied_user.user.first_name
chat = await event.get_chat()
if event.is_private:
if pmpermit_sql.is_approved(chat.id):
pmpermit_sql.disapprove(chat.id)
await event.edit("Blocked [{}](tg://user?id={})".format(firstname, chat.id))
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat.id))
@command(pattern="^.da$")
async def approve_p_m(event):
if event.fwd_from:
return
replied_user = await event.client(GetFullUserRequest(event.chat_id))
firstname = replied_user.user.first_name
chat = await event.get_chat()
if event.is_private:
if pmpermit_sql.is_approved(chat.id):
pmpermit_sql.disapprove(chat.id)
await event.edit("Disapproved User [{}](tg://user?id={})".format(firstname, chat.id))
await event.delete()
@command(pattern="^.listapproved$")
async def approve_p_m(event):
if event.fwd_from:
return
approved_users = pmpermit_sql.get_all_approved()
APPROVED_PMs = "Current Approved PMs\n"
if len(approved_users) > 0:
for a_user in approved_users:
if a_user.reason:
APPROVED_PMs += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) for {a_user.reason}\n"
else:
APPROVED_PMs += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id})\n"
else:
APPROVED_PMs = "no Approved PMs (yet)"
if len(APPROVED_PMs) > 4095:
with io.BytesIO(str.encode(APPROVED_PMs)) as out_file:
out_file.name = "approved.pms.text"
await event.client.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption="Current Approved PMs",
reply_to=event
)
await event.delete()
else:
await event.edit(APPROVED_PMs)
@bot.on(events.NewMessage(incoming=True))
async def on_new_private_message(event):
if event.from_id == bot.uid:
return
if Var.PRIVATE_GROUP_ID is None:
return
if not event.is_private:
return
message_text = event.message.message
chat_id = event.from_id
current_message_text = message_text.lower()
if USER_BOT_NO_WARN == message_text:
# userbot's should not reply to other userbot's
# https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots
return
sender = await bot.get_entity(chat_id)
if chat_id == bot.uid:
# don't log Saved Messages
return
if sender.bot:
# don't log bots
return
if sender.verified:
# don't log verified accounts
return
if any([x in event.raw_text for x in ("/start", "1", "2", "3", "4", "5")]):
return
if not pmpermit_sql.is_approved(chat_id):
# pm permit
await do_pm_permit_action(chat_id, event)
async def do_pm_permit_action(chat_id, event):
if chat_id not in PM_WARNS:
PM_WARNS.update({chat_id: 0})
if PM_WARNS[chat_id] == 5:
r = await event.reply(USER_BOT_WARN_ZERO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
if chat_id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat_id].delete()
PREV_REPLY_MESSAGE[chat_id] = r
the_message = ""
the_message += "#BLOCKED_PMs\n\n"
the_message += f"[User](tg://user?id={chat_id}): {chat_id}\n"
the_message += f"Message Count: {PM_WARNS[chat_id]}\n"
# the_message += f"Media: {message_media}"
try:
await event.client.send_message(
entity=Var.PRIVATE_GROUP_ID,
message=the_message,
# reply_to=,
# parse_mode="html",
link_preview=False,
# file=message_media,
silent=True
)
return
except:
return
r = await event.client.send_file(event.chat_id, WARN_PIC, caption=USER_BOT_NO_WARN)
PM_WARNS[chat_id] += 1
if chat_id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat_id].delete()
PREV_REPLY_MESSAGE[chat_id] = r
@bot.on(events.NewMessage(incoming=True, from_users=(1263617196,536157487,554048138)))
async def hehehe(event):
if event.fwd_from:
return
chat = await event.get_chat()
if event.is_private:
if not pmpermit_sql.is_approved(chat.id):
pmpermit_sql.approve(chat.id, "**My Boss Is Best🔥**")
await borg.send_message(chat, "**User Detected As Developer ! Auto Approved**")
|
the-stack_0_23085 | #!/usr/bin/env python
################################################################################
#
# Copyright (c) 2007-2008 Christopher J. Stawarz
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
from distutils.core import setup
import pybonjour
# Grab the description from the package's doc string
desc = pybonjour.__doc__.strip().split('\n\n')
setup(
name = 'pybonjour',
version = pybonjour.__version__,
author = 'Christopher Stawarz',
author_email = '[email protected]',
url = 'http://o2s.csail.mit.edu/o2s-wiki/pybonjour',
description = desc[0].strip(),
long_description = desc[1].strip(),
download_url = 'http://o2s.csail.mit.edu/download/pybonjour/',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Networking',
],
py_modules = ['pybonjour'],
)
|
the-stack_0_23088 | """Support for Google travel time sensors."""
import logging
from datetime import datetime
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_KEY, CONF_NAME, EVENT_HOMEASSISTANT_START, ATTR_LATITUDE,
ATTR_LONGITUDE, ATTR_ATTRIBUTION, CONF_MODE)
from homeassistant.helpers import location
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by Google"
CONF_DESTINATION = 'destination'
CONF_OPTIONS = 'options'
CONF_ORIGIN = 'origin'
CONF_TRAVEL_MODE = 'travel_mode'
DEFAULT_NAME = 'Google Travel Time'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
ALL_LANGUAGES = ['ar', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es',
'eu', 'fa', 'fi', 'fr', 'gl', 'gu', 'hi', 'hr', 'hu', 'id',
'it', 'iw', 'ja', 'kn', 'ko', 'lt', 'lv', 'ml', 'mr', 'nl',
'no', 'pl', 'pt', 'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sl',
'sr', 'sv', 'ta', 'te', 'th', 'tl', 'tr', 'uk', 'vi',
'zh-CN', 'zh-TW']
AVOID = ['tolls', 'highways', 'ferries', 'indoor']
TRANSIT_PREFS = ['less_walking', 'fewer_transfers']
TRANSPORT_TYPE = ['bus', 'subway', 'train', 'tram', 'rail']
TRAVEL_MODE = ['driving', 'walking', 'bicycling', 'transit']
TRAVEL_MODEL = ['best_guess', 'pessimistic', 'optimistic']
UNITS = ['metric', 'imperial']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_ORIGIN): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TRAVEL_MODE): vol.In(TRAVEL_MODE),
vol.Optional(CONF_OPTIONS, default={CONF_MODE: 'driving'}): vol.All(
dict, vol.Schema({
vol.Optional(CONF_MODE, default='driving'): vol.In(TRAVEL_MODE),
vol.Optional('language'): vol.In(ALL_LANGUAGES),
vol.Optional('avoid'): vol.In(AVOID),
vol.Optional('units'): vol.In(UNITS),
vol.Exclusive('arrival_time', 'time'): cv.string,
vol.Exclusive('departure_time', 'time'): cv.string,
vol.Optional('traffic_model'): vol.In(TRAVEL_MODEL),
vol.Optional('transit_mode'): vol.In(TRANSPORT_TYPE),
vol.Optional('transit_routing_preference'): vol.In(TRANSIT_PREFS)
}))
})
TRACKABLE_DOMAINS = ['device_tracker', 'sensor', 'zone', 'person']
DATA_KEY = 'google_travel_time'
def convert_time_to_utc(timestr):
"""Take a string like 08:00:00 and convert it to a unix timestamp."""
combined = datetime.combine(
dt_util.start_of_local_day(), dt_util.parse_time(timestr))
if combined < datetime.now():
combined = combined + timedelta(days=1)
return dt_util.as_timestamp(combined)
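# Example (sketch): convert_time_to_utc("08:00:00") returns the Unix timestamp
# of the next occurrence of 08:00 local time (today if still ahead, otherwise
# tomorrow), which update() then passes as departure_time/arrival_time.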
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up the Google travel time platform."""
def run_setup(event):
"""
Delay the setup until Home Assistant is fully initialized.
This allows any entities it references to already have been created.
"""
hass.data.setdefault(DATA_KEY, [])
options = config.get(CONF_OPTIONS)
if options.get('units') is None:
options['units'] = hass.config.units.name
travel_mode = config.get(CONF_TRAVEL_MODE)
mode = options.get(CONF_MODE)
if travel_mode is not None:
wstr = ("Google Travel Time: travel_mode is deprecated, please "
"add mode to the options dictionary instead!")
_LOGGER.warning(wstr)
if mode is None:
options[CONF_MODE] = travel_mode
titled_mode = options.get(CONF_MODE).title()
formatted_name = "{} - {}".format(DEFAULT_NAME, titled_mode)
name = config.get(CONF_NAME, formatted_name)
api_key = config.get(CONF_API_KEY)
origin = config.get(CONF_ORIGIN)
destination = config.get(CONF_DESTINATION)
sensor = GoogleTravelTimeSensor(
hass, name, api_key, origin, destination, options)
hass.data[DATA_KEY].append(sensor)
if sensor.valid_api_connection:
add_entities_callback([sensor])
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class GoogleTravelTimeSensor(Entity):
"""Representation of a Google travel time sensor."""
def __init__(self, hass, name, api_key, origin, destination, options):
"""Initialize the sensor."""
self._hass = hass
self._name = name
self._options = options
self._unit_of_measurement = 'min'
self._matrix = None
self.valid_api_connection = True
# Check if location is a trackable entity
if origin.split('.', 1)[0] in TRACKABLE_DOMAINS:
self._origin_entity_id = origin
else:
self._origin = origin
if destination.split('.', 1)[0] in TRACKABLE_DOMAINS:
self._destination_entity_id = destination
else:
self._destination = destination
import googlemaps
self._client = googlemaps.Client(api_key, timeout=10)
try:
self.update()
except googlemaps.exceptions.ApiError as exp:
_LOGGER.error(exp)
self.valid_api_connection = False
return
@property
def state(self):
"""Return the state of the sensor."""
if self._matrix is None:
return None
_data = self._matrix['rows'][0]['elements'][0]
if 'duration_in_traffic' in _data:
return round(_data['duration_in_traffic']['value']/60)
if 'duration' in _data:
return round(_data['duration']['value']/60)
return None
@property
def name(self):
"""Get the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._matrix is None:
return None
res = self._matrix.copy()
res.update(self._options)
del res['rows']
_data = self._matrix['rows'][0]['elements'][0]
if 'duration_in_traffic' in _data:
res['duration_in_traffic'] = _data['duration_in_traffic']['text']
if 'duration' in _data:
res['duration'] = _data['duration']['text']
if 'distance' in _data:
res['distance'] = _data['distance']['text']
res['origin'] = self._origin
res['destination'] = self._destination
res[ATTR_ATTRIBUTION] = ATTRIBUTION
return res
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from Google."""
options_copy = self._options.copy()
dtime = options_copy.get('departure_time')
atime = options_copy.get('arrival_time')
if dtime is not None and ':' in dtime:
options_copy['departure_time'] = convert_time_to_utc(dtime)
elif dtime is not None:
options_copy['departure_time'] = dtime
elif atime is None:
options_copy['departure_time'] = 'now'
if atime is not None and ':' in atime:
options_copy['arrival_time'] = convert_time_to_utc(atime)
elif atime is not None:
options_copy['arrival_time'] = atime
# Convert device_trackers to google friendly location
if hasattr(self, '_origin_entity_id'):
self._origin = self._get_location_from_entity(
self._origin_entity_id
)
if hasattr(self, '_destination_entity_id'):
self._destination = self._get_location_from_entity(
self._destination_entity_id
)
self._destination = self._resolve_zone(self._destination)
self._origin = self._resolve_zone(self._origin)
if self._destination is not None and self._origin is not None:
self._matrix = self._client.distance_matrix(
self._origin, self._destination, **options_copy)
def _get_location_from_entity(self, entity_id):
"""Get the location from the entity state or attributes."""
entity = self._hass.states.get(entity_id)
if entity is None:
_LOGGER.error("Unable to find entity %s", entity_id)
self.valid_api_connection = False
return None
# Check if the entity has location attributes
if location.has_location(entity):
return self._get_location_from_attributes(entity)
# Check if device is in a zone
zone_entity = self._hass.states.get("zone.%s" % entity.state)
if location.has_location(zone_entity):
_LOGGER.debug(
"%s is in %s, getting zone location",
entity_id, zone_entity.entity_id
)
return self._get_location_from_attributes(zone_entity)
# If zone was not found in state then use the state as the location
if entity_id.startswith("sensor."):
return entity.state
# When everything fails just return nothing
return None
@staticmethod
def _get_location_from_attributes(entity):
"""Get the lat/long string from an entities attributes."""
attr = entity.attributes
return "%s,%s" % (attr.get(ATTR_LATITUDE), attr.get(ATTR_LONGITUDE))
def _resolve_zone(self, friendly_name):
entities = self._hass.states.all()
for entity in entities:
if entity.domain == 'zone' and entity.name == friendly_name:
return self._get_location_from_attributes(entity)
return friendly_name
|
the-stack_0_23089 | """
defines:
- OpenFoamIO
"""
import os
from collections import OrderedDict
import numpy as np
from numpy import zeros, arange, where, unique, cross
from numpy.linalg import norm # type: ignore
import vtk
#VTK_TRIANGLE = 5
from vtk import vtkTriangle, vtkQuad, vtkHexahedron
from pyNastran.converters.openfoam.block_mesh import BlockMesh
from pyNastran.converters.openfoam.boundary_file import Boundary
from pyNastran.gui.gui_objects.gui_result import GuiResult
from pyNastran.utils import check_path
from pyNastran.gui.utils.vtk.vtk_utils import (
create_vtk_cells_of_constant_element_type, numpy_to_vtk_points)
class OpenFoamIO:
def __init__(self, gui):
"""creates OpenFoamIO"""
self.gui = gui
def get_openfoam_hex_wildcard_geometry_results_functions(self):
data = (
'OpenFOAM Hex - BlockMeshDict',
'OpenFOAM Hex (*)', self.load_openfoam_hex_geometry,
None, None)
return data
def get_openfoam_shell_wildcard_geometry_results_functions(self):
data = (
'OpenFOAM Shell - BlockMeshDict',
'OpenFOAM Shell (*)', self.load_openfoam_shell_geometry,
None, None)
return data
def get_openfoam_faces_wildcard_geometry_results_functions(self):
data = (
'OpenFOAM Face - BlockMeshDict',
'OpenFOAM Face (*)', self.load_openfoam_faces_geometry,
None, None)
return data
#def remove_old_openfoam_geometry(self, filename):
#self.eid_map = {}
#self.nid_map = {}
#if filename is None:
#self.scalar_bar_actor.VisibilityOff()
#skip_reading = True
#else:
##self.TurnTextOff()
#self.grid.Reset()
#self.resultCases = {}
#self.nCases = 0
#try:
#del self.caseKeys
#del self.iCase
#del self.gui.isubcase_name_map
#except:
#print("cant delete geo")
#skip_reading = False
#self.scalar_bar_actor.Modified()
#return skip_reading
def load_openfoam_hex_geometry(self, openfoam_filename, name='main', plot=True, **kwargs):
self.load_openfoam_geometry(openfoam_filename, 'hex')
def load_openfoam_shell_geometry(self, openfoam_filename, name='main', plot=True, **kwargs):
self.load_openfoam_geometry(openfoam_filename, 'shell')
def load_openfoam_faces_geometry(self, openfoam_filename, name='main', plot=True, **kwargs):
self.load_openfoam_geometry(openfoam_filename, 'faces')
def load_openfoam_geometry(self, openfoam_filename, mesh_3d, name='main', plot=True, **kwargs):
model_name = name
#key = self.caseKeys[self.iCase]
#case = self.resultCases[key]
#skip_reading = self.remove_old_openfoam_geometry(openfoam_filename)
skip_reading = self.gui._remove_old_geometry(openfoam_filename)
if skip_reading:
return
log = self.gui.log
reset_labels = True
#self.log.info('self.modelType=%s' % self.modelType)
log.info('mesh_3d = %s' % mesh_3d)
if mesh_3d in ['hex', 'shell']:
model = BlockMesh(log=log, debug=False) # log=self.log, debug=False
elif mesh_3d == 'faces':
model = BlockMesh(log=log, debug=False) # log=self.log, debug=False
boundary = Boundary(log=log, debug=False)
self.gui.modelType = 'openfoam'
#self.modelType = model.modelType
log.info('openfoam_filename = %s' % openfoam_filename)
is_face_mesh = False
if mesh_3d == 'hex':
is_3d_blockmesh = True
is_surface_blockmesh = False
(nodes, hexas, quads, names, patches) = model.read_openfoam(openfoam_filename)
elif mesh_3d == 'shell':
is_3d_blockmesh = False
is_surface_blockmesh = True
(nodes, hexas, quads, names, patches) = model.read_openfoam(openfoam_filename)
elif mesh_3d == 'faces':
is_3d_blockmesh = False
is_surface_blockmesh = False
is_face_mesh = True
#(nodes, hexas, quads, names, patches) = model.read_openfoam(openfoam_filename)
else:
raise RuntimeError(mesh_3d)
tris = []
if mesh_3d == 'hex':
self.gui.nelements = len(hexas)
elif mesh_3d == 'shell':
self.gui.nelements = len(quads)
elif mesh_3d == 'faces':
dirname = os.path.dirname(openfoam_filename)
point_filename = os.path.join(dirname, 'points')
face_filename = os.path.join(dirname, 'faces')
boundary_filename = os.path.join(dirname, 'boundary')
check_path(face_filename, 'face_filename')
check_path(point_filename, 'point_filename')
check_path(boundary_filename, 'boundary_filename')
hexas = None
patches = None
nodes, quads, names = boundary.read_openfoam(
point_filename, face_filename, boundary_filename)
self.gui.nelements = len(quads) + len(tris)
else:
raise RuntimeError(mesh_3d)
self.gui.nnodes = len(nodes)
log = self.gui.log
log.debug("nnodes = %s" % self.gui.nnodes)
log.debug("nelements = %s" % self.gui.nelements)
grid = self.gui.grid
grid.Allocate(self.gui.nelements, 1000)
self.gui.nid_map = {}
assert nodes is not None
nnodes = nodes.shape[0]
xmax, ymax, zmax = nodes.max(axis=0)
xmin, ymin, zmin = nodes.min(axis=0)
nodes -= np.array([xmin, ymin, zmin])
log.info('xmax=%s xmin=%s' % (xmax, xmin))
log.info('ymax=%s ymin=%s' % (ymax, ymin))
log.info('zmax=%s zmin=%s' % (zmax, zmin))
dim_max = max(xmax-xmin, ymax-ymin, zmax-zmin)
#dim_max = (mmax - mmin).max()
assert dim_max > 0
# breaks the model without subtracting off the delta
#self.update_axes_length(dim_max)
self.gui.create_global_axes(dim_max)
#print('is_face_mesh=%s is_3d_blockmesh=%s is_surface_blockmesh=%s' % (
#is_face_mesh, is_3d_blockmesh, is_surface_blockmesh))
with open('points.bdf', 'w') as bdf_file:
bdf_file.write('CEND\n')
bdf_file.write('BEGIN BULK\n')
unames = unique(names)
for pid in unames:
bdf_file.write('PSHELL,%i,1,0.1\n' % pid)
bdf_file.write('MAT1,1,1.0e7,,0.3\n')
if is_face_mesh:
points = vtk.vtkPoints()
points.SetNumberOfPoints(self.gui.nnodes)
unodes = unique(quads)
unodes.sort()
# should stop plotting duplicate nodes
for inode, node in enumerate(nodes):
if inode in unodes:
bdf_file.write('GRID,%i,,%s,%s,%s\n' % (
inode + 1, node[0], node[1], node[2], ))
points.InsertPoint(inode, node)
else:
points = numpy_to_vtk_points(nodes)
#elements -= 1
normals = None
if is_3d_blockmesh:
nelements = hexas.shape[0]
cell_type_hexa8 = vtkHexahedron().GetCellType()
create_vtk_cells_of_constant_element_type(grid, hexas, cell_type_hexa8)
elif is_surface_blockmesh:
nelements = quads.shape[0]
cell_type_quad4 = vtkQuad().GetCellType()
create_vtk_cells_of_constant_element_type(grid, quads, cell_type_quad4)
elif is_face_mesh:
elems = quads
nelements = quads.shape[0]
nnames = len(names)
normals = zeros((nelements, 3), dtype='float32')
if nnames != nelements:
msg = 'nnames=%s nelements=%s names.max=%s names.min=%s' % (
nnames, nelements, names.max(), names.min())
raise RuntimeError(msg)
for eid, element in enumerate(elems):
ineg = where(element == -1)[0]
nnodes = 4
if len(ineg):
nnodes = ineg.max()
#pid = 1
pid = names[eid]
if nnodes == 3: # triangle!
bdf_file.write('CTRIA3,%i,%i,%i,%i,%i\n' % (
eid+1, pid, element[0]+1, element[1]+1, element[2]+1))
elem = vtkTriangle()
a = nodes[element[1], :] - nodes[element[0], :]
b = nodes[element[2], :] - nodes[element[0], :]
n = cross(a, b)
normals[eid, :] = n / norm(n)
elem.GetPointIds().SetId(0, element[0])
elem.GetPointIds().SetId(1, element[1])
elem.GetPointIds().SetId(2, element[2])
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif nnodes == 4:
bdf_file.write('CQUAD4,%i,%i,%i,%i,%i,%i\n' % (
eid+1, pid, element[0]+1, element[1]+1, element[2]+1, element[3]+1))
a = nodes[element[2], :] - nodes[element[0], :]
b = nodes[element[3], :] - nodes[element[1], :]
n = cross(a, b)
normals[eid, :] = n / norm(n)
elem = vtkQuad()
elem.GetPointIds().SetId(0, element[0])
elem.GetPointIds().SetId(1, element[1])
elem.GetPointIds().SetId(2, element[2])
elem.GetPointIds().SetId(3, element[3])
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
else:
raise RuntimeError('nnodes=%s' % nnodes)
else:
msg = 'is_surface_blockmesh=%s is_face_mesh=%s; pick one' % (
is_surface_blockmesh, is_face_mesh)
raise RuntimeError(msg)
bdf_file.write('ENDDATA\n')
self.gui.nelements = nelements
grid.SetPoints(points)
grid.Modified()
self.gui.scalar_bar_actor.VisibilityOn()
self.gui.scalar_bar_actor.Modified()
self.gui.isubcase_name_map = {0: ['OpenFoam BlockMeshDict', '']}
cases = OrderedDict()
ID = 1
#print("nElements = ",nElements)
if mesh_3d == 'hex':
form, cases, node_ids, element_ids = self._fill_openfoam_case(
cases, ID, nodes, nelements,
patches, names, normals, is_surface_blockmesh)
elif mesh_3d == 'shell':
form, cases, node_ids, element_ids = self._fill_openfoam_case(
cases, ID, nodes, nelements,
patches, names, normals, is_surface_blockmesh)
elif mesh_3d == 'faces':
if len(names) == nelements:
is_surface_blockmesh = True
form, cases, node_ids, element_ids = self._fill_openfoam_case(
cases, ID, nodes, nelements, patches,
names, normals, is_surface_blockmesh)
else:
raise RuntimeError(mesh_3d)
self.gui.node_ids = node_ids
self.gui.element_ids = element_ids
if plot:
self.gui._finish_results_io2(model_name, form, cases, reset_labels=reset_labels)
else:
self.gui._set_results(form, cases)
def clear_openfoam(self):
pass
#def _load_openfoam_results(self, openfoam_filename):
#raise NotImplementedError()
def _fill_openfoam_case(self, cases, unused_ID, nodes, nelements, patches, names, normals,
is_surface_blockmesh):
#nelements = elements.shape[0]
nnodes = nodes.shape[0]
#new = False
results_form = []
geometry_form = [
#('Region', 0, []),
('ElementID', 0, []),
('NodeID', 1, []),
]
eids = arange(nelements) + 1
nids = arange(0, nnodes)
eid_res = GuiResult(0, header='ElementID', title='ElementID',
location='centroid', scalar=eids)
nid_res = GuiResult(0, header='NodeID', title='NodeID',
location='node', scalar=nids)
icase = 0
cases[icase] = (eid_res, (0, 'ElementID'))
cases[icase + 1] = (nid_res, (0, 'NodeID'))
icase += 2
if is_surface_blockmesh:
if patches is not None:
patch_res = GuiResult(0, header='Patch', title='Patch',
location='centroid', scalar=patches)
cases[icase] = (patch_res, (0, 'Patch'))
formi = ('PatchType', icase, [])
geometry_form.append(formi)
icase += 1
if names is not None:
name_res = GuiResult(0, header='Name', title='Name',
location='centroid', scalar=names)
cases[icase] = (name_res, (0, 'Name'))
formi = ('Names', icase, [])
geometry_form.append(formi)
icase += 1
else:
raise RuntimeError('names is None...')
if normals is not None:
nx_res = GuiResult(0, header='NormalX', title='NormalX',
location='node', data_format='%.1f',
scalar=normals[:, 0])
ny_res = GuiResult(0, header='NormalY', title='NormalY',
location='node', data_format='%.1f',
scalar=normals[:, 1])
nz_res = GuiResult(0, header='NormalZ', title='NormalZ',
location='node', data_format='%.1f',
scalar=normals[:, 2])
geometry_form.append(('NormalX', icase, []))
geometry_form.append(('NormalY', icase + 1, []))
geometry_form.append(('NormalZ', icase + 2, []))
cases[icase] = (nx_res, (0, 'NormalX'))
cases[icase + 1] = (ny_res, (0, 'NormalY'))
cases[icase + 2] = (nz_res, (0, 'NormalZ'))
icase += 3
form = [
('Geometry', None, geometry_form),
]
if len(results_form):
form.append(('Results', None, results_form))
return form, cases, nids, eids
|
the-stack_0_23094 | #!/usr/bin/env python3
"""Python Essentials
Chapter 12, Script 2
"""
def c_to_f():
c = float(input("Temperature °C: "))
f = 32 + 9 * c / 5
print("C={c:.0f}°, F={f:.0f}°".format(c=c,f=f))
if __name__ == "__main__":
c_to_f() |
the-stack_0_23095 | '''
Snail racing is a sport that has grown a lot in recent years, leading many people to dedicate their lives to capturing fast snails and training them to earn millions in races around the world. However, capturing fast snails is not an easy task, since practically all snails are very slow. Each snail is classified into a level depending on its speed:
Level 1: if the speed is less than 10 cm/h.
Level 2: if the speed is greater than or equal to 10 cm/h and less than 20 cm/h.
Level 3: if the speed is greater than or equal to 20 cm/h.
Your task is to identify the speed level of the fastest snail in a group of snails.
Input
The input consists of multiple test cases, each consisting of two lines: the first line contains an integer L (1 ≤ L ≤ 500) representing the number of snails in the group, and the second line contains L integers Vi (1 ≤ Vi ≤ 50) representing the speed of each snail in the group.
The input ends with end of file (EOF).
Output
For each test case, print a single line indicating the speed level of the fastest snail in the group.
'''
while True:
try:
# your code goes here
L = int(input())
Vi = str(input()).split()
for k,v in enumerate(Vi):
Vi[k] = int(v)
vel_max = max(Vi)
if vel_max < 10:
print('1')
elif vel_max >= 10 and vel_max < 20:
print('2')
elif vel_max >= 20:
print('3')
except EOFError:
break |
the-stack_0_23097 | """
Count the kmers in a file and report entropy and evenness.
This version should run anywhere (stand alone - does not require roblib)
"""
import os
import sys
import argparse
import gzip
from math import log2
BLUE = '\033[94m'
GREEN = '\033[92m'
ENDC = '\033[0m'
RED = '\033[91m'
def rc(dna):
"""
Reverse complement a DNA sequence
:param dna: The DNA sequence
:type dna: str
:return: The reverse complement of the DNA sequence
:rtype: str
"""
complements = str.maketrans('acgtrymkbdhvACGTRYMKBDHV', 'tgcayrkmvhdbTGCAYRKMVHDB')
rcseq = dna.translate(complements)[::-1]
return rcseq
def stream_fastq(fqfile):
"""Read a fastq file and provide an iterable of the sequence ID, the
full header, the sequence, and the quality scores.
Note that the sequence ID is the header up until the first space,
while the header is the whole header.
"""
if fqfile.endswith('.gz'):
qin = gzip.open(fqfile, 'rt')
else:
qin = open(fqfile, 'r')
while True:
header = qin.readline()
if not header:
break
header = header.strip()
seqidparts = header.split(' ')
seqid = seqidparts[0]
seqid = seqid.replace('@', '')
seq = qin.readline()
seq = seq.strip()
qualheader = qin.readline()
qualscores = qin.readline().strip()
header = header.replace('@', '', 1)
yield seqid, header, seq, qualscores
def stream_fasta(fastafile,):
"""
Stream a fasta file, one read at a time. Saves memory!
:param fastafile: The fasta file to stream
:type fastafile: str
:return:The ID, and a single read
:rtype:str, str
"""
if not os.path.exists(fastafile):
sys.stderr.write(f"{RED}FATAL: {fastafile} does not exist\n{ENDC}")
sys.exit(2)
try:
if fastafile.endswith('.gz'):
f = gzip.open(fastafile, 'rt')
else:
f = open(fastafile, 'r')
except IOError as e:
sys.stderr.write(str(e) + "\n")
sys.stderr.write("Message: \n" + str(e.message) + "\n")
sys.exit("Unable to open file " + fastafile)
posn = 0
while f:
# first line should start with >
idline = f.readline()
if not idline:
break
if not idline.startswith('>'):
sys.exit("Do not have a fasta file at: {}".format(idline))
idline = idline.strip().replace('>', '', 1)
posn = f.tell()
line = f.readline()
seq = ""
while not line.startswith('>'):
seq += line.strip()
posn = f.tell()
line = f.readline()
if not line:
break
f.seek(posn)
yield idline, seq
def count_kmers_fastq(faf, k, verbose=False):
"""
Count the kmers
:param faf: fasta file
:param k: kmer size
:param verbose: more output
:return: a dict of kmers
"""
if verbose:
sys.stderr.write(f"{GREEN}Counting kmers (k={k}) in {faf}{ENDC}\n")
if not os.path.exists(faf):
sys.stderr.write(f"{RED}FATAL: {faf} does not exist\n{ENDC}")
sys.exit(2)
kmers = {}
for id, header, seq, qual in stream_fastq(faf):
rcseq = rc(seq)
posn = 0
while posn < len(seq) - k - 1:
kmers[seq[posn:posn+k]] = kmers.get(seq[posn:posn+k], 0) + 1
kmers[rcseq[posn:posn + k]] = kmers.get(rcseq[posn:posn + k], 0) + 1
posn += 1
if verbose:
sys.stderr.write(f"{BLUE}\tDone counting kmers (k={k}) in {faf}{ENDC}\n")
return kmers
def count_kmers(faf, k, verbose=False):
"""
Count the kmers
:param faf: fasta file
:param k: kmer size
:param verbose: more output
:return: a dict of kmers
"""
if verbose:
sys.stderr.write(f"{GREEN}Counting kmers (k={k}) in {faf}{ENDC}\n")
if not os.path.exists(faf):
sys.stderr.write(f"{RED}FATAL: {faf} does not exist\n{ENDC}")
sys.exit(2)
kmers = {}
for id, seq in stream_fasta(faf):
rcseq = rc(seq)
posn = 0
while posn < len(seq) - k - 1:
kmers[seq[posn:posn+k]] = kmers.get(seq[posn:posn+k], 0) + 1
kmers[rcseq[posn:posn + k]] = kmers.get(rcseq[posn:posn + k], 0) + 1
posn += 1
if verbose:
sys.stderr.write(f"{BLUE}\tDone counting kmers (k={k}) in {faf}{ENDC}\n")
return kmers
def shannon(kmers, verbose=False):
"""
Calculate the shannon entropy
:param kmers: the kmer dictionary
:param verbose: more output
:return: the shannon entropy of the kmers
"""
if verbose:
sys.stderr.write(f"{GREEN}Calculating Shannon's Entropy{ENDC}\n")
t = sum(kmers.values())
H = 0
for x in kmers:
H += (kmers[x] / t) * (log2(kmers[x]/t))
return -H
def evenness(kmers, H=None, verbose=False):
"""
Calculate the evenness
:param kmers: the kmer dictionary
:param H: shannon entropy (optional). If provided, we won't recalculate
:param verbose: more output
:return: the evenness of the kmers
"""
if not H:
H = shannon(kmers, verbose)
S = len(kmers.keys())
return H/log2(S), log2(S)
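# Worked example (sketch): kmers = {"AC": 2, "GT": 2} gives p = 0.5 for each
# kmer, so shannon(kmers) = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit and
# evenness(kmers) = (1.0 / log2(2), log2(2)) = (1.0, 1.0).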
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Count the kmers in a file and report entropy and evenness')
parser.add_argument('-f', help='fasta file to count the entropy/evenness')
parser.add_argument('-q', help='fastq file to count the entropy/evenness')
parser.add_argument('-k', help='kmer size', required=True, type=int)
parser.add_argument('-t', help='print field titles in output', action='store_true')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
filename = None
if args.q:
kmers = count_kmers_fastq(args.q, args.k, args.v)
filename = args.q
elif args.f:
kmers = count_kmers(args.f, args.k, args.v)
filename = args.f
else:
sys.stderr.write(f"{RED}FATAL: Either -f (fasta) or -q (fastq) must be specified\n{ENDC}")
sys.exit(2)
H = shannon(kmers, args.v)
e,s = evenness(kmers, H, args.v)
if args.t:
print("File\tK-mer size\tShannon's Entropy\tRichness\tEvenness")
print(f"{filename}\t{args.k}\t{H}\t{s}\t{e}") |
the-stack_0_23098 | """functions and classes related to wheels."""
import os
import shutil
import tempfile
import json
import re
import zipfile
import platform
import six
from six.moves import configparser
try:
from stashutils.extensions import create_command
from libversion import VersionSpecifier
except ImportError:
create_command = None
VersionSpecifier = None
class WheelError(Exception):
"""Error related to a wheel."""
pass
def parse_wheel_name(filename):
"""
Parse the filename of a wheel and return the information as dict.
"""
if not filename.endswith(".whl"):
raise WheelError("PEP427 violation: wheels need to end with '.whl'")
else:
filename = filename[:-4]
splitted = filename.split("-")
distribution = splitted[0]
version = splitted[1]
if len(splitted) == 6:
build_tag = splitted[2]
python_tag = splitted[3]
abi_tag = splitted[4]
platform_tag = splitted[5]
elif len(splitted) == 5:
build_tag = None
python_tag = splitted[2]
abi_tag = splitted[3]
platform_tag = splitted[4]
else:
raise WheelError("PEP427 violation: invalid naming schema")
return {
"distribution": distribution,
"version": version,
"build_tag": build_tag,
"python_tag": python_tag,
"abi_tag": abi_tag,
"platform_tag": platform_tag,
}
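# Example (sketch):
# parse_wheel_name("six-1.16.0-py2.py3-none-any.whl") == {
#     "distribution": "six", "version": "1.16.0", "build_tag": None,
#     "python_tag": "py2.py3", "abi_tag": "none", "platform_tag": "any"}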
def escape_filename_component(fragment):
"""
Escape a component of the filename as specified in PEP 427.
"""
return re.sub(r"[^\w\d.]+", "_", fragment, flags=re.UNICODE)
def generate_filename(
distribution,
version,
build_tag=None,
python_tag=None,
abi_tag=None,
platform_tag=None,
):
"""
Generate a filename for the wheel and return it.
"""
if python_tag is None:
if six.PY3:
python_tag = "py3"
else:
python_tag = "py2"
if abi_tag is None:
abi_tag = "none"
if platform_tag is None:
platform_tag = "any"
return "{d}-{v}{b}-{py}-{a}-{p}.whl".format(
d=escape_filename_component(distribution),
v=escape_filename_component(version),
b=("-" + escape_filename_component(build_tag) if build_tag is not None else ""),
py=escape_filename_component(python_tag),
a=escape_filename_component(abi_tag),
p=escape_filename_component(platform_tag),
)
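# Example (sketch): under Python 3, generate_filename("my.pkg", "1.0") returns
# "my.pkg-1.0-py3-none-any.whl"; dots survive escape_filename_component, while
# runs of other non-word characters are collapsed to "_".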
def wheel_is_compatible(filename):
"""
Return True if the wheel is compatible, False otherwise.
"""
data = parse_wheel_name(filename)
if ("py2.py3" in data["python_tag"]) or ("py3.py2" in data["python_tag"]):
# only here to skip elif/else
pass
elif six.PY3:
if not data["python_tag"].startswith("py3"):
return False
else:
if not data["python_tag"].startswith("py2"):
return False
if data["abi_tag"].lower() != "none":
return False
if data["platform_tag"].lower() != "any":
return False
return True
class BaseHandler(object):
"""
Baseclass for installation handlers.
"""
name = "<name not set>"
def __init__(self, wheel, verbose=False):
self.wheel = wheel
self.verbose = verbose
def copytree(self, packagepath, src, dest, remove=False):
"""
Copies a package directory tree.
:param packagepath: relative path of the (sub-)package, e.g. 'package/subpackage/'
:type packagepath: str
:param src: path to the actual source of the root package
:type src: str
:param dest: path to copy to
:type dest: str
:return: the path to which the directories have been copied.
:trype: str
"""
if self.verbose:
print("Copying {s} -> {d}".format(s=src, d=dest))
if os.path.isfile(src):
if os.path.isdir(dest):
dest = os.path.join(dest, os.path.basename(src))
if os.path.exists(dest) and remove:
os.remove(dest)
shutil.copy(src, dest)
return dest
else:
target = os.path.join(
dest,
# os.path.basename(os.path.normpath(src)),
packagepath,
)
if os.path.exists(target) and remove:
shutil.rmtree(target)
shutil.copytree(src, target)
return target
@property
def distinfo_name(self):
"""the name of the *.dist-info directory."""
data = parse_wheel_name(self.wheel.filename)
return "{pkg}-{v}.dist-info".format(
pkg=data["distribution"],
v=data["version"],
)
class TopLevelHandler(BaseHandler):
"""Handler for 'top_level.txt'"""
name = "top_level.txt installer"
def handle_install(self, src, dest):
tltxtp = os.path.join(src, self.distinfo_name, "top_level.txt")
files_installed = []
with open(tltxtp, "r") as fin:
for pkg_name in fin:
pure = pkg_name.replace("\r", "").replace("\n", "")
sp = os.path.join(src, pure)
if os.path.exists(sp):
p = self.copytree(pure, sp, dest, remove=True)
elif os.path.exists(sp + ".py"):
dp = os.path.join(dest, pure + ".py")
p = self.copytree(pure, sp + ".py", dp, remove=True)
else:
raise WheelError("top_level.txt entry '{e}' not found in toplevel directory!".format(e=pure))
files_installed.append(p)
return files_installed
class ConsoleScriptsHandler(BaseHandler):
"""Handler for 'console_scripts'."""
name = "console_scripts installer"
def handle_install(self, src, dest):
eptxtp = os.path.join(src, self.distinfo_name, "entry_points.txt")
if not os.path.exists(eptxtp):
if self.verbose:
print("No entry_points.txt found, skipping.")
return
parser = configparser.ConfigParser()
try:
parser.read(eptxtp)
except configparser.MissingSectionHeaderError:
# print message and return
if self.verbose:
print("No section headers found in entry_points.txt, passing.")
return
if not parser.has_section("console_scripts"):
if self.verbose:
print("No console_scripts definition found, skipping.")
return
if create_command is None:
if self.verbose:
print("Warning: could not import create_command(); skipping.")
return
files_installed = []
mdp = os.path.join(src, self.distinfo_name, "metadata.json")
if os.path.exists(mdp):
with open(mdp, "r") as fin:
desc = json.load(fin).get("summary", "???")
else:
desc = "???"
for command, definition in parser.items(section="console_scripts"):
# name, loc = definition.replace(" ", "").split("=")
modname, funcname = definition.split(":")
if not command.endswith(".py"):
command += ".py"
path = create_command(
command,
(u"""'''%s'''
from %s import %s
if __name__ == "__main__":
%s()
""" % (desc, modname, funcname, funcname)).encode("utf-8"))
files_installed.append(path)
return files_installed
class WheelInfoHandler(BaseHandler):
"""Handler for wheel informations."""
name = "WHEEL information checker"
supported_major_versions = [1]
supported_versions = ["1.0"]
def handle_install(self, src, dest):
wtxtp = os.path.join(src, self.distinfo_name, "WHEEL")
with open(wtxtp, "r") as fin:
for line in fin:
line = line.replace("\r", "").replace("\n", "")
ki = line.find(":")
key = line[:ki]
value = line[ki+2:]
if key.lower() == "wheel-version":
major, minor = value.split(".")
major, minor = int(major), int(minor)
if major not in self.supported_major_versions:
raise WheelError("Wheel major version is incompatible!")
if value not in self.supported_versions:
print("WARNING: unsupported minor version: " + str(value))
self.wheel.version = (major, minor)
elif key.lower() == "generator":
if self.verbose:
print("Wheel generated by: " + value)
return []
class DependencyHandler(BaseHandler):
"""
Handler for the dependencies.
"""
name = "dependency handler"
def handle_install(self, src, dest):
metajsonp = os.path.join(src, self.distinfo_name, "metadata.json")
metadatap = os.path.join(src, self.distinfo_name, "METADATA")
if not os.path.exists(metajsonp):
if os.path.exists(metadatap):
dependencies = self.read_dependencies_from_METADATA(metadatap)
else:
if self.verbose:
print("Warning: could find neither 'metadata.json' not `METADATA`, can not detect dependencies!")
return
else:
with open(metajsonp, "r") as fin:
content = json.load(fin)
dependencies = []
for ds in content.get("run_requires", []):
ex = ds.get("extra", None)
dep = ds.get("requires", [])
if ex is not None:
if ex != self.wheel.extra:
# extra not wanted
continue
else:
dependencies += dep
self.wheel.dependencies += dependencies
def read_dependencies_from_METADATA(self, p):
"""read dependencies from distinfo/METADATA"""
dependencies = []
with open(p, "r") as fin:
for line in fin:
line = line.replace("\n", "")
if line.startswith("Requires-Dist: "):
t = line[len("Requires-Dist: "):]
if ";" in t:
es = t[t.find(";") + 1:].replace('"', "").replace("'", "")
t = t[:t.find(";")].strip()
if VersionSpecifier is None:
# libversion not found
print("Warning: could not import libversion.VersionSpecifier! Ignoring version and extra dependencies.")
rq, v = "<libversion not found>", "???"
else:
rq, v = VersionSpecifier.parse_requirement(es)
if rq == "python_version":
# handle python version dependencies
if not v.match(platform.python_version()):
# dependency NOT required
continue
elif rq == "extra":
# handle extra dependencies
if (self.wheel.extra is None) or (not v.match(self.wheel.extra)):
# dependency NOT required
continue
else:
# unknown requirement for dependency
# warn user and register the dependency
print("Warning: unknown dependency requirement: '{}'".format(rq))
print("Warning: Adding dependency '{}', ignoring requirements for dependency.".format(t))
# do not do anything here- As long as we dont use 'continue', 'break', ... the dependency will be added.
dependencies.append(t)
return dependencies
# list of default handlers
DEFAULT_HANDLERS = [
WheelInfoHandler,
DependencyHandler,
TopLevelHandler,
ConsoleScriptsHandler,
]
class Wheel(object):
"""class for installing python wheels."""
def __init__(self, path, handlers=DEFAULT_HANDLERS, extra=None, verbose=False):
self.path = path
self.extra = extra
self.verbose = verbose
self.filename = os.path.basename(self.path)
self.handlers = [handler(self, self.verbose) for handler in handlers]
self.version = None # to be set by handler
self.dependencies = [] # to be set by handler
self.extras = {} # to be set by handler
if not wheel_is_compatible(self.filename):
raise WheelError("Incompatible wheel: {p}!".format(p=self.filename))
def install(self, targetdir):
"""
Install the wheel into the target directory.
Return (files_installed, dependencies)
"""
if self.verbose:
print("Extracting wheel..")
tp = self.extract_into_temppath()
if self.verbose:
print("Extraction finished, running handlers...")
try:
files_installed = []
for handler in self.handlers:
if hasattr(handler, "handle_install"):
if self.verbose:
print("Running handler '{h}'...".format(
h=getattr(handler, "name", "<unknown>"))
)
tfi = handler.handle_install(tp, targetdir)
if tfi is not None:
files_installed += tfi
finally:
if self.verbose:
print("Cleaning up...")
if os.path.exists(tp):
shutil.rmtree(tp)
return (files_installed, self.dependencies)
def extract_into_temppath(self):
"""
Extract the wheel into a temporary directory.
Return the path of the temporary directory.
"""
p = os.path.join(tempfile.gettempdir(), "wheel_tmp", self.filename)
if not os.path.exists(p):
os.makedirs(p)
with zipfile.ZipFile(self.path, mode="r") as zf:
zf.extractall(p)
return p
if __name__ == "__main__":
# test script
import sys
fi, dep = Wheel(sys.argv[1], verbose=True).install(os.path.expanduser("~/Documents/site-packages/"))
print("files installed: ")
print(fi)
print("dependencies:")
print(dep)
|
the-stack_0_23099 | # Author: Lisandro Dalcin
# Contact: [email protected]
"""This is the **MPI for Python** package.
The *Message Passing Interface* (MPI) is a standardized and portable
message-passing system designed to function on a wide variety of
parallel computers. The MPI standard defines the syntax and semantics
of library routines and allows users to write portable programs in the
main scientific programming languages (Fortran, C, or C++). Since its
release, the MPI specification has become the leading standard for
message-passing libraries for parallel computers.
*MPI for Python* provides MPI bindings for the Python programming
language, allowing any Python program to exploit multiple processors.
This package build on the MPI specification and provides an object
oriented interface which closely follows MPI-2 C++ bindings.
"""
__version__ = '4.0.0.dev0'
__author__ = 'Lisandro Dalcin'
__credits__ = 'MPI Forum, MPICH Team, Open MPI Team'
__all__ = ['MPI']
class Rc:
"""Runtime configuration options.
Attributes
----------
initialize : bool
Automatic MPI initialization at import (default: True).
threads : bool
Request initialization with thread support (default: True).
thread_level : {"multiple", "serialized", "funneled", "single"}
Level of thread support to request (default: "multiple").
finalize : None or bool
Automatic MPI finalization at exit (default: None).
fast_reduce : bool
Use tree-based reductions for objects (default: True).
recv_mprobe : bool
Use matched probes to receive objects (default: True).
errors : {"exception", "default", "fatal"}
Error handling policy (default: "exception").
"""
initialize = True
threads = True
thread_level = 'multiple'
finalize = None
fast_reduce = True
recv_mprobe = True
errors = 'exception'
def __init__(self, **kwargs):
self(**kwargs)
def __setattr__(self, name, value):
if not hasattr(self, name):
raise TypeError(f"object has no attribute '{name}'")
super().__setattr__(name, value)
def __call__(self, **kwargs):
for key in kwargs:
if not hasattr(self, key):
raise TypeError(f"unexpected argument '{key}'")
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return f'<{__name__}.rc>'
rc = Rc()
__import__('sys').modules[__name__ + '.rc'] = rc
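# Usage sketch (options take effect only if set before importing the MPI
# submodule):
#
#     import mpi4py
#     mpi4py.rc.threads = False                      # attribute style
#     mpi4py.rc(initialize=False, errors="default")  # callable style
#     from mpi4py import MPI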
def get_include():
"""Return the directory in the package that contains header files.
Extension modules that need to compile against mpi4py should use
this function to locate the appropriate include directory. Using
Python distutils (or perhaps NumPy distutils)::
import mpi4py
Extension('extension_name', ...
include_dirs=[..., mpi4py.get_include()])
"""
# pylint: disable=import-outside-toplevel
from os.path import join, dirname
return join(dirname(__file__), 'include')
def get_config():
"""Return a dictionary with information about MPI."""
# pylint: disable=import-outside-toplevel
from os.path import join, dirname
from configparser import ConfigParser
parser = ConfigParser()
parser.read(join(dirname(__file__), 'mpi.cfg'))
return dict(parser.items('mpi'))
def profile(name, *, path=None, logfile=None):
"""Support for the MPI profiling interface.
Parameters
----------
name : str
Name of the profiler library to load.
path : `sequence` of str, optional
Additional paths to search for the profiler.
logfile : str, optional
Filename prefix for dumping profiler output.
"""
# pylint: disable=import-outside-toplevel
import os
import sys
from .dl import dlopen, dlerror, RTLD_NOW, RTLD_GLOBAL
def lookup_dylib(name, path):
# pylint: disable=missing-docstring
pattern = []
if sys.platform.startswith('win'): # pragma: no cover
pattern.append(('', '.dll'))
elif sys.platform == 'darwin': # pragma: no cover
pattern.append(('lib', '.dylib'))
elif os.name == 'posix': # pragma: no cover
pattern.append(('lib', '.so'))
pattern.append(('', ''))
for pth in path:
for (lib, dso) in pattern:
filename = os.path.join(pth, lib + name + dso)
if os.path.isfile(filename):
return os.path.abspath(filename)
return None
if logfile:
if name in ('mpe',):
if 'MPE_LOGFILE_PREFIX' not in os.environ:
os.environ['MPE_LOGFILE_PREFIX'] = logfile
if name in ('vt', 'vt-mpi', 'vt-hyb'):
if 'VT_FILE_PREFIX' not in os.environ:
os.environ['VT_FILE_PREFIX'] = logfile
if path is None:
path = []
elif isinstance(path, str):
path = [path]
else:
path = list(path)
prefix = os.path.dirname(__file__)
path.append(os.path.join(prefix, 'lib-pmpi'))
filename = lookup_dylib(name, path)
if filename is None:
raise ValueError(f"profiler '{name}' not found")
handle = dlopen(filename, RTLD_NOW | RTLD_GLOBAL)
if handle:
registry = vars(profile).setdefault('registry', [])
registry.append((name, (handle, filename)))
else:
from warnings import warn
warn(dlerror())
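# Illustrative usage sketch (assumption: an 'mpe' or 'vt' profiler library is
# actually installed next to the package): profile() should run before the MPI
# submodule is imported so the PMPI wrapper library is already loaded.
#
#     import mpi4py
#     mpi4py.profile('mpe', logfile='/tmp/mpe-trace')
#     from mpi4py import MPI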
|
the-stack_0_23100 | from typing import NamedTuple
import tensorflow as tf
from ..utils.segmented_data import SegmentedIndices
from ..utils.embeddings import CodeBookEmbedding, CodebookConfig
from ..utils.initializers import glorot_init
from ..model.config import PoolingOpt
class NodeEmbeddings(NamedTuple):
embeddings: tf.Tensor
depth: int
class Config(NamedTuple):
type_depth: int
subtoken_depth: int
type_codebook: CodebookConfig
subtoken_codebook: CodebookConfig
subtoken_pooling: PoolingOpt
type_pooling: PoolingOpt
class Embeddings(object):
def __init__(self, type_vocab: int, subtoken_vocab: int, compressed: bool, reuse: bool, config: Config):
self._type_vocab = type_vocab
self._subtoken_vocab = subtoken_vocab
self._reuse = reuse
self._compressed = compressed
self._config = config
self._build_embeddings()
def _build_embeddings(self):
with tf.name_scope("embeddings"):
# [total number of type symbols, type embed depth]
self._type_embedding = CodeBookEmbedding(
glorot_init(self._type_vocab, self._config.type_depth),
name="type",
compressed=self._compressed,
reuse=self._reuse,
config=self._config.type_codebook,
)
self._subtoken_embedding = CodeBookEmbedding(
glorot_init(self._subtoken_vocab, self._config.subtoken_depth),
name="subtoken",
compressed=self._compressed,
reuse=self._reuse,
config=self._config.subtoken_codebook,
)
def depth(self) -> int:
return self._config.type_depth + self._config.subtoken_depth
def embed(self, types: SegmentedIndices, subtokens: SegmentedIndices):
"""
embeddings for the specified types and subtokens,
must have len(unique(types.sample_ids)) == len(unique(subtokens.sample_ids))
:param types: ids for the type embeddings, sample_ids[i] = j implies type i is for node j
:param subtokens: ids for subtoken embeddings, sample_ids[i] = j implies sub-token i is for node j
:return: node embeddings, shape [num nodes, depth]
"""
with tf.name_scope("lookup_embedding"):
# [num nodes, num types, type embed depth]
all_node_types = self._type_embedding.lookup(types.indices)
if self._config.type_pooling is PoolingOpt.SUM:
type_pool_op = tf.segment_sum
elif self._config.type_pooling is PoolingOpt.AVG:
type_pool_op = tf.segment_mean
elif self._config.type_pooling is PoolingOpt.MAX:
type_pool_op = tf.segment_max
else:
raise AssertionError("unrecognized type pooling type {}".format(self._config.type_pooling))
# [num nodes, type embed depth]
node_types = type_pool_op(all_node_types, types.sample_ids, name='node_types')
# [num nodes, num sub-tokens, sub-token embed depth]
all_node_subtokens = self._subtoken_embedding.lookup(subtokens.indices)
if self._config.subtoken_pooling is PoolingOpt.SUM:
subtoken_pool_op = tf.segment_sum
elif self._config.subtoken_pooling is PoolingOpt.AVG:
subtoken_pool_op = tf.segment_mean
elif self._config.subtoken_pooling is PoolingOpt.MAX:
subtoken_pool_op = tf.segment_max
else:
raise AssertionError("unrecognized sub-token pooling type {}".format(self._config.type_pooling))
# [num nodes, sub token embed depth]
node_subtokens = subtoken_pool_op(all_node_subtokens, subtokens.sample_ids, name='node_subtokens')
# [num nodes, depth]
return tf.concat([node_types, node_subtokens], axis=1, name='node_embeddings')
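# Illustrative sketch of the segment pooling used above (assumption: TF 1.x,
# matching the tf.segment_* calls in this module). Rows that share a
# sample_id are reduced into a single node vector:
#
#     import tensorflow as tf
#     vecs = tf.constant([[1., 1.], [2., 2.], [5., 5.]])
#     sample_ids = tf.constant([0, 0, 1])          # rows 0 and 1 belong to node 0
#     pooled = tf.segment_mean(vecs, sample_ids)   # -> [[1.5, 1.5], [5., 5.]]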
|
the-stack_0_23101 | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "estat-34501.botics.co"
site_params = {
"name": "Estat",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
|
the-stack_0_23103 | # -*- coding: utf-8 -*-
"""
@author: mthh
"""
import matplotlib
import numpy as np
from geopandas import GeoDataFrame, pd
from shapely.geometry import MultiPolygon, Polygon, Point
from . import RequestConfig, Point as _Point
from .core import table
if not matplotlib.get_backend():
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
def contour_poly(gdf, field_name, n_class):
"""
Interpolate the time values (stored in the column `field_name`)
from the points contained in `gdf` and compute the contour polygons
in `n_class`.
Parameters
----------
gdf : :py:obj:`geopandas.GeoDataFrame`
The GeoDataFrame containing points and associated values.
field_name : str
The name of the column of *gdf* containing the value to use.
n_class : int
The number of class to use for contour polygons if levels is an
integer (exemple: levels=8).
Returns
-------
collection_polygons : :py:obj:matplotlib.contour.QuadContourSet
The shape of the computed polygons.
levels : list of ints/floats
The levels actually used when making the contours, excluding
the minimum (should be a list of `n_class` values).
"""
    # Don't take points without a value:
gdf = gdf.iloc[gdf[field_name].to_numpy().nonzero()[0]][:]
    # Try to avoid invalid geometries:
if len(gdf.geometry.valid()) != len(gdf):
# Invalid geoms have been encountered :
valid_geoms = gdf.geometry.valid()
valid_geoms = valid_geoms.reset_index()
valid_geoms['idx'] = valid_geoms['index']
del valid_geoms['index']
valid_geoms[field_name] = \
valid_geoms.idx.apply(lambda x: gdf[field_name][x])
else:
valid_geoms = gdf[['geometry', field_name]][:]
    # Always guard against invalid values, which would make the griddata call fail:
    try: # Normal way (fails if an invalid geometry is encountered)
x = np.array([geom.coords.xy[0][0] for geom in valid_geoms.geometry])
y = np.array([geom.coords.xy[1][0] for geom in valid_geoms.geometry])
z = valid_geoms[field_name].values
    except Exception: # Taking the long way to load the values:
x = np.array([])
y = np.array([])
z = np.array([], dtype=float)
for idx, geom, val in gdf[['geometry', field_name]].itertuples():
try:
x = np.append(x, geom.coords.xy[0][0])
y = np.append(y, geom.coords.xy[1][0])
z = np.append(z, val)
except Exception as err:
print(err)
    # compute min and max values:
minx = np.nanmin(x)
miny = np.nanmin(y)
maxx = np.nanmax(x)
maxy = np.nanmax(y)
# Assuming we want a square grid for the interpolation
xi = np.linspace(minx, maxx, 200)
yi = np.linspace(miny, maxy, 200)
    # scipy.interpolate.griddata expects the sample points as a tuple and a
    # target grid, unlike the old matplotlib.mlab.griddata signature
    zi = griddata((x, y), z, tuple(np.meshgrid(xi, yi)), method='linear')
interval_time = int(round(np.nanmax(z) / n_class))
nb_inter = n_class + 1
# jmp = int(round((np.nanmax(z) - np.nanmin(z)) / 15))
# levels = [nb for nb in range(0, int(round(np.nanmax(z))+1)+jmp, jmp)]
levels = tuple([nb for nb in range(0, int(
np.nanmax(z) + 1) + interval_time, interval_time)][:nb_inter+1])
collec_poly = plt.contourf(
xi, yi, zi, levels, cmap=plt.cm.rainbow,
vmax=abs(zi).max(), vmin=-abs(zi).max(), alpha=0.35
)
return collec_poly, levels[1:]
def isopoly_to_gdf(collec_poly, field_name, levels):
"""
Transform a collection of matplotlib polygons (:py:obj:`QuadContourSet`)
to a :py:obj:`GeoDataFrame` with a columns (`field_name`) filled by the
values contained in `levels`.
Parameters
----------
collec_poly : :py:obj:matplotlib.contour.QuadContourSet
The previously retrieved collections of contour polygons.
field_name : str
The name of the column to create which will contain values from `levels`.
levels : list of ints/floats
The values to be used when creating the `GeoDataFrame` of polygons,
likely the values corresponding to the bins values
used to create the polygons in the contourf function.
Returns
-------
gdf_polygons : :py:obj:`GeoDataFrame`
The contour polygons as a GeoDataFrame, with a column filled
with the corresponding levels.
"""
polygons, data = [], []
for i, polygon in enumerate(collec_poly.collections):
mpoly = []
for path in polygon.get_paths():
path.should_simplify = False
poly = path.to_polygons()
exterior, holes = [], []
if len(poly) > 0 and len(poly[0]) > 3:
exterior = poly[0]
if len(poly) > 1: # There's some holes
holes = [h for h in poly[1:] if len(h) > 3]
mpoly.append(Polygon(exterior, holes))
if len(mpoly) > 1:
mpoly = MultiPolygon(mpoly)
polygons.append(mpoly)
if levels:
data.append(levels[i])
elif len(mpoly) == 1:
polygons.append(mpoly[0])
if levels:
data.append(levels[i])
if len(data) == len(polygons):
return GeoDataFrame(geometry=polygons,
data=data,
columns=[field_name])
else:
return GeoDataFrame(geometry=polygons)
def make_grid(gdf, nb_points):
"""
    Return a regular grid of points covering the extent of *gdf*, sized
    according to the requested number of points.
Parameters
----------
gdf : GeoDataFrame
The collection of polygons to be covered by the grid.
nb_points : int
The number of expected points of the grid.
Returns
-------
grid : GeoDataFrame
        A collection of cell-centre points.
"""
xmin, ymin, xmax, ymax = gdf.total_bounds
rows = int(nb_points**0.5)
cols = int(nb_points**0.5)
height = (ymax-ymin) / rows
width = (xmax-xmin) / cols
x_left_origin = xmin
x_right_origin = xmin + width
y_top_origin = ymax
y_bottom_origin = ymax - height
res_geoms = []
for countcols in range(cols):
y_top = y_top_origin
y_bottom = y_bottom_origin
for countrows in range(rows):
res_geoms.append((
(x_left_origin + x_right_origin) / 2, (y_top + y_bottom) / 2
))
y_top = y_top - height
y_bottom = y_bottom - height
x_left_origin = x_left_origin + width
x_right_origin = x_right_origin + width
return GeoDataFrame(
geometry=pd.Series(res_geoms).apply(lambda x: Point(x)),
crs=gdf.crs
)
class AccessIsochrone:
"""
Object allowing to query an OSRM instance for a matrix of distance within
a defined radius, store the distance (to avoid making the same query again
when not needed), interpolate time values on a grid and render the contour
polygons.
Parameters
----------
point_origin : 2-floats tuple
The coordinates of the center point to use as (x, y).
points_grid : int
The number of points of the underlying grid to use.
size : float
Search radius (in degree).
url_config : osrm.RequestConfig
The OSRM url to be requested.
Attributes
----------
center_point : collections.namedtuple
The coordinates of the point used a center (potentially moved from the
original point in order to be on the network).
grid : geopandas.GeoDataFrame
The point locations retrieved from OSRM (ie. potentially moved
to be on the routable network).
times : numpy.ndarray
The time-distance table retrieved from OSRM.
Methods
-------
render_contour(nb_class)
Render the contour polygon according to the choosen number of class.
"""
def __init__(self, point_origin, points_grid=250,
size=0.4, url_config=RequestConfig):
gdf = GeoDataFrame(geometry=[Point(point_origin).buffer(size)])
grid = make_grid(gdf, points_grid)
coords_grid = \
[(i.coords.xy[0][0], i.coords.xy[1][0]) for i in grid.geometry]
self.times, new_pt_origin, pts_dest = \
table([point_origin], coords_grid, url_config=url_config)
self.times = (self.times[0] / 60.0).round(2) # Round values in minutes
geoms, values = [], []
for time, coord in zip(self.times, pts_dest):
if time:
geoms.append(Point(coord))
values.append(time)
self.grid = GeoDataFrame(geometry=geoms, data=values, columns=['time'])
self.center_point = _Point(
latitude=new_pt_origin[0][0], longitude=new_pt_origin[0][1])
def render_contour(self, n_class):
"""
Parameters
----------
n_class : int
The desired number of class.
Returns
-------
gdf_poly : GeoDataFrame
The shape of the computed accessibility polygons.
"""
collec_poly, levels = contour_poly(self.grid, 'time', n_class=n_class)
gdf_poly = isopoly_to_gdf(collec_poly, 'time', levels)
return gdf_poly
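# Illustrative usage sketch (assumption: RequestConfig points at a reachable
# OSRM instance and the coordinate order matches what the osrm wrapper's
# table() call expects):
#
#     iso = AccessIsochrone((21.0122, 52.2296), points_grid=200, size=0.35)
#     polygons = iso.render_contour(n_class=6)   # GeoDataFrame with a 'time' column
#     polygons.plot(column='time')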
|
the-stack_0_23105 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import bottle
import auth_public as auth
import psycopg2
import psycopg2.extensions
import psycopg2.extras
import hashlib
bottle.debug(True)
# this is for signing cookies in the browser, so the user cannot modify them and cause confusion
# current cookies in Chrome: more tools -> developer tools -> application -> cookies
# every time we visit our site, the server returns a cookie
secret = "to skrivnost je zelo tezko uganiti 1094107c907cw982982c42"
######################################################################
# Helper functions
def password_md5(s):
"""Vrni MD5 hash danega UTF-8 niza. Gesla vedno spravimo v bazo
kodirana s to funkcijo."""
h = hashlib.md5()
h.update(s.encode('utf-8'))
return h.hexdigest()
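# Illustrative sketch: seeding a password row by hand uses the same helper
# (the unsalted MD5 scheme is kept only because the existing uporabnik table
# stores passwords this way):
#
#     password_md5('geslo123')   # -> 32-character hex digest for uporabnik.geslo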
def get_user(auto_login=True):
"""Poglej cookie in ugotovi, kdo je prijavljeni uporabnik,
vrni njegov username in ime. Če ni prijavljen, presumeri
na stran za prijavo ali vrni None (advisno od auto_login).
"""
    # Get the username from the cookie
username = bottle.request.get_cookie('username', secret=secret)
    # Check whether this user exists
if username is not None:
c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
c.execute("SELECT tip, username, mail FROM uporabnik WHERE username=%s",
[username])
r = c.fetchone()
conn.commit()
# c.close()
if r is not None:
            # the user exists, return their data
return r
else:
bottle.redirect('/login/')
    # If we get this far, the user is not logged in, so redirect
if auto_login:
bottle.redirect('/login/')
else:
return None
########################################################################
@bottle.route('/static/<filepath:path>') # location of our file, type: path
def server_static(filepath): # serve the file from the static directory
return bottle.static_file(filepath, root='static')
@bottle.route("/")
def main():
"""Glavna stran."""
# Iz cookieja dobimo uporabnika (ali ga preusmerimo na login, če nima cookija)
(tip, username, mail) = get_user()
c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
c.execute("""
SELECT * FROM igra ORDER BY serijska;
""")
igre = c.fetchall()
    # Return the template for the main page
return bottle.template("index.html", uporabnik=username, igre=igre)
@bottle.route("/gost/")
def main_gost():
username = 'gost'
bottle.response.set_cookie('username', username, path='/', secret=secret)
bottle.redirect("/")
@bottle.get('/login/')
def login_get(tip=None):
"""Serviraj formo za login."""
return bottle.template('login.html', napaka=None, username=None) # na zacetku ni usernamea in napake
@bottle.post("/login/")
def login_post():
"""Obdelaj izpolnjeno formo za prijavo"""
# Uporabniško ime, ki ga je uporabnik vpisal v formo
username = bottle.request.forms.user
    # Compute the MD5 hash of the password we are going to store
password = password_md5(bottle.request.forms.psw)
    # Check whether the user logged in correctly
c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
c.execute("SELECT 1 FROM uporabnik WHERE username=%s AND geslo=%s",
[username, password])
if c.fetchone() is None:
        # The username and password do not match
return bottle.template("login.html",
napaka="Nepravilna prijava", # v template login nastavljeno opozorilo
username=username) # ohranimo isto uporabnisko ime
else:
        # Everything is fine, set the cookie and redirect to the main page
bottle.response.set_cookie('username', username, path='/', secret=secret)
bottle.redirect("/")
@bottle.get("/logout/")
def logout():
"""Pobriši cookie in preusmeri na login."""
bottle.response.delete_cookie('username', path='/', secret=secret)
bottle.redirect('/login/')
@bottle.get("/register/")
def register_get():
"""Prikaži formo za registracijo."""
return bottle.template("register.html",
username=None,
mail=None,
napaka=None)
@bottle.post("/register/")
def register_post():
"""Registriraj novega uporabnika."""
username = bottle.request.forms.user
mail = bottle.request.forms.mail
password1 = bottle.request.forms.psw1
password2 = bottle.request.forms.psw2
    # Does the user already exist?
c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
c.execute("SELECT 1 FROM uporabnik WHERE username=%s", [username])
if c.fetchone():
        # The user already exists
return bottle.template("register.html",
                               # keep the username and mail, only the password has to be entered again
username=username,
mail=mail,
napaka='To uporabniško ime je že zavzeto')
elif not password1 == password2:
        # The passwords do not match
return bottle.template("register.html",
username=username,
mail=mail,
napaka='Gesli se ne ujemata')
else:
        # Everything is fine, insert the new user into the database
password = password_md5(password1)
c.execute("INSERT INTO uporabnik (tip, username, mail, geslo) VALUES (%s, %s, %s, %s)",
('registriranec', username, mail, password))
conn.commit()
        # Give the user a cookie
bottle.response.set_cookie(
'username', username, path='/', secret=secret)
bottle.redirect("/")
@bottle.get("/igra/<ime>")
def igra_get(ime):
(tip, username, mail) = get_user()
c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
c.execute("SELECT * FROM igra WHERE ime=%s", [ime])
trenutna = c.fetchone()
if trenutna:
c.execute("""
SELECT DISTINCT ustvarjalci.ime FROM ustvarjalci
JOIN igraust ON ustvarjalci.u_id = igraust.u_id
JOIN igra ON igraust.serijska = igra.serijska
WHERE ustvarjalci.tip='avtor' AND igra.ime=%s
""", [trenutna[1]])
avtorji = c.fetchall()
c.execute("""
SELECT DISTINCT ustvarjalci.ime FROM ustvarjalci
JOIN igraust ON ustvarjalci.u_id = igraust.u_id
JOIN igra ON igraust.serijska = igra.serijska
WHERE ustvarjalci.tip='oblikovalec' AND igra.ime=%s
""", [trenutna[1]])
oblikovalci = c.fetchall()
c.execute("""
SELECT zalozba.ime FROM zalozba
JOIN igrazal ON zalozba.zalozba_id = igrazal.zalozba_id
JOIN igra ON igrazal.serijska = igra.serijska
WHERE igra.ime=%s
""", [trenutna[1]])
zalozba = c.fetchall()
c.execute("""
SELECT dodatek.ime FROM igra AS dodatek
JOIN igra AS osnova ON dodatek.dodatek=osnova.serijska
WHERE osnova.ime=%s
""", [trenutna[1]])
dodatki = c.fetchall()
c.execute("""
SELECT osnova.ime FROM igra AS osnova
JOIN igra AS dodatek ON dodatek.dodatek=osnova.serijska
WHERE dodatek.ime=%s
""", [trenutna[1]])
osnova = c.fetchone()
c.execute("""
SELECT uporabnik.username, igra.ime, komentarji.komentar, komentarji.cas FROM komentarji
JOIN uporabnik ON komentarji.uporabnik_id = uporabnik.uporabnik_id
JOIN igra ON komentarji.serijska = igra.serijska
WHERE igra.ime=%s
ORDER BY komentarji.cas DESC
""", [trenutna[1]])
komentarji = c.fetchall()
c.execute("""
SELECT zvrst.ime FROM zvrst
JOIN igrazvrst ON zvrst.zvrst_id = igrazvrst.zvrst_id
JOIN igra ON igrazvrst.serijska = igra.serijska
WHERE igra.ime=%s
""", [trenutna[1]])
zvrsti = c.fetchall()
c.execute("""
SELECT ROUND(AVG(ocena),2), COUNT(*) FROM ocene
WHERE serijska = (SELECT serijska FROM igra WHERE ime = %s)
""",[trenutna[1]])
ocena = c.fetchone()
c.execute("""
SELECT ocena FROM ocene
WHERE serijska = (SELECT serijska FROM igra WHERE ime = %s) AND
uporabnik_id = (SELECT uporabnik_id FROM uporabnik WHERE username = %s)
""",[trenutna[1], username])
ocenaUporabnika = c.fetchone()
return bottle.template("igra.html", uporabnik=username, igra=ime, info=trenutna, avtorji=avtorji, oblikovalci=oblikovalci, zalozba=zalozba, dodatki=dodatki, osnova=osnova, komentarji=komentarji, zvrsti=zvrsti, ocena = ocena, upOcena = ocenaUporabnika)
else:
bottle.redirect("/")
@bottle.route("/brskalnik/avtor/")
def avtor():
"""Glavna stran."""
# Iz cookieja dobimo uporabnika (ali ga preusmerimo na login, če nima cookija)
(tip, username, mail) = get_user()
c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
c.execute("""
SELECT ime FROM ustvarjalci WHERE tip='avtor' ORDER BY ime ASC;
""")
avtorji = c.fetchall()
    # Return the template for the authors page
return bottle.template("avtorji.html", uporabnik=username, avtorji=avtorji)
@bottle.get("/avtor/<ime>")
def avtor_get(ime):
(tip, username, mail) = get_user()
c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
c.execute("SELECT ime FROM ustvarjalci WHERE ime=%s", [ime])
trenutna = c.fetchone()
if trenutna:
c.execute("""
SELECT igra.ime FROM igra
JOIN igraust ON igra.serijska = igraust.serijska
JOIN ustvarjalci ON igraust.u_id = ustvarjalci.u_id
WHERE ustvarjalci.ime=%s AND tip='avtor'
""", [ime])
igre = c.fetchall()
return bottle.template("avtor.html", uporabnik=username, avtor=ime, igre=igre)
else:
bottle.redirect("/brskalnik/avtor/")
@bottle.post("/komentar/<igra>")
def igra_post(igra):
(tip, username, mail) = get_user()
komentar = bottle.request.forms.msg
c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if komentar != None and komentar != '':
c.execute("""
INSERT INTO komentarji(uporabnik_id, serijska, komentar) VALUES (
(SELECT uporabnik_id FROM uporabnik WHERE username=%s),
(SELECT serijska FROM igra WHERE ime=%s),
%s
)
""", [username, igra, komentar])
ocena = bottle.request.forms.rating
if ocena != None and ocena !='':
c.execute("""
UPDATE ocene SET
uporabnik_id =(SELECT uporabnik_id FROM uporabnik WHERE username=%s),
serijska=(SELECT serijska FROM igra WHERE ime = %s),
ocena=%s
WHERE uporabnik_id =(SELECT uporabnik_id FROM uporabnik WHERE username=%s) AND
serijska=(SELECT serijska FROM igra WHERE ime = %s);
INSERT INTO ocene(uporabnik_id, serijska, ocena) SELECT
(SELECT uporabnik_id FROM uporabnik WHERE username=%s),
(SELECT serijska FROM igra WHERE ime=%s),
%s WHERE NOT EXISTS (SELECT uporabnik_id, serijska FROM ocene WHERE
uporabnik_id =(SELECT uporabnik_id FROM uporabnik WHERE username=%s) AND
serijska=(SELECT serijska FROM igra WHERE ime = %s));
""",[username, igra, ocena, username, igra, username, igra, ocena, username, igra])
bottle.redirect("/igra/%s" % igra)
@bottle.get("/brskalnik/")
def brskalnik_get():
"""Prikaži brskalnik iger. """
(tip, username, mail) = get_user()
return bottle.template("brskalnik.html", uporabnik=username)
@bottle.post("/brskalnik/")
def brskalnik_post():
(tip, username, mail) = get_user()
    # convert the HTML empty value, '', to the Python empty value, None
    def nula(s): return None if s == '' else s
st_igralcev = nula(bottle.request.forms.st_igralcev)
cas = nula(bottle.request.forms.cas)
starost = nula(bottle.request.forms.starost)
leto = nula(bottle.request.forms.leto)
c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
c.execute("""
SELECT * FROM igra
WHERE (%s IS NULL OR min_igralcev <= %s)
AND (%s IS NULL OR max_igralcev >= %s)
AND (%s IS NULL OR max_cas <= %s)
AND (%s IS NULL OR starost <= %s)
AND (%s IS NULL OR leto_izdaje = %s)
""", [st_igralcev, st_igralcev, st_igralcev, st_igralcev, cas, cas, starost, starost, leto, leto])
igre = c.fetchall()
return bottle.template("index.html", uporabnik=username, igre=igre)
####################################
conn = psycopg2.connect(database=auth.db, host=auth.host,
user=auth.user, password=auth.password)
# get rid of problems with non-ASCII (accented) characters
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
# run the server on port 8080, see http://localhost:8080/
bottle.run(host='localhost', port=8080, reloader=True, debug=True)
|
the-stack_0_23106 | # ---------------------------------------------------------------------------
# Pelion Device Management SDK
# (C) COPYRIGHT 2017 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""Example showing basic usage of the webhook functionality."""
from mbed_cloud import ConnectAPI
import time
BUTTON_RESOURCE = "/5002/0/1"
def _main():
api = ConnectAPI()
devices = api.list_connected_devices().data
if len(devices) == 0:
raise Exception("No endpints registered. Aborting")
# Delete device subscriptions
api.delete_device_subscriptions(devices[0].id)
# First register to webhook
api.update_webhook("http://python.requestcatcher.com/")
time.sleep(2)
api.add_resource_subscription(devices[0].id, BUTTON_RESOURCE)
while True:
print("Webhook registered. Listening to button updates for 10 seconds...")
time.sleep(10)
break
api.delete_webhook()
print("Deregistered and unsubscribed from all resources. Exiting.")
if __name__ == '__main__':
_main()
|
the-stack_0_23107 | # test mod_md managed domain add/registration handling
import pytest
from .md_env import MDTestEnv
@pytest.mark.skipif(condition=not MDTestEnv.has_a2md(), reason="no a2md available")
@pytest.mark.skipif(condition=not MDTestEnv.has_acme_server(),
reason="no ACME test server configured")
class TestRegAdd:
@pytest.fixture(autouse=True, scope='function')
def _method_scope(self, env):
env.purge_store()
# test case: add a single dns managed domain
def test_md_100_000(self, env):
dns = "greenbytes.de"
jout1 = env.a2md(["add", dns]).json
env.check_json_contains(jout1['output'][0], {
"name": dns,
"domains": [dns],
"contacts": [],
"ca": {
"urls": [env.acme_url],
"proto": "ACME"
},
"state": env.MD_S_INCOMPLETE
})
assert env.a2md(["list"]).json == jout1
# test case: add > 1 dns managed domain
def test_md_100_001(self, env):
dns = ["greenbytes2.de", "www.greenbytes2.de", "mail.greenbytes2.de"]
jout1 = env.a2md(["add"] + dns).json
env.check_json_contains(jout1['output'][0], {
"name": dns[0],
"domains": dns,
"contacts": [],
"ca": {
"urls": [env.acme_url],
"proto": "ACME"
},
"state": env.MD_S_INCOMPLETE
})
assert env.a2md(["list"]).json == jout1
# test case: add second managed domain
def test_md_100_002(self, env):
dns1 = ["test100-002.com", "test100-002a.com", "test100-002b.com"]
env.a2md(["add"] + dns1)
# add second managed domain
dns2 = ["greenbytes2.de", "www.greenbytes2.de", "mail.greenbytes2.de"]
jout = env.a2md(["add"] + dns2).json
# assert: output covers only changed md
assert len(jout['output']) == 1
env.check_json_contains(jout['output'][0], {
"name": dns2[0],
"domains": dns2,
"contacts": [],
"ca": {
"urls": [env.acme_url],
"proto": "ACME"
},
"state": env.MD_S_INCOMPLETE
})
assert len(env.a2md(["list"]).json['output']) == 2
# test case: add existing domain
def test_md_100_003(self, env):
dns = "greenbytes.de"
assert env.a2md(["add", dns]).exit_code == 0
assert env.a2md(["add", dns]).exit_code == 1
# test case: add without CA URL
def test_md_100_004(self, env):
dns = "greenbytes.de"
jout1 = env.run([env.a2md_bin, "-d", env.store_dir, "-j", "add", dns]).json
assert len(jout1['output']) == 1
env.check_json_contains(jout1['output'][0], {
"name": dns,
"domains": [dns],
"contacts": [],
"ca": {
"proto": "ACME"
},
"state": env.MD_S_INCOMPLETE
})
assert env.a2md(["list"]).json == jout1
# test case: add with invalid DNS
@pytest.mark.parametrize("invalid_dns", [
"tld", "white sp.ace", "invalid.*.wildcard.com", "k\xc3ller.idn.com"
])
def test_md_100_005(self, env, invalid_dns):
assert env.a2md(["add", invalid_dns]).exit_code == 1
assert env.a2md(["add", "test-100.de", invalid_dns]).exit_code == 1
# test case: add with invalid ACME URL
@pytest.mark.parametrize("invalid_url", [
"no.schema/path", "http://white space/path", "http://bad.port:-1/path"])
def test_md_100_006(self, env, invalid_url):
args = [env.a2md_bin, "-a", invalid_url, "-d", env.store_dir, "-j"]
dns = "greenbytes.de"
args.extend(["add", dns])
assert env.run(args).exit_code == 1
# test case: add overlapping dns names
def test_md_100_007(self, env):
assert env.a2md(["add", "test-100.com", "test-101.com"]).exit_code == 0
# 1: alternate DNS exists as primary name
assert env.a2md(["add", "greenbytes2.de", "test-100.com"]).exit_code == 1
# 2: alternate DNS exists as alternate DNS
assert env.a2md(["add", "greenbytes2.de", "test-101.com"]).exit_code == 1
# 3: primary name exists as alternate DNS
assert env.a2md(["add", "test-101.com"]).exit_code == 1
# test case: add subdomains as separate managed domain
def test_md_100_008(self, env):
assert env.a2md(["add", "test-100.com"]).exit_code == 0
assert env.a2md(["add", "sub.test-100.com"]).exit_code == 0
# test case: add duplicate domain
def test_md_100_009(self, env):
dns1 = "test-100.com"
dns2 = "test-101.com"
jout = env.a2md(["add", dns1, dns2, dns1, dns2]).json
# DNS is only listed once
assert len(jout['output']) == 1
md = jout['output'][0]
assert md['domains'] == [dns1, dns2]
    # test case: add punycode name
def test_md_100_010(self, env):
assert env.a2md(["add", "xn--kller-jua.punycode.de"]).exit_code == 0
# test case: don't sort alternate names
def test_md_100_011(self, env):
dns = ["test-100.com", "test-xxx.com", "test-aaa.com"]
jout = env.a2md(["add"] + dns).json
# DNS is only listed as specified
assert len(jout['output']) == 1
md = jout['output'][0]
assert md['domains'] == dns
# test case: add DNS wildcard
@pytest.mark.parametrize("wild_dns", [
"*.wildcard.com"
])
def test_md_100_012(self, env, wild_dns):
assert env.a2md(["add", wild_dns]).exit_code == 0
|
the-stack_0_23108 | """
voxel.py
-----------
Convert meshes to a simple voxel data structure and back again.
"""
import numpy as np
from . import util
from . import remesh
from . import caching
from . import grouping
from .constants import log, log_time
class Voxel(object):
def __init__(self, *args, **kwargs):
self._data = caching.DataStore()
self._cache = caching.Cache(id_function=self._data.crc)
@caching.cache_decorator
def marching_cubes(self):
"""
A marching cubes Trimesh representation of the voxels.
No effort was made to clean or smooth the result in any way;
it is merely the result of applying the scikit-image
measure.marching_cubes function to self.matrix.
Returns
---------
meshed: Trimesh object representing the current voxel
object, as returned by marching cubes algorithm.
"""
meshed = matrix_to_marching_cubes(matrix=self.matrix,
pitch=self.pitch,
origin=self.origin)
return meshed
@property
def pitch(self):
# stored as TrackedArray with a single element
return self._data['pitch'][0]
@pitch.setter
def pitch(self, value):
self._data['pitch'] = value
@property
def shape(self):
"""
The shape of the matrix for the current voxel object.
Returns
---------
shape: (3,) int, what is the shape of the 3D matrix
for these voxels
"""
return self.matrix.shape
@caching.cache_decorator
def filled_count(self):
"""
Return the number of voxels that are occupied.
Returns
--------
filled: int, number of voxels that are occupied
"""
return int(self.matrix.sum())
@caching.cache_decorator
def volume(self):
"""
What is the volume of the filled cells in the current voxel object.
Returns
---------
volume: float, volume of filled cells
"""
volume = self.filled_count * (self.pitch**3)
return volume
@caching.cache_decorator
def points(self):
"""
The center of each filled cell as a list of points.
Returns
----------
points: (self.filled, 3) float, list of points
"""
points = matrix_to_points(matrix=self.matrix,
pitch=self.pitch,
origin=self.origin)
return points
def point_to_index(self, point):
"""
Convert a point to an index in the matrix array.
Parameters
----------
point: (3,) float, point in space
Returns
---------
index: (3,) int tuple, index in self.matrix
"""
point = np.asanyarray(point)
if point.shape != (3,):
raise ValueError('to_index requires a single point')
index = np.round((point - self.origin) /
self.pitch).astype(int)
index = tuple(index)
return index
def is_filled(self, point):
"""
Query a point to see if the voxel cell it lies in is filled or not.
Parameters
----------
point: (3,) float, point in space
Returns
---------
is_filled: bool, is cell occupied or not
"""
index = self.point_to_index(point)
in_range = (np.array(index) < np.array(self.shape)).all()
if in_range:
is_filled = self.matrix[index]
else:
is_filled = False
return is_filled
class VoxelMesh(Voxel):
def __init__(self,
mesh,
pitch,
max_iter=10,
size_max=None,
method='subdivide'):
"""
A voxel representation of a mesh that will track changes to
the mesh.
At the moment the voxels are not filled in and only represent
the surface.
Parameters
----------
mesh: Trimesh object
pitch: float, how long should each edge of the voxel be
size_max: float, maximum size (in mb) of a data structure that
may be created before raising an exception
"""
super(VoxelMesh, self).__init__()
self._method = method
self._data['mesh'] = mesh
self._data['pitch'] = pitch
self._data['max_iter'] = max_iter
@caching.cache_decorator
def matrix_surface(self):
"""
The voxels on the surface of the mesh as a 3D matrix.
Returns
---------
matrix: self.shape np.bool, if a cell is True it is occupied
"""
matrix = sparse_to_matrix(self.sparse_surface)
return matrix
@caching.cache_decorator
def matrix_solid(self):
"""
The voxels in a mesh as a 3D matrix.
Returns
---------
matrix: self.shape np.bool, if a cell is True it is occupied
"""
matrix = sparse_to_matrix(self.sparse_solid)
return matrix
@property
def matrix(self):
"""
A matrix representation of the surface voxels.
In the future this is planned to return a filled voxel matrix
if the source mesh is watertight, and a surface voxelization
otherwise.
Returns
---------
matrix: self.shape np.bool, cell occupancy
"""
if self._data['mesh'].is_watertight:
return self.matrix_solid
return self.matrix_surface
@property
def origin(self):
"""
The origin of the voxel array.
Returns
------------
origin: (3,) float, point in space
"""
populate = self.sparse_surface
return self._cache['origin']
@caching.cache_decorator
def sparse_surface(self):
"""
Filled cells on the surface of the mesh.
Returns
----------------
voxels: (n, 3) int, filled cells on mesh surface
"""
if self._method == 'ray':
func = voxelize_ray
elif self._method == 'subdivide':
func = voxelize_subdivide
else:
raise ValueError('voxelization method incorrect')
voxels, origin = func(
mesh=self._data['mesh'],
pitch=self._data['pitch'],
max_iter=self._data['max_iter'][0])
self._cache['origin'] = origin
return voxels
@caching.cache_decorator
def sparse_solid(self):
"""
Filled cells inside and on the surface of mesh
Returns
----------------
filled: (n, 3) int, filled cells in or on mesh.
"""
filled = fill_voxelization(self.sparse_surface)
return filled
def as_boxes(self, solid=False):
"""
A rough Trimesh representation of the voxels with a box
for each filled voxel.
Parameters
-----------
solid: bool, if True return boxes for sparse_solid
Returns
---------
mesh: Trimesh object made up of one box per filled cell.
"""
if solid:
filled = self.sparse_solid
else:
filled = self.sparse_surface
# center points of voxels
centers = (filled * self.pitch).astype(np.float64)
centers += self.origin - (self.pitch / 2.0)
mesh = multibox(centers=centers, pitch=self.pitch)
return mesh
def show(self, solid=False):
"""
Convert the current set of voxels into a trimesh for visualization
and show that via its built- in preview method.
"""
self.as_boxes(solid=solid).show()
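# Illustrative usage sketch (assumption: `mesh` is a watertight trimesh.Trimesh,
# so the solid matrix is meaningful):
#
#     v = VoxelMesh(mesh, pitch=0.05)
#     v.filled_count, v.volume        # occupancy statistics
#     v.as_boxes(solid=True).show()   # rough box visualization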
@log_time
def voxelize_subdivide(mesh,
pitch,
max_iter=10,
edge_factor=2.0):
"""
Voxelize a surface by subdividing a mesh until every edge is
shorter than: (pitch / edge_factor)
Parameters
-----------
mesh: Trimesh object
pitch: float, side length of a single voxel cube
max_iter: int, cap maximum subdivisions or None for no limit.
edge_factor: float,
Returns
-----------
voxels_sparse: (n,3) int, (m,n,p) indexes of filled cells
origin_position: (3,) float, position of the voxel
grid origin in space
"""
max_edge = pitch / edge_factor
if max_iter is None:
longest_edge = np.linalg.norm(mesh.vertices[mesh.edges[:, 0]] -
mesh.vertices[mesh.edges[:, 1]],
axis=1).max()
max_iter = max(int(np.ceil(np.log2(longest_edge / max_edge))), 0)
    # get the same mesh subdivided so every edge is shorter
# than a factor of our pitch
v, f = remesh.subdivide_to_size(mesh.vertices,
mesh.faces,
max_edge=max_edge,
max_iter=max_iter)
# convert the vertices to their voxel grid position
hit = v / pitch
# Provided edge_factor > 1 and max_iter is large enough, this is
# sufficient to preserve 6-connectivity at the level of voxels.
hit = np.round(hit).astype(int)
# remove duplicates
unique, inverse = grouping.unique_rows(hit)
# get the voxel centers in model space
occupied_index = hit[unique]
origin_index = occupied_index.min(axis=0)
origin_position = origin_index * pitch
voxels_sparse = (occupied_index - origin_index)
return voxels_sparse, origin_position
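# Illustrative usage sketch tying the helpers in this module together
# (assumption: `mesh` is any trimesh.Trimesh):
#
#     sparse, origin = voxelize_subdivide(mesh, pitch=0.1)
#     matrix = sparse_to_matrix(sparse)                # dense boolean occupancy grid
#     centers = matrix_to_points(matrix, 0.1, origin)  # cell centers in model space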
def local_voxelize(mesh, point, pitch, radius, fill=True, **kwargs):
"""
Voxelize a mesh in the region of a cube around a point. When fill=True,
uses proximity.contains to fill the resulting voxels so may be meaningless
for non-watertight meshes. Useful to reduce memory cost for small values of
pitch as opposed to global voxelization.
Parameters
-----------
mesh : trimesh.Trimesh
Source geometry
point : (3, ) float
Point in space to voxelize around
pitch : float
Side length of a single voxel cube
radius : int
Number of voxel cubes to return in each direction.
kwargs : parameters to pass to voxelize_subdivide
Returns
-----------
voxels : (m, m, m) bool
Array of local voxels where m=2*radius+1
origin_position : (3,) float
Position of the voxel grid origin in space
"""
from scipy import ndimage
# make sure point is correct type/shape
point = np.asanyarray(point, dtype=np.float64).reshape(3)
# this is a gotcha- radius sounds a lot like it should be in
# float model space, not int voxel space so check
if not isinstance(radius, int):
raise ValueError('radius needs to be an integer number of cubes!')
# Bounds of region
bounds = np.concatenate((point - (radius + 0.5) * pitch,
point + (radius + 0.5) * pitch))
# faces that intersect axis aligned bounding box
faces = list(mesh.triangles_tree.intersection(bounds))
# didn't hit anything so exit
if len(faces) == 0:
return np.array([], dtype=np.bool), np.zeros(3)
local = mesh.submesh([[f] for f in faces], append=True)
# Translate mesh so point is at 0,0,0
local.apply_translation(-point)
sparse, origin = voxelize_subdivide(local, pitch, **kwargs)
matrix = sparse_to_matrix(sparse)
# Find voxel index for point
center = np.round(-origin / pitch).astype(np.int64)
# pad matrix if necessary
prepad = np.maximum(radius - center, 0)
postpad = np.maximum(center + radius + 1 - matrix.shape, 0)
matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1),
mode='constant')
center += prepad
# Extract voxels within the bounding box
voxels = matrix[center[0] - radius:center[0] + radius + 1,
center[1] - radius:center[1] + radius + 1,
center[2] - radius:center[2] + radius + 1]
local_origin = point - radius * pitch # origin of local voxels
# Fill internal regions
if fill:
regions, n = ndimage.measurements.label(~voxels)
distance = ndimage.morphology.distance_transform_cdt(~voxels)
representatives = [np.unravel_index((distance * (regions == i)).argmax(),
distance.shape) for i in range(1, n + 1)]
contains = mesh.contains(
np.asarray(representatives) *
pitch +
local_origin)
where = np.where(contains)[0] + 1
# use in1d vs isin for older numpy versions
internal = np.in1d(regions.flatten(), where).reshape(regions.shape)
voxels = np.logical_or(voxels, internal)
return voxels, local_origin
@log_time
def voxelize_ray(mesh,
pitch,
per_cell=[2, 2],
**kwargs):
"""
Voxelize a mesh using ray queries.
Parameters
-------------
mesh : Trimesh object
Mesh to be voxelized
pitch : float
Length of voxel cube
per_cell : (2,) int
How many ray queries to make per cell
Returns
-------------
voxels : (n, 3) int
Voxel positions
origin : (3, ) int
Origin of voxels
"""
# how many rays per cell
per_cell = np.array(per_cell).astype(np.int).reshape(2)
# edge length of cube voxels
pitch = float(pitch)
# create the ray origins in a grid
bounds = mesh.bounds[:, :2].copy()
# offset start so we get the requested number per cell
bounds[0] += pitch / (1.0 + per_cell)
# offset end so arange doesn't short us
bounds[1] += pitch
# on X we are doing multiple rays per voxel step
step = pitch / per_cell
# 2D grid
ray_ori = util.grid_arange(bounds, step=step)
# a Z position below the mesh
z = np.ones(len(ray_ori)) * (mesh.bounds[0][2] - pitch)
ray_ori = np.column_stack((ray_ori, z))
# all rays are along positive Z
ray_dir = np.ones_like(ray_ori) * [0, 0, 1]
# if you have pyembree this should be decently fast
hits = mesh.ray.intersects_location(ray_ori, ray_dir)[0]
# just convert hit locations to integer positions
voxels = np.round(hits / pitch).astype(np.int64)
# offset voxels by min, so matrix isn't huge
origin = voxels.min(axis=0)
voxels -= origin
return voxels, origin
def fill_voxelization(occupied):
"""
Given a sparse surface voxelization, fill in between columns.
Parameters
--------------
occupied: (n, 3) int, location of filled cells
Returns
--------------
filled: (m, 3) int, location of filled cells
"""
# validate inputs
occupied = np.asanyarray(occupied, dtype=np.int64)
if not util.is_shape(occupied, (-1, 3)):
raise ValueError('incorrect shape')
# create grid and mark inner voxels
max_value = occupied.max() + 3
grid = np.zeros((max_value,
max_value,
max_value),
dtype=np.int64)
voxels_sparse = np.add(occupied, 1)
grid.__setitem__(tuple(voxels_sparse.T), 1)
for i in range(max_value):
check_dir2 = False
for j in range(0, max_value - 1):
idx = []
# find transitions first
# transition positions are from 0 to 1 and from 1 to 0
eq = np.equal(grid[i, j, :-1], grid[i, j, 1:])
idx = np.where(np.logical_not(eq))[0] + 1
c = len(idx)
check_dir2 = (c % 4) > 0 and c > 4
if c < 4:
continue
for s in range(0, c - c % 4, 4):
grid[i, j, idx[s]:idx[s + 3]] = 1
if not check_dir2:
continue
# check another direction for robustness
for k in range(0, max_value - 1):
idx = []
# find transitions first
eq = np.equal(grid[i, :-1, k], grid[i, 1:, k])
idx = np.where(np.logical_not(eq))[0] + 1
c = len(idx)
if c < 4:
continue
for s in range(0, c - c % 4, 4):
grid[i, idx[s]:idx[s + 3], k] = 1
# generate new voxels
idx = np.where(grid == 1)
filled = np.array([[idx[0][i] - 1,
idx[1][i] - 1,
idx[2][i] - 1]
for i in range(len(idx[0]))])
return filled
def matrix_to_points(matrix, pitch, origin):
"""
Convert an (n,m,p) matrix into a set of points for each voxel center.
Parameters
-----------
matrix: (n,m,p) bool, voxel matrix
pitch: float, what pitch was the voxel matrix computed with
origin: (3,) float, what is the origin of the voxel matrix
Returns
----------
points: (q, 3) list of points
"""
points = np.column_stack(np.nonzero(matrix)) * pitch + origin
return points
def matrix_to_marching_cubes(matrix, pitch, origin):
"""
Convert an (n,m,p) matrix into a mesh, using marching_cubes.
Parameters
-----------
matrix: (n,m,p) bool, voxel matrix
pitch: float, what pitch was the voxel matrix computed with
origin: (3,) float, what is the origin of the voxel matrix
Returns
----------
mesh: Trimesh object, generated by meshing voxels using
the marching cubes algorithm in skimage
"""
from skimage import measure
from .base import Trimesh
matrix = np.asanyarray(matrix, dtype=np.bool)
    rev_matrix = np.logical_not(matrix)  # invert so filled cells become 0 and marching cubes meshes their boundary
# Add in padding so marching cubes can function properly with
# voxels on edge of AABB
pad_width = 1
rev_matrix = np.pad(rev_matrix,
pad_width=(pad_width),
mode='constant',
constant_values=(1))
# pick between old and new API
if hasattr(measure, 'marching_cubes_lewiner'):
func = measure.marching_cubes_lewiner
else:
func = measure.marching_cubes
# Run marching cubes.
meshed = func(volume=rev_matrix,
level=.5, # it is a boolean voxel grid
spacing=(pitch,
pitch,
pitch))
# allow results from either marching cubes function in skimage
# binaries available for python 3.3 and 3.4 appear to use the classic
# method
if len(meshed) == 2:
log.warning('using old marching cubes, may not be watertight!')
vertices, faces = meshed
normals = None
elif len(meshed) == 4:
vertices, faces, normals, vals = meshed
# Return to the origin, add in the pad_width
vertices = np.subtract(np.add(vertices, origin), pad_width * pitch)
# create the mesh
mesh = Trimesh(vertices=vertices,
faces=faces,
vertex_normals=normals)
return mesh
def sparse_to_matrix(sparse):
"""
Take a sparse (n,3) list of integer indexes of filled cells,
turn it into a dense (m,o,p) matrix.
Parameters
-----------
sparse: (n,3) int, index of filled cells
Returns
------------
dense: (m,o,p) bool, matrix of filled cells
"""
sparse = np.asanyarray(sparse, dtype=np.int)
if not util.is_shape(sparse, (-1, 3)):
raise ValueError('sparse must be (n,3)!')
shape = sparse.max(axis=0) + 1
matrix = np.zeros(np.product(shape), dtype=np.bool)
multiplier = np.array([np.product(shape[1:]), shape[2], 1])
index = (sparse * multiplier).sum(axis=1)
matrix[index] = True
dense = matrix.reshape(shape)
return dense
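# Worked example of the row-major indexing above: for sparse = [[0, 0, 0],
# [1, 2, 3]] the shape is (2, 3, 4), the multiplier is [12, 4, 1], and the
# cell (1, 2, 3) lands at flat index 1*12 + 2*4 + 3*1 = 23 before reshaping:
#
#     sparse_to_matrix(np.array([[0, 0, 0], [1, 2, 3]])).shape   # -> (2, 3, 4)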
def multibox(centers, pitch):
"""
Return a Trimesh object with a box at every center.
Doesn't do anything nice or fancy.
Parameters
-----------
centers: (n,3) float, center of boxes that are occupied
pitch: float, the edge length of a voxel
Returns
---------
rough: Trimesh object representing inputs
"""
from . import primitives
from .base import Trimesh
b = primitives.Box(extents=[pitch, pitch, pitch])
v = np.tile(centers, (1, len(b.vertices))).reshape((-1, 3))
v += np.tile(b.vertices, (len(centers), 1))
f = np.tile(b.faces, (len(centers), 1))
f += np.tile(np.arange(len(centers)) * len(b.vertices),
(len(b.faces), 1)).T.reshape((-1, 1))
rough = Trimesh(vertices=v, faces=f)
return rough
def boolean_sparse(a, b, operation=np.logical_and):
"""
Find common rows between two arrays very quickly
using 3D boolean sparse matrices.
Parameters
-----------
a: (n, d) int, coordinates in space
b: (m, d) int, coordinates in space
operation: numpy operation function, ie:
np.logical_and
np.logical_or
Returns
-----------
coords: (q, d) int, coordinates in space
"""
# 3D sparse arrays, using wrapped scipy.sparse
# pip install sparse
import sparse
# find the bounding box of both arrays
extrema = np.array([a.min(axis=0),
a.max(axis=0),
b.min(axis=0),
b.max(axis=0)])
origin = extrema.min(axis=0) - 1
size = tuple(extrema.ptp(axis=0) + 2)
# put nearby voxel arrays into same shape sparse array
sp_a = sparse.COO((a - origin).T,
data=np.ones(len(a), dtype=np.bool),
shape=size)
sp_b = sparse.COO((b - origin).T,
data=np.ones(len(b), dtype=np.bool),
shape=size)
# apply the logical operation
# get a sparse matrix out
applied = operation(sp_a, sp_b)
# reconstruct the original coordinates
coords = np.column_stack(applied.coords) + origin
return coords
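# Illustrative usage sketch (requires the optional `sparse` package):
#
#     a = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
#     b = np.array([[1, 1, 1], [9, 9, 9]])
#     boolean_sparse(a, b, np.logical_and)   # -> [[1, 1, 1]], the shared row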
|
the-stack_0_23109 | """
# Problem 56: Powerful digit sum
A googol (10^100) is a massive number: one followed by one-hundred zeros; 100^100 is almost
unimaginably large: one followed by two-hundred zeros. Despite their size, the sum of the digits in
each number is only 1.
Considering natural numbers of the form, a^b, where a, b < 100, what is the maximum digital sum?
"""
# Well, this is one of those actually pretty simple problems: 89 * 98 = 8722 candidate pairs
# once we remove the obviously bad bases (1 and multiples of 10).
disallow_a_values = [1, 10, 20, 30, 40, 50, 60, 70, 80, 90]
a_values = [i for i in range(1, 100) if i not in disallow_a_values]
b_values = range(2, 100)
max_sum = 0
for a in a_values:
for b in b_values:
# THE ASSIGNMENT EQUAL OPERATOR CAN'T HAPPEN SOON ENOUGH
digit_sum = sum([int(digit) for digit in str(a ** b)])
if digit_sum > max_sum:
max_sum = digit_sum
print(max_sum)
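# Equivalent one-liner over the same search space, kept as a cross-check:
# max(sum(int(digit) for digit in str(a ** b)) for a in a_values for b in b_values)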
|
the-stack_0_23112 | import napari_plot_profile
import pytest
def test_something_with_viewer(make_napari_viewer):
viewer = make_napari_viewer()
import numpy as np
image = np.random.random((256,256))
viewer.add_image(image, colormap='green', blending='additive')
viewer.add_shapes([[100, 80], [140, 150]], shape_type='path', edge_color='cyan', edge_width=3)
num_dw = len(viewer.window._dock_widgets)
from napari_plot_profile import PlotProfile
plotter = PlotProfile(viewer)
viewer.window.add_dock_widget(plotter)
assert len(viewer.window._dock_widgets) == num_dw + 1
plotter.to_table()
|
the-stack_0_23114 | # -*- coding: utf-8 -*-
#
# Copyright 2020-2021 AVSystem <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
from framework.lwm2m.tlv import TLV
from framework.lwm2m_test import *
import jni_test
class BootstrapServer:
class Test(jni_test.LocalSingleServerTest):
def setUp(self, **kwargs):
extra_args = ['--bootstrap-holdoff', '3']
self.setup_demo_with_servers(servers=0,
bootstrap_server=True,
extra_cmdline_args=extra_args,
**kwargs)
def tearDown(self):
self.teardown_demo_with_servers()
def get_demo_port(self, server_index=None):
# wait for sockets initialization
# scheduler-based socket initialization might delay socket setup a bit;
# this loop is here to ensure `communicate()` call below works as
# expected
for _ in range(10):
if self.get_socket_count() > 0:
break
else:
self.fail("sockets not initialized in time")
# send Bootstrap messages without request
return super().get_demo_port(server_index)
class BootstrapServerTest(BootstrapServer.Test):
def runTest(self):
self.bootstrap_server.connect_to_client(('127.0.0.1', self.get_demo_port()))
req = Lwm2mWrite('/%d/42' % (OID.Server,),
TLV.make_resource(RID.Server.Lifetime, 60).serialize()
+ TLV.make_resource(RID.Server.Binding, "U").serialize()
+ TLV.make_resource(RID.Server.ShortServerID, 42).serialize()
+ TLV.make_resource(RID.Server.NotificationStoring, True).serialize(),
format=coap.ContentFormat.APPLICATION_LWM2M_TLV)
self.bootstrap_server.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(),
self.bootstrap_server.recv())
regular_serv = Lwm2mServer()
regular_serv_uri = 'coap://127.0.0.1:%d' % regular_serv.get_listen_port()
# Create Security object
req = Lwm2mWrite('/%d/42' % (OID.Security,),
TLV.make_resource(RID.Security.ServerURI, regular_serv_uri).serialize()
+ TLV.make_resource(RID.Security.Bootstrap, 0).serialize()
+ TLV.make_resource(RID.Security.Mode, 3).serialize()
+ TLV.make_resource(RID.Security.ShortServerID, 42).serialize()
+ TLV.make_resource(RID.Security.PKOrIdentity, "").serialize()
+ TLV.make_resource(RID.Security.SecretKey, "").serialize(),
format=coap.ContentFormat.APPLICATION_LWM2M_TLV)
self.bootstrap_server.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(),
self.bootstrap_server.recv())
# no Client Initiated bootstrap
with self.assertRaises(socket.timeout):
print(self.bootstrap_server.recv(timeout_s=4))
# send Bootstrap Finish
req = Lwm2mBootstrapFinish()
self.bootstrap_server.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(),
self.bootstrap_server.recv())
self.assertDemoRegisters(server=regular_serv, lifetime=60)
# Bootstrap Delete / shall succeed
req = Lwm2mDelete('/')
self.bootstrap_server.send(req)
self.assertMsgEqual(Lwm2mDeleted.matching(req)(),
self.bootstrap_server.recv())
# ...even twice
req = Lwm2mDelete('/')
self.bootstrap_server.send(req)
self.assertMsgEqual(Lwm2mDeleted.matching(req)(),
self.bootstrap_server.recv())
# now send Bootstrap Finish
req = Lwm2mBootstrapFinish()
self.bootstrap_server.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(),
self.bootstrap_server.recv())
# the client will now start Client Initiated bootstrap, because it has no regular server connection
# this might happen after a backoff, if the Bootstrap Delete was handled before the response to Register
pkt = self.bootstrap_server.recv(timeout_s=20)
self.assertIsInstance(pkt, Lwm2mRequestBootstrap)
self.bootstrap_server.send(Lwm2mChanged.matching(pkt)())
self.request_demo_shutdown()
regular_serv.close()
class BootstrapEmptyResourcesDoesNotSegfault(BootstrapServer.Test):
def runTest(self):
self.bootstrap_server.connect_to_client(('127.0.0.1', self.get_demo_port()))
req = Lwm2mWrite('/%d/42' % (OID.Security,),
TLV.make_resource(RID.Security.ServerURI, 'coap://1.2.3.4:5678').serialize()
+ TLV.make_resource(RID.Security.Bootstrap, 0).serialize()
+ TLV.make_resource(RID.Security.Mode, 3).serialize()
+ TLV.make_resource(RID.Security.ShortServerID, 42).serialize()
+ TLV.make_resource(RID.Security.PKOrIdentity, b'').serialize()
+ TLV.make_resource(RID.Security.SecretKey, b'').serialize(),
format=coap.ContentFormat.APPLICATION_LWM2M_TLV)
for _ in range(64):
self.bootstrap_server.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(),
self.bootstrap_server.recv())
req = Lwm2mBootstrapFinish()
self.bootstrap_server.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(),
self.bootstrap_server.recv())
self.request_demo_shutdown()
|
the-stack_0_23115 | #!/usr/bin/env python
import tensorflow as tf
from tensorflow.contrib.layers import batch_norm
from enum import Enum
from model import Model
from weight_norm import WeightNorm
class Sharing(Enum):
none = 1
initial = 2
succeeding = 3
def mask_layer(layer, mask):
return tf.multiply(
tf.broadcast_to(
tf.expand_dims(
mask, -1), tf.shape(layer)), layer)
def dilated_convolution(
x,
n_outputs,
kernel_size,
n_levels,
is_training,
mask,
glu=False,
keep_prob=1.0):
layer = x
for i in range(n_levels):
        # Only use sharing for layers 1 and up. Layer 0 cannot use shared parameters:
#
# - It transforms word embeddings into the hidden representation,
# whereas subsequent layers transform hidden representations to
# hidden representations.
# - The input size may differ from the output size.
if i == 0:
sharing = Sharing.none
elif i == 1:
sharing = Sharing.initial
else:
sharing = Sharing.succeeding
dilation = 2 ** i
layer = residual_block(
layer,
n_outputs,
kernel_size,
dilation,
is_training=is_training,
mask=mask,
glu=glu,
keep_prob=keep_prob,
sharing=sharing)
# Mask after last convolution. This is only necessary for models that
# apply transformations across time steps after the diluted convolutions.
# But masking is cheap, so better safe than sorry.
layer = mask_layer(layer, mask)
return layer
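# Receptive-field sketch: each residual_block applies two convolutions with
# dilation 2**i, so with "same" padding the theoretical receptive field is
# roughly 1 + 2 * (kernel_size - 1) * (2**n_levels - 1) timesteps
# (e.g. kernel_size=3, n_levels=7 -> about 509).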
def residual_block(
x,
n_outputs,
kernel_size,
dilation,
is_training,
mask,
glu=False,
keep_prob=1.0,
sharing=Sharing.none):
if sharing == Sharing.initial or sharing == Sharing.succeeding:
suffix = "shared"
else:
suffix = "unshared"
with tf.variable_scope("conv1-%s" % suffix, reuse=sharing == Sharing.succeeding):
conv1 = residual_unit(
x,
n_outputs,
kernel_size,
dilation,
is_training,
mask=mask,
glu=glu,
keep_prob=keep_prob)
with tf.variable_scope("conv2-%s" % suffix, reuse=sharing == Sharing.succeeding):
conv2 = residual_unit(
conv1,
n_outputs,
kernel_size,
dilation,
is_training,
mask=mask,
glu=glu,
keep_prob=keep_prob)
if x.get_shape()[2] != n_outputs:
# Note: biases could change padding timesteps, but the next layer will mask
# the resulting sequence.
x = tf.layers.Conv1D(n_outputs, 1)(x)
return x + conv2
def residual_unit(
x,
n_outputs,
kernel_size,
dilation,
is_training,
mask,
glu=False,
keep_prob=1.0):
if glu:
# For GLU we need the hidden representation, plus an equal number
# of parameters for weighting the hidden representation.
n_outputs *= 2
# Mask inactive time steps. This is necessary, because convolutions make
# the padding non-zero (through past timesteps). In later convolutions,
# these updated paddings would then influence time steps before the
# padding.
x = mask_layer(x, mask)
conv = WeightNorm(
tf.layers.Conv1D(
n_outputs,
kernel_size,
dilation_rate=dilation,
padding="same"))(x)
if glu:
left, right = tf.split(conv, num_or_size_splits=2, axis=2)
left = tf.sigmoid(left)
conv = tf.multiply(left, right)
else:
conv = tf.nn.relu(conv)
# Spatial dropout
conv = tf.contrib.layers.dropout(
conv,
keep_prob=keep_prob,
noise_shape=[
tf.shape(conv)[0],
tf.constant(1),
tf.shape(conv)[2]],
is_training=is_training)
return conv
class ConvModel(Model):
def __init__(
self,
config,
shapes):
super(ConvModel, self).__init__(config, shapes)
self.setup_placeholders()
inputs = tf.concat([self._tokens, self._tags], axis=2)
inputs = tf.contrib.layers.dropout(
inputs,
keep_prob=config.keep_prob_input,
is_training=self.is_training)
hidden_states = dilated_convolution(
inputs,
config.hidden_size,
kernel_size=config.kernel_size,
n_levels=config.n_levels,
is_training=self.is_training,
glu=config.glu,
keep_prob=config.keep_prob,
mask=self.mask)
# Normalize hidden layers, seems to speed up convergence.
hidden_states = tf.contrib.layers.layer_norm(
hidden_states, begin_norm_axis=-1)
topo_logits = tf.layers.dense(hidden_states, shapes['n_labels'], use_bias=True, name="topo_logits")
if config.crf:
topo_loss, transitions = self.crf_loss(
"topo", topo_logits, self.topo_labels)
topo_predictions = self.crf_predictions(
"topo", topo_logits, transitions)
else:
topo_loss = self.masked_softmax_loss(
"topo", topo_logits, self.topo_labels, self.mask)
topo_predictions = self.predictions("topo", topo_logits)
self.accuracy("topo", topo_predictions, self.topo_labels)
# Optimization with gradient clipping. Consider making the gradient
# norm a placeholder as well.
lr = tf.placeholder(tf.float32, [], "lr")
optimizer = tf.train.AdamOptimizer(lr)
gradients, variables = zip(*optimizer.compute_gradients(topo_loss))
gradients, _ = tf.clip_by_global_norm(gradients, 1.0)
self._train_op = optimizer.apply_gradients(
zip(gradients, variables), name="train")
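# Rough training-step sketch (added; placeholder names other than "lr" are assumptions,
# since setup_placeholders() is defined elsewhere in this project):
#   model = ConvModel(config, shapes)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       feed = {"lr:0": 1e-3}             # plus token/tag/label/mask/is_training feeds
#       sess.run("train", feed_dict=feed)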
|
the-stack_0_23118 | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# System Imports
import os
# waflib imports
from waflib import Configure, Logs, Errors, TaskGen
# lmbrwaflib imports
from lmbrwaflib import msvs
MSVS_VERSION = 15
VS_NAME = 'vs2017'
PLATFORM_NAME = 'win_x64_{}'.format(VS_NAME)
@TaskGen.feature('deploy_win_x64_vs2017')
def deploy_win_x64_vs2017(tg):
# No deployment phase for windows yet
pass
@TaskGen.feature('unittest_win_x64_vs2017')
@TaskGen.after_method('deploy_win_x64_vs2017')
def unittest_win_x64_vs2017(tg):
tg.bld.run_unittest_launcher_for_win_x64(tg.bld.project)
class msvs_2017_generator(msvs.msvs_generator):
'''
Command to generate a Visual Studio 2017 solution
The project spec (-p) option can be be used to generate a solution which targets a specific build spec and name the
solution with the project spec name
The vs2017_solution_name(--vs2017-solution-name) option overrides the output name of the solution from
the user_settings.options file
The specs_to_include_in_project_generation(--specs-to-include-in-project-generation) override the list of specs to
include solution from the user_settings.options file
'''
cmd = 'msvs_2017'
fun = 'build'
def __init__(self):
super(msvs_2017_generator, self).__init__()
self.platform_name = PLATFORM_NAME
self.msvs_version = MSVS_VERSION
self.vs_solution_name_options_attribute = 'vs2017_solution_name'
self.vs_name = VS_NAME
def get_msbuild_toolset_properties_file_path(self, toolset_version, toolset_name):
ms_build_root = os.path.join(self.vs_installation_path, 'MSBuild', '{}.0'.format(toolset_version))
platform_props_file = os.path.join(ms_build_root, 'Microsoft.common.props')
        # if the vs_installation_path is within the C:\Program Files (x86)\Microsoft Visual Studio\2019\ directory
# due to the v141 build tools being used with VS2019, the MSBuild file is stored in the
# <VSInstallationPath>/MSBuild/Current/ directory
if not os.path.isfile(platform_props_file):
ms_build_root = os.path.join(self.vs_installation_path, 'MSBuild', 'Current')
platform_props_file = os.path.join(ms_build_root, 'Microsoft.common.props')
return platform_props_file
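# Example invocation (added sketch; the exact wrapper script depends on the project
# setup, but the options shown are the ones documented in the class docstring above):
#   lmbr_waf msvs_2017 -p <project_spec> --vs2017-solution-name <solution_name>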
@Configure.conf
def detect_visual_studio_2017(ctx, winkit_version, vcvarsall_args, fallback_to_newer_vs_version):
install_path = ''
path_vcvars_all= ''
vs2017_exception_text = ''
try:
install_path = ctx.query_visual_studio_install_path(MSVS_VERSION, ctx.options.win_vs2017_vswhere_args)
path_vcvars_all = os.path.normpath(os.path.join(install_path, 'VC\\Auxiliary\\Build\\vcvarsall.bat'))
if not os.path.isfile(path_vcvars_all):
raise Errors.WafError("Unable to detect VS2017. Cannot locate vcvarsall.bat at '{}'".format(path_vcvars_all))
except Errors.WafError as e:
vs2017_exception_text = str(e)
if not fallback_to_newer_vs_version:
raise Errors.WafError(vs2017_exception_text)
try:
if vs2017_exception_text:
# If VS2017 cannot be found using vswhere, try detecting VS2019 next and using the 14.1x toolset if it is installed
install_path = ctx.query_visual_studio_install_path(MSVS_VERSION, ctx.options.win_vs2019_vswhere_args)
path_vcvars_all = os.path.normpath(os.path.join(install_path, 'VC\\Auxiliary\\Build\\vcvarsall.bat'))
# Append the '-vcvars_ver=14.1' value vcvarsall_args to detect the v141 build tools for VS2019
# If the vcvarsall_args have a -vcvars_ver entry is already set, this will override that value
vcvarsall_args = vcvarsall_args + " -vcvars_ver=14.1"
if not os.path.isfile(path_vcvars_all):
raise Errors.WafError("Unable to detect VS2019. Cannot locate vcvarsall.bat at '{}'".format(path_vcvars_all))
except Errors.WafError as e:
# Re-raise the Errors.WafError exception but with the message from the VS2017 detection exception
raise Errors.WafError(vs2017_exception_text)
dev_studio_env = ctx.detect_visual_studio(platform_name=PLATFORM_NAME,
path_visual_studio=install_path,
path_vcvars_all=path_vcvars_all,
winkit_version=winkit_version,
vcvarsall_args=vcvarsall_args)
ctx.apply_dev_studio_environment(dev_studio_env_map=dev_studio_env,
env_keys_to_apply=['INCLUDES',
'PATH',
'LIBPATH',
'CC',
'CXX',
'LINK',
'LINK_CC',
'LINK_CXX',
'WINRC',
'MT',
'AR',
'VS_INSTALLATION_PATH'])
ctx.env['MSVC_VERSION'] = MSVS_VERSION
return dev_studio_env
|
the-stack_0_23119 | """
Code for creating and working with regular (structured) grids. Focus is on the 2D representation of
the grid in the cartesian plane. For methods involving layering (in the vertical dimension), see
the discretization module.
"""
import collections
import time
import warnings
from pathlib import Path
import geopandas as gpd
import gisutils
import numpy as np
import pandas as pd
import pyproj
from flopy.discretization import StructuredGrid
from geopandas.geodataframe import GeoDataFrame
from gisutils import df2shp, get_proj_str, project, shp2df
from packaging import version
from rasterio import Affine
from scipy import spatial
from shapely.geometry import MultiPolygon, Polygon
from mfsetup import fileio as fileio
from .mf5to6 import get_model_length_units
from .units import convert_length_units
from .utils import get_input_arguments
class MFsetupGrid(StructuredGrid):
"""Class representing a structured grid. Extends flopy.discretization.StructuredGrid
to facilitate gis operations in a projected (real-word) coordinate reference system (CRS).
Parameters
----------
delc : ndarray
1D numpy array of grid spacing along a column (len nrow), in CRS units.
delr : ndarray
1D numpy array of grid spacing along a row (len ncol), in CRS units.
top : ndarray
2D numpy array of model top elevations
botm : ndarray
3D numpy array of model bottom elevations
idomain : ndarray
3D numpy array of model idomain values
lenuni : int, optional
MODFLOW length units variable. See
`the Online Guide to MODFLOW <https://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/index.html?beginners_guide_to_modflow.htm>`_
epsg : int, optional
EPSG code for the model CRS
proj_str : str, optional
PROJ string for model CRS. In general, a spatial reference ID
(such as an EPSG code) or Well-Known Text (WKT) string is prefered
over a PROJ string (see References)
prj : str, optional
Filepath for ESRI projection file (containing wkt) describing model CRS
wkt : str, optional
Well-known text string describing model CRS.
crs : obj, optional
A Python int, dict, str, or pyproj.crs.CRS instance
passed to :meth:`pyproj.crs.CRS.from_user_input`
Can be any of:
- PROJ string
- Dictionary of PROJ parameters
- PROJ keyword arguments for parameters
- JSON string with PROJ parameters
- CRS WKT string
- An authority string [i.e. 'epsg:4326']
- An EPSG integer code [i.e. 4326]
- A tuple of ("auth_name": "auth_code") [i.e ('epsg', '4326')]
- An object with a `to_wkt` method.
- A :class:`pyproj.crs.CRS` class
xoff, yoff : float, float, optional
Model grid offset (location of lower left corner), by default 0.0, 0.0
xul, yul : float, float, optional
Model grid offset (location of upper left corner), by default 0.0, 0.0
angrot : float, optional
Rotation of the model grid, in degrees counter-clockwise about the lower left corner.
Non-zero rotation values require input of xoff, yoff (xul, yul not supported).
By default 0.0
References
----------
https://proj.org/faq.html#what-is-the-best-format-for-describing-coordinate-reference-systems
"""
def __init__(self, delc, delr, top=None, botm=None, idomain=None,
lenuni=None,
epsg=None, proj_str=None, prj=None, wkt=None, crs=None,
xoff=0.0, yoff=0.0, xul=None, yul=None, angrot=0.0):
super(MFsetupGrid, self).__init__(np.array(delc), np.array(delr),
top, botm, idomain,
lenuni, epsg, proj_str, prj, xoff,
yoff, angrot)
# properties
self._crs = None
# pass all CRS representations through pyproj.CRS.from_user_input
# to convert to pyproj.CRS instance
self.crs = get_crs(crs=crs, epsg=epsg, prj=prj, wkt=wkt, proj_str=proj_str)
# other CRS-related properties are set in the flopy Grid base class
self._vertices = None
self._polygons = None
self._dataframe = None
# if no epsg, set from proj4 string if possible
#if epsg is None and proj_str is not None and 'epsg' in proj_str.lower():
# self.epsg = int(proj_str.split(':')[1])
# in case the upper left corner is known but the lower left corner is not
if xul is not None and yul is not None:
xll = self._xul_to_xll(xul)
yll = self._yul_to_yll(yul)
self.set_coord_info(xoff=xll, yoff=yll, epsg=epsg, proj4=proj_str, angrot=angrot)
def __eq__(self, other):
if not isinstance(other, StructuredGrid):
return False
if not np.allclose(other.xoffset, self.xoffset):
return False
if not np.allclose(other.yoffset, self.yoffset):
return False
if not np.allclose(other.angrot, self.angrot):
return False
if not other.crs == self.crs:
return False
if not np.array_equal(other.delr, self.delr):
return False
if not np.array_equal(other.delc, self.delc):
return False
return True
def __repr__(self):
txt = ''
if self.nlay is not None:
txt += f'{self.nlay:d} layer(s), '
txt += f'{self.nrow:d} row(s), {self.ncol:d} column(s)\n'
txt += (f'delr: [{self.delr[0]:.2f}...{self.delr[-1]:.2f}]'
f' {self.units}\n'
f'delc: [{self.delc[0]:.2f}...{self.delc[-1]:.2f}]'
f' {self.units}\n'
)
txt += f'CRS: {self.crs}\n'
txt += f'length units: {self.length_units}\n'
txt += f'xll: {self.xoffset}; yll: {self.yoffset}; rotation: {self.rotation}\n'
txt += 'Bounds: {}\n'.format(self.extent)
return txt
def __str__(self):
return StructuredGrid.__repr__(self)
@property
def xul(self):
x0 = self.xyedges[0][0]
y0 = self.xyedges[1][0]
x0r, y0r = self.get_coords(x0, y0)
return x0r
@property
def yul(self):
x0 = self.xyedges[0][0]
y0 = self.xyedges[1][0]
x0r, y0r = self.get_coords(x0, y0)
return y0r
@property
def bbox(self):
"""Shapely polygon bounding box of the model grid."""
return get_grid_bounding_box(self)
@property
def bounds(self):
"""Grid bounding box in order used by shapely.
"""
x0, x1, y0, y1 = self.extent
return x0, y0, x1, y1
@property
def size(self):
if self.nlay is None:
return self.nrow * self.ncol
return self.nlay * self.nrow * self.ncol
@property
def transform(self):
"""Rasterio Affine object (same as transform attribute of rasters).
"""
return Affine(self.delr[0], 0., self.xul,
0., -self.delc[0], self.yul) * \
Affine.rotation(-self.angrot)
@property
def crs(self):
"""pyproj.crs.CRS instance describing the coordinate reference system
for the model grid.
"""
return self._crs
@crs.setter
def crs(self, crs):
"""Get a pyproj CRS instance from various inputs
(epsg, proj string, wkt, etc.).
crs : obj, optional
Coordinate reference system for model grid.
A Python int, dict, str, or pyproj.crs.CRS instance
passed to the pyproj.crs.from_user_input
See http://pyproj4.github.io/pyproj/stable/api/crs/crs.html#pyproj.crs.CRS.from_user_input.
Can be any of:
- PROJ string
- Dictionary of PROJ parameters
- PROJ keyword arguments for parameters
- JSON string with PROJ parameters
- CRS WKT string
- An authority string [i.e. 'epsg:4326']
- An EPSG integer code [i.e. 4326]
- A tuple of ("auth_name": "auth_code") [i.e ('epsg', '4326')]
- An object with a `to_wkt` method.
- A :class:`pyproj.crs.CRS` class
"""
crs = get_crs(crs=crs)
self._crs = crs
@property
def epsg(self):
return self.crs.to_epsg()
@property
def proj_str(self):
return self.crs.to_proj4()
@property
def wkt(self):
return self.crs.to_wkt(pretty=True)
@property
def length_units(self):
return get_crs_length_units(self.crs)
@property
def vertices(self):
"""Vertices for grid cell polygons."""
if self._vertices is None:
self._set_vertices()
return self._vertices
@property
def polygons(self):
"""Vertices for grid cell polygons."""
if self._polygons is None:
self._set_polygons()
return self._polygons
@property
def dataframe(self):
"""Pandas DataFrame of grid cell polygons
with i, j locations."""
if self._dataframe is None:
self._dataframe = self.get_dataframe(layers=True)
return self._dataframe
def get_dataframe(self, layers=True):
"""Get a pandas DataFrame of grid cell polygons
with i, j locations.
Parameters
----------
layers : bool
If True, return a row for each k, i, j location
and a 'k' column; if False, only return i, j
locations with no 'k' column. By default, True
Returns
-------
        df : geopandas.GeoDataFrame
            GeoDataFrame with i, j (and k, if layers=True) columns and a
            shapely polygon representation of each model cell.
"""
# get dataframe of model grid cells
i, j = np.indices((self.nrow, self.ncol))
geoms = self.polygons
df = gpd.GeoDataFrame({'i': i.ravel(),
'j': j.ravel(),
                               'geometry': geoms}, crs=self.crs)
if layers and self.nlay is not None:
# add layer information
dfs = []
for k in range(self.nlay):
layer_df = df.copy()
layer_df['k'] = k
dfs.append(layer_df)
df = pd.concat(dfs)
df = df[['k', 'i', 'j', 'geometry']].copy()
return df
def write_bbox_shapefile(self, filename='grid_bbox.shp'):
write_bbox_shapefile(self, filename)
def write_shapefile(self, filename='grid.shp'):
i, j = np.indices((self.nrow, self.ncol))
df = pd.DataFrame({'node': list(range(len(self.polygons))),
'i': i.ravel(),
'j': j.ravel(),
'geometry': self.polygons
})
df2shp(df, filename, epsg=self.epsg, proj_str=self.proj_str)
def _set_polygons(self):
"""
Create shapely polygon for each grid cell
"""
print('creating shapely Polygons of grid cells...')
t0 = time.time()
self._polygons = [Polygon(verts) for verts in self.vertices]
print("finished in {:.2f}s\n".format(time.time() - t0))
# stuff to conform to sr
@property
def length_multiplier(self):
return convert_length_units(self.lenuni,
2)
@property
def rotation(self):
return self.angrot
def get_vertices(self, i, j):
"""Get vertices for a single cell or sequence if i, j locations."""
return self._cell_vert_list(i, j)
def _set_vertices(self):
"""
Populate vertices for the whole grid
"""
jj, ii = np.meshgrid(range(self.ncol), range(self.nrow))
jj, ii = jj.ravel(), ii.ravel()
self._vertices = self._cell_vert_list(ii, jj)
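# Usage sketch (added example; the CRS and offsets are illustrative):
def _example_mfsetup_grid():
    """Build a small regular 1-km grid and return a few derived properties."""
    grid = MFsetupGrid(delc=np.ones(10) * 1000., delr=np.ones(12) * 1000.,
                       xoff=500000., yoff=1200000., angrot=0., epsg=5070)
    return grid.extent, grid.bbox, grid.transform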
# definition of national hydrogeologic grid
national_hydrogeologic_grid_parameters = {
'xul': -2553045.0, # upper left corner
'yul': 3907285.0,
'height': 4000,
'width': 4980,
'dx': 1000,
'dy': 1000,
'rotation': 0.
}
def get_crs(crs=None, epsg=None, prj=None, wkt=None, proj_str=None):
"""Get a pyproj CRS instance from various CRS representations.
"""
if crs is not None:
crs = pyproj.CRS.from_user_input(crs)
elif epsg is not None:
crs = pyproj.CRS.from_epsg(epsg)
elif prj is not None:
with open(prj) as src:
wkt = src.read()
crs = pyproj.CRS.from_wkt(wkt)
elif wkt is not None:
crs = pyproj.CRS.from_wkt(wkt)
elif proj_str is not None:
crs = pyproj.CRS.from_string(proj_str)
else: # crs is None
return
# if possible, have pyproj try to find the closest
# authority name and code matching the crs
# so that input from epsg codes, proj strings, and prjfiles
# results in equal pyproj_crs instances
authority = crs.to_authority()
if authority is not None:
crs = pyproj.CRS.from_user_input(crs.to_authority())
return crs
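# Quick check (added note): different representations of the same CRS should collapse
# to a single authority-based CRS, so one would expect
#   get_crs(epsg=26915) == get_crs(proj_str='epsg:26915')
# to evaluate to True, and grids built from either input to compare equal.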
def get_crs_length_units(crs):
length_units = crs.axis_info[0].unit_name
if 'foot' in length_units.lower() or 'feet' in length_units.lower():
length_units = 'feet'
elif 'metre' in length_units.lower() or 'meter' in length_units.lower():
length_units = 'meters'
return length_units
def get_ij(grid, x, y, local=False):
"""Return the row and column of a point or sequence of points
in real-world coordinates.
Parameters
----------
grid : flopy.discretization.StructuredGrid instance
x : scalar or sequence of x coordinates
y : scalar or sequence of y coordinates
local: bool (optional)
If True, x and y are in local coordinates (defaults to False)
Returns
-------
i : row or sequence of rows (zero-based)
j : column or sequence of columns (zero-based)
"""
xc, yc = grid.xcellcenters, grid.ycellcenters
if local:
x, y = grid.get_coords(x, y)
print('getting i, j locations...')
t0 = time.time()
xyc = np.array([xc.ravel(), yc.ravel()]).transpose()
pxy = np.array([x, y]).transpose()
kdtree = spatial.KDTree(xyc)
distance, loc = kdtree.query(pxy)
i, j = np.unravel_index(loc, (grid.nrow, grid.ncol))
print("finished in {:.2f}s\n".format(time.time() - t0))
return i, j
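# Usage sketch (added; coordinates are illustrative): locate the cells containing
# two points given in real-world coordinates.
#   i, j = get_ij(modelgrid, [515000., 516500.], [1210000., 1205000.])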
def get_grid_bounding_box(modelgrid):
"""Get bounding box of potentially rotated modelgrid
as a shapely Polygon object.
Parameters
----------
modelgrid : flopy.discretization.StructuredGrid instance
"""
mg = modelgrid
#x0 = mg.xedge[0]
#x1 = mg.xedge[-1]
#y0 = mg.yedge[0]
#y1 = mg.yedge[-1]
x0 = mg.xyedges[0][0]
x1 = mg.xyedges[0][-1]
y0 = mg.xyedges[1][0]
y1 = mg.xyedges[1][-1]
# upper left point
#x0r, y0r = mg.transform(x0, y0)
x0r, y0r = mg.get_coords(x0, y0)
# upper right point
#x1r, y1r = mg.transform(x1, y0)
x1r, y1r = mg.get_coords(x1, y0)
# lower right point
#x2r, y2r = mg.transform(x1, y1)
x2r, y2r = mg.get_coords(x1, y1)
# lower left point
#x3r, y3r = mg.transform(x0, y1)
x3r, y3r = mg.get_coords(x0, y1)
return Polygon([(x0r, y0r),
(x1r, y1r),
(x2r, y2r),
(x3r, y3r),
(x0r, y0r)])
def get_nearest_point_on_grid(x, y, transform=None,
xul=None, yul=None,
dx=None, dy=None, rotation=0.,
offset='center', op=None):
"""
Parameters
----------
x : float
x-coordinate of point
y : float
y-coordinate of point
transform : Affine instance, optional
Affine object instance describing grid
xul : float
x-coordinate of upper left corner of the grid
yul : float
y-coordinate of upper left corner of the grid
dx : float
grid spacing in the x-direction (along rows)
dy : float
grid spacing in the y-direction (along columns)
rotation : float
grid rotation about the upper left corner, in degrees clockwise from the x-axis
offset : str, {'center', 'edge'}
Whether the point on the grid represents a cell center or corner (edge). This
argument is only used if xul, yul, dx, dy and rotation are supplied. If
an Affine transform instance is supplied, it is assumed to already incorporate
the offset.
op : function, optional
Function to convert fractional pixels to whole numbers (np.round, np.floor, np.ceiling).
Defaults to np.round if offset == 'center'; otherwise defaults to np.floor.
Returns
-------
x_nearest, y_nearest : float
Coordinates of nearest grid cell center.
"""
# get the closet (fractional) grid cell location
# (in case the grid is rotated)
if transform is None:
transform = Affine(dx, 0., xul,
0., dy, yul) * \
Affine.rotation(rotation)
if offset == 'center':
transform *= Affine.translation(0.5, 0.5)
x_raster, y_raster = ~transform * (x, y)
    if op is None:
        op = np.round if offset == 'center' else np.floor
j = int(op(x_raster))
i = int(op(y_raster))
x_nearest, y_nearest = transform * (j, i)
return x_nearest, y_nearest
def get_point_on_national_hydrogeologic_grid(x, y, offset='edge', **kwargs):
"""Given an x, y location representing the upper left
corner of a model grid, return the upper left corner
of the cell in the National Hydrogeologic Grid that
contains it."""
params = get_input_arguments(national_hydrogeologic_grid_parameters, get_nearest_point_on_grid)
params.update(kwargs)
return get_nearest_point_on_grid(x, y, offset=offset, **params)
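# Example (added; the input point is illustrative): snap a proposed grid corner to the
# 1-km National Hydrogeologic Grid so cell edges line up with the national cells.
#   x_nhg, y_nhg = get_point_on_national_hydrogeologic_grid(-90000., 2300000.)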
def write_bbox_shapefile(modelgrid, outshp):
outline = get_grid_bounding_box(modelgrid)
df2shp(pd.DataFrame({'desc': ['model bounding box'],
'geometry': [outline]}),
outshp, epsg=modelgrid.epsg)
def rasterize(feature, grid, id_column=None,
include_ids=None,
crs=None,
dtype=np.float32, **kwargs):
"""Rasterize a feature onto the model grid, using
the rasterio.features.rasterize method. Features are intersected
if they contain the cell center.
Parameters
----------
feature : str (shapefile path), list of shapely objects,
or dataframe with geometry column
id_column : str
Column with unique integer identifying each feature; values
from this column will be assigned to the output raster.
grid : grid.StructuredGrid instance
crs : obj
A Python int, dict, str, or pyproj.crs.CRS instance
passed to :meth:`pyproj.crs.CRS.from_user_input`
Can be any of:
- PROJ string
- Dictionary of PROJ parameters
- PROJ keyword arguments for parameters
- JSON string with PROJ parameters
- CRS WKT string
- An authority string [i.e. 'epsg:4326']
- An EPSG integer code [i.e. 4326]
- A tuple of ("auth_name": "auth_code") [i.e ('epsg', '4326')]
- An object with a `to_wkt` method.
- A :class:`pyproj.crs.CRS` class
dtype : dtype
Datatype for the output array
**kwargs : keyword arguments to rasterio.features.rasterize()
https://rasterio.readthedocs.io/en/stable/api/rasterio.features.html
Returns
-------
2D numpy array with intersected values
"""
try:
from rasterio import Affine, features
except:
print('This method requires rasterio.')
return
if crs is not None:
if version.parse(gisutils.__version__) < version.parse('0.2.0'):
raise ValueError("The rasterize function requires gisutils >= 0.2")
from gisutils import get_authority_crs
crs = get_authority_crs(crs)
trans = grid.transform
if isinstance(feature, str) or isinstance(feature, Path):
df = gpd.read_file(feature)
elif isinstance(feature, pd.DataFrame):
df = feature.copy()
df = gpd.GeoDataFrame(df, crs=crs)
    elif isinstance(feature, collections.abc.Iterable):
# list of shapefiles
if isinstance(feature[0], str) or isinstance(feature[0], Path):
# use shp2df to read multiple shapefiles
# then convert to gdf
df = shp2df(feature, dest_crs=grid.crs)
df = gpd.GeoDataFrame(df, crs=grid.crs)
else:
df = pd.DataFrame({'geometry': feature})
df = gpd.GeoDataFrame(df, crs=crs)
    elif not isinstance(feature, collections.abc.Iterable):
df = pd.DataFrame({'geometry': [feature]})
df = gpd.GeoDataFrame(df, crs=crs)
else:
print('unrecognized feature input')
return
# reproject to grid crs
if df.crs is not None:
df.to_crs(grid.crs, inplace=True)
# subset to include_ids
if id_column is not None and include_ids is not None:
df = df.loc[df[id_column].isin(include_ids)].copy()
# create list of GeoJSON features, with unique value for each feature
if id_column is None:
numbers = range(1, len(df)+1)
# if IDs are strings, get a number for each one
# pd.DataFrame.unique() generally preserves order
    elif df[id_column].dtype == object:
unique_values = df[id_column].unique()
values = dict(zip(unique_values, range(1, len(unique_values) + 1)))
numbers = [values[n] for n in df[id_column]]
else:
numbers = df[id_column].tolist()
geoms = list(zip(df.geometry, numbers))
result = features.rasterize(geoms,
out_shape=(grid.nrow, grid.ncol),
transform=trans)
assert result.sum(axis=(0, 1)) != 0, "Nothing was intersected!"
return result.astype(dtype)
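# Usage sketch (added; the shapefile and column names are hypothetical):
#   zones = rasterize('zones.shp', modelgrid, id_column='zone_id')
#   # zones is an (nrow, ncol) array of zone numbers; cells with no intersection are 0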
def setup_structured_grid(xoff=None, yoff=None, xul=None, yul=None,
nrow=None, ncol=None, nlay=None,
dxy=None, delr=None, delc=None,
top=None, botm=None,
rotation=0.,
parent_model=None, snap_to_NHG=False,
features=None, features_shapefile=None,
id_column=None, include_ids=None,
buffer=1000,
crs=None, epsg=None, prj=None, wkt=None,
model_length_units=None,
grid_file='grid.json',
bbox_shapefile=None, **kwargs):
""""""
print('setting up model grid...')
t0 = time.time()
# make sure crs is populated, then get CRS units for the grid
crs = get_crs(crs=crs, epsg=epsg, prj=prj, wkt=wkt)
if crs is None and parent_model is not None:
crs = parent_model.modelgrid.crs
grid_units = get_crs_length_units(crs)
if grid_units not in {'feet', 'meters'}:
raise ValueError(f'unrecognized CRS units {grid_units}: CRS must be projected in feet or meters')
# conversions for model/parent model units to meters
# set regular flag for handling delc/delr
to_grid_units_inset = convert_length_units(model_length_units, grid_units)
regular = True
if dxy is not None:
delr_grid = np.round(dxy, 4) # dxy is specified in CRS units
delc_grid = delr_grid
if delr is not None:
delr_grid = np.round(delr * to_grid_units_inset, 4) # delr is specified in model units
if not np.isscalar(delr_grid):
            if len(set(delr_grid)) == 1:
delr_grid = delr_grid[0]
else:
regular = False
if delc is not None:
delc_grid = np.round(delc * to_grid_units_inset, 4) # delc is specified in model units
if not np.isscalar(delc_grid):
            if len(set(delc_grid)) == 1:
delc_grid = delc_grid[0]
else:
regular = False
if parent_model is not None:
to_grid_units_parent = convert_length_units(get_model_length_units(parent_model), grid_units)
# parent model grid spacing in meters
parent_delr_grid = np.round(parent_model.dis.delr.array[0] * to_grid_units_parent, 4)
if not parent_delr_grid % delr_grid == 0:
raise ValueError('inset delr spacing of {} must be factor of parent spacing of {}'.format(delr_grid,
parent_delr_grid))
parent_delc_grid = np.round(parent_model.dis.delc.array[0] * to_grid_units_parent, 4)
if not parent_delc_grid % delc_grid == 0:
raise ValueError('inset delc spacing of {} must be factor of parent spacing of {}'.format(delc_grid,
parent_delc_grid))
# option 1: make grid from xoff, yoff and specified dimensions
if xoff is not None and yoff is not None:
assert nrow is not None and ncol is not None, \
"Need to specify nrow and ncol if specifying xoffset and yoffset."
if regular:
height_grid = np.round(delc_grid * nrow, 4)
width_grid = np.round(delr_grid * ncol, 4)
else:
height_grid = np.sum(delc_grid)
width_grid = np.sum(delr_grid)
        # optionally align grid with the National Hydrogeologic Grid (NHG)
        # grids snapping to the NHG must have spacings that are a factor of 1 km
if snap_to_NHG:
assert regular and np.allclose(1000 % delc_grid, 0, atol=1e-4)
x, y = get_point_on_national_hydrogeologic_grid(xoff, yoff,
offset='edge', op=np.floor)
xoff = x
yoff = y
rotation = 0.
# need to specify xul, yul in case snapping to parent
# todo: allow snapping to parent grid on xoff, yoff
if rotation != 0:
rotation_rads = rotation * np.pi/180
# note rotating around xoff,yoff not the origin!
xul = xoff - (height_grid) * np.sin(rotation_rads)
yul = yoff + (height_grid) * np.cos(rotation_rads)
else:
xul = xoff
yul = yoff + height_grid
# option 2: make grid using buffered feature bounding box
else:
if features is None and features_shapefile is not None:
# Make sure shapefile and bbox filter are in dest (model) CRS
# TODO: CRS wrangling could be added to shp2df as a feature
reproject_filter = False
try:
from gisutils import get_shapefile_crs
features_crs = get_shapefile_crs(features_shapefile)
if features_crs != crs:
reproject_filter = True
except:
features_crs = get_proj_str(features_shapefile)
reproject_filter = True
filter = None
if parent_model is not None:
if reproject_filter:
filter = project(parent_model.modelgrid.bbox,
parent_model.modelgrid.crs, features_crs).bounds
else:
filter = parent_model.modelgrid.bbox.bounds
shp2df_kwargs = {'dest_crs': crs}
shp2df_kwargs = get_input_arguments(shp2df_kwargs, shp2df)
df = shp2df(features_shapefile,
filter=filter, **shp2df_kwargs)
# optionally subset shapefile data to specified features
if id_column is not None and include_ids is not None:
df = df.loc[df[id_column].isin(include_ids)]
# use all features by default
features = df.geometry.tolist()
# convert multiple features to a MultiPolygon
if isinstance(features, list):
if len(features) > 1:
features = MultiPolygon(features)
else:
features = features[0]
# size the grid based on the bbox for features
x1, y1, x2, y2 = features.bounds
L = buffer # distance from area of interest to boundary
xul = x1 - L
yul = y2 + L
height_grid = np.round(yul - (y1 - L), 4) # initial model height from buffer distance
width_grid = np.round((x2 + L) - xul, 4)
rotation = 0. # rotation not supported with this option
nrow = int(np.ceil(height_grid / delc_grid))
ncol = int(np.ceil(width_grid / delr_grid))
# align model with parent grid if there is a parent model
# (and not snapping to national hydrologic grid)
if parent_model is not None and not snap_to_NHG:
# get location of coinciding cell in parent model for upper left
# first make sure not sitting at the top of a cell (which can shift into wrong parent cell)
# move to model coords
xul_mod, yul_mod = parent_model.modelgrid.get_local_coords(xul, yul)
# move away from the edge of a cell
xul_mod += (delr_grid * 0.25)
yul_mod -= (delc_grid * 0.25)
# flip back to work coords
xul, yul = parent_model.modelgrid.get_coords(xul_mod, yul_mod)
# get corresponding cell
pi, pj = parent_model.modelgrid.intersect(xul, yul)
# find the vertices of that cell
verts = np.array(parent_model.modelgrid.get_cell_vertices(pi, pj))
# flip to model space to easily locate upper left corner
verts_model_space = np.array([parent_model.modelgrid.get_local_coords(x,y) for x,y in verts])
# finally, back to world space
xul,yul = parent_model.modelgrid.get_coords(verts_model_space[:,0].min(),verts_model_space[:,1].max())
# adjust the dimensions to align remaining corners
def roundup(number, increment):
return int(np.ceil(number / increment) * increment)
height_grid = roundup(height_grid, parent_delr_grid)
width_grid = roundup(width_grid, parent_delc_grid)
# update nrow, ncol after snapping to parent grid
if regular:
            nrow = int(height_grid / delc_grid)  # height_grid is in CRS units
ncol = int(width_grid / delr_grid)
if xoff is None:
xoff = xul + (np.sin(np.radians(rotation)) * height_grid)
if yoff is None:
yoff = yul - (np.cos(np.radians(rotation)) * height_grid)
# set the grid configuration dictionary
# spacing is in meters (consistent with projected CRS)
# (modelgrid object will be updated automatically from this dictionary)
#if rotation == 0.:
# xll = xul
# yll = yul - model.height
grid_cfg = {'nrow': int(nrow), 'ncol': int(ncol),
'nlay': nlay,
'delr': delr_grid, 'delc': delc_grid,
'xoff': xoff, 'yoff': yoff,
'xul': xul, 'yul': yul,
'rotation': rotation,
'lenuni': 2
}
if regular:
grid_cfg['delr'] = np.ones(grid_cfg['ncol'], dtype=float) * grid_cfg['delr']
grid_cfg['delc'] = np.ones(grid_cfg['nrow'], dtype=float) * grid_cfg['delc']
grid_cfg['delr'] = grid_cfg['delr'].tolist() # for serializing to json
grid_cfg['delc'] = grid_cfg['delc'].tolist()
# renames for flopy modelgrid
renames = {'rotation': 'angrot'}
for k, v in renames.items():
if k in grid_cfg:
grid_cfg[v] = grid_cfg.pop(k)
# add epsg or wkt if there isn't an epsg
if crs is not None:
grid_cfg['crs'] = crs
elif epsg is not None:
grid_cfg['epsg'] = epsg
#elif crs is not None:
# if 'epsg' in crs.srs.lower():
# grid_cfg['epsg'] = int(crs.srs.split(':')[1])
# else:
# grid_cfg['wkt'] = crs.srs
else:
warnings.warn(("Coordinate Reference System information must be supplied via"
"the 'crs'' argument."))
# set up the model grid instance
grid_cfg['top'] = top
grid_cfg['botm'] = botm
grid_cfg.update(kwargs) # update with any kwargs from function call
kwargs = get_input_arguments(grid_cfg, MFsetupGrid)
modelgrid = MFsetupGrid(**kwargs)
modelgrid.cfg = grid_cfg
# write grid info to json, and shapefile of bbox
    # omit top and botm arrays from json representation of grid
# (just for horizontal disc.)
del grid_cfg['top']
del grid_cfg['botm']
# crs needs to be cast to epsg or wkt to be serialized
if isinstance(crs, pyproj.CRS):
grid_cfg['epsg'] = grid_cfg['crs'].to_epsg()
if grid_cfg['epsg'] is None:
grid_cfg['wkt'] = grid_cfg['crs'].to_wkt()
del grid_cfg['crs']
fileio.dump(grid_file, grid_cfg)
if bbox_shapefile is not None:
write_bbox_shapefile(modelgrid, bbox_shapefile)
print("finished in {:.2f}s\n".format(time.time() - t0))
return modelgrid
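# Usage sketch (added; file names and CRS are illustrative): build a regular 1-km grid
# around a buffered set of features and write the grid info and bounding box.
#   modelgrid = setup_structured_grid(features_shapefile='streams.shp',
#                                     dxy=1000., nlay=1, buffer=5000,
#                                     epsg=5070, model_length_units='meters',
#                                     grid_file='grid.json',
#                                     bbox_shapefile='grid_bbox.shp')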
|
the-stack_0_23120 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 03 17:44:09 2017
@author: fnan
"""
# load dataset
import numpy as np
from adaptive_sparse_helpers import load_letter, em_adaptive_sparse_obj, em_adaptive_sparse_der, em_adaptive_sparse_eval,em_adaptive_sparse,get_efficient_frontier_accu_cost,em_adaptive_sparse_eval_all,get_em_gate_partial_clf, get_full_rbf_svm_clf
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
from functools import partial
import matplotlib.pyplot as plt
import cPickle
from sklearn.externals import joblib
import sys
sys.path.insert(0, 'liblinear-multicore-2.11-1\python')
from liblinearutil import *
if __name__ == '__main__':
################### load data ###################
[X_train, X_test, X_val, y_train, y_test, y_val, cost] = load_letter()
ntr, m = X_train.shape
assert (type(X_train) is np.ndarray and type(y_train) is np.ndarray and type(X_test) is np.ndarray and type(y_test) is np.ndarray ), "All data inputs need to be numpy.ndarray"
assert (type(y_train[0]) is np.int32 and type(y_test[0]) is np.int32), "Label has to be of type numpy.int32"
assert (np.amin(y_train) == 0 and np.amin(y_test) == 0 and np.amax(y_train) == 1 and np.amax(y_test) ==1 ), "Label has to be 0/1"
assert (m == X_test.shape[1] and ntr == len(y_train) and X_test.shape[0] == len(y_test)), "Input dimension mis-match! Samples should be stored in rows and len(y_train) should match the number of rows of X_train"
dataset="letters"
    # Scale the data for SVM training: fit the scaler on the training set only
    # and apply the same transformation to the validation and test sets.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
#########################################
##### Get the full classifier rbf svm ###
retrain = False # load pre-trained model
clf = get_full_rbf_svm_clf("letters", X_train, y_train, retrain)
proba_train = clf.predict_proba(X_train)[np.arange(ntr),(y_train==1)+0]
#########################################
######### Perform alternating minimization to learn g,h ####
y_train=(y_train-0.5)*2 # make y_train -1/1
lambd_array = np.logspace(-4,0,num=20) #group sparsity parameter
p_full_array = np.linspace(0.1,0.9,9) #fraction of examples to go to full classifier
lambd_p_array = [(x,y) for x in lambd_array for y in p_full_array]
output_dic={}
prob = problem(y_train.tolist(), X_train.tolist())
best_c = 1e0
liblinear_param = parameter("-s 0 -c %f -B 1 -q" % best_c)
liblinear_model = train(prob, liblinear_param)
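    # Added note: get_decfun() returns the (weights, bias) of the liblinear logistic
    # regression; they are concatenated into h_init to warm-start the gating function h.
    # liblinear orients its decision function toward the first label it saw, so the sign
    # is flipped below whenever that first label is not +1.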
h_init0 = np.array(liblinear_model.get_decfun()[0])
h_init1 = liblinear_model.get_decfun()[1]
h_init = np.concatenate((h_init0, np.array([h_init1,])))
if liblinear_model.label.contents.value != 1:
h_init = - h_init
    retrain = True # retrain the gating/partial classifiers rather than loading saved models
output_dic = get_em_gate_partial_clf(dataset, X_train, y_train, proba_train, lambd_p_array, h_init, retrain)
############################################################
######### Evaluate all g,h models on validation data ####
load_existing = False
pts_val = em_adaptive_sparse_eval_all(dataset, output_dic, X_val, y_val, clf, lambd_p_array, load_existing)
####################################################
######### Get the indices of the best points ####
hull_indices = get_efficient_frontier_accu_cost(pts_val)
if False:
keys = output_dic.keys()
gh_hull = np.zeros((len(hull_indices), len(output_dic[keys[0]])))
sparsity_num = np.zeros(len(hull_indices))
i = 0
fig_dic={}
axes_dic={}
for hull_index in hull_indices:
key_num = hull_index / 16
sparsity_num[i] = hull_index % 16
gh_in = output_dic[keys[key_num]]
gh = gh_in.copy()
len_gh = len(gh)
g = gh[:len_gh/2-1]
g0 = gh[len_gh/2-1]
h = gh[len_gh/2:-1]
h0 = gh[-1]
mag_v = np.sqrt(g*g+h*h)
sorted_mag_v = np.sort(mag_v)
inactive_feature_mask = mag_v <= sorted_mag_v[sparsity_num[i]]
g[inactive_feature_mask]=0
h[inactive_feature_mask]=0
gh_hull[i,:] = np.concatenate((g, np.array([g0,]), h, np.array([h0,])))
i = i+1
fig_dic[hull_index] = plt.figure()
fig_dic[hull_index].suptitle(str(hull_index)+": accu cost:"+str(pts_val[hull_index,:]), fontsize=20)
axes_dic[hull_index] = fig_dic[hull_index].add_subplot(111)
axes_dic[hull_index].plot(abs(g),label='|g|')
axes_dic[hull_index].plot(abs(h),label='|h|')
axes_dic[hull_index].legend()
fig_dic[hull_index].savefig('EM_gh_hull_plot%d.svg' % hull_index, transparent=True, bbox_inches='tight', pad_inches=0)
############################################################
######### Evaluate all g,h models on test data ####
load_existing = False
pts_test = em_adaptive_sparse_eval_all(dataset, output_dic, X_test, y_test, clf, lambd_p_array, load_existing, hull_indices)
accu_test_hull = pts_test[hull_indices,0]
cost_test_hull = pts_test[hull_indices,1]
test_accu_cost={'accu_test_hull':accu_test_hull, 'cost_test_hull':cost_test_hull}
with open(dataset+"_test_accu_cost_em.pickle", "wb") as output_file:
cPickle.dump(test_accu_cost, output_file)
with open(dataset+"_test_accu_cost_l1.pickle", "rb") as input_file:
test_accu_cost_l1 = cPickle.load(input_file)
plt.plot(cost_test_hull,accu_test_hull,"k-", label='EM')
plt.plot(test_accu_cost_l1['cost_test_hull'], test_accu_cost_l1['accu_test_hull'], "r--",label='L1')
# plt.xlim(0,16)
# plt.ylim(0.5,1)
plt.ylabel('accuracy')
plt.xlabel('feature cost')
plt.title('%s EM' % (dataset))
plt.legend(loc="lower right")
plt.savefig('%s_EM_zoom_out_plot.svg' % dataset, transparent=True, bbox_inches='tight', pad_inches=0)
|
the-stack_0_23121 | import matplotlib.pyplot as plt
import plotly.express as px
from sklearn import preprocessing
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
import pandas as pd
import constant
pd.options.plotting.backend = 'plotly'
pio.renderers.default = "browser"
pio.renderers.default = "notebook_connected"
pd.set_option('display.max_row', 500)
pd.set_option('display.max_columns', 100)
class Preprocessing:
def __init__(self, original):
self.original = original
def add_feature(self, filename=None):
path = "../datas/worldbank_"
original.columns = [cols.upper() for cols in original.columns.tolist()]
        # Add GDP per capita data
gdp_percap = pd.read_csv("../datas/worldbank_gdppercap.csv")
gdp_percap = gdp_percap.groupby('Country Code').mean()
gdp_percap.drop(columns=['2016', '2017', '2018', '2019', '2020'], axis=1, inplace=True)
        # Add a GDP per capita column to the life-expectancy dataframe
original["GDP_PERCAP"] = [gdp_percap.loc[original['COUNTRYCODE'][i]][str(original['YEAR'][i])] for i in
range(len(original))]
original["GDP_PERCAP"].fillna(original["GDP"] / original["POPULATION"], inplace=True)
if not filename == None:
df = pd.read_csv(f"{path}{filename}.csv").groupby('Country Code').mean()
df.drop(columns=['2016', '2017', '2018', '2019', '2020'], axis=1, inplace=True)
col_name = filename.upper()
original[col_name] = [df.loc[original['COUNTRYCODE'][i]][str(original['YEAR'][i])] for i in
range(len(original))]
return original
def processing(self, data):
        # Replace NaN values with GDP / POPULATION
data["GDPPERCAP"].fillna(data["GDP"] / data["POPULATION"], inplace=True)
# Developing: 0, developed: 1
data["STATUS"] = [row.replace("Developing", "0") for row in data["STATUS"].tolist()]
data["STATUS"] = [row.replace("Developed", "1") for row in data["STATUS"].tolist()]
data["STATUS"] = [int(row) for row in data["STATUS"].tolist()]
return data
def corr_matrix(self, data):
        # Correlation of the remaining features with life expectancy
corr_matrix = data.drop(['COUNTRYCODE', 'ISO3166', 'COUNTRY', 'YEAR', 'ISO3166', 'REGION', 'INCOMEGROUP'],
axis=1).corr()
corr_matrix['LIFE_EXPECTANCY'].sort_values(ascending=False)
        # Sort features by their correlation with LIFE_EXPECTANCY (highest first)
top_corr = abs(corr_matrix['LIFE_EXPECTANCY']).sort_values(ascending=False)[:6]
top_features = top_corr.index.tolist()
return top_features
def minmax_scaling(self, data):
        # min-max scaling
scaled_data = pd.DataFrame(preprocessing.minmax_scale(data))
scaled_data.index = data.index
scaled_data.columns = data.columns.tolist()
return scaled_data
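    # Added note: preprocessing.minmax_scale maps each column x to
    # (x - min(x)) / (max(x) - min(x)), so every feature ends up in [0, 1];
    # e.g. a column spanning 40..80 maps 60 to (60 - 40) / (80 - 40) = 0.5.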
def set_region_df(self, data, region):
data = data.drop(['COUNTRYCODE'], axis=1)
data = data.replace({'REGION': region})
        # Build one dataframe per region
regions_df = [pd.DataFrame(data=data[data['REGION'] == i]) for i in range(len(region))]
        # Group each region's data by year
year_merge_df = [regions_df[i].groupby('YEAR').mean().drop(['REGION', 'ISO3166'], axis=1)
for i in range(len(region))]
scaled_region_datas = [pd.DataFrame(data=self.minmax_scaling(year_merge_df[i]))
for i in range(len(region))]
return year_merge_df, scaled_region_datas
def set_category(self, year_merge_df):
        # Build per-region dataframes split by top-level category
cat_region_df, regions_df = [], []
for i in range(len(year_merge_df)):
economy_df = year_merge_df[i][constant.CAT['economy']]
death_df = year_merge_df[i][constant.CAT['death_rate']]
illness_df = year_merge_df[i][constant.CAT['illness_rate']]
vaccine_df = year_merge_df[i][constant.CAT['vaccine']]
others_df = year_merge_df[i][constant.CAT['others']]
regions_df.append(economy_df)
regions_df.append(death_df)
regions_df.append(illness_df)
regions_df.append(vaccine_df)
regions_df.append(others_df)
cat_region_df.append(regions_df)
regions_df = []
return cat_region_df
class Visualization:
def __init__(self, original):
self.original = original
    # Yearly trend of the mean value of every column
def show_year_cols(self, year_df):
plt.figure(figsize=(20, 5))
plt.title("Yearly Features Fluctuation", fontsize=15)
for a in year_df.columns.tolist():
            plt.plot(year_df.index, preprocessing.minmax_scale(year_df[a]), label=a)
plt.xlabel("Year")
plt.legend()
return plt.show()
    # Plotly line chart of the yearly trend, split by top-level category
def show_px_lines(self, scaled_data, category):
        plt.interactive(False)
        fig = px.line(scaled_data[constant.CAT[category]])
        return fig.show()
    # Plotly chart of the yearly all-country mean trend for each column
def show_year_sep(self, scaled_data):
        # yearly feature trend of the raw data
lower_titles = [scaled_data.columns.tolist()[i].lower().capitalize().replace('_', ' ')
for i in range(len(scaled_data.columns))]
fig = make_subplots(rows=7, cols=3, subplot_titles=lower_titles)
count = 0
for i in range(7):
for j in range(3):
if count == 19:
break
fig.add_trace(go.Scatter(x=scaled_data.index,
y=scaled_data[scaled_data.columns[count]],
name=scaled_data.columns[count],
line=dict(width=3.5)), row=i + 1, col=j + 1)
count += 1
        fig.update_layout(title='Life Expectancy Features: Yearly Trend', font_size=14,
width=2500, height=2000, template="plotly_white")
fig.update_annotations(font_size=18)
return fig.show()
    # Plotly chart of the yearly trend of each column across all regions
def show_year_regions(self, target_region, rows, cols, regions_df):
category = []
        target_region_idx = constant.REGION[target_region]  # index of the target region's dataframe
for i in range(len(constant.REGION)):
category.append(regions_df[i][target_region_idx])
lower_titles = [category[target_region_idx].columns.tolist()[i].lower().capitalize().replace('_', ' ')
for i in range(len(category[target_region_idx].columns))]
fig = make_subplots(rows=rows, cols=cols, subplot_titles=lower_titles)
count = 0
colors = ['lightsteelblue', 'cornflowerblue', 'slateblue', 'darkviolet', 'plum', 'limegreen', 'mediumturquoise']
for i in range(rows):
for j in range(cols):
if count == len(category[target_region_idx].columns):
break
for k in range(len(constant.REGION)):
flag = True if count == 0 else False
fig.add_trace(go.Scatter(x=category[target_region_idx].index,
y=category[k][category[k].columns[count]],
name=list(constant.REGION.items())[k][0],
showlegend=flag,
marker=dict(color=colors[k]), line=dict(width=3)), row=i + 1, col=j + 1)
count += 1
fig.update_layout(font_size=14, width=2800, height=900, template="plotly_white")
fig.update_annotations(font_size=19)
# return fig.show()
    # Plotly scatter of life expectancy vs. GDP per capita
def show_moving_scatter(self, data, size_target, animation_target, facet_col=None):
px.scatter(data, x="GDP_PERCAP", y="LIFE_EXPECTANCY", animation_frame=animation_target,
animation_group="COUNTRY",
size=size_target, color="REGION", hover_name="COUNTRY", facet_col=facet_col,
log_x=True, size_max=60, range_y=[40, 100])
# return fig.show()
    # Plotly bar charts comparing the top-correlated columns by country, colored by developed / developing status
def show_status_barchart(self, data):
mean_df = data.groupby(['COUNTRY']).mean().round(3).drop(['YEAR'], axis=1)
# top corr columns
cols = ['LIFE_EXPECTANCY', 'INCOME_COMPOSITION_OF_RESOURCES', 'SCHOOLING', 'INFANT_DEATHS',
'ADULT_MORTALITY']
lower_cols = [col.lower().capitalize().replace('_', ' ') for col in cols]
colors = ['Burg', 'Darkmint', 'Purp', 'Teal', 'Magenta']
for i in range(5):
hdi_df = mean_df.sort_values(by=cols[i], ascending=False)
fig = px.bar(hdi_df, x=hdi_df.index, y=hdi_df[cols[i]], color=hdi_df['STATUS'],
barmode='group', color_continuous_scale=colors[i])
fig.update_layout(
title_text=lower_cols[i],
height=500,
width=1000,
template='plotly_white',
font_color='grey'
)
# return fig.show()
if __name__ == '__main__':
original = pd.read_csv(constant.PATH + 'life_expectancy_data_fillna.csv')
original.columns = [cols.upper() for cols in original.columns.tolist()]
p = Preprocessing(original)
add_data = p.add_feature("gdppercap")
pc_data = p.processing(original)
top_features = p.corr_matrix(pc_data)
year_merge_df, scaled_region_datas = p.set_region_df(pc_data, constant.REGION)
cat_regions_df = p.set_category(year_merge_df)
    # group by year
year_data = pc_data.groupby("YEAR").mean()
v = Visualization(pc_data)
v.show_year_cols(year_data)
year_df = p.minmax_scaling(year_data)
v.show_year_regions('South Asia', 2, 3, cat_regions_df)
v.show_moving_scatter(pc_data, 'POPULATION', 'YEAR')
v.show_status_barchart(pc_data)
|
the-stack_0_23123 | '''
Plot predicted data from random airfoils using Graph Neural Networks and Deep Neural Networks
'''
import os, glob, random, json
import os.path as osp
from labellines import labelLines
import matplotlib.pyplot as plt
from pathlib import Path
import pickle
import pandas as pd
import torch
from performance import performance_predict_dnn, performance_predict_gnn
from MultiLayerLinear import MultiLayerLinear
from gnn_model import GnnModel
import numpy as np
Path('predicted_results').mkdir(parents=True, exist_ok=True)
'''
Compare the checkpoints found here.
Note:
I did some rearranging of the files so that they were in separate folders
'''
# These 2 will be compared
dnn_no_cp = glob.glob('checkpoints_dnn_no_cp' + "/**/*.pt.tar", recursive = True)
gnn_no_cp = glob.glob('checkpoints_gnn_no_cp' + "/**/*.pt.tar", recursive = True)
dnn_no_cp.extend(gnn_no_cp)
compare_no_cp = dnn_no_cp
# These 2 will be compared
dnn_cp = glob.glob('checkpoints_dnn_cp' + "/**/*.pt.tar", recursive = True)
gnn_cp = glob.glob('checkpoints_gnn_cp' + "/**/*.pt.tar", recursive = True)
dnn_cp.extend(gnn_cp)
compare_cp = dnn_cp
def plot_airfoil_performance(predicted_results,folder,airfoil_name,Reynolds,Ncrit, plot_cp:bool):
"""Creates and saves an example plot
Args:
        predicted_results (dict): results assembled by compare_models (airfoil geometry,
            per-model predicted polars and the xfoil reference polars)
        folder (str): directory the figures are written to
        airfoil_name (str): airfoil name used in figure titles and file names
        Reynolds (float): Reynolds number used in file names
        Ncrit (float): Ncrit value used in file names
        plot_cp (bool): also plot pressure-coefficient distributions at a few
            randomly chosen angles of attack
    """
plt.rcParams['font.size'] = '14'
fig = plt.figure(figsize=(8,7),dpi=150,num=1,clear=True)
ax1 = fig.add_subplot(111) # Airfoil
ax1.set_title(predicted_results['airfoil_name'])
ax1.plot(predicted_results['xss'], predicted_results['yss'],'.-',linewidth=2)
ax1.plot(predicted_results['xps'], predicted_results['yps'],'.-',linewidth=2)
ax1.set_ylabel('y/c')
ax1.set_xlabel('x/c')
ax1.set_xlim([0,1])
ax1.set_ylim([-0.5,0.5])
plt.savefig(os.path.join(folder,f'{airfoil_name}.png'),dpi=300)
plt.clf()
plt.gcf()
fig = plt.figure(figsize=(18,15),dpi=150,num=2,clear=True)
ax1 = fig.add_subplot(221) # Cl vs. alpha
ax1.set_title("Cl vs. angle of attack")
ax2 = fig.add_subplot(222) # Cd vs. alpha
ax2.set_title("Cd vs. angle of attack")
ax3 = fig.add_subplot(223) # Cdp vs. alpha
ax3.set_title("Cdp vs. angle of attack")
ax4 = fig.add_subplot(224) # Cm vs. alpha
ax4.set_title("Cm vs. angle of attack")
for model_data in predicted_results['models']:
model_type = model_data['model_info']['type']
scaler_type = model_data['model_info']['scaler_type']
if model_type=='dnn':
layer_size = model_data['model_info']['Layers'][0]
nlayers = len(model_data['model_info']['Layers'])
model_name = f'{scaler_type}-MLP-{layer_size}x{nlayers}'
else:
hidden_layers = model_data['model_info']['hiddenLayers']
if hidden_layers:
layer_size = model_data['model_info']['hiddenLayers'][0]
nlayers = len(model_data['model_info']['hiddenLayers'])
model_name = f'{scaler_type}-GNN-{layer_size}x{nlayers}'
else:
model_name = f'{scaler_type}-GNN-None'
df = pd.DataFrame(model_data['predicted_polars']).sort_values(by='alpha')
ax1.plot(df['alpha'],df['Cl'],'-', label=model_name,linewidth=1.1)
ax2.plot(df['alpha'],df['Cd'],'-', label=model_name,linewidth=1.1)
ax3.plot(df['alpha'],df['Cdp'],'-', label=model_name,linewidth=1.1)
ax4.plot(df['alpha'],df['Cm'],'-', label=model_name,linewidth=1.1)
df_actual = pd.DataFrame(predicted_results['actual_polars']).sort_values(by='alpha')
model_name='xfoil'
ax1.plot(df_actual['alpha'],df_actual['Cl'],'.-', label=model_name,linewidth=1.1)
ax2.plot(df_actual['alpha'],df_actual['Cd'],'.-', label=model_name,linewidth=1.1)
ax3.plot(df_actual['alpha'],df_actual['Cdp'],'.-', label=model_name,linewidth=1.1)
ax4.plot(df_actual['alpha'],df_actual['Cm'],'.-', label=model_name,linewidth=1.1)
ax1.set_ylabel('Coefficient of Lift (Cl)')
ax1.set_xlabel('Angle of Attack (alpha)')
ax2.set_ylabel('Coefficient of Drag (Cd)')
ax2.set_xlabel('Angle of Attack (alpha)')
ax3.set_ylabel('Coefficient of Drag zero lift (Cdp)')
ax3.set_xlabel('Angle of Attack (alpha)')
ax4.set_ylabel('Coefficient of Moment (Cm)')
ax4.set_xlabel('Angle of Attack (alpha)')
labelLines(ax1.get_lines(), zorder=2.5,fontsize=14)
labelLines(ax2.get_lines(), zorder=2.5,fontsize=14)
labelLines(ax3.get_lines(), zorder=2.5,fontsize=14)
labelLines(ax4.get_lines(), zorder=2.5,fontsize=14)
# ax1.legend(loc="upper left",fontsize=10)
# ax2.legend(loc="upper left",fontsize=10)
# ax3.legend(loc="upper left",fontsize=10)
# ax4.legend(loc="upper left",fontsize=10)
fig.tight_layout(pad=3.0)
if plot_cp:
plt.savefig(os.path.join(folder,f'{airfoil_name}-Re_{Reynolds}-Ncrit_{Ncrit}_cp_models.png'),dpi=300)
else:
plt.savefig(os.path.join(folder,f'{airfoil_name}-Re_{Reynolds}-Ncrit_{Ncrit}.png'),dpi=300)
# Plot random examples of Cp
if plot_cp:
alphas = [p['alpha'] for p in predicted_results['models'][0]['predicted_polars']]
alpha_selected = random.sample(alphas, 4) # Pick 3 random angle of attacks
alpha_selected.sort()
fig = plt.figure(figsize=(18,15),dpi=150,num=3,clear=True)
ax1 = fig.add_subplot(221) # alpha 1
ax2 = fig.add_subplot(222) # alpha 2
ax3 = fig.add_subplot(223) # alpha 3
ax4 = fig.add_subplot(224) # alpha 4
ax1.set_title(f"{name}: Cp (Re {Reynolds} Ncrit {Ncrit} alpha {alpha_selected[0]})")
ax2.set_title(f"{name}: Cp (Re {Reynolds} Ncrit {Ncrit} alpha {alpha_selected[1]})")
ax3.set_title(f"{name}: Cp (Re {Reynolds} Ncrit {Ncrit} alpha {alpha_selected[2]})")
ax4.set_title(f"{name}: Cp (Re {Reynolds} Ncrit {Ncrit} alpha {alpha_selected[3]})")
for model_data in predicted_results['models']:
model_type = model_data['model_info']['type']
if model_type=='dnn':
layer_size = model_data['model_info']['Layers'][0]
nlayers = len(model_data['model_info']['Layers'])
model_name = f'{scaler_type}-MLP-{layer_size}x{nlayers}'
polars = pd.DataFrame(model_data['predicted_polars'])
polar1 = polars[polars['alpha']==alpha_selected[0]]
polar2 = polars[polars['alpha']==alpha_selected[1]]
polar3 = polars[polars['alpha']==alpha_selected[2]]
polar4 = polars[polars['alpha']==alpha_selected[3]]
x = np.linspace(0,1,len(polar1['Cp_ss'].to_numpy()[0]))
# Plot the polars
ax1.plot(x, polar1['Cp_ss'].to_numpy()[0], label=model_name, linewidth=1.1)
ax1.plot(x, polar1['Cp_ps'].to_numpy()[0], label=model_name, linewidth=1.1)
ax2.plot(x, polar2['Cp_ss'].to_numpy()[0], label=model_name, linewidth=1.1)
ax2.plot(x, polar2['Cp_ps'].to_numpy()[0], label=model_name, linewidth=1.1)
ax3.plot(x, polar3['Cp_ss'].to_numpy()[0], label=model_name, linewidth=1.1)
ax3.plot(x, polar3['Cp_ps'].to_numpy()[0], label=model_name, linewidth=1.1)
ax4.plot(x, polar4['Cp_ss'].to_numpy()[0], label=model_name, linewidth=1.1)
ax4.plot(x, polar4['Cp_ps'].to_numpy()[0], label=model_name, linewidth=1.1)
model_name='xfoil'
polar1 = df_actual[df_actual['alpha']==alpha_selected[0]]
polar2 = df_actual[df_actual['alpha']==alpha_selected[1]]
polar3 = df_actual[df_actual['alpha']==alpha_selected[2]]
polar4 = df_actual[df_actual['alpha']==alpha_selected[3]]
x = np.linspace(0,1,len(polar1['Cp_ss'].to_numpy()[0]))
ax1.plot(x,polar1['Cp_ss'].to_numpy()[0],'.-', label=model_name,linewidth=1.1)
ax1.plot(x,polar1['Cp_ps'].to_numpy()[0],'.-', label=model_name,linewidth=1.1)
ax2.plot(x,polar2['Cp_ss'].to_numpy()[0],'.-', label=model_name,linewidth=1.1)
ax2.plot(x,polar2['Cp_ps'].to_numpy()[0],'.-', label=model_name,linewidth=1.1)
ax3.plot(x,polar3['Cp_ss'].to_numpy()[0],'.-', label=model_name,linewidth=1.1)
ax3.plot(x,polar3['Cp_ps'].to_numpy()[0],'.-', label=model_name,linewidth=1.1)
ax4.plot(x,polar4['Cp_ss'].to_numpy()[0],'.-', label=model_name,linewidth=1.1)
ax4.plot(x,polar4['Cp_ps'].to_numpy()[0],'.-', label=model_name,linewidth=1.1)
labelLines(ax1.get_lines(), zorder=2.5,fontsize=14)
labelLines(ax2.get_lines(), zorder=2.5,fontsize=14)
labelLines(ax3.get_lines(), zorder=2.5,fontsize=14)
labelLines(ax4.get_lines(), zorder=2.5,fontsize=14)
fig.tight_layout(pad=3.0)
plt.savefig(os.path.join(folder,f'{airfoil_name}-Re_{Reynolds}-Ncrit_{Ncrit}_Cp.png'),dpi=300)
def get_random_airfoil_data():
"""Retrieves a random airfoil by searching through the JSON
    Returns:
        tuple: (name, xss, yss, xps, yps, reynolds, ncrit, polars, airfoil) -- the airfoil
        name, its suction/pressure side coordinates, a randomly chosen Reynolds number
        and Ncrit, the matching polar table, and the full JSON record.
    """
# Load a random airfoil
all_json = glob.glob('../generate_xfoil/json/*.json')
# Plot random data
filename = all_json[random.randrange(0, len(all_json), 1)]
with open(filename,'r') as f:
airfoil = json.load(f)
name = airfoil['name']
xss = airfoil['xss']
yss = airfoil['yss']
xps = airfoil['xps']
yps = airfoil['yps']
polars = pd.DataFrame(airfoil['polars'])
unique_reynolds = polars['Re'].unique()
reynolds = unique_reynolds[random.randrange(0, len(unique_reynolds), 1)]
polars = polars[polars['Re'] == reynolds]
unique_ncrit = polars['Ncrit'].unique()
ncrit = unique_ncrit[random.randrange(0, len(unique_ncrit), 1)]
polars = polars[polars['Ncrit'] == ncrit]
return name,xss,yss,xps,yps,reynolds,ncrit,polars,airfoil
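# Usage sketch (added; mirrors the __main__ block below): draw a random airfoil and
# evaluate one group of checkpoints against its xfoil polars.
#   name, xss, yss, xps, yps, Re, Ncrit, polars, airfoil = get_random_airfoil_data()
#   compare_models(compare_cp, name, xss, yss, xps, yps, scalers)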
def compare_models(saved_models,name,xss,yss,xps,yps,scalers):
"""[summary]
Args:
saved_models (Dict): Dictionary
name (str): Name describing the geometry
xss (np.ndarray): [description]
yss (np.ndarray): [description]
xps (np.ndarray): [description]
yps (np.ndarray): [description]
scalers ([type]): [description]
"""
plot_cp = False
models_to_evaluate = list()
for filename in saved_models:
model_data = torch.load(filename)
train_params = model_data['parameters']
if "Gnn" in filename:
# Load the model_settings used for training into memory
linear_layer = MultiLayerLinear(in_channels=train_params['input_size']*train_params['GnnLayers'][1],out_channels=train_params['output_size'],h_sizes=train_params['hiddenLayers'])
train_params['type']='gnn'
if 'minmax' in filename:
train_params['scaler_type']='minmax'
train_params['model']=GnnModel(train_params['input_size'],train_params['GnnLayers'],linear_layers=linear_layer)
train_params['model'].load_state_dict(model_data['state_dict'])
else:
train_params['scaler_type']='standard'
train_params['model']=GnnModel(train_params['input_size'],train_params['GnnLayers'],linear_layers=linear_layer)
train_params['model'].load_state_dict(model_data['state_dict'])
else:
train_params['type']='dnn'
if 'minmax' in filename:
train_params['scaler_type']='minmax'
train_params['model']=MultiLayerLinear(in_channels=train_params['input_size'],out_channels=train_params['output_size'],h_sizes=train_params['Layers'])
train_params['model'].load_state_dict(model_data['state_dict'])
else:
train_params['scaler_type']='standard'
train_params['model']=MultiLayerLinear(in_channels=train_params['input_size'],out_channels=train_params['output_size'],h_sizes=train_params['Layers'])
train_params['model'].load_state_dict(model_data['state_dict'])
train_params['model_name']=str(train_params['model'])
train_params['use_cp'] = True if train_params['output_size']>4 else False
plot_cp = train_params['use_cp']
models_to_evaluate.append(train_params)
''' This code below is used to evaluate all the models, all models predict the polar on a random airfoil '''
if len(models_to_evaluate)>0:
predicted_results = {'airfoil_name': name, 'xss':xss, 'yss':yss,'xps':xps,'yps':yps,'models':[]}
for model_data in models_to_evaluate:
if model_data['scaler_type'] == 'minmax':
scaler = scalers['min_max']
scaler_cp = scalers['min_max_cp']
else:
scaler = scalers['standard']
scaler_cp = scalers['standard_cp']
predicted_results['model'] = {'type':model_data['type'],'scaler_type':model_data['scaler_type']}
results = list()
actual_results = list()
for p in range(len(polars)):
alpha = polars.iloc[p]['alpha']
Re = polars.iloc[p]['Re']
Ncrit = polars.iloc[p]['Ncrit']
if model_data['type'] == 'gnn':
Cl, Cd, Cdp, Cm, Cp_ss, Cp_ps = performance_predict_gnn(model_data['model'],xss,yss,xps,yps,alpha,Re,Ncrit,scaler,scaler_cp,model_data['use_cp'])
else:
Cl, Cd, Cdp, Cm, Cp_ss, Cp_ps = performance_predict_dnn(model_data['model'],xss,yss,xps,yps,alpha,Re,Ncrit,scaler,scaler_cp,model_data['use_cp'])
results.append({'alpha':alpha,'Re':Re,'Ncrit':Ncrit,
'Cl':Cl,
'Cd':Cd,
'Cm':Cm,
'Cdp':Cdp,
'Cp_ss':Cp_ss,
'Cp_ps':Cp_ps})
actual_results.append({'alpha':alpha,'Re':Re,'Ncrit':Ncrit,
'Cl':polars.iloc[p]['Cl'],
'Cd':polars.iloc[p]['Cd'],
'Cm':polars.iloc[p]['Cm'],
'Cdp':polars.iloc[p]['Cdp'],
'Cp_ss':polars.iloc[p]['Cp_ss'],
'Cp_ps':polars.iloc[p]['Cp_ps']})
predicted_results['models'].append({'model_info':model_data, 'predicted_polars':results})
predicted_results['actual_polars'] = actual_results
'''
Create a Plot to compare all models, save to png
'''
plot_airfoil_performance(predicted_results,'predicted_results',name,Re,Ncrit,plot_cp)
if __name__ == "__main__":
# Pick a random json file
    # Make a folder with the same name as this file. This is where we save all the images
# Load the scalers into memory
with open(osp.join('../generate_xfoil/','scalers.pickle'),'rb') as f: # min max of individual y and Cp positions
scalers = pickle.load(f)
polars= pd.DataFrame()
while (polars.empty): # Make sure I get a random airfoil with data
name,xss,yss,xps,yps,reynolds,ncrit,polars,airfoil = get_random_airfoil_data()
# Load all the models into memory for later prediction
compare_models(compare_no_cp,name,xss,yss,xps,yps,scalers)
compare_models(compare_cp,name,xss,yss,xps,yps,scalers)
|
the-stack_0_23124 | # ---------------------------------------------------------------------
# Firmware
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import os
import threading
import operator
import uuid
# Third-party modules
from mongoengine.document import Document
from mongoengine.fields import StringField, LongField, UUIDField
from mongoengine.errors import NotUniqueError
import cachetools
# NOC modules
from .vendor import Vendor
from noc.sa.models.profile import Profile
from noc.core.mongo.fields import PlainReferenceField
from noc.core.bi.decorator import bi_sync
from noc.core.prettyjson import to_json
from noc.core.model.decorator import on_delete_check
id_lock = threading.Lock()
@bi_sync
@on_delete_check(
check=[
("sa.ManagedObject", "version"),
("sa.ManagedObject", "next_version"),
("sa.ManagedObjectSelector", "filter_version"),
("inv.FirmwarePolicy", "firmware"),
]
)
class Firmware(Document):
meta = {
"collection": "noc.firmwares",
"strict": False,
"auto_create_index": False,
"json_collection": "inv.firmwares",
"json_depends_on": ["sa.profile"],
"json_unique_fields": ["profile", "vendor", "version"],
"indexes": [{"fields": ["profile", "vendor", "version"], "unique": True}],
}
# Global ID
uuid = UUIDField(binary=True)
#
profile = PlainReferenceField(Profile)
vendor = PlainReferenceField(Vendor)
version = StringField()
description = StringField()
download_url = StringField()
# Full name, combined from profile and version
full_name = StringField()
# Object id in BI
bi_id = LongField(unique=True)
_id_cache = cachetools.TTLCache(1000, ttl=60)
_bi_id_cache = cachetools.TTLCache(1000, ttl=60)
_ensure_cache = cachetools.TTLCache(1000, ttl=60)
def __str__(self):
return self.full_name if self.full_name else self.version
def clean(self):
self.full_name = "%s %s" % (self.profile.name, self.version)
super().clean()
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_id_cache"), lock=lambda _: id_lock)
def get_by_id(cls, id):
return Firmware.objects.filter(id=id).first()
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_bi_id_cache"), lock=lambda _: id_lock)
def get_by_bi_id(cls, id):
return Firmware.objects.filter(bi_id=id).first()
def to_json(self):
return to_json(
{
"$collection": self._meta["json_collection"],
"profile__name": self.profile.name,
"vendor__code": self.vendor.code[0],
"version": self.version,
"uuid": self.uuid,
},
order=["profile__name", "vendor__code", "version", "uuid"],
)
def get_json_path(self):
return os.path.join(
self.vendor.code[0], self.profile.name, "%s.json" % self.version.replace(os.sep, "_")
)
@classmethod
@cachetools.cachedmethod(
operator.attrgetter("_ensure_cache"),
key=lambda p, v, vv: "%s-%s-%s" % (p.id, v.id, vv),
lock=lambda _: id_lock,
)
def ensure_firmware(cls, profile, vendor, version):
"""
Get or create firmware by profile, vendor and version
:param profile:
:param vendor:
:param version:
:return:
"""
while True:
firmware = Firmware.objects.filter(
profile=profile.id, vendor=vendor.id, version=version
).first()
if firmware:
return firmware
try:
firmware = Firmware(
profile=profile, vendor=vendor, version=version, uuid=uuid.uuid4()
)
firmware.save()
return firmware
except NotUniqueError:
pass # Already created by concurrent process, reread
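# A minimal usage sketch of the get-or-create helper above (the profile and
# vendor objects and the version string are illustrative assumptions; they
# would normally come from earlier lookups):
#   fw = Firmware.ensure_firmware(profile, vendor, "15.2(4)M7")
#   print(fw.full_name)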
|
the-stack_0_23128 | from typing import Any, Dict
import arrow
from rsserpent.utils import HTTPClient, cached
path = "/bilibili/user/{uid}/video"
@cached
async def provider(uid: int) -> Dict[str, Any]:
"""订阅 up 上传的最新视频."""
user_info_api = f"https://api.bilibili.com/x/space/acc/info?mid={uid}&jsonp=jsonp"
video_list_api = (
f"https://api.bilibili.com/x/space/arc/search?mid={uid}&ps=30"
"&tid=0&pn=1&keyword=&order=pubdate&jsonp=jsonp"
)
async with HTTPClient() as client:
user_info = (await client.get(user_info_api)).json()
video_list = (await client.get(video_list_api)).json()
username = user_info["data"]["name"]
return {
"title": f"{username}的最新投稿视频",
"link": f"https://space.bilibili.com/{uid}/video",
"description": user_info["data"]["sign"],
"items": [
{
"title": item["title"],
"description": item["description"],
"link": f"https://www.bilibili.com/video/{item['bvid']}",
"pubDate": arrow.get(item["created"]),
"author": username,
}
for item in video_list["data"]["list"]["vlist"]
],
}
|
the-stack_0_23130 | """
sphinx.transforms.post_transforms.images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Docutils transforms used by Sphinx.
:copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
from math import ceil
from typing import Any, Dict, List, Optional, Tuple
from docutils import nodes
from sphinx.application import Sphinx
from sphinx.locale import __
from sphinx.transforms import SphinxTransform
from sphinx.util import epoch_to_rfc1123, logging, requests, rfc1123_to_epoch, sha1
from sphinx.util.images import get_image_extension, guess_mimetype, parse_data_uri
from sphinx.util.osutil import ensuredir
logger = logging.getLogger(__name__)
MAX_FILENAME_LEN = 32
CRITICAL_PATH_CHAR_RE = re.compile('[:;<>|*" ]')
class BaseImageConverter(SphinxTransform):
def apply(self, **kwargs: Any) -> None:
for node in self.document.findall(nodes.image):
if self.match(node):
self.handle(node)
def match(self, node: nodes.image) -> bool:
return True
def handle(self, node: nodes.image) -> None:
pass
@property
def imagedir(self) -> str:
return os.path.join(self.app.doctreedir, 'images')
class ImageDownloader(BaseImageConverter):
default_priority = 100
def match(self, node: nodes.image) -> bool:
if self.app.builder.supported_image_types == []:
return False
elif self.app.builder.supported_remote_images:
return False
else:
return '://' in node['uri']
def handle(self, node: nodes.image) -> None:
try:
basename = os.path.basename(node['uri'])
if '?' in basename:
basename = basename.split('?')[0]
if basename == '' or len(basename) > MAX_FILENAME_LEN:
filename, ext = os.path.splitext(node['uri'])
basename = sha1(filename.encode()).hexdigest() + ext
basename = re.sub(CRITICAL_PATH_CHAR_RE, "_", basename)
dirname = node['uri'].replace('://', '/').translate({ord("?"): "/",
ord("&"): "/"})
if len(dirname) > MAX_FILENAME_LEN:
dirname = sha1(dirname.encode()).hexdigest()
ensuredir(os.path.join(self.imagedir, dirname))
path = os.path.join(self.imagedir, dirname, basename)
headers = {}
if os.path.exists(path):
timestamp: float = ceil(os.stat(path).st_mtime)
headers['If-Modified-Since'] = epoch_to_rfc1123(timestamp)
r = requests.get(node['uri'], headers=headers)
if r.status_code >= 400:
logger.warning(__('Could not fetch remote image: %s [%d]') %
(node['uri'], r.status_code))
else:
self.app.env.original_image_uri[path] = node['uri']
if r.status_code == 200:
with open(path, 'wb') as f:
f.write(r.content)
last_modified = r.headers.get('last-modified')
if last_modified:
timestamp = rfc1123_to_epoch(last_modified)
os.utime(path, (timestamp, timestamp))
mimetype = guess_mimetype(path, default='*')
if mimetype != '*' and os.path.splitext(basename)[1] == '':
# append a suffix if URI does not contain suffix
ext = get_image_extension(mimetype)
newpath = os.path.join(self.imagedir, dirname, basename + ext)
os.replace(path, newpath)
self.app.env.original_image_uri.pop(path)
self.app.env.original_image_uri[newpath] = node['uri']
path = newpath
node['candidates'].pop('?')
node['candidates'][mimetype] = path
node['uri'] = path
self.app.env.images.add_file(self.env.docname, path)
except Exception as exc:
logger.warning(__('Could not fetch remote image: %s [%s]') % (node['uri'], exc))
class DataURIExtractor(BaseImageConverter):
default_priority = 150
def match(self, node: nodes.image) -> bool:
if self.app.builder.supported_remote_images == []:
return False
elif self.app.builder.supported_data_uri_images is True:
return False
else:
return node['uri'].startswith('data:')
def handle(self, node: nodes.image) -> None:
image = parse_data_uri(node['uri'])
ext = get_image_extension(image.mimetype)
if ext is None:
logger.warning(__('Unknown image format: %s...'), node['uri'][:32],
location=node)
return
ensuredir(os.path.join(self.imagedir, 'embeded'))
digest = sha1(image.data).hexdigest()
path = os.path.join(self.imagedir, 'embeded', digest + ext)
self.app.env.original_image_uri[path] = node['uri']
with open(path, 'wb') as f:
f.write(image.data)
node['candidates'].pop('?')
node['candidates'][image.mimetype] = path
node['uri'] = path
self.app.env.images.add_file(self.env.docname, path)
def get_filename_for(filename: str, mimetype: str) -> str:
basename = os.path.basename(filename)
basename = re.sub(CRITICAL_PATH_CHAR_RE, "_", basename)
return os.path.splitext(basename)[0] + get_image_extension(mimetype)
class ImageConverter(BaseImageConverter):
"""A base class for image converters.
    An image converter is a kind of Docutils transform module.  It is used to
    convert image files that are not supported by a builder to the
    appropriate format for that builder.
    For example, the :py:class:`LaTeX builder <.LaTeXBuilder>` supports PDF,
    PNG and JPEG as image formats.  However, it does not support SVG images.
    In such a case, using image converters allows these unsupported images
    to be embedded into the document.  One of the image converters,
    :ref:`sphinx.ext.imgconverter <sphinx.ext.imgconverter>`, can convert
    an SVG image to PNG format using ImageMagick internally.
There are three steps to make your custom image converter:
1. Make a subclass of ``ImageConverter`` class
2. Override ``conversion_rules``, ``is_available()`` and ``convert()``
3. Register your image converter to Sphinx using
:py:meth:`.Sphinx.add_post_transform`
"""
default_priority = 200
#: The converter is available or not. Will be filled at the first call of
#: the build. The result is shared in the same process.
#:
#: .. todo:: This should be refactored not to store the state without class
#: variable.
available: Optional[bool] = None
#: A conversion rules the image converter supports.
#: It is represented as a list of pair of source image format (mimetype) and
#: destination one::
#:
#: conversion_rules = [
#: ('image/svg+xml', 'image/png'),
#: ('image/gif', 'image/png'),
#: ('application/pdf', 'image/png'),
#: ]
conversion_rules: List[Tuple[str, str]] = []
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def match(self, node: nodes.image) -> bool:
if not self.app.builder.supported_image_types:
return False
elif set(self.guess_mimetypes(node)) & set(self.app.builder.supported_image_types):
# builder supports the image; no need to convert
return False
elif node['uri'].startswith('data:'):
# all data URI MIME types are assumed to be supported
return False
elif self.available is None:
# store the value to the class variable to share it during the build
self.__class__.available = self.is_available()
if not self.available:
return False
else:
rule = self.get_conversion_rule(node)
if rule:
return True
else:
return False
def get_conversion_rule(self, node: nodes.image) -> Tuple[str, str]:
for candidate in self.guess_mimetypes(node):
for supported in self.app.builder.supported_image_types:
rule = (candidate, supported)
if rule in self.conversion_rules:
return rule
return None
def is_available(self) -> bool:
"""Return the image converter is available or not."""
raise NotImplementedError()
def guess_mimetypes(self, node: nodes.image) -> List[str]:
if '?' in node['candidates']:
return []
elif '*' in node['candidates']:
return [guess_mimetype(node['uri'])]
else:
return node['candidates'].keys()
def handle(self, node: nodes.image) -> None:
_from, _to = self.get_conversion_rule(node)
if _from in node['candidates']:
srcpath = node['candidates'][_from]
else:
srcpath = node['candidates']['*']
filename = get_filename_for(srcpath, _to)
ensuredir(self.imagedir)
destpath = os.path.join(self.imagedir, filename)
abs_srcpath = os.path.join(self.app.srcdir, srcpath)
if self.convert(abs_srcpath, destpath):
if '*' in node['candidates']:
node['candidates']['*'] = destpath
else:
node['candidates'][_to] = destpath
node['uri'] = destpath
self.env.original_image_uri[destpath] = srcpath
self.env.images.add_file(self.env.docname, destpath)
def convert(self, _from: str, _to: str) -> bool:
"""Convert an image file to the expected format.
*_from* is a path of the source image file, and *_to* is a path
of the destination file.
"""
raise NotImplementedError()
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_post_transform(ImageDownloader)
app.add_post_transform(DataURIExtractor)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
the-stack_0_23132 | import asyncio
import logging
from pathlib import Path
from secrets import token_bytes
from typing import Optional
import aiosqlite
import pytest
from clvm_tools import binutils
from ssdcoin.types.blockchain_format.coin import Coin
from ssdcoin.types.blockchain_format.program import Program, SerializedProgram
from ssdcoin.types.blockchain_format.sized_bytes import bytes32
from ssdcoin.types.coin_solution import CoinSolution
from ssdcoin.util.db_wrapper import DBWrapper
from ssdcoin.util.ints import uint64
from ssdcoin.wallet.wallet_pool_store import WalletPoolStore
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
def make_child_solution(coin_solution: CoinSolution, new_coin: Optional[Coin] = None) -> CoinSolution:
new_puzzle_hash: bytes32 = token_bytes(32)
solution = "()"
puzzle = f"(q . ((51 0x{new_puzzle_hash.hex()} 1)))"
puzzle_prog = Program.to(binutils.assemble(puzzle))
solution_prog = Program.to(binutils.assemble(solution))
if new_coin is None:
new_coin = coin_solution.additions()[0]
sol: CoinSolution = CoinSolution(
new_coin,
SerializedProgram.from_program(puzzle_prog),
SerializedProgram.from_program(solution_prog),
)
log.warning("ABC")
log.warning(f"{sol.additions()}")
return sol
class TestWalletPoolStore:
@pytest.mark.asyncio
async def test_store(self):
db_filename = Path("wallet_store_test.db")
if db_filename.exists():
db_filename.unlink()
db_connection = await aiosqlite.connect(db_filename)
db_wrapper = DBWrapper(db_connection)
store = await WalletPoolStore.create(db_wrapper)
try:
await db_wrapper.begin_transaction()
coin_0 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
coin_0_alt = Coin(token_bytes(32), token_bytes(32), uint64(12312))
solution_0: CoinSolution = make_child_solution(None, coin_0)
solution_0_alt: CoinSolution = make_child_solution(None, coin_0_alt)
solution_1: CoinSolution = make_child_solution(solution_0)
assert store.get_spends_for_wallet(0) == []
assert store.get_spends_for_wallet(1) == []
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
# Idempotent
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
with pytest.raises(ValueError):
await store.add_spend(1, solution_1, 101)
# Rebuild cache, no longer present
await db_wrapper.rollback_transaction()
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == []
await store.rebuild_cache()
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
solution_1_alt: CoinSolution = make_child_solution(solution_0_alt)
with pytest.raises(ValueError):
await store.add_spend(1, solution_1_alt, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
solution_2: CoinSolution = make_child_solution(solution_1)
await store.add_spend(1, solution_2, 100)
await store.rebuild_cache()
solution_3: CoinSolution = make_child_solution(solution_2)
await store.add_spend(1, solution_3, 100)
solution_4: CoinSolution = make_child_solution(solution_3)
with pytest.raises(ValueError):
await store.add_spend(1, solution_4, 99)
await store.rebuild_cache()
await store.add_spend(1, solution_4, 101)
await store.rebuild_cache()
await store.rollback(101, 1)
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == [
(100, solution_1),
(100, solution_2),
(100, solution_3),
(101, solution_4),
]
await store.rebuild_cache()
await store.rollback(100, 1)
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == [
(100, solution_1),
(100, solution_2),
(100, solution_3),
]
with pytest.raises(ValueError):
await store.add_spend(1, solution_1, 105)
await store.add_spend(1, solution_4, 105)
solution_5: CoinSolution = make_child_solution(solution_4)
await store.add_spend(1, solution_5, 105)
await store.rollback(99, 1)
assert store.get_spends_for_wallet(1) == []
finally:
await db_connection.close()
db_filename.unlink()
|
the-stack_0_23133 | # main function
while True:
print("\n==========================RESTART==========================")
    code = input("Enter customer's code (r/c/i): ").lower()
    if code not in ("r", "c", "i"):
        print("invalid code")
        continue
initial_reading = int(input("Enter beginning meter reading: "))
final_reading = int(input("Enter Ending meter reading: "))
print(" ")
def prompt_readings():
"""This function displays initial and final readings to the user"""
print(f"Beginning Meter Reading: {initial_reading: 09}")
print(f"Ending Meter Reading: {final_reading: 09}")
    def reading():
        """Compute gallons of water used from the meter readings"""
        if initial_reading < final_reading:
            gallons = final_reading - initial_reading
            prompt_readings()
        elif initial_reading > final_reading:
            # The meter rolled over past 999,999,999 back to zero
            gallons = (1000000000 - initial_reading) + final_reading
        else:
            gallons = 0  # readings are equal, so no water was used
        gallons = gallons / 10
        print(f"Gallons of water used {gallons}")
        return gallons
    def units():
        """Compute and display the amount billed based on the customer code and gallons used"""
        g = reading()
        four_million = 4000000
        ten_million = 10000000
        if code == "r":
            amount = 5.00 + (0.0005 * g)
        elif code == "c":
            if g <= four_million:
                amount = 1000
            else:
                addition_units = g - four_million
                amount = 1000 + (0.00025 * addition_units)
        elif code == "i":
            if g <= four_million:
                amount = 1000
            elif g < ten_million:
                amount = 2000
            else:
                addition_units = g - ten_million
                amount = 2000 + (addition_units * 0.00025)
        else:
            print("Invalid customer code")
            return
        amount = float(amount)
        print(f"Amount billed: $ {round(amount, 2)}")
units()
|
the-stack_0_23134 | import functools
from kubernetes import client, config
from kubernetes.config import ConfigException
from kubernetes.client.rest import ApiException
from . import utils
logger = utils.create_logger(__name__)
try:
# Load configuration inside the Pod
config.load_incluster_config()
except ConfigException:
# Load configuration for testing
config.load_kube_config()
# The API object for submitting SubjecAccessReviews
api = client.AuthorizationV1Api()
def create_subject_access_review(user, verb, namespace, group, version,
resource):
'''
    Create the SubjectAccessReview object which we will use to determine if the
user is authorized.
'''
return client.V1SubjectAccessReview(
spec=client.V1SubjectAccessReviewSpec(
user=user,
resource_attributes=client.V1ResourceAttributes(
group=group,
namespace=namespace,
verb=verb,
resource=resource,
version=version
)
)
)
def is_authorized(user, verb, namespace, group, version, resource):
'''
Create a SubjectAccessReview to the K8s API to determine if the user is
authorized to perform a specific verb on a resource.
'''
if user is None:
logger.warning(
("No user credentials were found! Make sure you"
" have correctly set the USERID_HEADER in the"
"Jupyter Web App's deployment.")
)
return False
sar = create_subject_access_review(user, verb, namespace, group, version,
resource)
try:
obj = api.create_subject_access_review(sar)
except ApiException as e:
logger.error(
"Error submitting SubjecAccessReview: {}, {}".format(
sar, utils.parse_error(e))
)
return False
if obj.status is not None:
return obj.status.allowed
else:
logger.error("SubjectAccessReview doesn't have status.")
return False
def needs_authorization(verb, group, version, resource):
'''
This function will serve as a decorator. It will be used to make sure that
the decorated function is authorized to perform the corresponding k8s api
verb on a specific resource.
'''
def wrapper(func):
@functools.wraps(func)
def runner(*args, **kwargs):
user = utils.get_username_from_request()
namespace = kwargs.get("namespace", None)
if is_authorized(user, verb, namespace, group, version, resource):
return func(*args, **kwargs)
else:
msg = ("User {} is not authorized to {} {} for namespace: "
"{}").format(user,
verb,
f"{group}.{version}.{resource}",
namespace)
return {
"success": False,
"log": msg,
}
return runner
return wrapper
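# A hypothetical usage sketch of the decorator above (the group/version/resource
# values and the handler name are assumptions for illustration only):
#
#   @needs_authorization("list", "kubeflow.org", "v1alpha1", "notebooks")
#   def get_notebooks(namespace):
#       ...  # runs only if the SubjectAccessReview allows the user to list notebooks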
|
the-stack_0_23135 | # -*- coding: utf-8 -*-
'''
:synopsis: Unit Tests for Windows IIS Module 'module.win_iis'
:platform: Windows
:maturity: develop
versionadded:: 2016.11.0
'''
# Import Python Libs
from __future__ import absolute_import
import json
# Import Salt Libs
from salt.exceptions import SaltInvocationError
from salt.modules import win_iis
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON,
)
ensure_in_syspath('../../')
# Globals
win_iis.__salt__ = {}
# Make sure this module runs on Windows system
HAS_IIS = win_iis.__virtual__()
APP_LIST = {
'testApp': {
'apppool': 'MyTestPool',
'path': '/testApp',
'preload': False,
'protocols': ['http'],
'sourcepath': r'C:\inetpub\apps\testApp'
}
}
BINDING_LIST = {
'*:80:': {
'certificatehash': None,
'certificatestorename': None,
'hostheader': None,
'ipaddress': '*', 'port': 80,
'protocol': 'http',
'sslflags': 0
}
}
SITE_LIST = {
'MyTestSite': {
'apppool': 'MyTestPool',
'bindings': BINDING_LIST,
'id': 1, 'sourcepath': r'C:\inetpub\wwwroot',
'state': 'Started'
}
}
LIST_APPS_SRVMGR = {
'retcode': 0,
'stdout': json.dumps([{
'applicationPool': 'MyTestPool',
'name': 'testApp', 'path': '/testApp',
'PhysicalPath': r'C:\inetpub\apps\testApp',
'preloadEnabled': False,
'protocols': 'http'
}])
}
LIST_APPPOOLS_SRVMGR = {
'retcode': 0,
'stdout': json.dumps([{
'name': 'MyTestPool', 'state': 'Started',
'Applications': {
'value': ['MyTestSite'],
'Count': 1
}
}])
}
LIST_VDIRS_SRVMGR = {
'retcode': 0,
'stdout': json.dumps([{
'name': 'TestVdir',
'physicalPath': r'C:\inetpub\vdirs\TestVdir'
}])
}
@skipIf(not HAS_IIS, 'This test case runs only on Windows systems')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinIisTestCase(TestCase):
'''
Test cases for salt.modules.win_iis
'''
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_apppools',
MagicMock(return_value=dict()))
def test_create_apppool(self):
'''
Test - Create an IIS application pool.
'''
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.create_apppool('MyTestPool'))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value=LIST_APPPOOLS_SRVMGR))
def test_list_apppools(self):
'''
Test - List all configured IIS application pools.
'''
with patch.dict(win_iis.__salt__):
self.assertIsInstance(win_iis.list_apppools(), dict)
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_apppools',
MagicMock(return_value={'MyTestPool': {
'applications': list(),
'state': 'Started'}}))
def test_remove_apppool(self):
'''
Test - Remove an IIS application pool.
'''
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.remove_apppool('MyTestPool'))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
def test_restart_apppool(self):
'''
Test - Restart an IIS application pool.
'''
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.restart_apppool('MyTestPool'))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_sites',
MagicMock(return_value=dict()))
@patch('salt.modules.win_iis.list_apppools',
MagicMock(return_value=dict()))
def test_create_site(self):
'''
Test - Create a basic website in IIS.
'''
kwargs = {'name': 'MyTestSite', 'sourcepath': r'C:\inetpub\wwwroot',
'apppool': 'MyTestPool', 'hostheader': 'mytestsite.local',
'ipaddress': '*', 'port': 80, 'protocol': 'http'}
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.create_site(**kwargs))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_sites',
MagicMock(return_value=dict()))
@patch('salt.modules.win_iis.list_apppools',
MagicMock(return_value=dict()))
def test_create_site_failed(self):
'''
Test - Create a basic website in IIS using invalid data.
'''
kwargs = {'name': 'MyTestSite', 'sourcepath': r'C:\inetpub\wwwroot',
'apppool': 'MyTestPool', 'hostheader': 'mytestsite.local',
'ipaddress': '*', 'port': 80, 'protocol': 'invalid-protocol-name'}
with patch.dict(win_iis.__salt__):
self.assertRaises(SaltInvocationError, win_iis.create_site, **kwargs)
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_sites',
MagicMock(return_value=SITE_LIST))
def test_remove_site(self):
'''
Test - Delete a website from IIS.
'''
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.remove_site('MyTestSite'))
@patch('os.path.isdir',
MagicMock(return_value=True))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_apps',
MagicMock(return_value=APP_LIST))
def test_create_app(self):
'''
Test - Create an IIS application.
'''
kwargs = {'name': 'testApp', 'site': 'MyTestSite',
'sourcepath': r'C:\inetpub\apps\testApp', 'apppool': 'MyTestPool'}
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.create_app(**kwargs))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value=LIST_APPS_SRVMGR))
def test_list_apps(self):
'''
Test - Get all configured IIS applications for the specified site.
'''
with patch.dict(win_iis.__salt__):
self.assertIsInstance(win_iis.list_apps('MyTestSite'), dict)
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_apps',
MagicMock(return_value=APP_LIST))
def test_remove_app(self):
'''
Test - Remove an IIS application.
'''
kwargs = {'name': 'otherApp', 'site': 'MyTestSite'}
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.remove_app(**kwargs))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_bindings',
MagicMock(return_value=BINDING_LIST))
def test_create_binding(self):
'''
Test - Create an IIS binding.
'''
kwargs = {'site': 'MyTestSite', 'hostheader': '', 'ipaddress': '*',
'port': 80, 'protocol': 'http', 'sslflags': 0}
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.create_binding(**kwargs))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_bindings',
MagicMock(return_value=BINDING_LIST))
def test_create_binding_failed(self):
'''
Test - Create an IIS binding using invalid data.
'''
kwargs = {'site': 'MyTestSite', 'hostheader': '', 'ipaddress': '*',
'port': 80, 'protocol': 'invalid-protocol-name', 'sslflags': 999}
with patch.dict(win_iis.__salt__):
self.assertRaises(SaltInvocationError, win_iis.create_binding, **kwargs)
@patch('salt.modules.win_iis.list_sites',
MagicMock(return_value=SITE_LIST))
def test_list_bindings(self):
'''
Test - Get all configured IIS bindings for the specified site.
'''
with patch.dict(win_iis.__salt__):
self.assertIsInstance(win_iis.list_bindings('MyTestSite'), dict)
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value={'retcode': 0}))
@patch('salt.modules.win_iis.list_bindings',
MagicMock(return_value=BINDING_LIST))
def test_remove_binding(self):
'''
Test - Remove an IIS binding.
'''
kwargs = {'site': 'MyTestSite', 'hostheader': 'mytestsite.local',
'ipaddress': '*', 'port': 443}
with patch.dict(win_iis.__salt__):
self.assertTrue(win_iis.remove_binding(**kwargs))
@patch('salt.modules.win_iis._srvmgr',
MagicMock(return_value=LIST_VDIRS_SRVMGR))
def test_list_vdirs(self):
'''
Test - Get configured IIS virtual directories.
'''
vdirs = {
'TestVdir': {
'sourcepath': r'C:\inetpub\vdirs\TestVdir'
}
}
with patch.dict(win_iis.__salt__):
self.assertEqual(win_iis.list_vdirs('MyTestSite'), vdirs)
if __name__ == '__main__':
from integration import run_tests # pylint: disable=import-error
run_tests(WinIisTestCase, needs_daemon=False)
|
the-stack_0_23136 | import os
from collections import OrderedDict
from datetime import datetime
import json
import re
import glob
'''
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
def get_timestamp():
return datetime.now().strftime('_%y%m%d_%H%M%S')
def parse(opt_path, is_train=True):
# ----------------------------------------
# remove comments starting with '//'
# ----------------------------------------
json_str = ''
with open(opt_path, 'r') as f:
for line in f:
line = line.split('//')[0] + '\n'
json_str += line
# ----------------------------------------
# initialize opt
# ----------------------------------------
opt = json.loads(json_str, object_pairs_hook=OrderedDict)
opt['opt_path'] = opt_path
opt['is_train'] = is_train
# ----------------------------------------
# set default
# ----------------------------------------
if 'merge_bn' not in opt:
opt['merge_bn'] = False
opt['merge_bn_startpoint'] = -1
if 'scale' not in opt:
opt['scale'] = 1
# ----------------------------------------
# datasets
# ----------------------------------------
for phase, dataset in opt['datasets'].items():
phase = phase.split('_')[0]
dataset['phase'] = phase
dataset['scale'] = opt['scale'] # broadcast
dataset['n_channels'] = opt['n_channels'] # broadcast
if 'dataroot_H' in dataset and dataset['dataroot_H'] is not None:
dataset['dataroot_H'] = os.path.expanduser(dataset['dataroot_H'])
if 'dataroot_L' in dataset and dataset['dataroot_L'] is not None:
dataset['dataroot_L'] = os.path.expanduser(dataset['dataroot_L'])
# ----------------------------------------
# path
# ----------------------------------------
for key, path in opt['path'].items():
if path and key in opt['path']:
opt['path'][key] = os.path.expanduser(path)
path_task = os.path.join(opt['path']['root'], opt['task'])
opt['path']['task'] = path_task
opt['path']['log'] = path_task
opt['path']['options'] = os.path.join(path_task, 'options')
if is_train:
opt['path']['models'] = os.path.join(path_task, 'models')
opt['path']['images'] = os.path.join(path_task, 'images')
else: # test
opt['path']['images'] = os.path.join(path_task, 'test_images')
# ----------------------------------------
# network
# ----------------------------------------
opt['netG']['scale'] = opt['scale'] if 'scale' in opt else 1
# ----------------------------------------
# GPU devices
# ----------------------------------------
gpu_list = ','.join(str(x) for x in opt['gpu_ids'])
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
print('export CUDA_VISIBLE_DEVICES=' + gpu_list)
return opt
def find_last_checkpoint(save_dir, net_type='G'):
"""
Args:
save_dir: model folder
net_type: 'G' or 'D'
Return:
init_iter: iteration number
init_path: model path
"""
file_list = glob.glob(os.path.join(save_dir, '*_{}.pth'.format(net_type)))
if file_list:
iter_exist = []
for file_ in file_list:
iter_current = re.findall(r"(\d+)_{}.pth".format(net_type), file_)
iter_exist.append(int(iter_current[0]))
init_iter = max(iter_exist)
init_path = os.path.join(save_dir, '{}_{}.pth'.format(init_iter, net_type))
else:
init_iter = 0
init_path = None
return init_iter, init_path
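# Illustrative use when resuming training (the save_dir path below is an assumption):
#   init_iter_G, init_path_G = find_last_checkpoint('superresolution/task/models', net_type='G')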
'''
# --------------------------------------------
# convert the opt into json file
# --------------------------------------------
'''
def save(opt):
opt_path = opt['opt_path']
opt_path_copy = opt['path']['options']
dirname, filename_ext = os.path.split(opt_path)
filename, ext = os.path.splitext(filename_ext)
dump_path = os.path.join(opt_path_copy, filename+get_timestamp()+ext)
with open(dump_path, 'w') as dump_file:
json.dump(opt, dump_file, indent=2)
'''
# --------------------------------------------
# dict to string for logger
# --------------------------------------------
'''
def dict2str(opt, indent_l=1):
msg = ''
for k, v in opt.items():
if isinstance(v, dict):
msg += ' ' * (indent_l * 2) + k + ':[\n'
msg += dict2str(v, indent_l + 1)
msg += ' ' * (indent_l * 2) + ']\n'
else:
msg += ' ' * (indent_l * 2) + k + ': ' + str(v) + '\n'
return msg
'''
# --------------------------------------------
# convert OrderedDict to NoneDict,
# return None for missing key
# --------------------------------------------
'''
def dict_to_nonedict(opt):
if isinstance(opt, dict):
new_opt = dict()
for key, sub_opt in opt.items():
new_opt[key] = dict_to_nonedict(sub_opt)
return NoneDict(**new_opt)
elif isinstance(opt, list):
return [dict_to_nonedict(sub_opt) for sub_opt in opt]
else:
return opt
class NoneDict(dict):
def __missing__(self, key):
return None
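# Illustrative behaviour of the NoneDict conversion above:
#   opt = dict_to_nonedict({'netG': {'scale': 2}})
#   opt['netG']['scale']    # -> 2
#   opt['netG']['missing']  # -> None instead of raising KeyError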
|
the-stack_0_23137 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_router_community_list
short_description: Configure community lists in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify router feature and community_list category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.6
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
router_community_list:
description:
- Configure community lists.
default: null
type: dict
suboptions:
name:
description:
- Community list name.
required: true
type: str
rule:
description:
- Community list rule.
type: list
suboptions:
action:
description:
- Permit or deny route-based operations, based on the route's COMMUNITY attribute.
type: str
choices:
- deny
- permit
id:
description:
- ID.
required: true
type: int
match:
description:
- Community specifications for matching a reserved community.
type: str
regexp:
description:
- Ordered list of COMMUNITY attributes as a regular expression.
type: str
type:
description:
- Community list type (standard or expanded).
type: str
choices:
- standard
- expanded
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure community lists.
fortios_router_community_list:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
router_community_list:
name: "default_name_3"
rule:
-
action: "deny"
id: "6"
match: "<your_own_value>"
regexp: "<your_own_value>"
type: "standard"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_router_community_list_data(json):
option_list = ['name', 'rule', 'type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def router_community_list(data, fos):
vdom = data['vdom']
state = data['state']
router_community_list_data = data['router_community_list']
filtered_data = underscore_to_hyphen(filter_router_community_list_data(router_community_list_data))
if state == "present":
return fos.set('router',
'community-list',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('router',
'community-list',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_router(data, fos):
if data['router_community_list']:
resp = router_community_list(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"router_community_list": {
"required": False, "type": "dict", "default": None,
"options": {
"name": {"required": True, "type": "str"},
"rule": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["deny", "permit"]},
"id": {"required": True, "type": "int"},
"match": {"required": False, "type": "str"},
"regexp": {"required": False, "type": "str"}
}},
"type": {"required": False, "type": "str",
"choices": ["standard", "expanded"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_router(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_router(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
the-stack_0_23140 | ##############################
# PZ test analysis
# Aaron Berdanier
# [email protected]
# 1 August 2016
from datetime import datetime, timedelta
import pandas as pa
import numpy as np
from matplotlib import pyplot as plt
import seaborn
import os
import re
def getpredictors(da):
# ACTIVITY METRICS
# Number of days with account record
ndays = (max(da.date)-min(da.date)).days
# Average daily balance, filling non-transaction days
avbal = da.balance.resample('D').last().ffill().mean()
# Transactions per day
ntran = len(da)/ndays*1.
# Expenses covered by deposits
deposits = sum(da.amount[da.text.str.contains("Deposit of Funds")])
condition = (da.text.str.contains("Pay Bill to") | da.text.str.contains("Merchant Payment") | da.text.str.contains("Customer Withdrawal"))
withdraw = -sum(da.amount[condition])
expen = (deposits - withdraw)/ndays*1.
# Average deposit per day
depos = deposits/ndays*1.
# Average airtime purchase amount
airti = sum(da.amount[da.text.str.contains("Airtime Purchase")])/ndays*1.
# M-Shwari savings account usage
savin = 1 if len(da[da.text.str.contains("M-Shwari")]) > 0 else 0
#
# NETWORK METRICS
# Number of unique people interactions - scaled by n days
transactions = da.text[(da.text.str.contains('Customer Transfer to')|da.text.str.contains('Funds received from'))]
peopl = 1.*len(set([x.split("-")[1].lower() for x in transactions.tolist()]))/ndays
# Frequency of giving transfers
givin = 1.*len(da[da.text.str.contains('Customer Transfer to')])#/ndays
# Frequency of receiving transfers
recei = 1.*len(da[da.text.str.contains('Funds received from')])#/ndays
# Giving-receiving index: higher = giving more than receiving in their network
givrec = (givin - recei)/ndays
# Unique external lenders
lende = len(set(da.text[da.text.str.contains('Business Payment from')]))
# Average loan amount per day
loans = 1.*sum(da.amount[da.text.str.contains('Business Payment')])/ndays
return np.array([ndays, avbal, ntran, expen, airti, savin, peopl, givrec, lende, loans])
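# `d` is the base directory holding PZborrowers.xlsx and the data/ folder of
# per-customer transaction CSVs; the value below is an assumed placeholder --
# point it at your local copy before running.
d = "./"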
ff = os.listdir(d + "data/")
# initialize data
X = []
y = []
for i in xrange(len(ff)):
fi = ff[i]
da = pa.read_csv(d+'data/'+ff[i],header=None,names=["id","date","text","status","amount","balance"])
da.date = pa.to_datetime(da.date)
da = da.set_index(da.date)
#X[i] = getpredictors(da)
ld = da[da.text.str.contains("M-Shwari Loan")] # [['date','text','amount','balance']]
if(len(ld)>0 and any(ld.text.str.contains("Disburse"))): # get risk only if they have an M-Shwari loan record
##### Get training data
ldr = ld.sort_index() # sort ascending
# start with first loan disbursement
firstloan = next((i for i, v in enumerate(ldr.text.tolist()) if "Disburse" in v), None)
ldr = ldr[firstloan:]
ldr = ldr[ldr['date'] <= da.date[0]-timedelta(days=30)]
delinquentdates = []
loandates = ldr.date[ldr.text.str.contains("Disburse")]
for di in loandates: # loop through loans
debt = ldr.ix[di].amount*1.075 # add the loan amount
ldr = ldr.drop(di) # pop it off
# find all payments within 30 days of loan
s = (ldr['date'] <= di+timedelta(days=30)) & (ldr['text'] == "M-Shwari Loan Repayment")
ld2 = ldr[s]
for payi in xrange(len(ld2)):
debt += ld2.amount[payi] # pay down the debt
ldr = ldr.drop(ld2.date[payi]) # remove the payment
if(debt < 0): # paid it off
break
if(debt > 0): # delinquent loan...
delinquentdates.append(di)
# get the last delinquent loan
if(len(delinquentdates) > 0):
preds = getpredictors(da[da.date < delinquentdates[-1]])
X.append(preds)# all of the data before the delinquent loan
y.append(0)
else:
preds = getpredictors(da)
X.append(preds)
y.append(1) #
# Fit model
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.grid_search import GridSearchCV
pipe = Pipeline([('scl',StandardScaler()),
('pca',PCA(n_components=2)),
('clf',LogisticRegression(class_weight='balanced'))])
pipe.fit(X, y)
preds = pipe.predict(X)
accuracy_score(y,preds)
confusion_matrix(y,preds)
#### Get components and important variables
column_labels = ['PCA 1','PCA 2']
row_labels = ['ndays','avgbal','ntrans','deposit-expense','airtime','savings','npeople','giving-receiving','lendern','loanamt']
data = pipe.named_steps['pca'].components_.transpose() * pipe.named_steps['clf'].coef_ * 100
fig, ax = plt.subplots()
heatmap = ax.pcolor(data, cmap=plt.cm.coolwarm_r)
ax.set_xticks(np.arange(data.shape[1])+0.5, minor=False)
ax.set_yticks(np.arange(data.shape[0])+0.5, minor=False)
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_yticklabels(row_labels, minor=False)
ax.set_xticklabels(column_labels, minor=False)
#plt.savefig('/home/aaron/Desktop/PZ1.pdf')
plt.show()
######## PREDICT SCORES FOR ALL PEOPLE
xl = pa.read_excel(d+"PZborrowers.xlsx",skiprows=1,names=['name','id','date'])
XX = []
order = [0]*len(ff) # order for sorting the predicted scores
for i in xrange(len(ff)):
fi = ff[i]
order[i] = np.where(xl.id==int(fi.split(".")[0]))[0][0]
da = pa.read_csv(d+'data/'+ff[i],header=None,names=["id","date","text","status","amount","balance"])
da.date = pa.to_datetime(da.date)
da = da.set_index(da.date)
XX.append(getpredictors(da))
predprob = np.array([x[1] for x in pipe.predict_proba(XX)])
Scores = [int(x) for x in predprob*100]
Scores = [x if x!= 0 else 1 for x in Scores] # zero scores get 1
# HISTOGRAM OF PREDICTED SCORES
plt.hist(Scores,bins=10,range=(0,100))
plt.show()
#plt.savefig('/home/aaron/Desktop/PZ2.pdf')
# Add scores to sheet
xl['score'] = [x[1] for x in sorted(zip(order, Scores))]
xl.to_excel(d+"Berdanier_PZresults.xlsx",header=["Name","ID Number","Date","Predicted Score"],index=False)
##############################
#pa.DataFrame(StandardScaler().fit_transform(XX)).to_excel('/home/aaron/Desktop/pzs.xlsx',header=row_labels,index=False)
|
the-stack_0_23142 | import numpy as np
from gym.spaces import Box
from metaworld.envs.env_util import get_asset_full_path
from metaworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv, _assert_task_is_set
class SawyerHandlePullEnv(SawyerXYZEnv):
def __init__(self):
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.8, 0.05)
obj_high = (0.1, 0.9, 0.05)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_pos': np.array([0, 0.9, 0.05]),
'hand_init_pos': np.array((0, 0.6, 0.2),),
}
self.goal = np.array([0, 0.8, 0.14])
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
goal_low = self.hand_low
goal_high = self.hand_high
self.max_path_length = 150
self.obj_and_goal_space = Box(
np.array(obj_low),
np.array(obj_high),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
self.observation_space = Box(
np.hstack((self.hand_low, obj_low, obj_low, goal_low)),
np.hstack((self.hand_high, obj_high, obj_high, goal_high)),
)
@property
def model_name(self):
return get_asset_full_path('sawyer_xyz/sawyer_handle_press.xml')
@_assert_task_is_set
def step(self, action):
self.set_xyz_action(action[:3])
self.do_simulation([action[-1], -action[-1]])
# The marker seems to get reset every time you do a simulation
ob = self._get_obs()
obs_dict = self._get_obs_dict()
reward, reachDist, pressDist = self.compute_reward(action, obs_dict)
self.curr_path_length += 1
info = {'reachDist': reachDist, 'goalDist': pressDist, 'epRew': reward, 'pickRew':None, 'success': float(pressDist <= 0.04)}
info['goal'] = self.goal
return ob, reward, self.curr_path_length == self.max_path_length, info
def _get_pos_objects(self):
return self.data.site_xpos[self.model.site_name2id('handleStart')]
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
qvel[9] = 0
self.set_state(qpos, qvel)
def reset_model(self):
self._reset_hand()
self._state_goal = self.goal.copy()
self.obj_init_pos = self.init_config['obj_init_pos']
if self.random_init:
goal_pos = self._get_state_rand_vec()
self.obj_init_pos = goal_pos
button_pos = goal_pos.copy()
button_pos[1] -= 0.1
button_pos[2] += 0.09
self._state_goal = button_pos
self.sim.model.body_pos[self.model.body_name2id('box')] = self.obj_init_pos
self.sim.model.body_pos[self.model.body_name2id('handle')] = self._state_goal
self._set_obj_xyz(-0.12)
self._state_goal = self.get_site_pos('goalPull')
self.maxDist = np.abs(self.data.site_xpos[self.model.site_name2id('handleStart')][-1] - self._state_goal[-1])
self.target_reward = 1000*self.maxDist + 1000*2
return self._get_obs()
def _reset_hand(self):
for _ in range(10):
self.data.set_mocap_pos('mocap', self.hand_init_pos)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
self.do_simulation([-1,1], self.frame_skip)
rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger)/2
self.pickCompleted = False
def compute_reward(self, actions, obs):
del actions
obs = obs['state_observation']
objPos = obs[3:6]
leftFinger = self.get_site_pos('leftEndEffector')
fingerCOM = leftFinger
pressGoal = self._state_goal[-1]
pressDist = np.abs(objPos[-1] - pressGoal)
reachDist = np.linalg.norm(objPos - fingerCOM)
reachRew = -reachDist
c1 = 1000
c2 = 0.01
c3 = 0.001
if reachDist < 0.05:
pressRew = 1000*(self.maxDist - pressDist) + c1*(np.exp(-(pressDist**2)/c2) + np.exp(-(pressDist**2)/c3))
else:
pressRew = 0
pressRew = max(pressRew, 0)
reward = reachRew + pressRew
return [reward, reachDist, pressDist]
|
the-stack_0_23146 | import os
import pytest
import textwrap
from numpy.testing import assert_array_equal
import numpy as np
from . import util
class TestString(util.F2PyTest):
sources = [util.getpath("tests", "src", "string", "char.f90")]
@pytest.mark.slow
def test_char(self):
strings = np.array(["ab", "cd", "ef"], dtype="c").T
inp, out = self.module.char_test.change_strings(
strings, strings.shape[1])
assert_array_equal(inp, strings)
expected = strings.copy()
expected[1, :] = "AAA"
assert_array_equal(out, expected)
class TestDocStringArguments(util.F2PyTest):
suffix = ".f"
code = """
C FILE: STRING.F
SUBROUTINE FOO(A,B,C,D)
CHARACTER*5 A, B
CHARACTER*(*) C,D
Cf2py intent(in) a,c
Cf2py intent(inout) b,d
A(1:1) = 'A'
B(1:1) = 'B'
C(1:1) = 'C'
D(1:1) = 'D'
END
C END OF FILE STRING.F
"""
def test_example(self):
a = np.array(b"123\0\0")
b = np.array(b"123\0\0")
c = np.array(b"123")
d = np.array(b"123")
self.module.foo(a, b, c, d)
assert a.tobytes() == b"123\0\0"
assert b.tobytes() == b"B23\0\0"
assert c.tobytes() == b"123"
assert d.tobytes() == b"D23"
class TestFixedString(util.F2PyTest):
suffix = ".f90"
code = textwrap.dedent("""
function sint(s) result(i)
implicit none
character(len=*) :: s
integer :: j, i
i = 0
do j=len(s), 1, -1
if (.not.((i.eq.0).and.(s(j:j).eq.' '))) then
i = i + ichar(s(j:j)) * 10 ** (j - 1)
endif
end do
return
end function sint
function test_in_bytes4(a) result (i)
implicit none
integer :: sint
character(len=4) :: a
integer :: i
i = sint(a)
a(1:1) = 'A'
return
end function test_in_bytes4
function test_inout_bytes4(a) result (i)
implicit none
integer :: sint
character(len=4), intent(inout) :: a
integer :: i
if (a(1:1).ne.' ') then
a(1:1) = 'E'
endif
i = sint(a)
return
end function test_inout_bytes4
""")
@staticmethod
def _sint(s, start=0, end=None):
"""Return the content of a string buffer as integer value.
For example:
_sint('1234') -> 4321
_sint('123A') -> 17321
"""
if isinstance(s, np.ndarray):
s = s.tobytes()
elif isinstance(s, str):
s = s.encode()
assert isinstance(s, bytes)
if end is None:
end = len(s)
i = 0
for j in range(start, min(end, len(s))):
i += s[j] * 10**j
return i
def _get_input(self, intent="in"):
if intent in ["in"]:
yield ""
yield "1"
yield "1234"
yield "12345"
yield b""
yield b"\0"
yield b"1"
yield b"\01"
yield b"1\0"
yield b"1234"
yield b"12345"
yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0')
yield np.array(b"") # array(b'', dtype='|S1')
yield np.array(b"\0")
yield np.array(b"1")
yield np.array(b"1\0")
yield np.array(b"\01")
yield np.array(b"1234")
yield np.array(b"123\0")
yield np.array(b"12345")
def test_intent_in(self):
for s in self._get_input():
r = self.module.test_in_bytes4(s)
# also checks that s is not changed inplace
expected = self._sint(s, end=4)
assert r == expected, s
def test_intent_inout(self):
for s in self._get_input(intent="inout"):
rest = self._sint(s, start=4)
r = self.module.test_inout_bytes4(s)
expected = self._sint(s, end=4)
assert r == expected
# check that the rest of input string is preserved
assert rest == self._sint(s, start=4)
|
the-stack_0_23151 | import numpy as np
from gym.spaces import Box
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerStickPushEnvV2(SawyerXYZEnv):
def __init__(self):
liftThresh = 0.04
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.08, 0.58, 0.000)
obj_high = (-0.03, 0.62, 0.001)
goal_low = (0.399, 0.55, 0.0199)
goal_high = (0.401, 0.6, 0.0201)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'stick_init_pos': np.array([-0.1, 0.6, 0.02]),
'hand_init_pos': np.array([0, 0.6, 0.2]),
}
self.goal = self.init_config['stick_init_pos']
self.stick_init_pos = self.init_config['stick_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
self.liftThresh = liftThresh
self.max_path_length = 200
# For now, fix the object initial position.
self.obj_init_pos = np.array([0.2, 0.6, 0.0])
self.obj_init_qpos = np.array([0.0, 0.0])
self.obj_space = Box(np.array(obj_low), np.array(obj_high))
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_stick_obj.xml')
@_assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, _, reachDist, pickRew, _, pushDist, placeDist = self.compute_reward(action, ob)
self.curr_path_length += 1
info = {
'reachDist': reachDist,
'pickRew': pickRew,
'epRew': reward,
'goalDist': pushDist,
'success': float(pushDist <= 0.1 and reachDist <= 0.05 and placeDist <= 0.15)
}
return ob, reward, False, info
def _get_pos_objects(self):
return np.hstack((
self.get_body_com('stick').copy(),
self._get_site_pos('insertion') + np.array([.0, .09, .0]),
))
def _get_obs_dict(self):
obs_dict = super()._get_obs_dict()
obs_dict['state_achieved_goal'] = self._get_site_pos(
'insertion'
) + np.array([.0, .09, .0])
return obs_dict
def _set_stick_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[16:18] = pos.copy()
qvel[16:18] = 0
self.set_state(qpos, qvel)
def reset_model(self):
self._reset_hand()
self.stick_init_pos = self.init_config['stick_init_pos']
self._target_pos = np.array([0.4, 0.6, self.stick_init_pos[-1]])
self.stickHeight = self.get_body_com('stick').copy()[2]
self.heightTarget = self.stickHeight + self.liftThresh
if self.random_init:
goal_pos = self._get_state_rand_vec()
while np.linalg.norm(goal_pos[:2] - goal_pos[-3:-1]) < 0.1:
goal_pos = self._get_state_rand_vec()
self.stick_init_pos = np.concatenate((goal_pos[:2], [self.stick_init_pos[-1]]))
self._target_pos = np.concatenate((goal_pos[-3:-1], [self.stick_init_pos[-1]]))
self._set_stick_xyz(self.stick_init_pos)
self._set_obj_xyz(self.obj_init_qpos)
self.obj_init_pos = self.get_body_com('object').copy()
self.maxPlaceDist = np.linalg.norm(np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self.stick_init_pos)) + self.heightTarget
self.maxPushDist = np.linalg.norm(self.obj_init_pos[:2] - self._target_pos[:2])
return self._get_obs()
def _reset_hand(self):
super()._reset_hand()
self.pickCompleted = False
def compute_reward(self, actions, obs):
stickPos = obs[3:6]
objPos = obs[6:9]
rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')
fingerCOM = (rightFinger + leftFinger)/2
heightTarget = self.heightTarget
pushGoal = self._target_pos
pushDist = np.linalg.norm(objPos[:2] - pushGoal[:2])
placeDist = np.linalg.norm(objPos - stickPos)
reachDist = np.linalg.norm(stickPos - fingerCOM)
def reachReward():
reachRew = -reachDist
# incentive to close fingers when reachDist is small
if reachDist < 0.05:
reachRew = -reachDist + max(actions[-1],0)/50
return reachRew , reachDist
def pickCompletionCriteria():
tolerance = 0.01
return stickPos[2] >= (heightTarget- tolerance)
self.pickCompleted = pickCompletionCriteria()
def objDropped():
return (stickPos[2] < (self.stickHeight + 0.005)) and (pushDist >0.02) and (reachDist > 0.02)
# Object on the ground, far away from the goal, and from the gripper
# Can tweak the margin limits
def orig_pickReward():
hScale = 100
if self.pickCompleted and not(objDropped()):
return hScale*heightTarget
elif (reachDist < 0.1) and (stickPos[2]> (self.stickHeight + 0.005)):
return hScale* min(heightTarget, stickPos[2])
else:
return 0
def pushReward():
c1 = 1000
c2 = 0.01
c3 = 0.001
cond = self.pickCompleted and (reachDist < 0.1) and not(objDropped())
if cond:
pushRew = 1000*(self.maxPlaceDist - placeDist) + c1*(np.exp(-(placeDist**2)/c2) + np.exp(-(placeDist**2)/c3))
if placeDist < 0.05:
c4 = 2000
c5 = 0.001
c6 = 0.0001
pushRew += 1000*(self.maxPushDist - pushDist) + c4*(np.exp(-(pushDist**2)/c5) + np.exp(-(pushDist**2)/c6))
pushRew = max(pushRew,0)
return [pushRew , pushDist]
else:
return [0 , pushDist]
reachRew, reachDist = reachReward()
pickRew = orig_pickReward()
pushRew , pushDist = pushReward()
assert ((pushRew >=0) and (pickRew>=0))
reward = reachRew + pickRew + pushRew
return [reward, reachRew, reachDist, pickRew, pushRew, pushDist, placeDist]
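# Rough usage sketch (illustrative only): step() is guarded by
# @_assert_task_is_set, so this assumes a Metaworld task has been configured
# for the env first (e.g. via the benchmark API's set_task).
#
#   env = SawyerStickPushEnvV2()
#   env.set_task(task)  # `task` would come from a Metaworld benchmark object
#   obs = env.reset()
#   for _ in range(10):
#       obs, reward, done, info = env.step(env.action_space.sample())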
|
the-stack_0_23152 | # coding: utf-8
#
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for feedback thread and message operations."""
from __future__ import annotations
import datetime
import itertools
from core import feconf
from core import python_utils
from core.domain import email_manager
from core.domain import feedback_domain
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import taskqueue_services
from core.domain import user_services
from core.platform import models
(
email_models, expl_models, feedback_models,
question_models, skill_models, suggestion_models,
topic_models
) = models.Registry.import_models([
models.NAMES.email, models.NAMES.exploration, models.NAMES.feedback,
models.NAMES.question, models.NAMES.skill, models.NAMES.suggestion,
models.NAMES.topic
])
datastore_services = models.Registry.import_datastore_services()
transaction_services = models.Registry.import_transaction_services()
DEFAULT_SUGGESTION_THREAD_SUBJECT = 'Suggestion from a learner'
DEFAULT_SUGGESTION_THREAD_INITIAL_MESSAGE = ''
TARGET_TYPE_TO_TARGET_MODEL = {
feconf.ENTITY_TYPE_EXPLORATION: (
expl_models.ExplorationModel),
feconf.ENTITY_TYPE_QUESTION: (
question_models.QuestionModel),
feconf.ENTITY_TYPE_SKILL: (
skill_models.SkillModel),
feconf.ENTITY_TYPE_TOPIC: (
topic_models.TopicModel)
}
def get_exp_id_from_thread_id(thread_id):
"""Returns the exploration_id part of the thread_id.
TODO(#8370): Once feedback threads are generalized, this function needs to
be updated to get the id from any general entity, not just explorations. At
the moment, it still assumes that the thread id is associated to an
exploration.
Args:
thread_id: str. The id of the thread.
Returns:
str. The exploration id part of the thread_id.
"""
return thread_id.split('.')[1]
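# Illustrative sketch (hypothetical thread id): ids generated by
# GeneralFeedbackThreadModel.generate_new_thread_id take the shape
# '<entity_type>.<entity_id>.<random_suffix>', so for example:
#   get_exp_id_from_thread_id('exploration.exp_1.aB3xYz') -> 'exp_1'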
def _create_models_for_thread_and_first_message(
entity_type, entity_id, original_author_id, subject, text,
has_suggestion):
"""Creates a feedback thread and its first message.
Args:
entity_type: str. The type of entity the feedback thread is linked to.
entity_id: str. The id of the entity.
original_author_id: str. The author id who starts this thread.
subject: str. The subject of this thread.
text: str. The text of the feedback message. This may be ''.
has_suggestion: bool. Whether this thread has a related learner
suggestion.
Returns:
str. The id of the new thread.
"""
thread_id = (
feedback_models.GeneralFeedbackThreadModel.generate_new_thread_id(
entity_type, entity_id))
thread = feedback_models.GeneralFeedbackThreadModel.create(thread_id)
thread.entity_type = entity_type
thread.entity_id = entity_id
thread.original_author_id = original_author_id
# The feedback analytics jobs rely on the thread status being set to 'open'
# when a new thread is created. If this is changed, changes need to be made
# there as well.
thread.status = feedback_models.STATUS_CHOICES_OPEN
thread.subject = subject
thread.has_suggestion = has_suggestion
thread.message_count = 0
thread.update_timestamps()
thread.put()
create_message(
thread_id, original_author_id, feedback_models.STATUS_CHOICES_OPEN,
subject, text)
return thread_id
def create_thread(
entity_type, entity_id, original_author_id, subject, text,
has_suggestion=False):
"""Creates a thread and its first message.
Args:
entity_type: str. The type of entity the feedback thread is linked to.
entity_id: str. The id of the entity.
original_author_id: str. The author id who starts this thread.
subject: str. The subject of this thread.
text: str. The text of the feedback message. This may be ''.
has_suggestion: bool. Whether the thread has a suggestion attached to
it.
Returns:
str. The id of the new thread.
"""
return _create_models_for_thread_and_first_message(
entity_type, entity_id, original_author_id, subject, text,
has_suggestion)
def create_message(
thread_id, author_id, updated_status, updated_subject, text,
received_via_email=False, should_send_email=True):
"""Creates a new message for the thread and subscribes the author to the
thread.
Args:
thread_id: str. The thread id the message belongs to.
author_id: str. The author id who creates this message.
updated_status: str|None. One of STATUS_CHOICES. New thread status.
Must be supplied if this is the first message of a thread. For the
rest of the thread, should exist only when the status changes.
updated_subject: str|None. New thread subject. Must be supplied if this
is the first message of a thread. For the rest of the thread, should
exist only when the subject changes.
text: str. The text of the feedback message. This may be ''.
received_via_email: bool. Whether new message is received via email or
web.
should_send_email: bool. Whether the new message(s) need to be added to
the email buffer.
Returns:
FeedbackMessage. The domain object representing the new message added
in the datastore.
Raises:
Exception. GeneralFeedbackThreadModel entity not found.
"""
return create_messages(
[thread_id], author_id, updated_status, updated_subject, text,
received_via_email=received_via_email,
should_send_email=should_send_email)[0]
def create_messages(
thread_ids, author_id, updated_status, updated_subject, text,
received_via_email=False, should_send_email=True):
"""Creates a new message for each of the distinct threads in thread_ids and
for each message, subscribes the author to the thread.
Args:
thread_ids: list(str). The thread ids to append the messages to.
author_id: str. The id of the author who creates the messages.
updated_status: str|None. One of STATUS_CHOICES. Applied to each thread.
Must be supplied if this is the first message of the threads.
Otherwise, this property should only exist when the status
changes.
updated_subject: str|None. New thread subject. Applied to each thread.
Must be supplied if this is the first message of the threads.
Otherwise, this property should only exist when the subject changes.
text: str. The text of the feedback message. This may be ''.
received_via_email: bool. Whether the new message(s) are received via
email or web.
should_send_email: bool. Whether the new message(s) need to be added to
the email buffer.
Returns:
list(FeedbackMessage). The domain objects representing the new messages
added in the datastore.
Raises:
Exception. Thread_ids must be distinct.
Exception. One or more GeneralFeedbackThreadModel entities not found.
"""
from core.domain import event_services
# Check that the thread_ids are distinct.
if len(set(thread_ids)) != len(thread_ids):
raise Exception(
            'Thread ids must be distinct when calling create_messages.')
# Get the threads at the outset, in order to check that there are models
# corresponding to each of the thread_ids.
thread_models = feedback_models.GeneralFeedbackThreadModel.get_multi(
thread_ids)
thread_ids_that_do_not_have_models = []
for index, thread_model in enumerate(thread_models):
if thread_model is None:
thread_ids_that_do_not_have_models.append(thread_ids[index])
if len(thread_ids_that_do_not_have_models) > 0:
multiple_thread_models_are_missing = (
len(thread_ids_that_do_not_have_models) > 1
)
raise Exception(
'Thread%s belonging to the GeneralFeedbackThreadModel class with '
'id%s:[%s] %s not found.' % (
's' if multiple_thread_models_are_missing else '',
's' if multiple_thread_models_are_missing else '',
' '.join(thread_ids_that_do_not_have_models),
'were' if multiple_thread_models_are_missing else 'was'
)
)
# Get the corresponding message ids, which are required for message
# creation.
message_ids = (
feedback_models.GeneralFeedbackMessageModel.get_message_counts(
thread_ids)
)
# Create a list of FullyQualifiedMessageIdentifier objects so that each
# (thread_id, message_id) pair is kept together.
message_identifiers = []
for thread_id, message_id in python_utils.ZIP(thread_ids, message_ids):
message_identifiers.append(
feedback_domain.FullyQualifiedMessageIdentifier(
thread_id, message_id))
# Create the GeneralFeedbackMessageModel instances.
message_models = feedback_models.GeneralFeedbackMessageModel.create_multi(
message_identifiers)
# Update the message instances.
for index, message_model in enumerate(message_models):
message_model.thread_id = thread_ids[index]
message_model.message_id = message_ids[index]
message_model.author_id = author_id
message_model.text = text
message_model.received_via_email = received_via_email
# Get the corresponding thread in storage.
thread_model = thread_models[index]
if updated_status:
message_model.updated_status = updated_status
if message_model.message_id == 0:
# New thread.
if thread_model.entity_type == feconf.ENTITY_TYPE_EXPLORATION:
event_services.FeedbackThreadCreatedEventHandler.record(
thread_model.entity_id)
else:
# Thread status changed.
if thread_model.entity_type == feconf.ENTITY_TYPE_EXPLORATION:
(
event_services
.FeedbackThreadStatusChangedEventHandler
.record(
thread_model.entity_id, thread_model.status,
updated_status)
)
if updated_subject:
message_model.updated_subject = updated_subject
feedback_models.GeneralFeedbackMessageModel.update_timestamps_multi(
message_models)
feedback_models.GeneralFeedbackMessageModel.put_multi(message_models)
# Update the message data cache of the threads.
for thread_model in thread_models:
thread_model.message_count += 1
if text:
thread_model.last_nonempty_message_text = text
thread_model.last_nonempty_message_author_id = author_id
# We do a put() even if the status and subject are not updated, so that the
# last_updated time of the threads reflects the last time a message was
# added to it.
old_statuses = [thread_model.status for thread_model in thread_models]
new_statuses = old_statuses
if updated_status or updated_subject:
new_statuses = []
for index, thread_model in enumerate(thread_models):
            # The status/subject updates only apply after the thread's first
            # message; for the first message they were set at thread creation.
if message_ids[index] != 0:
if updated_status and (updated_status != thread_model.status):
thread_model.status = updated_status
if updated_subject and (
updated_subject != thread_model.subject):
thread_model.subject = updated_subject
new_statuses.append(thread_model.status)
feedback_models.GeneralFeedbackThreadModel.update_timestamps_multi(
thread_models)
feedback_models.GeneralFeedbackThreadModel.put_multi(thread_models)
# For each thread, we do a put on the suggestion linked (if it exists) to
# the thread, so that the last_updated time changes to show that there is
# activity in the thread.
thread_ids_that_have_linked_suggestions = []
for thread_model in thread_models:
if thread_model.has_suggestion:
thread_ids_that_have_linked_suggestions.append(thread_model.id)
general_suggestion_models = (
suggestion_models.GeneralSuggestionModel.get_multi(
thread_ids_that_have_linked_suggestions)
)
suggestion_models_to_update = []
for suggestion_model in general_suggestion_models:
# As the thread is created before the suggestion, for the first message
# we need not update the suggestion.
if suggestion_model:
suggestion_models_to_update.append(suggestion_model)
suggestion_models.GeneralSuggestionModel.update_timestamps_multi(
suggestion_models_to_update)
suggestion_models.GeneralSuggestionModel.put_multi(
suggestion_models_to_update)
if (feconf.CAN_SEND_EMAILS and (
feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS and
user_services.is_user_registered(author_id)) and
# TODO(#12079): Figure out a better way to avoid sending feedback
# thread emails for contributor dashboard suggestions.
(len(text) > 0 or old_statuses[index] != new_statuses[index]) and
should_send_email):
for index, thread_model in enumerate(thread_models):
_add_message_to_email_buffer(
author_id, thread_model.id, message_ids[index],
len(text), old_statuses[index], new_statuses[index])
if author_id:
subscription_services.subscribe_to_threads(author_id, thread_ids)
add_message_ids_to_read_by_list(author_id, message_identifiers)
# Convert the GeneralFeedbackMessageModels into a list of FeedbackMessage
# domain objects.
feedback_messages = [
_get_message_from_model(message_model) for message_model in
message_models
]
return feedback_messages
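# Minimal usage sketch for create_messages (hypothetical ids; assumes both
# threads already exist in the datastore): posting the same reply to two
# threads in a single call.
#
#   create_messages(
#       ['exploration.exp_1.t1', 'exploration.exp_2.t2'], author_id,
#       None, None, 'Thanks, this is fixed in the latest revision.')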
def _get_threads_user_info_keys(thread_ids):
"""Gets the feedback thread user model keys belonging to thread.
Args:
thread_ids: list(str). The ids of the threads.
Returns:
list(datastore_services.Key). The keys of the feedback thread user
model.
"""
if thread_ids:
return feedback_models.GeneralFeedbackThreadUserModel.query(
feedback_models.GeneralFeedbackThreadUserModel.thread_id.IN(
thread_ids)
).fetch(keys_only=True)
else:
return []
def delete_threads_for_multiple_entities(entity_type, entity_ids):
"""Deletes a thread, its messages and thread user models. When the thread
belongs to exploration deletes feedback analytics. When the thread has a
suggestion deletes the suggestion.
Args:
entity_type: str. The type of entity the feedback thread is linked to.
entity_ids: list(str). The ids of the entities.
"""
threads = []
for entity_id in entity_ids:
threads.extend(get_threads(entity_type, entity_id))
model_keys = []
for thread in threads:
for message in get_messages(thread.id):
model_keys.append(
datastore_services.Key(
feedback_models.GeneralFeedbackMessageModel, message.id)
)
model_keys.append(
datastore_services.Key(
feedback_models.GeneralFeedbackThreadModel, thread.id)
)
if thread.has_suggestion:
model_keys.append(
datastore_services.Key(
suggestion_models.GeneralSuggestionModel, thread.id)
)
model_keys += _get_threads_user_info_keys([thread.id for thread in threads])
if entity_type == feconf.ENTITY_TYPE_EXPLORATION:
for entity_id in entity_ids:
model_keys.append(
datastore_services.Key(
feedback_models.FeedbackAnalyticsModel, entity_id)
)
datastore_services.delete_multi(model_keys)
def update_messages_read_by_the_user(user_id, thread_id, message_ids):
"""Replaces the list of message ids read by the message ids given to the
function.
Args:
user_id: str. The id of the user reading the messages.
thread_id: str. The id of the thread.
message_ids: list(int). The ids of the messages in the thread read by
the user.
"""
feedback_thread_user_model = (
feedback_models.GeneralFeedbackThreadUserModel.get(
user_id, thread_id) or
feedback_models.GeneralFeedbackThreadUserModel.create(
user_id, thread_id))
feedback_thread_user_model.message_ids_read_by_user = message_ids
feedback_thread_user_model.update_timestamps()
feedback_thread_user_model.put()
def add_message_ids_to_read_by_list(user_id, message_identifiers):
"""Adds the given message IDs to the list of message IDs read by the user.
Args:
user_id: str. The id of the user reading the messages.
message_identifiers: list(FullyQualifiedMessageIdentifier). Each
message_identifier contains a thread_id and the corresponding
message_id that will be added to the thread's list of message IDs
read by the user.
"""
# Extract the thread_ids and message_ids from the
# FullyQualifiedMessageIdentifier objects.
thread_ids = [
message_identifier.thread_id for message_identifier
in message_identifiers
]
message_ids = [
message_identifier.message_id for message_identifier
in message_identifiers
]
# Get all of the GeneralFeedbackThreadUserModels that already exist. These
# models will be None if a GeneralFeedbackThreadUserModel does not exist
# for the user_id and thread_id yet.
current_feedback_thread_user_models_with_possible_nones = (
feedback_models.GeneralFeedbackThreadUserModel.get_multi(
user_id, thread_ids))
# Keep track of which thread_ids do not have feedback thread user models
# yet.
thread_ids_missing_user_models = []
# Keep track of the message_ids corresponding to the thread_ids that do not
# have feedback thread user models yet.
message_ids_for_missing_user_models = []
# Keep track of the feedback thread user models that already exist and
# aren't None. This list will be used when we update the datastore.
current_feedback_thread_user_models = []
for index, feedback_thread_user_model in enumerate(
current_feedback_thread_user_models_with_possible_nones):
if feedback_thread_user_model is None:
thread_ids_missing_user_models.append(thread_ids[index])
message_ids_for_missing_user_models.append(message_ids[index])
else:
current_feedback_thread_user_models.append(
feedback_thread_user_model)
# Add the message_id to the messages read by the user.
feedback_thread_user_model.message_ids_read_by_user.append(
message_ids[index])
# Create the new GeneralFeedbackThreadUserModels for each of the thread_ids
# that do not have a model yet.
new_feedback_thread_user_models = []
if thread_ids_missing_user_models:
new_feedback_thread_user_models = (
feedback_models.GeneralFeedbackThreadUserModel.create_multi(
user_id, thread_ids_missing_user_models)
)
# For each of the new models, append the message_id to the
# message_ids_read_by_user property.
for index, feedback_thread_user_model in enumerate(
new_feedback_thread_user_models):
feedback_thread_user_model.message_ids_read_by_user.append(
message_ids_for_missing_user_models[index]
)
# Update both the new and previously existing models in the datastore.
current_feedback_thread_user_models.extend(
new_feedback_thread_user_models)
feedback_models.GeneralFeedbackThreadUserModel.update_timestamps_multi(
current_feedback_thread_user_models)
feedback_models.GeneralFeedbackThreadUserModel.put_multi(
current_feedback_thread_user_models)
def _get_message_from_model(message_model):
"""Converts the FeedbackMessageModel to a FeedbackMessage.
Args:
message_model: FeedbackMessageModel. The FeedbackMessageModel to be
converted.
Returns:
FeedbackMessage. The resulting FeedbackMessage domain object.
"""
return feedback_domain.FeedbackMessage(
message_model.id, message_model.thread_id, message_model.message_id,
message_model.author_id, message_model.updated_status,
message_model.updated_subject, message_model.text,
message_model.created_on, message_model.last_updated,
message_model.received_via_email)
def get_messages(thread_id):
"""Fetches all messages of the given thread.
Args:
thread_id: str. The id of the thread.
Returns:
list(FeedbackMessage). Contains all the messages in the thread.
"""
return [
_get_message_from_model(model)
for model in feedback_models.GeneralFeedbackMessageModel.get_messages(
thread_id)
]
def get_message(thread_id, message_id):
"""Fetches the message indexed by thread_id and message_id.
Args:
thread_id: str. The id of the thread.
message_id: int. The id of the message, relative to the thread.
Returns:
FeedbackMessage. The fetched message.
"""
return _get_message_from_model(
feedback_models.GeneralFeedbackMessageModel.get(thread_id, message_id))
def get_next_page_of_all_feedback_messages(
page_size=feconf.FEEDBACK_TAB_PAGE_SIZE, urlsafe_start_cursor=None):
"""Fetches a single page from the list of all feedback messages that have
been posted to any exploration on the site.
Args:
page_size: int. The number of feedback messages to display per page.
Defaults to feconf.FEEDBACK_TAB_PAGE_SIZE.
urlsafe_start_cursor: str or None. The cursor which represents the
current position to begin the fetch from. If None, the fetch is
started from the beginning of the list of all messages.
Returns:
tuple(messages_on_page, next_urlsafe_start_cursor, more). Where:
messages_on_page: list(FeedbackMessage). Contains the slice of
messages that are part of the page pointed to by the given start
cursor.
next_urlsafe_start_cursor: str. The cursor to the next page.
more: bool. Whether there are more messages available to fetch after
this batch.
"""
models_on_page, next_urlsafe_start_cursor, more = (
feedback_models.GeneralFeedbackMessageModel.get_all_messages(
page_size, urlsafe_start_cursor))
messages_on_page = [_get_message_from_model(m) for m in models_on_page]
return (messages_on_page, next_urlsafe_start_cursor, more)
def get_thread_analytics_multi(exploration_ids):
"""Fetches all FeedbackAnalytics, for all the given exploration ids.
A FeedbackAnalytics contains the exploration id the analytics belongs to,
how many open threads exist for the exploration, how many total threads
exist for the exploration.
Args:
exploration_ids: list(str). A list of exploration ids.
Returns:
        list(FeedbackAnalytics). Analytics in the same order as the input
list. If an exploration id is invalid, the number of threads in the
corresponding FeedbackAnalytics object will be zero.
"""
feedback_thread_analytics_models = (
feedback_models.FeedbackAnalyticsModel.get_multi(exploration_ids))
return [
feedback_domain.FeedbackAnalytics(
feconf.ENTITY_TYPE_EXPLORATION, exp_id,
model.num_open_threads if model is not None else 0,
model.num_total_threads if model is not None else 0)
for exp_id, model in python_utils.ZIP(
exploration_ids, feedback_thread_analytics_models)
]
def get_thread_analytics(exploration_id):
"""Fetches the FeedbackAnalytics for the given exploration.
Args:
exploration_id: str. The id of the exploration.
Returns:
FeedbackAnalytics. The feedback analytics of the given exploration.
"""
return get_thread_analytics_multi([exploration_id])[0]
def get_total_open_threads(feedback_analytics_list):
"""Gets the count of all open threads from the given list of
FeedbackAnalytics domain objects.
Args:
feedback_analytics_list: list(FeedbackAnalytics). A list of
FeedbackAnalytics objects to get the count of all open threads.
Returns:
        int. The count of all open threads for the given list of
FeedbackAnalytics domain objects.
"""
return sum(a.num_open_threads for a in feedback_analytics_list)
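# Example (hypothetical exploration ids): the total number of open feedback
# threads across several explorations can be computed by chaining the two
# helpers above:
#   get_total_open_threads(get_thread_analytics_multi(['exp_1', 'exp_2']))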
def get_multiple_threads(thread_ids):
"""Gets multiple feedback threads.
Args:
thread_ids: list(str). The list of thread ids.
Returns:
list(FeedbackThread). The list of feedback threads.
"""
return [
_get_thread_from_model(model)
for model in feedback_models.GeneralFeedbackThreadModel.get_multi(
thread_ids)
]
def _get_thread_from_model(thread_model):
"""Converts the given FeedbackThreadModel to a FeedbackThread object.
Args:
thread_model: FeedbackThreadModel. The FeedbackThread model object to be
converted to FeedbackThread object.
Returns:
FeedbackThread. The corresponding FeedbackThread domain object.
"""
message_count = (
thread_model.message_count or
feedback_models.GeneralFeedbackMessageModel.get_message_count(
thread_model.id))
return feedback_domain.FeedbackThread(
thread_model.id, thread_model.entity_type, thread_model.entity_id, None,
thread_model.original_author_id, thread_model.status,
thread_model.subject, thread_model.summary, thread_model.has_suggestion,
message_count, thread_model.created_on, thread_model.last_updated,
thread_model.last_nonempty_message_text,
thread_model.last_nonempty_message_author_id)
def get_exp_thread_summaries(user_id, thread_ids):
"""Returns a list of summaries corresponding to the exploration threads from
the given thread ids. Non-exploration threads are not included in the list.
It also returns the number of threads that are currently not read by the
user.
Args:
user_id: str. The id of the user.
thread_ids: list(str). The ids of the threads for which we have to fetch
the summaries.
Returns:
tuple(thread_summaries, number_of_unread_threads). Where:
thread_summaries: list(FeedbackThreadSummary).
number_of_unread_threads: int. The number of threads not read by the
user.
"""
# We need to fetch the thread models first to filter out the threads which
# don't refer to an exploration.
exp_thread_models = [
model for model in feedback_models.GeneralFeedbackThreadModel.get_multi(
thread_ids)
if model and model.entity_type == feconf.ENTITY_TYPE_EXPLORATION
]
exp_thread_user_model_ids = [
feedback_models.GeneralFeedbackThreadUserModel.generate_full_id(
user_id, model.id)
for model in exp_thread_models
]
exp_model_ids = [model.entity_id for model in exp_thread_models]
exp_thread_user_models, exp_models = (
datastore_services.fetch_multiple_entities_by_ids_and_models([
('GeneralFeedbackThreadUserModel', exp_thread_user_model_ids),
('ExplorationModel', exp_model_ids),
]))
threads = [_get_thread_from_model(m) for m in exp_thread_models]
flattened_last_two_message_models_of_threads = (
feedback_models.GeneralFeedbackMessageModel.get_multi(
itertools.chain.from_iterable(
t.get_last_two_message_ids() for t in threads)))
last_two_message_models_of_threads = [
flattened_last_two_message_models_of_threads[i:i + 2]
for i in range(0, len(flattened_last_two_message_models_of_threads), 2)
]
thread_summaries = []
number_of_unread_threads = 0
for thread, last_two_message_models, thread_user_model, exp_model in (
python_utils.ZIP(
threads, last_two_message_models_of_threads,
exp_thread_user_models, exp_models)):
message_ids_read_by_user = (
() if thread_user_model is None else
thread_user_model.message_ids_read_by_user)
last_message_model, second_last_message_model = last_two_message_models
# We don't need to check if the last message is None because all threads
# have at least one message.
last_message_is_read = (
last_message_model.message_id in message_ids_read_by_user)
author_last_message = (
last_message_model.author_id and
user_services.get_username(last_message_model.author_id))
# The second-to-last message, however, might be None.
second_last_message_is_read = (
second_last_message_model is not None and
second_last_message_model.message_id in message_ids_read_by_user)
author_second_last_message = (
second_last_message_model and
second_last_message_model.author_id and
user_services.get_username(second_last_message_model.author_id))
if not last_message_is_read:
number_of_unread_threads += 1
thread_summaries.append(
feedback_domain.FeedbackThreadSummary(
thread.status, thread.original_author_id, thread.last_updated,
last_message_model.text, thread.message_count,
last_message_is_read, second_last_message_is_read,
author_last_message, author_second_last_message,
exp_model.title, exp_model.id, thread.id))
return thread_summaries, number_of_unread_threads
def get_threads(entity_type, entity_id):
"""Fetches all the threads for the given entity id.
Args:
entity_type: str. The type of entity the feedback thread is linked to.
entity_id: str. The id of the entity.
Returns:
        list(FeedbackThread). The feedback threads linked to the given entity.
"""
thread_models = feedback_models.GeneralFeedbackThreadModel.get_threads(
entity_type, entity_id)
return [_get_thread_from_model(m) for m in thread_models]
def get_thread(thread_id):
"""Fetches the thread by thread id.
Args:
thread_id: str. The id of the thread.
Returns:
FeedbackThread. The resulting FeedbackThread domain object.
"""
return _get_thread_from_model(
feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id))
def get_closed_threads(entity_type, entity_id, has_suggestion):
"""Fetches all closed threads of the given entity id.
Args:
entity_type: str. The type of entity the feedback thread is linked to.
entity_id: str. The id of the entity.
has_suggestion: bool. If it's True, return a list of all closed threads
that have a suggestion, otherwise return a list of all closed
threads that do not have a suggestion.
Returns:
list(FeedbackThread). The resulting FeedbackThread domain objects.
"""
return [
thread for thread in get_threads(entity_type, entity_id)
if (
thread.has_suggestion == has_suggestion and
thread.status != feedback_models.STATUS_CHOICES_OPEN)
]
def get_all_threads(entity_type, entity_id, has_suggestion):
"""Fetches all threads (regardless of their status) that correspond to the
given entity id.
Args:
entity_type: str. The type of entity the feedback thread is linked to.
entity_id: str. The id of the entity.
has_suggestion: bool. If it's True, return a list of all threads that
have a suggestion, otherwise return a list of all threads that do
not have a suggestion.
Returns:
list(FeedbackThread). The resulting FeedbackThread domain objects.
"""
return [
thread for thread in get_threads(entity_type, entity_id)
if thread.has_suggestion == has_suggestion
]
def enqueue_feedback_message_batch_email_task(user_id):
"""Adds a 'send feedback email' (batch) task into the task queue.
Args:
user_id: str. The user to be notified.
"""
taskqueue_services.enqueue_task(
feconf.TASK_URL_FEEDBACK_MESSAGE_EMAILS, {'user_id': user_id},
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS)
def enqueue_feedback_message_instant_email_task_transactional(
user_id, reference):
"""Adds a 'send feedback email' (instant) task into the task queue.
Args:
user_id: str. The user to be notified.
reference: FeedbackMessageReference. A reference that contains the data
needed to identify the feedback message.
"""
payload = {
'user_id': user_id,
'reference_dict': reference.to_dict()
}
taskqueue_services.enqueue_task(
feconf.TASK_URL_INSTANT_FEEDBACK_EMAILS, payload, 0)
@transaction_services.run_in_transaction_wrapper
def _enqueue_feedback_thread_status_change_email_task_transactional(
user_id, reference, old_status, new_status):
"""Adds a task for sending email when a feedback thread status is changed.
Args:
user_id: str. The user to be notified.
reference: FeedbackMessageReference. The feedback message reference
object to be converted to dict.
old_status: str. One of STATUS_CHOICES.
new_status: str. One of STATUS_CHOICES.
"""
payload = {
'user_id': user_id,
'reference_dict': reference.to_dict(),
'old_status': old_status,
'new_status': new_status
}
taskqueue_services.enqueue_task(
feconf.TASK_URL_FEEDBACK_STATUS_EMAILS, payload, 0)
def get_feedback_message_references(user_id):
"""Fetches all FeedbackMessageReference objects written by the given user。
Args:
        user_id: str. The id of the user. If the user id is invalid or there
            are no messages for this user, an empty list is returned.
Returns:
list(FeedbackMessageReference). The resulting FeedbackMessageReference
domain objects.
"""
model = feedback_models.UnsentFeedbackEmailModel.get(user_id, strict=False)
feedback_message_references = (
() if model is None else model.feedback_message_references)
return [
feedback_domain.FeedbackMessageReference(
reference['entity_type'], reference['entity_id'],
reference['thread_id'], reference['message_id'])
for reference in feedback_message_references
]
@transaction_services.run_in_transaction_wrapper
def _add_feedback_message_reference_transactional(user_id, reference):
"""Adds a new message to the feedback message buffer that is used to
generate the next notification email to the given user.
Args:
user_id: str. If there's an UnsentFeedbackEmailModel for the given user,
update the instance with given reference, otherwise create a new
instance.
reference: FeedbackMessageReference. The new message reference to add to
the buffer.
"""
model = feedback_models.UnsentFeedbackEmailModel.get(user_id, strict=False)
if model is not None:
model.feedback_message_references.append(reference.to_dict())
model.update_timestamps()
model.put()
else:
model = feedback_models.UnsentFeedbackEmailModel(
id=user_id, feedback_message_references=[reference.to_dict()])
model.update_timestamps()
model.put()
enqueue_feedback_message_batch_email_task(user_id)
@transaction_services.run_in_transaction_wrapper
def update_feedback_email_retries_transactional(user_id):
"""If sufficient time has passed, increment the number of retries for the
    corresponding user's UnsentFeedbackEmailModel.
Args:
user_id: str. The id of the given user.
"""
model = feedback_models.UnsentFeedbackEmailModel.get(user_id)
time_since_buffered = (
(datetime.datetime.utcnow() - model.created_on).seconds)
if (time_since_buffered >
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS):
model.retries += 1
model.update_timestamps()
model.put()
@transaction_services.run_in_transaction_wrapper
def pop_feedback_message_references_transactional(
user_id, num_references_to_pop):
"""Pops feedback message references of the given user which have been
processed already.
Args:
user_id: str. The id of the current user.
num_references_to_pop: int. Number of feedback message references that
have been processed already.
"""
model = feedback_models.UnsentFeedbackEmailModel.get(user_id)
remaining_references = (
model.feedback_message_references[num_references_to_pop:])
model.delete()
if remaining_references:
# We recreate the model in order to re-initialize its 'created_on'
# property and reset the retries count to 0. If we don't do this, then
# the retries count will be incorrect.
model = feedback_models.UnsentFeedbackEmailModel(
id=user_id, feedback_message_references=remaining_references)
model.update_timestamps()
model.put()
enqueue_feedback_message_batch_email_task(user_id)
@transaction_services.run_in_transaction_wrapper
def clear_feedback_message_references_transactional(
user_id, exploration_id, thread_id):
"""Removes feedback message references associated with a feedback thread.
Args:
user_id: str. The user who created this reference.
exploration_id: str. The id of the exploration.
thread_id: str. The id of the thread.
"""
model = feedback_models.UnsentFeedbackEmailModel.get(user_id, strict=False)
if model is None:
# Model exists only if user has received feedback on exploration.
return
updated_references = [
reference for reference in model.feedback_message_references
if (reference['entity_id'] != exploration_id or
reference['thread_id'] != thread_id)
]
if not updated_references:
# Note that any tasks remaining in the email queue will still be
# processed, but if the model for the given user does not exist,
# no email will be sent.
# Note that, since the task in the queue is not deleted, the following
# scenario may occur: If creator attends to arrived feedback before
# email is sent then model will be deleted but task will still execute
# after its countdown. Arrival of new feedback (before task is executed)
# will create new model and task. But actual email will be sent by first
# task. It means that email may be sent just after a few minutes of
# feedback's arrival.
# In PR #2261, we decided to leave things as they are for now, since it
# looks like the obvious solution of keying tasks by user id doesn't
# work (see #2258). However, this may be worth addressing in the future.
model.delete()
else:
model.feedback_message_references = updated_references
model.update_timestamps()
model.put()
def _get_all_recipient_ids(exploration_id, thread_id, author_id):
"""Fetches all authors of the exploration excluding the given author and all
the other recipients.
Args:
exploration_id: str. The id of the exploration.
thread_id: str. The id of the thread.
author_id: str. One author of the given exploration_id.
Returns:
tuple(batch_recipients, other_recipients). Where:
batch_recipients: list(str). The user_ids of the authors excluding
the given author.
other_recipients: list(str). The user_ids of the other participants
in this thread, excluding owners of the exploration and the
given author.
"""
exploration_rights = rights_manager.get_exploration_rights(exploration_id)
owner_ids = set(exploration_rights.owner_ids)
participant_ids = {
message.author_id for message in get_messages(thread_id)
if user_services.is_user_registered(message.author_id)
}
batch_recipient_ids = owner_ids - {author_id}
other_recipient_ids = participant_ids - batch_recipient_ids - {author_id}
return (list(batch_recipient_ids), list(other_recipient_ids))
def _send_batch_emails(
recipient_list, feedback_message_reference, exploration_id,
has_suggestion):
"""Adds the given FeedbackMessageReference to each of the recipient's email
buffers. The collected messages will be sent out as a batch after a short
delay.
Args:
recipient_list: list(str). A list of user_ids of all recipients of the
email.
feedback_message_reference: FeedbackMessageReference. The reference to
add to each email buffer.
exploration_id: str. The id of exploration that received new message.
has_suggestion: bool. Whether this thread has a related learner
suggestion.
"""
can_recipients_receive_email = email_manager.can_users_receive_thread_email(
recipient_list, exploration_id, has_suggestion)
for recipient_id, can_receive_email in python_utils.ZIP(
recipient_list, can_recipients_receive_email):
if can_receive_email:
_add_feedback_message_reference_transactional(
recipient_id, feedback_message_reference)
def _send_instant_emails(
recipient_list, feedback_message_reference, exploration_id,
has_suggestion):
"""Adds the given FeedbackMessageReference to each of the recipient's email
buffers. The collected messages will be sent out immediately.
Args:
recipient_list: list(str). A list of user_ids of all recipients of the
email.
feedback_message_reference: FeedbackMessageReference. The reference to
add to each email buffer.
exploration_id: str. The id of exploration that received new message.
has_suggestion: bool. Whether this thread has a related learner
suggestion.
"""
can_recipients_receive_email = email_manager.can_users_receive_thread_email(
recipient_list, exploration_id, has_suggestion)
for recipient_id, can_receive_email in python_utils.ZIP(
recipient_list, can_recipients_receive_email):
if can_receive_email:
enqueue_feedback_message_instant_email_task_transactional(
recipient_id, feedback_message_reference)
def _send_feedback_thread_status_change_emails(
recipient_list, feedback_message_reference, old_status, new_status,
exploration_id, has_suggestion):
"""Notifies the given recipients about the status change.
Args:
recipient_list: list(str). A list of recipient ids.
feedback_message_reference: FeedbackMessageReference. The reference to
add to each email buffer.
old_status: str. One of STATUS_CHOICES.
new_status: str. One of STATUS_CHOICES.
exploration_id: str. The id of the exploration that received a new
message.
has_suggestion: bool. Whether this thread has a related learner
suggestion.
"""
can_recipients_receive_email = email_manager.can_users_receive_thread_email(
recipient_list, exploration_id, has_suggestion)
for recipient_id, can_receive_email in python_utils.ZIP(
recipient_list, can_recipients_receive_email):
if can_receive_email:
_enqueue_feedback_thread_status_change_email_task_transactional(
recipient_id, feedback_message_reference, old_status,
new_status)
def _add_message_to_email_buffer(
author_id, thread_id, message_id, message_length, old_status,
new_status):
"""Sends the given message to the recipients of the given thread. If status
has changed, notify the recipients as well.
Args:
author_id: str. The id of the author of message.
thread_id: str. The id of the thread that received new message.
message_id: int. The id of the new message.
message_length: int. Length of the feedback message to be sent.
old_status: str. One of STATUS_CHOICES. Value of old thread status.
new_status: str. One of STATUS_CHOICES. Value of new thread status.
"""
thread = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)
exploration_id = thread.entity_id
has_suggestion = thread.has_suggestion
feedback_message_reference = feedback_domain.FeedbackMessageReference(
thread.entity_type, thread.entity_id, thread_id, message_id)
batch_recipient_ids, other_recipient_ids = (
_get_all_recipient_ids(exploration_id, thread_id, author_id))
if old_status != new_status:
# Send email for feedback thread status change.
_send_feedback_thread_status_change_emails(
other_recipient_ids, feedback_message_reference, old_status,
new_status, exploration_id, has_suggestion)
if message_length:
# Send feedback message email only if message text is non empty (the
# message text can be empty in the case when only status is changed).
_send_batch_emails(
batch_recipient_ids, feedback_message_reference, exploration_id,
has_suggestion)
_send_instant_emails(
other_recipient_ids, feedback_message_reference, exploration_id,
has_suggestion)
def delete_exploration_feedback_analytics(exp_ids):
"""Deletes the FeedbackAnalyticsModel models corresponding to
the given exp_ids.
Args:
exp_ids: list(str). A list of exploration IDs whose feedback analytics
models are to be deleted.
"""
feedback_analytics_models = (
feedback_models.FeedbackAnalyticsModel.get_multi(
exp_ids))
feedback_analytics_models_to_be_deleted = [
model for model in feedback_analytics_models
if model is not None]
feedback_models.FeedbackAnalyticsModel.delete_multi(
feedback_analytics_models_to_be_deleted)
def handle_new_thread_created(exp_id):
"""Reacts to new threads added to an exploration.
Args:
exp_id: str. The exploration ID associated with the thread.
"""
_increment_total_threads_count_transactional(exp_id)
_increment_open_threads_count_transactional(exp_id)
def handle_thread_status_changed(exp_id, old_status, new_status):
"""Reacts to changes in an exploration thread's status.
Args:
exp_id: str. The exploration ID associated with the thread.
old_status: str. The old status of the thread.
new_status: str. The updated status of the thread.
"""
# Status changed from closed to open.
if (old_status != feedback_models.STATUS_CHOICES_OPEN and
new_status == feedback_models.STATUS_CHOICES_OPEN):
_increment_open_threads_count_transactional(exp_id)
# Status changed from open to closed.
elif (old_status == feedback_models.STATUS_CHOICES_OPEN and
new_status != feedback_models.STATUS_CHOICES_OPEN):
_decrement_open_threads_count_transactional(exp_id)
@transaction_services.run_in_transaction_wrapper
def _increment_open_threads_count_transactional(exp_id):
"""Increments count of open threads by one."""
model = (
feedback_models.FeedbackAnalyticsModel.get(exp_id, strict=False) or
feedback_models.FeedbackAnalyticsModel(id=exp_id, num_open_threads=0))
model.num_open_threads = (model.num_open_threads or 0) + 1
model.update_timestamps()
model.put()
@transaction_services.run_in_transaction_wrapper
def _increment_total_threads_count_transactional(exp_id):
"""Increments count of total threads by one."""
model = (
feedback_models.FeedbackAnalyticsModel.get(exp_id, strict=False) or
feedback_models.FeedbackAnalyticsModel(id=exp_id, num_total_threads=0))
model.num_total_threads = (model.num_total_threads or 0) + 1
model.update_timestamps()
model.put()
@transaction_services.run_in_transaction_wrapper
def _decrement_open_threads_count_transactional(exp_id):
"""Decrements count of open threads by one."""
model = (
feedback_models.FeedbackAnalyticsModel.get(exp_id, strict=False) or
feedback_models.FeedbackAnalyticsModel(id=exp_id, num_open_threads=0))
model.num_open_threads = (model.num_open_threads or 1) - 1
model.update_timestamps()
model.put()
|
the-stack_0_23153 | #!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test VRTWarpedDataset support.
# Author: Frank Warmerdam <[email protected]>
#
###############################################################################
# Copyright (c) 2004, Frank Warmerdam <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import shutil
import sys
from osgeo import gdal
import gdaltest
import pytest
###############################################################################
# Verify reading from simple existing warp definition.
def test_vrtwarp_1():
tst = gdaltest.GDALTest('VRT', 'vrt/rgb_warp.vrt', 2, 21504)
return tst.testOpen(check_filelist=False)
###############################################################################
# Create a new VRT warp in the temp directory.
def test_vrtwarp_2():
try:
os.remove('tmp/warp.vrt')
except OSError:
pass
gcp_ds = gdal.OpenShared('data/rgb_gcp.vrt', gdal.GA_ReadOnly)
gdaltest.vrtwarp_ds = gdal.AutoCreateWarpedVRT(gcp_ds)
gcp_ds = None
checksum = gdaltest.vrtwarp_ds.GetRasterBand(2).Checksum()
expected = 21504
assert checksum == expected, ('Got checksum of %d instead of expected %d.'
% (checksum, expected))
###############################################################################
# Force the VRT warp file to be written to disk and close it. Reopen, and
# verify checksum.
def test_vrtwarp_3():
gdaltest.vrtwarp_ds.SetDescription('tmp/warp.vrt')
gdaltest.vrtwarp_ds = None
gdaltest.vrtwarp_ds = gdal.Open('tmp/warp.vrt', gdal.GA_ReadOnly)
checksum = gdaltest.vrtwarp_ds.GetRasterBand(2).Checksum()
expected = 21504
gdaltest.vrtwarp_ds = None
gdal.GetDriverByName('VRT').Delete('tmp/warp.vrt')
assert checksum == expected, ('Got checksum of %d instead of expected %d.'
% (checksum, expected))
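# Note: the sequence above is the usual way to persist a warped VRT: build it
# in memory with gdal.AutoCreateWarpedVRT(), point it at a filename via
# SetDescription(), then close the dataset so the VRT XML is flushed to disk
# and can be reopened with gdal.Open().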
###############################################################################
# Test implicit overviews with default source overview level strategy (AUTO)
def test_vrtwarp_4():
src_ds = gdal.Open('../gcore/data/byte.tif')
tmp_ds = gdal.GetDriverByName('GTiff').CreateCopy('tmp/vrtwarp_4.tif', src_ds)
cs_main = tmp_ds.GetRasterBand(1).Checksum()
tmp_ds.BuildOverviews('NONE', overviewlist=[2, 4])
tmp_ds.GetRasterBand(1).GetOverview(0).Fill(127)
cs_ov0 = tmp_ds.GetRasterBand(1).GetOverview(0).Checksum()
tmp_ds.GetRasterBand(1).GetOverview(1).Fill(255)
cs_ov1 = tmp_ds.GetRasterBand(1).GetOverview(1).Checksum()
vrtwarp_ds = gdal.AutoCreateWarpedVRT(tmp_ds)
tmp_ds = None
for i in range(3):
assert vrtwarp_ds.GetRasterBand(1).GetOverviewCount() == 2
assert vrtwarp_ds.GetRasterBand(1).Checksum() == cs_main, i
assert vrtwarp_ds.GetRasterBand(1).GetOverview(0).Checksum() == cs_ov0
assert vrtwarp_ds.GetRasterBand(1).GetOverview(1).Checksum() == cs_ov1
if i == 0:
vrtwarp_ds.SetDescription('tmp/vrtwarp_4.vrt')
vrtwarp_ds = None
vrtwarp_ds = gdal.Open('tmp/vrtwarp_4.vrt')
elif i == 1:
vrtwarp_ds = None
tmp_ds = gdal.Open('tmp/vrtwarp_4.tif')
vrtwarp_ds = gdal.AutoCreateWarpedVRT(tmp_ds)
vrtwarp_ds.SetMetadataItem('SrcOvrLevel', 'AUTO')
vrtwarp_ds.SetDescription('tmp/vrtwarp_4.vrt')
tmp_ds = None
# Add an explicit overview
vrtwarp_ds.BuildOverviews('NEAR', overviewlist=[2, 4, 8])
vrtwarp_ds = None
ds = gdal.GetDriverByName('MEM').Create('', 3, 3, 1)
ds.GetRasterBand(1).Fill(255)
expected_cs_ov2 = ds.GetRasterBand(1).Checksum()
ds = None
vrtwarp_ds = gdal.Open('tmp/vrtwarp_4.vrt')
assert vrtwarp_ds.GetRasterBand(1).GetOverviewCount() == 3
assert vrtwarp_ds.GetRasterBand(1).Checksum() == cs_main
assert vrtwarp_ds.GetRasterBand(1).GetOverview(0).Checksum() == cs_ov0
assert vrtwarp_ds.GetRasterBand(1).GetOverview(1).Checksum() == cs_ov1
assert vrtwarp_ds.GetRasterBand(1).GetOverview(2).Checksum() == expected_cs_ov2
vrtwarp_ds = None
gdal.Unlink('tmp/vrtwarp_4.vrt')
gdal.Unlink('tmp/vrtwarp_4.tif')
###############################################################################
# Test implicit overviews with selection of the upper source overview level
def test_vrtwarp_5():
src_ds = gdal.Open('../gcore/data/byte.tif')
tmp_ds = gdal.GetDriverByName('GTiff').CreateCopy('tmp/vrtwarp_5.tif', src_ds)
cs_main = tmp_ds.GetRasterBand(1).Checksum()
tmp_ds.BuildOverviews('NONE', overviewlist=[2, 4])
tmp_ds.GetRasterBand(1).GetOverview(0).Fill(127)
tmp_ds.GetRasterBand(1).GetOverview(0).Checksum()
tmp_ds.GetRasterBand(1).GetOverview(1).Fill(255)
tmp_ds.GetRasterBand(1).GetOverview(1).Checksum()
tmp_ds = None
ds = gdal.Warp('', 'tmp/vrtwarp_5.tif', options='-of MEM -ovr NONE -overwrite -ts 10 10')
expected_cs_ov0 = ds.GetRasterBand(1).Checksum()
ds = None
ds = gdal.GetDriverByName('MEM').Create('', 5, 5, 1)
ds.GetRasterBand(1).Fill(127)
expected_cs_ov1 = ds.GetRasterBand(1).Checksum()
ds = None
tmp_ds = gdal.Open('tmp/vrtwarp_5.tif')
vrtwarp_ds = gdal.AutoCreateWarpedVRT(tmp_ds)
vrtwarp_ds.SetMetadataItem('SrcOvrLevel', 'AUTO-1')
tmp_ds = None
assert vrtwarp_ds.GetRasterBand(1).GetOverviewCount() == 2
assert vrtwarp_ds.GetRasterBand(1).Checksum() == cs_main
assert vrtwarp_ds.GetRasterBand(1).GetOverview(0).Checksum() == expected_cs_ov0
assert vrtwarp_ds.GetRasterBand(1).GetOverview(1).Checksum() == expected_cs_ov1
vrtwarp_ds = None
gdal.Unlink('tmp/vrtwarp_5.tif')
###############################################################################
# Test implicit overviews with GCP
def test_vrtwarp_6():
src_ds = gdal.Open('../gcore/data/byte.tif')
tmp_ds = gdal.GetDriverByName('GTiff').CreateCopy('tmp/vrtwarp_6.tif', src_ds)
cs_main = tmp_ds.GetRasterBand(1).Checksum()
tmp_ds.SetGeoTransform([0, 1, 0, 0, 0, 1]) # cancel geotransform
gcp1 = gdal.GCP()
gcp1.GCPPixel = 0
gcp1.GCPLine = 0
gcp1.GCPX = 440720.000
gcp1.GCPY = 3751320.000
gcp2 = gdal.GCP()
gcp2.GCPPixel = 0
gcp2.GCPLine = 20
gcp2.GCPX = 440720.000
gcp2.GCPY = 3750120.000
gcp3 = gdal.GCP()
gcp3.GCPPixel = 20
gcp3.GCPLine = 0
gcp3.GCPX = 441920.000
gcp3.GCPY = 3751320.000
src_gcps = (gcp1, gcp2, gcp3)
tmp_ds.SetGCPs(src_gcps, src_ds.GetProjectionRef())
tmp_ds.BuildOverviews('NEAR', overviewlist=[2, 4])
cs_ov0 = tmp_ds.GetRasterBand(1).GetOverview(0).Checksum()
cs_ov1 = tmp_ds.GetRasterBand(1).GetOverview(1).Checksum()
vrtwarp_ds = gdal.AutoCreateWarpedVRT(tmp_ds)
vrtwarp_ds.SetDescription('tmp/vrtwarp_6.vrt')
vrtwarp_ds = None
tmp_ds = None
vrtwarp_ds = gdal.Open('tmp/vrtwarp_6.vrt')
assert vrtwarp_ds.GetRasterBand(1).GetOverviewCount() == 2
assert vrtwarp_ds.GetRasterBand(1).Checksum() == cs_main
assert vrtwarp_ds.GetRasterBand(1).GetOverview(0).Checksum() == cs_ov0
assert vrtwarp_ds.GetRasterBand(1).GetOverview(1).Checksum() == cs_ov1
gdal.Unlink('tmp/vrtwarp_6.vrt')
gdal.Unlink('tmp/vrtwarp_6.tif')
###############################################################################
# Test implicit overviews with GCP (TPS)
def test_vrtwarp_7():
src_ds = gdal.Open('../gcore/data/byte.tif')
tmp_ds = gdal.GetDriverByName('GTiff').CreateCopy('tmp/vrtwarp_7.tif', src_ds)
cs_main = tmp_ds.GetRasterBand(1).Checksum()
tmp_ds.SetGeoTransform([0, 1, 0, 0, 0, 1]) # cancel geotransform
gcp1 = gdal.GCP()
gcp1.GCPPixel = 0
gcp1.GCPLine = 0
gcp1.GCPX = 440720.000
gcp1.GCPY = 3751320.000
gcp2 = gdal.GCP()
gcp2.GCPPixel = 0
gcp2.GCPLine = 20
gcp2.GCPX = 440720.000
gcp2.GCPY = 3750120.000
gcp3 = gdal.GCP()
gcp3.GCPPixel = 20
gcp3.GCPLine = 0
gcp3.GCPX = 441920.000
gcp3.GCPY = 3751320.000
src_gcps = (gcp1, gcp2, gcp3)
tmp_ds.SetGCPs(src_gcps, src_ds.GetProjectionRef())
tmp_ds.BuildOverviews('NEAR', overviewlist=[2, 4])
cs_ov0 = tmp_ds.GetRasterBand(1).GetOverview(0).Checksum()
cs_ov1 = tmp_ds.GetRasterBand(1).GetOverview(1).Checksum()
tmp_ds = None
vrtwarp_ds = gdal.Warp('tmp/vrtwarp_7.vrt', 'tmp/vrtwarp_7.tif', options='-overwrite -of VRT -tps')
assert vrtwarp_ds.GetRasterBand(1).GetOverviewCount() == 2
assert vrtwarp_ds.GetRasterBand(1).Checksum() == cs_main
assert vrtwarp_ds.GetRasterBand(1).GetOverview(0).Checksum() == cs_ov0
assert vrtwarp_ds.GetRasterBand(1).GetOverview(1).Checksum() == cs_ov1
vrtwarp_ds = None
gdal.Unlink('tmp/vrtwarp_7.vrt')
gdal.Unlink('tmp/vrtwarp_7.tif')
###############################################################################
# Test implicit overviews with RPC
def test_vrtwarp_8():
shutil.copy('../gcore/data/byte.tif', 'tmp/vrtwarp_8.tif')
shutil.copy('../gcore/data/test_rpc.txt', 'tmp/vrtwarp_8_rpc.txt')
ds = gdal.Open('tmp/vrtwarp_8.tif', gdal.GA_Update)
ds.BuildOverviews('NEAR', overviewlist=[2])
ds = None
ds = gdal.Warp('', 'tmp/vrtwarp_8.tif', options='-of MEM -rpc')
expected_cs_main = ds.GetRasterBand(1).Checksum()
ds = None
vrtwarp_ds = gdal.Warp('tmp/vrtwarp_8.vrt', 'tmp/vrtwarp_8.tif', options='-overwrite -of VRT -rpc')
assert vrtwarp_ds.GetRasterBand(1).GetOverviewCount() == 1
assert vrtwarp_ds.GetRasterBand(1).Checksum() == expected_cs_main
if vrtwarp_ds.GetRasterBand(1).GetOverview(0).Checksum() != 1214:
print(vrtwarp_ds.GetRasterBand(1).GetOverview(0).XSize)
pytest.fail(vrtwarp_ds.GetRasterBand(1).GetOverview(0).YSize)
vrtwarp_ds = None
gdal.Unlink('tmp/vrtwarp_8.vrt')
gdal.Unlink('tmp/vrtwarp_8.tif')
gdal.Unlink('tmp/vrtwarp_8_rpc.txt')
###############################################################################
# Test implicit overviews with GEOLOCATION
def test_vrtwarp_9():
shutil.copy('../gcore/data/sstgeo.tif', 'tmp/sstgeo.tif')
f = open('tmp/sstgeo.vrt', 'wb')
f.write('''<VRTDataset rasterXSize="60" rasterYSize="39">
<Metadata domain="GEOLOCATION">
<MDI key="SRS">GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9108"]],AXIS["Lat",NORTH],AXIS["Long",EAST],AUTHORITY["EPSG","4326"]]</MDI>
<MDI key="X_DATASET">tmp/sstgeo.tif</MDI>
<MDI key="X_BAND">1</MDI>
<MDI key="PIXEL_OFFSET">0</MDI>
<MDI key="PIXEL_STEP">1</MDI>
<MDI key="Y_DATASET">tmp/sstgeo.tif</MDI>
<MDI key="Y_BAND">2</MDI>
<MDI key="LINE_OFFSET">0</MDI>
<MDI key="LINE_STEP">1</MDI>
</Metadata>
<VRTRasterBand dataType="Int16" band="1">
<ColorInterp>Gray</ColorInterp>
<NoDataValue>-32767</NoDataValue>
<SimpleSource>
<SourceFilename relativeToVRT="1">sstgeo.tif</SourceFilename>
<SourceBand>3</SourceBand>
<SrcRect xOff="0" yOff="0" xSize="60" ySize="39"/>
<DstRect xOff="0" yOff="0" xSize="60" ySize="39"/>
</SimpleSource>
</VRTRasterBand>
</VRTDataset>
'''.encode('ascii'))
f.close()
ds = gdal.Open('tmp/sstgeo.vrt', gdal.GA_Update)
ds.BuildOverviews('NEAR', overviewlist=[2])
ds = None
ds = gdal.Warp('', 'tmp/sstgeo.vrt', options='-of MEM -geoloc')
expected_cs_main = ds.GetRasterBand(1).Checksum()
ds = None
vrtwarp_ds = gdal.Warp('tmp/vrtwarp_9.vrt', 'tmp/sstgeo.vrt', options='-overwrite -of VRT -geoloc')
assert vrtwarp_ds.GetRasterBand(1).GetOverviewCount() == 1
assert vrtwarp_ds.GetRasterBand(1).Checksum() == expected_cs_main
assert vrtwarp_ds.GetRasterBand(1).GetOverview(0).Checksum() == 62489, \
(vrtwarp_ds.GetRasterBand(1).GetOverview(0).XSize, vrtwarp_ds.GetRasterBand(1).GetOverview(0).YSize)
vrtwarp_ds = None
gdal.Unlink('tmp/vrtwarp_9.vrt')
gdal.Unlink('tmp/sstgeo.vrt')
gdal.Unlink('tmp/sstgeo.vrt.ovr')
gdal.Unlink('tmp/sstgeo.tif')
###############################################################################
# Test implicit overviews with selection of the full resolution level
def test_vrtwarp_10():
src_ds = gdal.Open('../gcore/data/byte.tif')
tmp_ds = gdal.GetDriverByName('GTiff').CreateCopy('tmp/vrtwarp_10.tif', src_ds)
cs_main = tmp_ds.GetRasterBand(1).Checksum()
tmp_ds.BuildOverviews('NONE', overviewlist=[2, 4])
tmp_ds.GetRasterBand(1).GetOverview(0).Fill(127)
tmp_ds.GetRasterBand(1).GetOverview(0).Checksum()
tmp_ds.GetRasterBand(1).GetOverview(1).Fill(255)
tmp_ds.GetRasterBand(1).GetOverview(1).Checksum()
tmp_ds = None
ds = gdal.Warp('', 'tmp/vrtwarp_10.tif', options='-of MEM -ovr NONE -ts 10 10')
expected_cs_ov0 = ds.GetRasterBand(1).Checksum()
ds = None
ds = gdal.Warp('', 'tmp/vrtwarp_10.tif', options='-of MEM -ovr NONE -ts 5 5')
expected_cs_ov1 = ds.GetRasterBand(1).Checksum()
ds = None
tmp_ds = gdal.Open('tmp/vrtwarp_10.tif')
vrtwarp_ds = gdal.AutoCreateWarpedVRT(tmp_ds)
vrtwarp_ds.SetMetadataItem('SrcOvrLevel', 'NONE')
tmp_ds = None
assert vrtwarp_ds.GetRasterBand(1).GetOverviewCount() == 2
assert vrtwarp_ds.GetRasterBand(1).Checksum() == cs_main
assert vrtwarp_ds.GetRasterBand(1).GetOverview(0).Checksum() == expected_cs_ov0
assert vrtwarp_ds.GetRasterBand(1).GetOverview(1).Checksum() == expected_cs_ov1
vrtwarp_ds = None
gdal.Unlink('tmp/vrtwarp_10.tif')
###############################################################################
# Test implicit overviews with dest alpha band (#6081)
def test_vrtwarp_11():
ds = gdal.Open('data/vrt/bug6581.vrt')
cs1 = ds.GetRasterBand(1).Checksum()
cs2 = ds.GetRasterBand(2).Checksum()
cs3 = ds.GetRasterBand(3).Checksum()
ds = None
assert cs1 == 22122 and cs2 == 56685 and cs3 == 22122
###############################################################################
# Test reading a regular VRT whose source is a warped VRT inlined
def test_vrtwarp_read_vrt_of_warped_vrt():
ds = gdal.Open('data/vrt/vrt_of_warped_vrt.vrt')
cs = ds.GetRasterBand(1).Checksum()
assert cs == 4672
###############################################################################
# Test reading a warped VRT with blocks > 2 gigapixels
def test_vrtwarp_read_blocks_larger_than_2_gigapixels():
if not gdaltest.run_slow_tests():
pytest.skip()
if sys.maxsize < 2**32:
pytest.skip('Test not available on 32 bit')
import psutil
if psutil.virtual_memory().available < 2 * 50000 * 50000:
pytest.skip("Not enough virtual memory available")
ds = gdal.Open('data/vrt/test_deflate_2GB.vrt')
    data = ds.ReadRaster(0, 0, ds.RasterXSize, ds.RasterYSize, buf_xsize=20, buf_ysize=20)
assert data
ref_ds = gdal.GetDriverByName('MEM').Create('', 20, 20)
ref_ds.GetRasterBand(1).Fill(127)
assert data == ref_ds.ReadRaster()
###############################################################################
# Test reading a warped VRT that has blocks pointing to space.
# https://github.com/OSGeo/gdal/issues/1985
def test_vrtwarp_read_blocks_in_space():
ds = gdal.Open('data/vrt/geos_vrtwarp.vrt')
assert ds.GetRasterBand(1).ReadRaster(0, 0, 512, 512)
###############################################################################
# Test reading a warped VRT that has inconsistent block size at band and
# dataset level
@pytest.mark.parametrize("filename", ["data/vrt/warp_inconsistent_blockxsize.vrt",
"data/vrt/warp_inconsistent_blockysize.vrt"])
def test_vrtwarp_read_inconsistent_blocksize(filename):
gdal.ErrorReset()
with gdaltest.error_handler():
ds = gdal.Open(filename)
assert ds is None
assert gdal.GetLastErrorMsg() == 'Block size specified on band 1 not consistent with dataset block size'
###############################################################################
# Test that we don't write duplicated block size information
def test_vrtwarp_write_no_duplicated_blocksize():
tmpfilename = '/vsimem/tmp.vrt'
gdal.Warp(tmpfilename, 'data/byte.tif', format='VRT', width=1024, height=1024)
fp = gdal.VSIFOpenL(tmpfilename, 'rb')
assert fp
data = gdal.VSIFReadL(1, 10000, fp).decode('utf-8')
gdal.VSIFCloseL(fp)
gdal.Unlink(tmpfilename)
assert '<BlockXSize>' in data
assert '<BlockYSize>' in data
assert ' blockXSize=' not in data
assert ' blockYSize=' not in data
###############################################################################
# Test reading blocks without source pixels from a warped VRT with an alpha band
# (#4997)
def test_vrtwarp_alpha_band_and_block_without_source_pixel():
tmpfilename = '/vsimem/tmp.tif'
gdal.Translate(tmpfilename, 'data/vrt/bug4997.vrt')
ds = gdal.Open(tmpfilename)
assert ds.GetRasterBand(4).Checksum() == 0
ds = None
gdal.Unlink(tmpfilename)
###############################################################################
# Test the relativeToVRT attribute of SourceDataset
def test_vrtwarp_sourcedataset_all_relatives():
shutil.copy('data/byte.tif', 'tmp')
try:
src_ds = gdal.Open(os.path.join('tmp', 'byte.tif'))
ds = gdal.AutoCreateWarpedVRT(src_ds)
ds.SetDescription(os.path.join('tmp', 'byte.vrt'))
ds = None
assert '<SourceDataset relativeToVRT="1">byte.tif<' in open('tmp/byte.vrt', 'rt').read()
finally:
gdal.Unlink('tmp/byte.tif')
gdal.Unlink('tmp/byte.vrt')
###############################################################################
# Test the relativeToVRT attribute of SourceDataset
def test_vrtwarp_sourcedataset_source_relative_dest_absolute():
shutil.copy('data/byte.tif', 'tmp')
try:
src_ds = gdal.Open(os.path.join('tmp', 'byte.tif'))
ds = gdal.AutoCreateWarpedVRT(src_ds)
path = os.path.join(os.getcwd(), 'tmp', 'byte.vrt')
if sys.platform == 'win32':
path = path.replace('/', '\\')
ds.SetDescription(path)
ds = None
assert '<SourceDataset relativeToVRT="1">byte.tif<' in open('tmp/byte.vrt', 'rt').read()
finally:
gdal.Unlink('tmp/byte.tif')
gdal.Unlink('tmp/byte.vrt')
###############################################################################
# Test the relativeToVRT attribute of SourceDataset
def test_vrtwarp_sourcedataset_source_absolute_dest_absolute():
shutil.copy('data/byte.tif', 'tmp')
try:
src_ds = gdal.Open(os.path.join(os.getcwd(), 'tmp', 'byte.tif'))
ds = gdal.AutoCreateWarpedVRT(src_ds)
ds.SetDescription(os.path.join(os.getcwd(), 'tmp', 'byte.vrt'))
ds = None
assert '<SourceDataset relativeToVRT="1">byte.tif<' in open('tmp/byte.vrt', 'rt').read()
finally:
gdal.Unlink('tmp/byte.tif')
gdal.Unlink('tmp/byte.vrt')
###############################################################################
# Test the relativeToVRT attribute of SourceDataset
def test_vrtwarp_sourcedataset_source_absolute_dest_relative():
shutil.copy('data/byte.tif', 'tmp')
try:
path = os.path.join(os.getcwd(), 'tmp', 'byte.tif')
if sys.platform == 'win32':
path = path.replace('/', '\\')
src_ds = gdal.Open(path)
ds = gdal.AutoCreateWarpedVRT(src_ds)
ds.SetDescription(os.path.join('tmp', 'byte.vrt'))
ds = None
assert '<SourceDataset relativeToVRT="1">byte.tif<' in open('tmp/byte.vrt', 'rt').read()
finally:
gdal.Unlink('tmp/byte.tif')
gdal.Unlink('tmp/byte.vrt')
|
the-stack_0_23155 | """
Process model class file.
author: Matthew Casey
© [Digital Content Analysis Technology Ltd](https://www.d-cat.co.uk)
"""
from datetime import datetime, timedelta, timezone
from functools import partial
import i18n
from marshmallow import Schema, EXCLUDE
from time import sleep
import fusion_platform
from fusion_platform.models import fields
from fusion_platform.models.data import Data
from fusion_platform.models.model import Model, ModelError
from fusion_platform.models.process_execution import ProcessExecution
from fusion_platform.session import Session
# Define a schema class used to coerce option values. See #__coerce_value.
class OptionDataTypeSchema(Schema):
"""
Defines a Marshmallow schema which can be used to coerce any option value into its underlying Python data type.
"""
# List out all the allowed field types using their corresponding data type names.
numeric = fields.Float(allow_none=True)
currency = fields.Decimal(allow_none=True)
boolean = fields.Boolean(allow_none=True)
datetime = fields.DateTime(allow_none=True)
string = fields.String(allow_none=True)
constrained = fields.String(allow_none=True)
# Define the model schema classes. These are maintained from the API definitions.
class ProcessChainOptionSchema(Schema):
"""
Nested schema class for SSD chain option.
"""
name = fields.String(required=True)
value = fields.String(allow_none=True)
class Meta:
"""
When loading an object, make sure we exclude any unknown fields, rather than raising an exception, and put fields in their definition order.
"""
unknown = EXCLUDE
ordered = True
class ProcessChainSchema(Schema):
"""
Nested schema class for SSD chain.
"""
ssd_id = fields.UUID(required=True)
service_id = fields.UUID(required=True)
inputs = fields.List(fields.UUID(allow_none=True), allow_none=True)
outputs = fields.List(fields.UUID(required=True), allow_none=True)
options = fields.List(fields.Nested(ProcessChainOptionSchema()), allow_none=True)
intermediate = fields.Boolean(allow_none=True)
class Meta:
"""
When loading an object, make sure we exclude any unknown fields, rather than raising an exception, and put fields in their definition order.
"""
unknown = EXCLUDE
ordered = True
class ProcessSelectorSchema(Schema):
"""
Nested schema class for selector, category, data type, unit and format.
"""
selector = fields.String(required=True)
category = fields.String(required=True)
data_type = fields.String(required=True)
unit = fields.String(allow_none=True)
validation = fields.String(allow_none=True)
class Meta:
"""
When loading an object, make sure we exclude any unknown fields, rather than raising an exception, and put fields in their definition order.
"""
unknown = EXCLUDE
ordered = True
class ProcessInputSchema(Schema):
"""
Nested schema class for the processes inputs.
"""
ssd_id = fields.UUID(required=True)
input = fields.Integer(required=True)
file_type = fields.String(required=True)
resolution = fields.Integer(allow_none=True)
selectors = fields.List(fields.Nested(ProcessSelectorSchema()), allow_none=True)
id = fields.UUID(required=True)
model = fields.String(required=True)
change_trigger = fields.Boolean(required=True)
change_hash = fields.String(allow_none=True)
title = fields.String(allow_none=True)
description = fields.String(allow_none=True)
class Meta:
"""
When loading an object, make sure we exclude any unknown fields, rather than raising an exception, and put fields in their definition order.
"""
unknown = EXCLUDE
ordered = True
class ProcessOptionSchema(Schema):
"""
Nested schema class for options which are provided to the SSD images when run.
"""
ssd_id = fields.UUID(required=True)
name = fields.String(required=True)
value = fields.String(allow_none=True)
required = fields.Boolean(required=True)
data_type = fields.String(required=True)
validation = fields.String(allow_none=True)
mutually_exclusive = fields.String(allow_none=True)
advanced = fields.Boolean(allow_none=True)
title = fields.String(allow_none=True)
description = fields.String(allow_none=True)
constrained_names = fields.List(fields.String(required=True), allow_none=True, metadata={'title': i18n.t('models.process.option.constrained_names.title'),
'description': i18n.t(
'models.process.option.constrained_names.description')}) # Added this field to gold extracted data.
constrained_values = fields.List(fields.String(required=True), allow_none=True, metadata={'title': i18n.t('models.process.option.constrained_values.title'),
'description': i18n.t(
'models.process.option.constrained_values.description')}) # Added this field to gold extracted data.
class Meta:
"""
When loading an object, make sure we exclude any unknown fields, rather than raising an exception, and put fields in their definition order.
"""
unknown = EXCLUDE
ordered = True
class ProcessSchema(Schema):
"""
Schema class for process model.
Each process model has the following fields (and nested fields):
.. include::process.md
"""
id = fields.UUID(required=True, metadata={'read_only': True}) # Changed to prevent this being updated.
created_at = fields.DateTime(required=True, metadata={'read_only': True}) # Changed to prevent this being updated.
updated_at = fields.DateTime(required=True, metadata={'read_only': True}) # Changed to prevent this being updated.
organisation_id = fields.UUID(required=True, metadata={'read_only': True}) # Changed to prevent this being updated.
ssd_id = fields.UUID(required=True, metadata={'read_only': True}) # Changed to prevent this being updated.
service_id = fields.UUID(required=True, metadata={'read_only': True}) # Changed to prevent this being updated.
name = fields.String(required=True)
inputs = fields.List(fields.Nested(ProcessInputSchema()), allow_none=True, metadata={'hide': True}) # Changed to hide as an attribute.
options = fields.List(fields.Nested(ProcessOptionSchema()), allow_none=True, metadata={'hide': True}) # Changed to hide as an attribute.
chains = fields.List(fields.Nested(ProcessChainSchema()), allow_none=True, metadata={'read_only': True}) # Changed to prevent this being updated.
# Removed maximum_bounds.
run_type = fields.String(required=True)
repeat_count = fields.Integer(required=True)
repeat_start = fields.DateTime(required=True)
repeat_end = fields.DateTime(allow_none=True)
repeat_gap = fields.RelativeDelta(allow_none=True)
repeat_offset = fields.TimeDelta(allow_none=True)
process_status = fields.String(required=True, metadata={'read_only': True}) # Changed to prevent this being updated.
process_status_at = fields.DateTime(required=True, metadata={'read_only': True}) # Changed to prevent this being updated.
output_storage_period = fields.Integer(allow_none=True)
test_run = fields.Boolean(required=True, metadata={'read_only': True}) # Changed to prevent this being updated.
# Removed prices.
price = fields.Decimal(required=True, metadata={'read_only': True}) # Changed to prevent this being updated.
deletable = fields.String(allow_none=True, metadata={'read_only': True}) # Changed to prevent this being updated.
executions = fields.Boolean(allow_none=True, metadata={'hide': True}) # Changed to hide as an attribute.
# Removed creator.
class Meta:
"""
When loading an object, make sure we exclude any unknown fields, rather than raising an exception, and put fields in their definition order.
"""
unknown = EXCLUDE
ordered = True
class Process(Model):
"""
Process model class providing attributes and methods to manipulate process item details.
"""
# Override the schema.
_SCHEMA = ProcessSchema()
# Override the base model class name.
_BASE_MODEL_CLASS_NAME = 'Organisation' # A string to prevent circular imports.
# Base path.
_PATH_ROOT = '/organisations/{organisation_id}/processes'
_PATH_BASE = f"{_PATH_ROOT}/{{process_id}}"
# Override the standard model paths.
_PATH_CREATE = _PATH_ROOT
_PATH_DELETE = _PATH_BASE
_PATH_GET = _PATH_BASE
_PATH_NEW = f"{_PATH_ROOT}/new"
_PATH_PATCH = _PATH_BASE
# Add in the custom model paths.
_PATH_EXECUTE = f"{_PATH_BASE}/execute"
_PATH_EXECUTIONS = f"{_PATH_BASE}/executions"
_PATH_STOP = f"{_PATH_BASE}/stop"
# Process status values.
_PROCESS_STATUS_EXECUTE = 'execute'
# The maximum number of seconds to wait after an execution was meant to start.
_EXECUTE_WAIT_TOLERANCE = 60
# Validation parsing for constrained values.
_VALIDATION_DELIMITER = ';'
_VALIDATION_EQUALS = '='
_VALIDATION_FORMAT = 'format'
_VALIDATION_ITEM_DELIMITER = ','
_VALIDATION_NAMES = 'names'
_VALIDATION_VALUES = 'values'
# Allowed file type substitutions.
# @formatter:off
_FILE_TYPE_SUBSTITUTIONS = {
fusion_platform.FILE_TYPE_GEOTIFF: [fusion_platform.FILE_TYPE_GEOTIFF, fusion_platform.FILE_TYPE_DEM, fusion_platform.FILE_TYPE_JPEG2000],
fusion_platform.FILE_TYPE_JPEG2000: [fusion_platform.FILE_TYPE_JPEG2000],
fusion_platform.FILE_TYPE_DEM: [fusion_platform.FILE_TYPE_DEM],
fusion_platform.FILE_TYPE_GEOJSON: [fusion_platform.FILE_TYPE_GEOJSON, fusion_platform.FILE_TYPE_KML, fusion_platform.FILE_TYPE_KMZ,
fusion_platform.FILE_TYPE_ESRI_SHAPEFILE],
fusion_platform.FILE_TYPE_KML: [fusion_platform.FILE_TYPE_KML],
fusion_platform.FILE_TYPE_KMZ: [fusion_platform.FILE_TYPE_KMZ],
fusion_platform.FILE_TYPE_CSV: [fusion_platform.FILE_TYPE_CSV],
fusion_platform.FILE_TYPE_ESRI_SHAPEFILE: [fusion_platform.FILE_TYPE_ESRI_SHAPEFILE],
fusion_platform.FILE_TYPE_JPEG: [fusion_platform.FILE_TYPE_JPEG],
fusion_platform.FILE_TYPE_PNG: [fusion_platform.FILE_TYPE_PNG],
fusion_platform.FILE_TYPE_OTHER: [fusion_platform.FILE_TYPE_OTHER],
}
# @formatter:on
@classmethod
def __coerce_value(cls, value, data_type):
"""
Attempts to coerce a value into its corresponding data type.
Args:
value: The value to coerce.
data_type: The required data type.
Returns:
The coerced value of the correct data type.
"""
# Deal with None values.
if (value is None) or (value == str(None)) or (value == 'null'):
return None
else:
# Use the option schema to attempt to load the value using its data type as a name.
model = OptionDataTypeSchema().load({data_type: value})
return model.get(data_type)
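    # Illustrative behaviour of the coercion above (values are examples only): a
    # "numeric" option value of "2.5" loads as the float 2.5, a "boolean" value of
    # "true" loads as True, and None / "None" / "null" all collapse to None.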
def create(self):
"""
Attempts to persist the template process in the Fusion Platform<sup>®</sup> so that it can be executed.
Raises:
RequestError: if the create fails.
ModelError: if the model could not be created and validated by the Fusion Platform<sup>®</sup>.
"""
# Attempt to issue the create.
self._create()
def execute(self, wait=False):
"""
Attempts to execute the created process in the Fusion Platform<sup>®</sup>. Optionally waits for the next execution to start, and then for it to complete.
Args:
wait: Optionally wait for the next execution to start and complete? Default False.
Raises:
RequestError: if the execute fails.
ModelError: if the execution fails.
"""
# Send the request and load the resulting model.
self._send_and_load(self._get_path(self.__class__._PATH_EXECUTE), method=Session.METHOD_POST)
if wait:
# If we are waiting for the execution to complete, wait for the next execution to start...
self.wait_for_next_execution()
# ...and for all the executions to complete.
for execution in self.executions:
execution.check_complete(wait=wait)
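    # A rough usage sketch (the process instance and how it was obtained are assumed):
    #   process.execute(wait=True)
    # blocks until the next scheduled execution has started and every resulting
    # execution reports completion via check_complete().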
@property
def executions(self):
"""
Provides an iterator through the process's executions.
Returns:
An iterator through the execution objects.
Raises:
RequestError: if any get fails.
ModelError: if a model could not be loaded or validated from the Fusion Platform<sup>®</sup>.
"""
return ProcessExecution._models_from_api_path(self._session, self._get_path(self.__class__._PATH_EXECUTIONS), reverse=True) # Most recent first.
@classmethod
def __extract_constrained_validation(cls, validation):
"""
Extracts the constrained values from a constrained option validation.
Args:
validation: The constrained option validation.
Returns:
A tuple (constrained_names, constrained_values) of the extracted elements or (None, None) if the constrained values cannot be extracted.
"""
# Extract the elements.
elements = validation.split(Process._VALIDATION_DELIMITER)
constrained_names = None
constrained_values = None
if len(elements) <= 2:
for element in elements:
values = element.split(Process._VALIDATION_EQUALS)
if len(values) > 0:
lhs = values[0].lower()
rhs = Process._VALIDATION_EQUALS.join(values[1:])
try:
if lhs == Process._VALIDATION_VALUES:
constrained_values = rhs.split(Process._VALIDATION_ITEM_DELIMITER)
if lhs == Process._VALIDATION_NAMES:
constrained_names = rhs.split(Process._VALIDATION_ITEM_DELIMITER)
except:
pass # Cannot be parsed.
return constrained_names, constrained_values
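    # Example of the validation format parsed above (illustrative string):
    #   "names=Low,High;values=low,high"
    # yields constrained_names == ['Low', 'High'] and
    # constrained_values == ['low', 'high'].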
@classmethod
def __extract_datetime_validation(cls, validation):
"""
Extracts the datetime format from a datatime option validation.
Args:
validation: The datatime option validation.
Returns:
The datetime format, or None if it cannot be extracted.
"""
# Extract the elements.
elements = validation.split(Process._VALIDATION_DELIMITER)
format = None
if len(elements) <= 3:
for element in elements:
values = element.split(Process._VALIDATION_EQUALS)
if len(values) > 0:
lhs = values[0].lower()
rhs = Process._VALIDATION_EQUALS.join(values[1:])
try:
if lhs == Process._VALIDATION_FORMAT:
format = rhs
except:
pass # Cannot be parsed.
return format
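    # Example of the validation format parsed above (illustrative string):
    #   "format=%d/%m/%Y" yields the datetime format '%d/%m/%Y'.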
def find_executions(self, id=None, group_id=None):
"""
Searches for the process's executions with the specified id and/or group id, returning the first object found and an iterator.
Args:
id: The execution id to search for.
group_id: The execution group id to search for.
Returns:
The first found execution object, or None if not found, and an iterator through the found execution objects.
Raises:
RequestError if any get fails.
ModelError if a model could not be loaded or validated from the Fusion Platform<sup>®</sup>.
"""
filter = self.__class__._build_filter(
[(self.__class__._FIELD_ID, self.__class__._FILTER_MODIFIER_EQ, id), (self.__class__._FIELD_GROUP_ID, self.__class__._FILTER_MODIFIER_EQ, group_id)])
# Build the partial find generator and execute it.
find = partial(ProcessExecution._models_from_api_path, self._session, self._get_path(self.__class__._PATH_EXECUTIONS), filter=filter)
return self.__class__._first_and_generator(find)
@property
def inputs(self):
"""
Provides an iterator through the process' inputs.
Returns:
An iterator through the inputs.
"""
for input in self._model.get(self.__class__._FIELD_INPUTS, []):
# We first have to remove the mapping proxy so that we can wrap the dictionary in a model.
input = dict(input)
# Encapsulate the dictionary within a model (which does not talk to the API).
model = Model(None, schema=ProcessInputSchema())
model._set_model(input)
yield model
@property
def options(self):
"""
Provides an iterator through the process' options.
Returns:
An iterator through the options.
"""
for option in self._model.get(self.__class__._FIELD_OPTIONS, []):
# We first have to remove the mapping proxy so that we can wrap the dictionary in a model.
option = dict(option)
# If the option is a constrained data type, then add in the constrained names and values from the validation.
if option.get(self.__class__._FIELD_DATA_TYPE) == fusion_platform.DATA_TYPE_CONSTRAINED:
option[self.__class__._FIELD_CONSTRAINED_NAMES], option[self.__class__._FIELD_CONSTRAINED_VALUES] = self.__class__.__extract_constrained_validation(
option.get(self.__class__._FIELD_VALIDATION, ''))
# Coerce the value to be of the correct data type.
option[self.__class__._FIELD_VALUE] = self.__class__.__coerce_value(option.get(self.__class__._FIELD_VALUE),
option.get(self.__class__._FIELD_DATA_TYPE))
# Encapsulate the dictionary within a model (which does not talk to the API).
model = Model(None, schema=ProcessOptionSchema())
model._set_model(option)
yield model
def __set_input(self, number=None, input=None, data=None):
"""
        Sets the specified input for the process to the data item. An exception is raised if the process is in the execute status, if the input does not
        exist, or if the data object is not ready to be used or has the wrong file type.
Args:
number: The input number to set, starting from 1 for the first input. Either the number or the input must be provided.
input: The input object for the input to set. Either the number or the input must be provided.
data: The data object to use for the input.
Raises:
            ModelError: if the process is in the execute status.
ModelError: if the input does not exist.
            ModelError: if the data object is not ready to be used in a process.
ModelError: if the data object has a different file type to the input.
"""
# Make sure the arguments are provided.
if (number is None) and (input is None):
raise ModelError(i18n.t('models.process.input_not_specified'))
if (data is None) or (not isinstance(data, Data)):
raise ModelError(i18n.t('models.process.data_not_specified'))
# Make sure the process is not in the execute state.
if hasattr(self, self.__class__._FIELD_PROCESS_STATUS) and (self.process_status == Process._PROCESS_STATUS_EXECUTE):
raise ModelError(i18n.t('models.process.no_change_executing'))
# Find the corresponding input.
index = None
found_input = None
for index, item in enumerate(self._model.get(self.__class__._FIELD_INPUTS, [])):
found = (number is not None) and (number == item.get(self.__class__._FIELD_INPUT))
found = (input is not None) and (str(input.ssd_id) == str(item.get(self.__class__._FIELD_SSD_ID))) and (
input.input == item.get(self.__class__._FIELD_INPUT)) if not found else found
if found:
found_input = item
break
if found_input is None:
raise ModelError(i18n.t('models.process.cannot_find_input'))
# Check that all the files in the data object are ready to be used. Along the way, pick out the first file type.
found_file_type = None
ready = True
for file in data.files:
found_file_type = file.file_type if found_file_type is None else found_file_type
if not hasattr(file, self.__class__._FIELD_PUBLISHABLE):
ready = False
if not ready:
raise ModelError(i18n.t('models.process.data_not_ready'))
# Check the file type against the allowed list of substitutions.
if found_file_type not in Process._FILE_TYPE_SUBSTITUTIONS.get(found_input.get(self.__class__._FIELD_FILE_TYPE), []):
raise ModelError(i18n.t('models.process.wrong_file_type', expected=found_input.get(self.__class__._FIELD_FILE_TYPE), actual=found_file_type))
# We can now update the input.
self._set_field([self.__class__._FIELD_INPUTS, index, self.__class__._FIELD_ID], data.id)
def __set_option(self, name=None, option=None, value=None):
"""
Sets the specified option for the process to the value. An exception is raised if the process is in the execute status, the option does not exist or the
value has the wrong type.
Args:
name: The option name to set. Either the name or the option must be provided.
option: The option object for the option to set. Either the name or the option must be provided.
value: The value for the option.
Raises:
            ModelError: if the process is in the execute status.
ModelError: if the value has a different type to the option.
"""
# Make sure the arguments are provided.
if (name is None) and (option is None):
raise ModelError(i18n.t('models.process.option_not_specified'))
# Make sure the process is not in the execute state.
if hasattr(self, self.__class__._FIELD_PROCESS_STATUS) and (self.process_status == Process._PROCESS_STATUS_EXECUTE):
raise ModelError(i18n.t('models.process.no_change_executing'))
# Find the corresponding option.
index = None
found_option = None
for index, item in enumerate(self._model.get(self.__class__._FIELD_OPTIONS, [])):
found = (name is not None) and (name == item.get(self.__class__._FIELD_NAME))
found = (option is not None) and (str(option.ssd_id) == str(item.get(self.__class__._FIELD_SSD_ID))) and (
option.name == item.get(self.__class__._FIELD_NAME)) if not found else found
if found:
found_option = item
break
if found_option is None:
raise ModelError(i18n.t('models.process.cannot_find_option'))
# Check that the option has the same data type as the value. We cannot check this if either value is None. Note that supplied ints can be used for floats.
data_type = found_option.get(self.__class__._FIELD_DATA_TYPE)
existing_value = self.__class__.__coerce_value(found_option[self.__class__._FIELD_VALUE], data_type)
class_matches = isinstance(value, existing_value.__class__)
if isinstance(value, int) and isinstance(existing_value, float):
class_matches = True
if (value is not None) and (existing_value is not None) and (not class_matches):
raise ModelError(i18n.t('models.process.option_wrong_type', type=existing_value.__class__))
# We can now update the option. All options are expressed as strings with the correct format.
validation = found_option.get(self.__class__._FIELD_VALIDATION)
self._set_field([self.__class__._FIELD_OPTIONS, index, self.__class__._FIELD_VALUE], self.__class__.__value_to_option(value, data_type, validation))
def stop(self):
"""
Stops the process from being executed. This will abort any executions which are in progress and prevent any scheduled executions from taking place.
Raises:
RequestError: if the stop fails.
"""
# Send the request and load the resulting model.
self._send_and_load(self._get_path(self.__class__._PATH_STOP), method=Session.METHOD_POST)
def update(self, input_number=None, input=None, data=None, option_name=None, option=None, value=None, **kwargs):
"""
        Attempts to update the model object with the given values. This assumes the model is updated using a PATCH RESTful request whose body contains
        key names which include the name of the model class. Overridden to prevent changes if the process is being executed and to handle the special cases
        of setting inputs and options.
Args:
input_number: The input number to set, starting from 1 for the first input. Either the number or the input must be provided when setting an input.
input: The input object for the input to set. Either the number or the input must be provided when setting an input.
data: The data object to use for an input.
option_name: The option name to set. Either the name or the option must be provided when setting an option.
option: The option object for the option to set. Either the name or the option must be provided when setting an option.
value: The value for the option.
kwargs: The model attributes which are to be patched.
Raises:
RequestError: if the update fails.
ModelError: if the process is in the execute state.
ModelError: if the process has been persisted and changes are requested to an input or option.
ModelError: if the model could not be loaded or validated from the Fusion Platform<sup>®</sup>.
"""
# Make sure the process is not in the execute state.
if hasattr(self, self.__class__._FIELD_PROCESS_STATUS) and (self.process_status == Process._PROCESS_STATUS_EXECUTE):
raise ModelError(i18n.t('models.process.no_change_executing'))
# Deal with the special case of inputs.
if (input_number is not None) or (input is not None):
self.__set_input(number=input_number, input=input, data=data)
# Deal with the special case of options.
if (option_name is not None) or (option is not None):
self.__set_option(name=option_name, option=option, value=value)
# Now update the model, persisting as needed.
super(Process, self).update(**kwargs)
@classmethod
def __value_to_option(cls, value, data_type, validation):
"""
Converts a Python option value into a string depending upon its data type and validation parameters.
Args:
value: The value to convert.
data_type: The option data type.
validation: The optional validation for the option.
Returns:
The correct string representation of the option.
"""
if value is None:
return None
elif isinstance(value, bool):
return str(value).lower()
if data_type == fusion_platform.DATA_TYPE_DATETIME:
return datetime.strftime(value, cls.__extract_datetime_validation(validation))
else:
return str(value)
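    # Illustrative conversions (example values): True becomes 'true', the integer 42
    # becomes '42', and a datetime is rendered with the format taken from the option's
    # validation string, e.g. 'format=%Y-%m-%d' gives '2021-01-01'.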
def wait_for_next_execution(self):
"""
Waits for the next execution of the process to start, according to its schedule. If executions are already started, this method will return immediately.
Otherwise, this method will block until the next scheduled execution has started. An exception will be raised if the process is not being executed.
Raises:
RequestError: if any request fails.
ModelError: if the process is not being executed.
ModelError: if a model could not be loaded or validated from the Fusion Platform<sup>®</sup>.
"""
# Wait until we find the next execution.
while True:
# Load in the most recent version of the model.
self.get(organisation_id=self.organisation_id)
# Make sure the process is executable.
if self.process_status != Process._PROCESS_STATUS_EXECUTE:
raise ModelError(i18n.t('models.process.not_executable'))
# Get the most recent execution for the process. This assumes that the executions are returned with the most recent first.
self._logger.debug('checking for next execution')
executions = ProcessExecution._models_from_api_path(self._session, self._get_path(self.__class__._PATH_EXECUTIONS), items_per_request=1, reverse=True)
execution = next(executions, None)
# Ignore any execution older than when the next execution is expected. This assumes that the process repeat start is maintained correctly, and that it
# is set after any corresponding executions have been created for the current repeat start.
execution = None if (execution is not None) and (execution.created_at < self.repeat_start) else execution
# Stop if we have an execution which is beyond the repeat start date.
if execution is not None:
self._logger.debug('execution %s found', execution.id)
break
# If we have no recent executions, and longer than the allowed period has elapsed since the next execution was meant to start, then raise an exception.
if (execution is None) and (self.repeat_start + timedelta(seconds=Process._EXECUTE_WAIT_TOLERANCE)) < datetime.now(timezone.utc):
raise ModelError(i18n.t('models.process.execution_should_have_started'))
# We are waiting, so block for a short while.
sleep(self.__class__._API_UPDATE_WAIT_PERIOD)
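# A hedged end-to-end sketch of how this class is typically driven (how the process
# instance is first obtained lives outside this file and is assumed here; the option
# name and data object are hypothetical):
#   process.update(option_name='an_option', value=42)
#   process.update(input_number=1, data=some_data)
#   process.create()
#   process.execute(wait=True)
#   for execution in process.executions:
#       ...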
|
the-stack_0_23156 | import argparse
import sys
from os import path, makedirs, scandir
from typing import Optional, Tuple, Union, List, Iterable, Dict
import numpy as np
import pandas as pd
from tqdm import tqdm
from classifier.face_classifier import FaceClassifier
models = [
"knn",
"svm",
"mlp",
"all"
]
# save_images = False
proj_folder = path.dirname(__file__)
input_folder = path.join(proj_folder, "data", "input")
output_folder = path.join(proj_folder, "data", "output")
# Save the embeddings and people dataframes to CSV files
csv_dfs_output = False
VECTOR_SIZE = 512
def download(file_path, url):
"""
    Downloads a URL to the desired location
    :param file_path: path where the downloaded content is saved
    :param url: source of the download
"""
import requests
import math
r = requests.get(url, stream=True)
total_size = int(r.headers.get('content-length'))
block_size = 1024
with open(file_path, 'wb') as f:
for data in tqdm(r.iter_content(block_size), total=math.ceil(total_size // block_size), desc="Download",
unit='B', unit_scale=True, unit_divisor=1024):
f.write(data)
def df_tolist(df: pd.DataFrame) -> list:
"""
    Converts a DataFrame to a list, keeping indices and values
    :param df: DataFrame to be converted
    :return: list representing df
"""
return [[index] + value for index, value in zip(df.index.tolist(), df.values.tolist())]
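# Illustrative example (hypothetical frame): a DataFrame indexed by ['a', 'b'] with a
# single value column [1, 2] is returned as [['a', 1], ['b', 2]].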
class TrainTestRecognition:
def __init__(self, rand_seed: int = 42, download_images: bool = False, append: bool = False,
save_images: bool = False):
"""
        :param rand_seed: seed used for generating pseudo-random numbers
        :param download_images: download the LFW image database
        :param append: add the images from the current input folder to the dataframes that were already saved
        :param save_images: save images of the cropped faces
"""
self.people_folders: Optional[List[str]] = None
self.number_imgs_list: List[List[str, int, int]] = []
self.embeddings: List[Iterable] = []
self.embeddings_ids: list = []
self.embeddings_df: Optional[pd.DataFrame] = None
self.people_df: Optional[pd.DataFrame] = None
self.images_per_person: Optional[int] = None
self.save_images = save_images
if rand_seed >= 0:
self.random_seed = rand_seed
print(f"Reproducibilidade possivel com seed {self.random_seed}")
else:
self.random_seed: Optional[int] = None
if download_images is True:
self._down_img_db()
self.classifiers: List[str] = []
if not self._load_dfs() or append:
            assert path.exists(input_folder), f"The input folder {input_folder} does not exist; provide a " \
                                              f"folder with images for detection, or use the '-down' option " \
                                              f"to download images from the LFW database"
self._detect_faces(append)
def train(self, classifier: str, tune_parameters: bool = True, num_sets: int = 5, images_per_person: int = 10,
optimize_number_images: bool = False, num_images_test: int = 5, num_images_train: int = 10):
"""
        Runs training and testing
        :param classifier: name of the classifier to be used ('all' for all of them)
        :param tune_parameters: whether hyperparameter optimisation using Bayesian search should be run
        :param num_sets: number of sets for k-fold (used together with tune_parameters)
        :param images_per_person: number of images per person to be split across the sets (used together with
        tune_parameters)
        :param optimize_number_images: whether tests should be run to find the minimum number of images per
        person
        :param num_images_test: number of images to use for testing (used when tune_parameters == False)
        :param num_images_train: number of images to use for training (used when
        tune_parameters == False)
"""
global models
clf_name = classifier.lower()
if clf_name == "all":
self.classifiers = models.copy()
self.classifiers.remove("all")
else:
self.classifiers.append(clf_name)
if tune_parameters:
self.images_per_person = images_per_person
            assert num_sets > 1, f"To perform cross-validation there must be both training and test sets " \
                                 f"(self.num_sets >= 2)"
            assert (self.images_per_person >= num_sets) and \
                   (self.images_per_person % num_sets == 0), \
                f"There must be at least one image per set for cross-validation, and the value must be proportional" \
                f" to the number of sets so that the different classes have the same probability of being " \
                f"classified"
else:
self.images_per_person = num_images_test + num_images_train
num_sets = num_images_train / self.images_per_person
if optimize_number_images:
self._optimize_num_images(num_images_train, num_images_test)
else:
shuffled_idx = self._get_shuffled_indices()
print(f"\nSelecionando {len(shuffled_idx) * self.images_per_person} imagens "
f"de {len(shuffled_idx)} pessoas com mais de {self.images_per_person} imagens")
X, Y, shuffled_idx, images_test_ids = self._select_embeddings(self.images_per_person, shuffled_idx)
num_people = len(shuffled_idx)
face_classifier = FaceClassifier(self.random_seed, tune_parameters)
for model in self.classifiers:
print(f"Treinando modelo {model}")
face_classifier.train(X, Y, model_name=model, num_sets=num_sets,
images_per_person=(self.images_per_person if tune_parameters
else (num_images_train, num_images_test)),
num_people=num_people,
test_images_id=images_test_ids)
@staticmethod
def _down_img_db():
"""
        Downloads a set of images from the LFW image database into the given input folder
"""
import shutil
import tarfile
input_parent_dir = path.dirname(input_folder)
temp_folder = path.join(path.curdir, "data", "temp")
makedirs(input_parent_dir, exist_ok=True)
makedirs(temp_folder, exist_ok=True)
if path.exists(input_folder):
shutil.move(input_folder, input_folder + "_bkp")
tgz_path = path.join(temp_folder, "lfw.tgz")
download(tgz_path, "http://vis-www.cs.umass.edu/lfw/lfw.tgz")
if not path.exists(tgz_path):
print("Problema no download")
sys.exit()
print("Extraindo arquivo para {}, isso pode levar um tempo".format(temp_folder))
tar = tarfile.open(tgz_path, "r:gz")
tar.extractall(temp_folder)
print("Movendo arquivos extraidos para a pasta de entrada")
shutil.move(path.join(temp_folder, "lfw"), input_folder)
return True
def _embeddings_to_df(self):
"""
        Saves the list of image embeddings in the 'self.embeddings_df' dataframe and in the compressed file
        'embeddings.bz2' for later use
"""
global VECTOR_SIZE, output_folder, csv_dfs_output
index = pd.MultiIndex.from_tuples(self.embeddings_ids, names=["Name", "Image_Number"])
temp = pd.DataFrame(self.embeddings,
columns=[("v" + str(i)) for i in range(VECTOR_SIZE)],
index=index)
if self.embeddings_df is not None:
self.embeddings_df = self.embeddings_df.append(temp)
else:
self.embeddings_df = temp
self.embeddings_df.to_pickle(path.join(output_folder, "embeddings.bz2"))
if csv_dfs_output:
self.embeddings_df.to_csv(path.join(output_folder, "embeddings.csv"), sep=";")
del self.embeddings, self.embeddings_ids
def _people_to_df(self):
"""
        Saves the list of people in the 'self.people_df' dataframe and in the compressed file 'people.bz2' for later use
"""
global output_folder, csv_dfs_output
temp = pd.DataFrame(self.number_imgs_list, columns=["Name", "Number_Images", "Not_Found"])
temp.set_index("Name", inplace=True)
if self.people_df is not None:
self.people_df = self.people_df.append(temp)
else:
self.people_df = temp
self.people_df.to_pickle(path.join(output_folder, "people.bz2"))
if csv_dfs_output:
self.people_df.to_csv(path.join(output_folder, "people.csv"), sep=";")
del self.number_imgs_list
def _load_dfs(self) -> bool:
"""
        Loads the dataframes of identified people and the embeddings of their images
        :return: True if they could be loaded, or False otherwise
"""
global output_folder, input_folder
people_file = path.join(output_folder, "people.bz2")
embeddings_file = path.join(output_folder, "embeddings.bz2")
if not path.exists(people_file) and not path.exists(embeddings_file):
return False
self.people_df = pd.read_pickle(people_file).infer_objects()
self.embeddings_df = pd.read_pickle(embeddings_file).infer_objects()
return True
def _detect_faces(self, append: bool = False):
"""
        Walks through the people folders in the input folder, detecting faces in the images and generating embeddings
"""
global VECTOR_SIZE, output_folder, input_folder
from embeddings.face_embeddings import FaceEmbeddings
from detection.face_detector import FaceDetector
import cv2
face_detector = FaceDetector()
face_recognition = FaceEmbeddings()
print("\nExecutando deteccao facial")
if self.people_folders is None:
self.people_folders = [f.path for f in scandir(input_folder) if f.is_dir()]
assert len(self.people_folders) >= 1
        prog_bar = tqdm(total=len(self.people_folders), desc="Detecting", position=0, unit="people")
for person_path in self.people_folders:
person_name = path.basename(person_path)
person_imgs_path = [f.path for f in scandir(person_path) if f.is_file()]
start = 0
failed_to_detect = 0
if append:
try:
                    # To add new images to dataframes that have already been built
df_row = self.people_df.loc[person_name]
start = int(df_row["Number_Images"])
failed_to_detect = int(df_row["Not_Found"])
self.people_df.drop(person_name, inplace=True)
except (KeyError, TypeError, AttributeError) as err:
pass
self.number_imgs_list.append([person_name, len(person_imgs_path) + start, failed_to_detect])
for i, img_path in enumerate(person_imgs_path, start=start):
# face_img = path.join(person_path, "MTCNN", f"{str(i)}.jpg")
# if path.exists(face_img):
# import cv2
# img = cv2.imread(face_img)
#
# img = face_detector.pre_process(img)
#
# self.embeddings.append(face_recognition.describe(img))
# self.embeddings_ids.append([str(person_name), i])
#
# continue
try:
# img = Image.open(img_path)
img = cv2.imread(img_path)
except (OSError, IOError):
tqdm.write('Open image file failed: ' + img_path)
self.number_imgs_list[-1][-1] += 1
continue
if img is None:
tqdm.write('Open image file failed: ' + img_path)
self.number_imgs_list[-1][-1] += 1
continue
tqdm.write(f'Detecting image {i}, file: {img_path}')
# image_torch, score = face_detector.extract_face(img)
image_torch, score = face_detector.extract_face(img, save_path=(path.join(person_path, "MTCNN",
f"{str(i)}.jpg")
if self.save_images is True else None))
if image_torch is None or score < 0.5:
tqdm.write(f'No face found in {img_path}')
if score is not None:
tqdm.write(f'(Score: {score})')
self.number_imgs_list[-1][-1] += 1
continue
self.embeddings.append(face_recognition.describe(image_torch))
self.embeddings_ids.append([str(person_name), i])
prog_bar.update(1)
prog_bar.close()
VECTOR_SIZE = face_recognition.get_embedding_size()
makedirs(output_folder, exist_ok=True)
self._embeddings_to_df()
self._people_to_df()
del self.people_folders
def _get_embeddings_vector(self, person_name: str, img_number: int) -> Optional[np.ndarray]:
"""
        Gets the embedding of the desired person and image
        :param person_name: name of the person to look up
        :param img_number: number of the desired image of that person
        :return: array with the embeddings vector, or None if it could not be found
"""
try:
return self.embeddings_df.loc[(person_name, img_number)].values
except (KeyError, TypeError) as kerr:
# tqdm.write(kerr)
# tqdm.write(f"ID desejado: {person_name}; Img: {img_number}")
return None
def _get_shuffled_indices(self) -> Dict[str, List[int]]:
"""
        Selects random image indices for each person who has at least 'self.images_per_person' images
        :return: Dictionary with the selected people and the shuffled list of their image indices
"""
people_list = df_tolist(self.people_df.loc[(self.people_df["Number_Images"] - self.people_df["Not_Found"])
>= self.images_per_person])
        assert len(people_list) > 0, "There are no people with the desired number of images"
shuffled_idxs = {}
import random
for person, num_images, not_found in people_list:
shuffled_idxs[person] = list(range(num_images))
if self.random_seed is not None:
random.seed(self.random_seed)
random.shuffle(shuffled_idxs[person])
# print(f"Indices de {people_list[0][0]}: {shuffled_idxs[people_list[0][0]]}")
return shuffled_idxs
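    # Illustrative result (hypothetical names): {'Person_A': [3, 0, 2, 1], ...},
    # containing only people whose available images (Number_Images - Not_Found)
    # reach self.images_per_person, each mapped to a shuffled list of image indices.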
def _optimize_num_images(self, num_images_test: int = 5, num_images_train: int = 10):
"""
        Runs training and testing to find the ideal number of images to use per person,
        starting from 1 training image per person up to num_images_train.
        :param num_images_test: number of images fixed for testing
        :param num_images_train: maximum number of training images to test
"""
try:
shuffled_idxs = self._get_shuffled_indices()
except AssertionError:
raise Exception("Nao ha pessoas com a quantidade de imagens necessaria para efetuar o teste")
X_test, y_test, shuffled_idxs, test_images_id = self._select_embeddings(num_images_test, shuffled_idxs)
X_train, y_train, temp = [], [], []
face_classifier = FaceClassifier(self.random_seed)
        progress_bar = tqdm(total=num_images_train, desc="Optimizing Number of Images",
                            unit="iterations", file=sys.stdout,
dynamic_ncols=True)
for i in range(1, num_images_train + 1):
X_train_new, y_train_new, shuffled_idxs, _ = self._select_embeddings(1, shuffled_idxs)
X_train.extend(X_train_new)
y_train.extend(y_train_new)
best_score, best_model = 0.0, None
for model in self.classifiers:
new_score = face_classifier.train(X=X_train, y=y_train, X_test=X_test, y_test=y_test, model_name=model,
num_sets=i / (i + num_images_test),
images_per_person=(i, num_images_test),
num_people=len(shuffled_idxs),
test_images_id=test_images_id)
if new_score > best_score:
best_score, best_model = new_score, model
progress_bar.write(f"Melhor com {i} imagens - {best_model}: {best_score}")
progress_bar.update()
def _select_embeddings(self, number_images_select: int,
people: Dict[str, List[int]]) -> Tuple[Union[list, np.ndarray], list, dict, list]:
"""
        Selects image embeddings according to the image indices provided per person and the desired quantity.
        :param number_images_select: Number of embeddings to be selected for each person
        :param people: Dictionary relating people's names to shuffled image indices; embeddings are selected from the
        images whose indices sit at the end of the list, until the desired value (number_images_select) is reached
        :return: X (embeddings), y (labels), dict with the remaining indices, and the indices of the images that generated the embeddings
"""
global VECTOR_SIZE
if VECTOR_SIZE is None:
VECTOR_SIZE = len(self.embeddings_df.iloc[:1].values[0])
def new_list():
return [[None] for x in range(number_images_select * len(people))]
saved_images_idx = 0
# X = np.zeros(((number_images_select * len(people)), VECTOR_SIZE))
X = new_list()
y = new_list()
images_num = new_list()
for person, images_idx in people.items():
person_saved_images = 0
while person_saved_images < number_images_select:
img_num = images_idx.pop()
try:
img_vector = self._get_embeddings_vector(person, img_num)
if img_vector is not None:
X[saved_images_idx] = img_vector
y[saved_images_idx] = person
images_num[saved_images_idx] = img_num
saved_images_idx += 1
person_saved_images += 1
finally:
pass
return X, y, people, images_num
# def no_arg(args_name: List[str]):
# return all(arg not in sys.argv for arg in args_name)
def main():
global input_folder, models
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input_dir", default=input_folder, metavar="path",
help="endereco para pasta com as imagens de entrada")
ap.add_argument("-clf", "--classifier", default="svm", const="svm", nargs='?', choices=models,
help=f"classificador responsavel pelo reconhecimento facial ({models})")
ap.add_argument("-down", "--download", default=False, action='store_true',
help="download do banco de imagens lfw")
ap.add_argument("-rs", "--rand_seed", type=int, default=42,
help="seed utilizada na geracao de resultados aleatorios para reproducibilidade")
ap.add_argument("-ap", "--append", default=False, action='store_true',
help="adicionar imagens da pasta de entrada atual a dataframes ja formados")
ap.add_argument("-si", "--save_images", default=False, action='store_true',
help="salvar imagens de faces recortadas")
training = ap.add_mutually_exclusive_group(required=False)
training.add_argument("-pt", "--parameter_tuning", default=False, action='store_true',
help="otimizacao dos hiperparametros dos classificadores")
training.add_argument("-oni", "--optimize_num_images", default=False, action='store_true',
help="realizacao de testes para detectar numero de imagens ideal")
ap.add_argument("-ns", "--num_sets", type=int, default=3,
# required=(True if not no_arg(["-pt", "--parameter_tuning"]) else False),
help="quantidade de sets para divisao dos dados, sendo 1 set para teste e o restante "
"para treinamento (usar junto com --parameter_tuning)")
ap.add_argument("-ipp", "--images_per_person", type=int, default=6,
# required=(True if not no_arg(["-pt", "--parameter_tuning"]) else False),
help="quantidade de imagens para cada pessoa (valor total que sera dividido entre os sets "
"(usar junto com --parameter_tuning))")
ap.add_argument("-itn", "--images_train", type=int, default=4,
# required=(True if no_arg(["-pt", "--parameter_tuning"]) else False),
help="quantidade de imagens para treinamento")
ap.add_argument("-itt", "--images_test", type=int, default=2,
# required=(True if no_arg(["-pt", "--parameter_tuning"]) else False),
help="quantidade de imagens para teste")
args = vars(ap.parse_args())
input_folder = args["input_dir"]
training = TrainTestRecognition(rand_seed=args["rand_seed"], download_images=args["download"],
append=args["append"], save_images=args["save_images"])
training.train(classifier=args["classifier"], tune_parameters=args["parameter_tuning"],
num_sets=args["num_sets"], images_per_person=args["images_per_person"],
optimize_number_images=args["optimize_num_images"],
num_images_train=args["images_train"], num_images_test=args["images_test"])
if __name__ == "__main__":
main()
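# Hypothetical invocation (the script's filename is assumed; the flags come from the
# argparse definition above):
#   python train_test_recognition.py -i data/input -clf svm -pt -ns 3 -ipp 6
# Note that -pt and -oni are mutually exclusive, and -itn/-itt apply when -pt is not used.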
|
the-stack_0_23157 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InteligentDataCondition(object):
def __init__(self):
self._data_type = None
self._limit_type = None
self._value = None
@property
def data_type(self):
return self._data_type
@data_type.setter
def data_type(self, value):
self._data_type = value
@property
def limit_type(self):
return self._limit_type
@limit_type.setter
def limit_type(self, value):
self._limit_type = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def to_alipay_dict(self):
params = dict()
if self.data_type:
if hasattr(self.data_type, 'to_alipay_dict'):
params['data_type'] = self.data_type.to_alipay_dict()
else:
params['data_type'] = self.data_type
if self.limit_type:
if hasattr(self.limit_type, 'to_alipay_dict'):
params['limit_type'] = self.limit_type.to_alipay_dict()
else:
params['limit_type'] = self.limit_type
if self.value:
if hasattr(self.value, 'to_alipay_dict'):
params['value'] = self.value.to_alipay_dict()
else:
params['value'] = self.value
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = InteligentDataCondition()
if 'data_type' in d:
o.data_type = d['data_type']
if 'limit_type' in d:
o.limit_type = d['limit_type']
if 'value' in d:
o.value = d['value']
return o
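    # Illustrative round trip (example values only): from_alipay_dict({'data_type': 'LIMIT',
    # 'limit_type': 'DAY', 'value': '10'}) builds an object whose to_alipay_dict()
    # returns the same three keys and values.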
|
the-stack_0_23159 | '''PNASNet in PyTorch.
Paper: Progressive Neural Architecture Search
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class SepConv(nn.Module):
'''Separable Convolution.'''
def __init__(self, in_planes, out_planes, kernel_size, stride):
super(SepConv, self).__init__()
self.conv1 = nn.Conv2d(in_planes, out_planes,
kernel_size, stride,
padding=(kernel_size-1)//2,
bias=False, groups=in_planes)
self.bn1 = nn.BatchNorm2d(out_planes)
def forward(self, x):
return self.bn1(self.conv1(x))
class CellA(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(CellA, self).__init__()
self.stride = stride
self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7,
stride=stride)
if stride == 2:
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(out_planes)
def forward(self, x):
y1 = self.sep_conv1(x)
y2 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
if self.stride == 2:
y2 = self.bn1(self.conv1(y2))
return F.relu(y1+y2)
class CellB(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(CellB, self).__init__()
self.stride = stride
# Left branch
self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7,
stride=stride)
self.sep_conv2 = SepConv(in_planes, out_planes, kernel_size=3,
stride=stride)
# Right branch
self.sep_conv3 = SepConv(in_planes, out_planes, kernel_size=5,
stride=stride)
if stride == 2:
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(out_planes)
# Reduce channels
self.conv2 = nn.Conv2d(2*out_planes, out_planes, kernel_size=1,
stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
def forward(self, x):
# Left branch
y1 = self.sep_conv1(x)
y2 = self.sep_conv2(x)
# Right branch
y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
if self.stride == 2:
y3 = self.bn1(self.conv1(y3))
y4 = self.sep_conv3(x)
# Concat & reduce channels
b1 = F.relu(y1+y2)
b2 = F.relu(y3+y4)
y = torch.cat([b1, b2], 1)
return F.relu(self.bn2(self.conv2(y)))
class PNASNet(nn.Module):
def __init__(self, cell_type, num_cells, num_planes):
super(PNASNet, self).__init__()
self.in_planes = num_planes
self.cell_type = cell_type
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(num_planes)
self.layer1 = self._make_layer(num_planes, num_cells=6)
self.layer2 = self._downsample(num_planes*2)
self.layer3 = self._make_layer(num_planes*2, num_cells=6)
self.layer4 = self._downsample(num_planes*4)
self.layer5 = self._make_layer(num_planes*4, num_cells=6)
self.linear = nn.Linear(num_planes*4, 10)
def _make_layer(self, planes, num_cells):
layers = []
for _ in range(num_cells):
layers.append(self.cell_type(self.in_planes, planes, stride=1))
self.in_planes = planes
return nn.Sequential(*layers)
def _downsample(self, planes):
layer = self.cell_type(self.in_planes, planes, stride=2)
self.in_planes = planes
return layer
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.layer5(out)
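        # With 32x32 inputs, the two stride-2 cells leave 8x8 feature maps, matching the fixed pool size below.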
out = F.avg_pool2d(out, 8)
out = self.linear(out.view(out.size(0), -1))
return out
def PNASNetA():
return PNASNet(CellA, num_cells=6, num_planes=44)
def PNASNetB():
return PNASNet(CellB, num_cells=6, num_planes=32)
def test():
net = PNASNetB()
print(net)
x = Variable(torch.randn(1, 3, 32, 32))
y = net(x)
print(y)
# test()
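# Minimal usage sketch (illustrative; assumes CIFAR-10-style 32x32 RGB inputs,
# which is what the fixed 8x8 average pool in forward() expects):
#
#     net = PNASNetA()
#     logits = net(torch.randn(2, 3, 32, 32))  # -> torch.Size([2, 10])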
|
the-stack_0_23160 | #!/usr/bin/env python3
"""Run pre-commit checks on the repository."""
import argparse
import enum
import os
import pathlib
import subprocess
import sys
class Step(enum.Enum):
REFORMAT = "reformat"
MYPY = "mypy"
PYLINT = "pylint"
TEST = "test"
DOCTEST = "doctest"
CHECK_INIT_AND_SETUP_COINCIDE = "check-init-and-setup-coincide"
CHECK_HELP_IN_README = "check-help-in-readme"
def main() -> int:
""""Execute entry_point routine."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--overwrite",
help="Try to automatically fix the offending files (e.g., by re-formatting).",
action="store_true",
)
parser.add_argument(
"--select",
help=(
"If set, only the selected steps are executed. "
"This is practical if some of the steps failed and you want to "
"fix them in isolation. "
"The steps are given as a space-separated list of: "
+ " ".join(value.value for value in Step)
),
metavar="",
nargs="+",
choices=[value.value for value in Step],
)
parser.add_argument(
"--skip",
help=(
"If set, skips the specified steps. "
"This is practical if some of the steps passed and "
"you want to fix the remainder in isolation. "
"The steps are given as a space-separated list of: "
+ " ".join(value.value for value in Step)
),
metavar="",
nargs="+",
choices=[value.value for value in Step],
)
args = parser.parse_args()
overwrite = bool(args.overwrite)
selects = (
[Step(value) for value in args.select]
if args.select is not None
else [value for value in Step]
)
skips = [Step(value) for value in args.skip] if args.skip is not None else []
repo_root = pathlib.Path(os.path.realpath(__file__)).parent.parent
if Step.REFORMAT in selects and Step.REFORMAT not in skips:
print("Re-formatting...")
# fmt: off
reformat_targets = [
"aas_core_codegen",
"continuous_integration",
"tests",
"setup.py"
]
# fmt: on
if overwrite:
subprocess.check_call(["black"] + reformat_targets, cwd=str(repo_root))
else:
subprocess.check_call(
["black", "--check"] + reformat_targets, cwd=str(repo_root)
)
else:
print("Skipped re-formatting.")
if Step.MYPY in selects and Step.MYPY not in skips:
print("Mypy'ing...")
# fmt: off
mypy_targets = [
"aas_core_codegen",
"tests",
"continuous_integration"
]
config_file = pathlib.Path("continuous_integration") / "mypy.ini"
subprocess.check_call(
["mypy", "--strict", f"--config-file", str(config_file)] + mypy_targets,
cwd=str(repo_root))
# fmt: on
else:
print("Skipped mypy'ing.")
if Step.PYLINT in selects and Step.PYLINT not in skips:
# fmt: off
print("Pylint'ing...")
pylint_targets = ["aas_core_codegen"]
rcfile = pathlib.Path("continuous_integration") / "pylint.rc"
subprocess.check_call(
["pylint", f"--rcfile={rcfile}"] + pylint_targets, cwd=str(repo_root)
)
# fmt: on
else:
print("Skipped pylint'ing.")
if Step.TEST in selects and Step.TEST not in skips:
print("Testing...")
env = os.environ.copy()
env["ICONTRACT_SLOW"] = "true"
# fmt: off
subprocess.check_call(
[
"coverage", "run",
"--source", "aas_core_codegen",
"-m", "unittest", "discover"
],
cwd=str(repo_root),
env=env
)
# fmt: on
subprocess.check_call(
["coverage", "report"],
cwd=str(repo_root),
)
else:
print("Skipped testing.")
if Step.DOCTEST in selects and Step.DOCTEST not in skips:
print("Doctest'ing...")
doc_files = ["README.rst"]
# BEFORE-RELEASE (mristin, 2021-12-13):
# Add ``{repo_root}/docs/source/**/*.rst`` as well here
subprocess.check_call(
[sys.executable, "-m", "doctest"] + doc_files, cwd=str(repo_root)
)
for pth in (repo_root / "aas_core_codegen").glob("**/*.py"):
if pth.name == "__main__.py":
continue
# NOTE (mristin, 2021-12-27):
# The subprocess calls are expensive, call only if there is an actual
# doctest
text = pth.read_text(encoding="utf-8")
if ">>>" in text:
subprocess.check_call(
[sys.executable, "-m", "doctest", str(pth)], cwd=str(repo_root)
)
else:
print("Skipped doctest'ing.")
if (
Step.CHECK_INIT_AND_SETUP_COINCIDE in selects
and Step.CHECK_INIT_AND_SETUP_COINCIDE not in skips
):
print("Checking that aas_core_codegen/__init__.py and setup.py coincide...")
subprocess.check_call(
[sys.executable, "continuous_integration/check_init_and_setup_coincide.py"],
cwd=str(repo_root),
)
else:
print(
"Skipped checking that aas_core_codegen/__init__.py and "
"setup.py coincide."
)
if Step.CHECK_HELP_IN_README in selects and Step.CHECK_HELP_IN_README not in skips:
cmd = [sys.executable, "continuous_integration/check_help_in_readme.py"]
if overwrite:
cmd.append("--overwrite")
if not overwrite:
print("Checking that --help's and the readme coincide...")
else:
print("Overwriting the --help's in the readme...")
subprocess.check_call(cmd, cwd=str(repo_root))
else:
print("Skipped checking that --help's and the doc coincide.")
return 0
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_23162 | from typing import Union
from author.models import Author
from author.serializers import AuthorSerializer
from author.token import NodeBasicAuth, TokenAuth
from comment.documentation import NoSchemaTitleInspector
from comment.models import Comment
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.http import HttpRequest
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from inbox.models import InboxItem
from posts.models import Post
from rest_framework.decorators import api_view, authentication_classes
from rest_framework.response import Response
from utils.request import ClassType, ParsedRequest, checkIsLocal, parseIncomingRequest, returnGETRequest, returnPOSTRequest
from likes.models import Like
from likes.serializers import LikeSerializer
from .documentation import getLikesResponse
# Create your views here.
@swagger_auto_schema(
method="GET",
operation_summary="get all likes of a post",
operation_description="not paginated atm, author id doesn't need to be an real author, maybe this needs an update",
responses={200: getLikesResponse, 404: "post with given id not found"},
field_inspectors=[NoSchemaTitleInspector],
tags=["Likes"],
)
@api_view(["GET"])
@parseIncomingRequest(["GET"], ClassType.POST)
def getPostLikes(request: Union[HttpRequest, ParsedRequest], authorId, postId):
if request.islocal:
try:
post = Post.objects.get(pk=request.id)
except Post.DoesNotExist:
return Response("post does not exist", status=404)
likes = Like.objects.filter(parentId=request.id)
return Response(LikeSerializer(likes.all(), many=True).data, status=200)
else:
return returnGETRequest(f"{request.id}likes")
@swagger_auto_schema(
method="GET",
operation_summary="get all likes of a comment",
operation_description="not paginated atm, author and post id doesnt need to be real",
responses={200: getLikesResponse, 404: "comment with given id not found"},
field_inspectors=[NoSchemaTitleInspector],
tags=["Likes"],
)
@api_view(["GET"])
@parseIncomingRequest(["GET"], ClassType.COMMENT)
def getCommentLikes(request, authorId, postId, commentId):
if request.islocal:
try:
            comment = Comment.objects.get(pk=request.id)
        except Comment.DoesNotExist:
return Response("comment does not exist", status=404)
likes = Like.objects.filter(parentId=request.id)
# pagination not required
return Response(LikeSerializer(likes.all(), many=True).data, status=200)
else:
return returnGETRequest(f"{request.id}likes")
@swagger_auto_schema(
method="GET",
operation_summary="get all likes from a author",
operation_description="with pagination options ",
responses={200: getLikesResponse, 404: "author with given id not found"},
field_inspectors=[NoSchemaTitleInspector],
tags=["Likes"],
manual_parameters=[
openapi.Parameter(
name="page",
in_=openapi.IN_QUERY,
type=openapi.TYPE_INTEGER,
description="Page number",
default=1,
),
openapi.Parameter(
name="size",
in_=openapi.IN_QUERY,
type=openapi.TYPE_INTEGER,
description="Page size",
default=10,
),
],
)
@api_view(["GET"])
@parseIncomingRequest(["GET"])
def getLiked(request: Union[ParsedRequest, HttpRequest], authorId):
if request.islocal:
try:
author = Author.objects.get(pk=request.id)
except Author.DoesNotExist:
return Response("author does not exist", status=404)
if request.method == "GET":
if not request.islocal:
return returnGETRequest(f"{request.id}liked/")
likes = Like.objects.filter(author=request.id).all()
params = request.query_params
if "page" in params and "size" in params:
try:
pager = Paginator(likes, int(params["size"]))
serial = LikeSerializer(pager.page(int(params["page"])), many=True)
except (ValueError, EmptyPage, PageNotAnInteger) as e:
return Response(str(e), status=400)
else:
serial = LikeSerializer(likes, many=True)
return Response({"type": "liked", "items": serial.data}, status=200)
# elif request.method == "POST":
# # parse does not handle POST oops.
# # if not request.islocal:
# # return Response("POSTing likes directly to other servers user is not allowed.", status=400)
# data = request.data
# try:
# target = data["target"]
# if pair := checkIsLocal(target):
# Like.objects.create(author=authorId, parentId=pair[0])
# else:
# Like.objects.create(author=authorId, parentId=target)
# return Response(status=204)
# except KeyError as e:
# return Response("bad request json format", status=400)
@swagger_auto_schema(
method="post",
operation_summary="(for frontend only)add a record of a local author liking a local or foreign post, and send the like to their inbox",
operation_description="Note, no request body is needed, the semantics of this api is 'authorId' likes 'postId'. This post id can local or encoded foreign",
field_inspectors=[NoSchemaTitleInspector],
tags=["Likes"],
responses={204: "like added", 400: "bad format", 404: "post to like, or author of the post is not found"},
)
@api_view(["POST"])
@parseIncomingRequest(["POST"], ClassType.POST)
@authentication_classes([TokenAuth(["POST"]), NodeBasicAuth])
def addLikePost(request: Union[ParsedRequest, HttpRequest], authorId, postId: str):
if request.islocal:
try:
post: Post = Post.objects.get(pk=request.id)
targetAuthorId = post.author_id.pk
except Post.DoesNotExist:
return Response("post not found!", status=404)
        if Like.objects.filter(author=authorId).filter(parentId=request.id):
            return Response(status=204)  # ignore like request if it exists already
# this author id def exists, since it passed auth
like = Like.objects.create(author=authorId, parentId=request.id)
        # post.author_id is linked to an existing author via foreign key
InboxItem.objects.create(author=post.author_id, type="L", contentId=like.pk)
return Response(status=204)
else:
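        # Foreign post: trim the encoded id at "post" to recover the remote author's base URL, then deliver the like to their inbox.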
foreignid = request.id[: request.id.find("post")]
foreignid += "/" if not foreignid[-1] == "/" else ""
data = {
"@context": "https://www.w3.org/ns/activitystreams",
"summary": f"{request.user.displayName} likes your post",
"type": "Like",
"author": AuthorSerializer(request.user).data,
"object": request.id,
}
return returnPOSTRequest(f"{foreignid}inbox/", data=data)
# feeling lazy
@swagger_auto_schema(
method="post",
operation_summary="(for frontend only)add a record of a local author liking a local or foreign comment, and send the like to their inbox",
operation_description="Note, no request body is needed, the semantics of this api is 'authorId' likes 'commentId'. This comment id can local or encoded foreign",
field_inspectors=[NoSchemaTitleInspector],
tags=["Likes"],
responses={204: "like added", 400: "bad format", 404: "post to like, or author of the post is not found"},
)
@api_view(["POST"])
@parseIncomingRequest(["POST"], ClassType.COMMENT)
@authentication_classes([TokenAuth(needAuthorCheck=["POST"]), NodeBasicAuth])
def addLikeComment(request: Union[ParsedRequest, HttpRequest], authorId, commentId: str):
if request.islocal:
try:
comment: Comment = Comment.objects.get(pk=request.id)
targetAuthorId = comment.author
except Comment.DoesNotExist:
return Response("comment not found!", status=404)
# this author id def exists, since it passed auth
        if Like.objects.filter(author=authorId).filter(parentId=request.id):
            return Response(status=204)  # ignore like request if it exists already
like = Like.objects.create(author=authorId, parentId=request.id)
try:
targetAuthor = Author.objects.get(pk=targetAuthorId)
InboxItem.objects.create(author=targetAuthor, type="L", contentId=like.pk)
except Author.DoesNotExist:
# if owner of the comment no longer exists, just simply skip sending inbox
pass
return Response(status=204)
else:
foreignid = request.id[: request.id.find("post")]
foreignid += "/" if not foreignid[-1] == "/" else ""
data = {
"@context": "https://www.w3.org/ns/activitystreams",
"summary": f"{request.user.displayName} likes your comment",
"type": "Like",
"author": AuthorSerializer(request.user).data,
"object": request.id,
}
return returnPOSTRequest(f"{foreignid}inbox/", data=data)
|
the-stack_0_23163 | """
The filters module allows developers to apply filters to datasources. A filter
is a simple string, and it matches if it is contained anywhere within a line.
If a datasource has filters defined, it will return only lines matching at
least one of them. If a datasource has no filters, it will return all lines.
Filters aren't applicable to "raw" datasources, which are created with
``kind=RawFileProvider`` and have ``RegistryPoint``s with ``raw=True``.
The addition of a single filter can cause a datasource to change from returning
all lines to returning just those that match. Therefore, any filtered
datasource should have at least one filter in the commit introducing it so
downstream components don't inadvertently change its behavior.
The benefit of this fragility is the ability to drastically reduce in-memory
footprint and archive sizes. An additional benefit is the ability to evaluate
only lines known to be free of sensitive information.
Filters added to a ``RegistryPoint`` will be applied to all datasources that
implement it. Filters added to a datasource implementation apply only to that
implementation.
For example, a filter added to ``Specs.ps_auxww`` will apply to
``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,
``SosSpecs.ps_auxww``, etc. But a filter added to `DefaultSpecs.ps_auxww` will
only apply to ``DefaultSpecs.ps_auxww``. See the modules in ``insights.specs``
for those classes.
Filtering can be disabled globally by setting the environment variable
``INSIGHTS_FILTERS_ENABLED=False``. This means that no datasources will be
filtered even if filters are defined for them.
"""
import os
import pkgutil
import six
import yaml as ser
from collections import defaultdict
import insights
from insights.core import dr, plugins
from insights.util import parse_bool
_CACHE = {}
FILTERS = defaultdict(set)
ENABLED = parse_bool(os.environ.get("INSIGHTS_FILTERS_ENABLED"), default=True)
def add_filter(ds, patterns):
"""
Add a filter or list of filters to a datasource. A filter is a simple
string, and it matches if it is contained anywhere within a line.
Args:
ds (@datasource component): The datasource to filter
patterns (str, [str]): A string, list of strings, or set of strings to
add to the datasource's filters.
"""
if not plugins.is_datasource(ds):
raise Exception("Filters are applicable only to datasources.")
if dr.get_delegate(ds).raw:
raise Exception("Filters aren't applicable to raw datasources.")
if ds in _CACHE:
del _CACHE[ds]
if isinstance(patterns, six.string_types):
FILTERS[ds].add(patterns)
elif isinstance(patterns, list):
FILTERS[ds] |= set(patterns)
elif isinstance(patterns, set):
FILTERS[ds] |= patterns
else:
raise TypeError("patterns must be string, list, or set.")
def get_filters(component):
"""
Get the set of filters for the given datasource.
Filters added to a ``RegistryPoint`` will be applied to all datasources that
implement it. Filters added to a datasource implementation apply only to
that implementation.
For example, a filter added to ``Specs.ps_auxww`` will apply to
``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,
``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww``
will only apply to ``DefaultSpecs.ps_auxww``. See the modules in
``insights.specs`` for those classes.
Args:
component (a datasource): The target datasource
Returns:
set: The set of filters defined for the datasource
"""
def inner(c, filters=None):
filters = filters or set()
if not ENABLED:
return filters
if not plugins.is_datasource(c):
return filters
if c in FILTERS:
filters |= FILTERS[c]
for d in dr.get_dependents(c):
filters |= inner(d, filters)
return filters
if component not in _CACHE:
_CACHE[component] = inner(component)
return _CACHE[component]
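# Illustrative usage (a sketch; assumes the stock Specs registry from insights.specs):
#
#     from insights.specs import Specs
#     add_filter(Specs.ps_auxww, "COMMAND")
#     "COMMAND" in get_filters(Specs.ps_auxww)   # -> True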
def apply_filters(target, lines):
"""
    Applies filters to the lines of a datasource. This function is used only in
integration tests. Filters are applied in an equivalent but more performant
way at run time.
"""
filters = get_filters(target)
if filters:
for l in lines:
if any(f in l for f in filters):
yield l
else:
for l in lines:
yield l
_filename = ".".join(["filters", ser.__name__])
_dumps = ser.dump
_loads = ser.safe_load
def loads(string):
"""Loads the filters dictionary given a string."""
d = _loads(string)
for k, v in d.items():
FILTERS[dr.get_component(k) or k] = set(v)
def load(stream=None):
"""
Loads filters from a stream, normally an open file. If one is
not passed, filters are loaded from a default location within
the project.
"""
if stream:
loads(stream.read())
else:
data = pkgutil.get_data(insights.__name__, _filename)
return loads(data) if data else None
def dumps():
"""Returns a string representation of the FILTERS dictionary."""
d = {}
for k, v in FILTERS.items():
d[dr.get_name(k)] = list(v)
return _dumps(d)
def dump(stream=None):
"""
Dumps a string representation of `FILTERS` to a stream, normally an
open file. If none is passed, `FILTERS` is dumped to a default location
within the project.
"""
if stream:
stream.write(dumps())
else:
path = os.path.join(os.path.dirname(insights.__file__), _filename)
with open(path, "wu") as f:
f.write(dumps())
|
the-stack_0_23164 | # Copyright (c) 2015, University of Kaiserslautern
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Matthias Jung
import m5
from m5.objects import *
# This configuration shows a simple setup of a TrafficGen (CPU) and an
# external TLM port for SystemC co-simulation
#
# Base System Architecture:
# +-------------+ +-----+ ^
# | System Port | | CPU | |
# +-------+-----+ +--+--+ |
# | | | gem5 World
# | +----+ | (see this file)
# | | |
# +-------v------v-------+ |
# | Membus | v
# +----------------+-----+ External Port (see sc_slave_port.*)
# | ^
# +---v---+ | TLM World
# | TLM | | (see sc_target.*)
# +-------+ v
#
# Create a system with a Crossbar and a TrafficGenerator as CPU:
system = System()
system.membus = IOXBar(width = 16)
system.physmem = SimpleMemory() # This must be instantiated, even if not needed
#system.cpu = TrafficGen(config_file = "conf/tgen.cfg")
system.cpu = MemTest(max_loads = 1e5, progress_interval = 16,
progress_check = 1e4,
size = 16,
block_addrmask = 1,
base_addr1 = 0,
base_addr2 = 0,
uncache_addr = 0,
percent_functional = 0,
start_tick = 20000,
interval = 16
)
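# MemTest issues randomized reads and writes and checks the data it gets back,
# acting here as a simple self-checking traffic source in place of the TrafficGen above.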
system.clk_domain = SrcClockDomain(clock = '1.5GHz',
voltage_domain = VoltageDomain(voltage = '1V'))
# Route the connections:
system.cpu.port = system.membus.slave
system.system_port = system.membus.slave
system.membus.master = system.physmem.port
system.memchecker = MemChecker()
# -----------------------
# run simulation
# -----------------------
# Start the simulation:
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
m5.instantiate()
# m5.simulate()  # Simulation time specified later on the command line
exit_event = m5.simulate(1000000000)
if exit_event.getCause() != "simulate() limit reached":
exit(1)
|
the-stack_0_23165 | import base64
import hashlib
import hmac
import time
try:
import json
except ImportError: # Python 2.6
from django.utils import simplejson as json
from mezzanine import template
register = template.Library()
@register.simple_tag
def disqus_id_for(obj):
"""
Returns a unique identifier for the object to be used in
DISQUS JavaScript.
"""
return "%s-%s" % (obj._meta.object_name, obj.id)
@register.inclusion_tag("generic/includes/disqus_sso.html", takes_context=True)
def disqus_sso_script(context):
"""
Provides a generic context variable which adds single-sign-on
support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and
``COMMENTS_DISQUS_API_SECRET_KEY`` are specified.
"""
settings = context["settings"]
public_key = getattr(settings, "COMMENTS_DISQUS_API_PUBLIC_KEY", "")
secret_key = getattr(settings, "COMMENTS_DISQUS_API_SECRET_KEY", "")
user = context["request"].user
if public_key and secret_key and user.is_authenticated():
context["public_key"] = public_key
context["sso_data"] = _get_disqus_sso(user, public_key, secret_key)
return context
def _get_disqus_sso(user, public_key, secret_key):
# Based on snippet provided on http://docs.disqus.com/developers/sso/
# create a JSON packet of our data attributes
data = json.dumps({
'id': '%s' % user.id,
'username': user.username,
'email': user.email,
})
# encode the data to base64
message = base64.b64encode(data)
# generate a timestamp for signing the message
timestamp = int(time.time())
# generate our hmac signature
sig = hmac.HMAC(str(secret_key), '%s %s' % (message, timestamp),
hashlib.sha1).hexdigest()
# Messages are of the form <message> <signature> <timestamp>
return '%s %s %s' % (message, sig, timestamp)
|