ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3–1.04M chars)
---|---|---|
py | b403b3ec1debf4de8db839a52717d189c1176cb9 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
'''
* @Desc: train GPT-2 from scratch or fine-tune.
Modified from the Hugging Face GPT-2 implementation.
'''
import json
import os
import sys
import argparse
import logging
import time
import tqdm
import datetime
import torch
import numpy as np
from os.path import join
from torch.distributed import get_rank, get_world_size
from lsp_model_rl import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config, Adam
from gpt2_training.train_utils import load_model, boolean_string, set_lr, get_eval_list_same_length
from gpt2_training.eval_utils import eval_model_loss
from data_loader import BucketingDataLoader, DynamicBatchingLoader, DistributedBucketingDataLoader
from gpt2_training.distributed import all_reduce_and_rescale_tensors, all_gather_list
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
logger = logging.getLogger(__name__)
INF = 100000000
CACHE_EMPTY_STEP = 1000
EVAL_STEP = 100000
#########################################################################
# Prepare Parser
##########################################################################
parser = argparse.ArgumentParser()
parser.add_argument('--model_name_or_path', type=str,
help='pretrained model name or path to local checkpoint')
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--max_seq_length", type=int, default=128)
parser.add_argument("--skip_eval", action='store_true',
help='If true, skip evaluation.')
parser.add_argument("--use_baseline", action='store_true',
help='If true, use baseline for RL.')
parser.add_argument("--init_checkpoint", type=str)
parser.add_argument("--train_input_file", type=str)
parser.add_argument("--eval_input_file", type=str)
parser.add_argument("--continue_from", type=int, default=0)
parser.add_argument("--train_batch_size", type=int, default=4,
help="batch size now means per GPU per step")
parser.add_argument("--gradient_accumulation_steps", type=int, default=2,
help="to increase effective batch size "
"and reduce synchronization")
parser.add_argument("--eval_batch_size", type=int, default=4)
parser.add_argument("--learning_rate", type=float, default=1e-5)
parser.add_argument("--num_optim_steps", type=int, default=1000000,
help="new API specifies num update steps")
parser.add_argument("--valid_step", type=int, default=1000,
help="how many optim steps between validations")
parser.add_argument("--warmup_proportion", type=float, default=0.1)
parser.add_argument("--warmup_steps", type=int, default=16000)
parser.add_argument("--normalize_data", type=boolean_string, default=True)
parser.add_argument("--fp16", type=boolean_string, default=False)
parser.add_argument("--lr_schedule", type=str,
choices=['noam', 'noamwd', 'BERT', 'None'], default='noam')
parser.add_argument("--loss_scale", type=float, default=0)
parser.add_argument("--no_token_id", type=boolean_string, default=True)
parser.add_argument("--output_dir", type=str)
parser.add_argument("--log_dir", type=str)
parser.add_argument('--pbar', type=boolean_string, default=True, help='turn on progress bar')
# distributed
parser.add_argument('--local_rank', type=int, default=-1,
help='for torch.distributed')
parser.add_argument('--config', help='JSON config file')
# do normal parsing
args = parser.parse_args()
if args.config is not None:
# override argparse defaults by config JSON
opts = json.load(open(args.config))
for k, v in opts.items():
if isinstance(v, str):
# PHILLY ENV special cases
if 'PHILLY_JOB_DIRECTORY' in v:
v = v.replace('PHILLY_JOB_DIRECTORY',
os.environ['PHILLY_JOB_DIRECTORY'])
elif 'PHILLY_LOG_DIRECTORY' in v:
v = v.replace('PHILLY_LOG_DIRECTORY',
os.environ['PHILLY_LOG_DIRECTORY'])
setattr(args, k, v)
# command line should override config JSON
argv = sys.argv[1:]
overrides, _ = parser.parse_known_args(argv)
for k, v in vars(overrides).items():
if f'--{k}' in argv:
setattr(args, k, v)
setattr(args, 'local_rank', overrides.local_rank)
assert args.train_batch_size % args.gradient_accumulation_steps == 0, \
'batch size % gradient accumulation steps != 0!'
args.train_batch_size = (args.train_batch_size
// args.gradient_accumulation_steps)
logger.info('train batch size = {}, '
'new train batch size (after gradient accumulation) = {}'.format(
args.train_batch_size*args.gradient_accumulation_steps,
args.train_batch_size))
if args.local_rank == -1:
logger.info('CUDA available? {}'.format(str(torch.cuda.is_available())))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
args.device, args.n_gpu = device, n_gpu
else:
# distributed training
print('args.local_rank:', args.local_rank)
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
# Initializes the distributed backend which will take care of
# sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
n_gpu = torch.distributed.get_world_size()
args.device, args.n_gpu = device, 1
logger.info("device: {} n_gpu: {}, distributed training: {}, "
"16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
np.random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
timestamp = datetime.datetime.now().strftime('%Y-%m-%d%H%M%S')
output_dir = join(args.output_dir,
'GPT2.{}.{}.{}gpu.{}'.format(args.learning_rate,
args.train_batch_size, n_gpu,
timestamp))
log_dir = args.log_dir if args.log_dir is not None and len(args.log_dir) > 0 else output_dir
if args.local_rank == -1 or get_rank() == 0:
os.makedirs(output_dir, exist_ok=True)
logger.info('Input Argument Information')
args_dict = vars(args)
for a in args_dict:
logger.info('%-28s %s' % (a, args_dict[a]))
#########################################################################
# Prepare Data Set
##########################################################################
enc = GPT2Tokenizer.from_pretrained('gpt2-medium')
enc.add_tokens(['<SPLIT>', '<START>', '<END>'])
eos = enc.encoder["<|endoftext|>"]
config = GPT2Config.from_json_file(
join(args.model_name_or_path, 'config.json'))
if args.local_rank == -1:
train_dataloader = BucketingDataLoader(args.train_input_file,
args.train_batch_size,
args.max_seq_length)
else:
train_dataloader = DistributedBucketingDataLoader(
get_rank(), get_world_size(),
args.train_input_file, args.train_batch_size,
args.max_seq_length)
# eval_dataloader_loss = DynamicBatchingLoader(
# args.eval_input_file, enc, args.normalize_data,
# args.eval_batch_size, args.max_seq_length)
# eval_dataloader_gen = get_eval_list_same_length(
# args.eval_input_file, enc, args.eval_batch_size, True)
#########################################################################
# Prepare Model and Optimizer
##########################################################################
gpt2_model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
gpt2_model.resize_token_embeddings(len(enc))
model = load_model(gpt2_model, args.init_checkpoint,
args, verbose=True)
if args.local_rank != -1:
# when from scratch make sure initial models are the same
params = [p.data for p in model.parameters()]
all_reduce_and_rescale_tensors(
params, float(torch.distributed.get_world_size()))
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
total_params = sum([np.prod(p.size()) for p in model_parameters])
logger.info('Number of parameter = {}'.format(total_params))
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'ln'] # no decay for bias and LayerNorm (ln)
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
logger.info('in fp16, using FusedAdam')
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex "
"to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True,
verbose=False)
else:
optimizer = FP16_Optimizer(optimizer,
static_loss_scale=args.loss_scale,
verbose=False)
else:
optimizer = Adam(optimizer_grouped_parameters, args.learning_rate,
max_grad_norm=1.0)
#########################################################################
# Training !
##########################################################################
if args.local_rank == -1 or get_rank() == 0:
train_logger = open(join(log_dir, 'train_log.txt'), 'a+', buffering=1)
eval_logger = open(join(log_dir, 'eval_log.txt'), 'a+', buffering=1)
print('epoch,global_step,step,mean_loss,n_token_real,'
'n_token_total,epoch_time', file=train_logger)
print('epoch,global_step,step,eval_loss', file=eval_logger)
global_step = 0
step = 0
epoch = 0
if args.continue_from:
global_step = args.continue_from
step = global_step*2 - 1
if args.local_rank != -1:
n_gpu = 1
if args.local_rank == -1 or get_rank() == 0:
if args.pbar:
pbar = tqdm.tqdm(total=args.num_optim_steps, desc=f"training")
else:
pbar = None
# pbar = None
while True:
if args.use_baseline:
moving_avg_cnt = 20
cumsum = [0.0]
moving_avg_idx = 0
model.train()
(tr_loss, nb_tr_examples, nb_tr_steps) = 0.0, 0.0, 0.0
n_token_real, n_token_total = 0, 0
train_start_time_epoch = time.time()
# print('iteration started')
for batch in train_dataloader:
# torch.cuda.empty_cache()
# activate new training mode
seq_len = batch[0].shape[1]
batch = tuple(t for t in batch)
input_ids, position_ids, token_ids, seeker_post, response_post, = batch
input_ids = input_ids.to(device)
position_ids = position_ids.to(device)
token_ids = token_ids.to(device)
if args.no_token_id:
token_ids = None
forward_pass_start_time = time.time()
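        # with --use_baseline, a moving average of recent rewards (maintained via the
        # running cumulative sum in `cumsum`) is passed to the model as `baseline_val`,
        # presumably to reduce the variance of the RL policy-gradient loss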
if args.use_baseline:
if len(cumsum) >= moving_avg_cnt:
baseline_val = (cumsum[moving_avg_idx] - cumsum[moving_avg_idx-moving_avg_cnt])/moving_avg_cnt
else:
baseline_val = cumsum[moving_avg_idx]
loss, reward = model(input_ids, position_ids=position_ids, token_type_ids=token_ids, seeker_post=seeker_post, response_post=response_post, eos=eos, tokenizer=enc, baseline_val=baseline_val)
cumsum.append(cumsum[moving_avg_idx-1] + reward)
moving_avg_idx+=1
else:
loss, reward = model(input_ids, position_ids=position_ids, token_type_ids=token_ids, seeker_post=seeker_post, response_post=response_post, eos=eos, tokenizer=enc)
forward_pass_end_time = time.time()
backward_pass_start_time = time.time()
if n_gpu > 1:
loss = loss.mean()
loss = loss / (args.train_batch_size / input_ids.shape[0])
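        # rescale so that, under gradient accumulation, each micro-batch contributes
        # in proportion to its actual number of examples rather than equally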
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
backward_pass_end_time = time.time()
tr_loss += float(loss.item()) * (args.train_batch_size / input_ids.shape[0])
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
mean_loss = tr_loss / nb_tr_steps
n_token_total += input_ids.shape[0] * input_ids.shape[1]
n_token_real += (input_ids != 0).sum().item()
# gradient update
step += 1
if step % args.gradient_accumulation_steps == 0:
set_lr(optimizer, global_step,
args.lr_schedule, args.learning_rate,
args.warmup_steps, args.warmup_proportion,
config.n_embd, args.num_optim_steps)
if args.local_rank != -1:
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
optimizer.step()
optimizer.zero_grad()
global_step += 1
# Print log info to file
if args.local_rank != -1:
mean_loss = sum(all_gather_list(mean_loss)) / get_world_size()
n_token_real_all_proc = sum(all_gather_list(n_token_real))
n_token_total_all_proc = sum(all_gather_list(n_token_total))
else:
n_token_real_all_proc = n_token_real
n_token_total_all_proc = n_token_total
if args.local_rank == -1 or get_rank() == 0:
epoch_time = time.time() - train_start_time_epoch
# print('step:', global_step, 'time:', forward_pass_end_time - forward_pass_start_time, backward_pass_end_time - backward_pass_start_time)
if pbar is not None:
pbar.set_postfix_str(
f"tok/s: {n_token_real_all_proc//epoch_time//1000}k "
f"epoch: {epoch}")
pbar.update(1)
print('{},{},{},{},{},{},{}'.format(
epoch+1, global_step+1, step+1, mean_loss,
n_token_real_all_proc, n_token_total_all_proc, epoch_time),
file=train_logger)
if global_step % args.valid_step == 0:
if args.local_rank == -1 or get_rank() == 0:
# only rank 0 process evaluate
torch.save(
{k: (v.cpu() if v is not None else None) # save to cpu tensors
for k, v in model.state_dict().items()},
join(output_dir,
f'GP2-pretrain-step-{global_step}.pkl'))
# eval_loss, eval_ppl = eval_model_loss(
# model, enc, eval_dataloader_loss, epoch, args)
# enable generation step evaluation for now
# gen_response = eval_model_generation(
# model, enc, eval_dataloader_gen, epoch, args)
'''
# probably use beam search only for test set
if False:
gen_response_beam = eval_model_generation(
model, enc, eval_dataloader_gen, epoch, args,
use_beam_search=True, beam_width=3)
'''
# print('{},{},{},{},{}'.format(
# epoch+1, global_step+1, step+1, eval_loss, eval_ppl),
# file=eval_logger)
logger.info('current learning rate: '
+ str(optimizer.param_groups[0]['lr']))
model.train()
if global_step >= args.num_optim_steps:
break
if (step+1) % CACHE_EMPTY_STEP == 0:
torch.cuda.empty_cache()
if global_step >= args.num_optim_steps:
break
epoch += 1
if args.local_rank == -1 or get_rank() == 0:
if pbar is not None:
pbar.close()
train_logger.close()
eval_logger.close()
|
py | b403b49ef87a062762e02fdc2ccf668f9dd56b10 | import argparse
import glob
import os
import time
import cv2
import numpy as np
import imutils
from imutils.object_detection import non_max_suppression
subject_label = 1
font = cv2.FONT_HERSHEY_SIMPLEX
list_of_videos = []
cascade_path = "face_cascades/haarcascade_profileface.xml"
face_cascade = cv2.CascadeClassifier(cascade_path)
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
recognizer = cv2.face.LBPHFaceRecognizer_create()
count = 0
def detect_people(frame):
"""
detect humans using HOG descriptor
Args:
frame:
Returns:
processed frame
"""
    (rects, weights) = hog.detectMultiScale(frame, winStride=(8, 8), padding=(16, 16), scale=1.06)
    # convert (x, y, w, h) detections to corner coordinates before non-maxima suppression
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    picked = non_max_suppression(rects, probs=None, overlapThresh=0.65)
    for (xA, yA, xB, yB) in picked:
        cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 0, 255), 2)
return frame
def detect_face(frame):
"""
detect human faces in image using haar-cascade
Args:
frame:
Returns:
coordinates of detected faces
"""
faces = face_cascade.detectMultiScale(frame, 1.1, 2, 0, (20, 20))
return faces
def recognize_face(frame_orginal, faces):
"""
recognize human faces using LBPH features
Args:
frame_orginal:
faces:
Returns:
label of predicted person
"""
predict_label = []
predict_conf = []
for x, y, w, h in faces:
frame_orginal_grayscale = cv2.cvtColor(frame_orginal[y: y + h, x: x + w], cv2.COLOR_BGR2GRAY)
cv2.imshow("cropped", frame_orginal_grayscale)
predict_tuple = recognizer.predict(frame_orginal_grayscale)
a, b = predict_tuple
predict_label.append(a)
predict_conf.append(b)
print("Predition label, confidence: " + str(predict_tuple))
return predict_label
def draw_faces(frame, faces):
"""
draw rectangle around detected faces
Args:
frame:
faces:
Returns:
face drawn processed frame
"""
for (x, y, w, h) in faces:
xA = x
yA = y
xB = x + w
yB = y + h
cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
return frame
def put_label_on_face(frame, faces, labels):
"""
draw label on faces
Args:
frame:
faces:
labels:
Returns:
processed frame
"""
i = 0
for x, y, w, h in faces:
cv2.putText(frame, str(labels[i]), (x, y), font, 1, (255, 255, 255), 2)
i += 1
return frame
def background_subtraction(previous_frame, frame_resized_grayscale, min_area):
"""
    Returns 1 when the difference between the current frame and the previous
    frame contains a region larger than the minimum area defined; otherwise 0.
    This way the expensive human detection, face detection and face recognition
    steps are not run on every frame: only frames with a significant amount of
    change (controlled by min_area) are processed.
"""
frameDelta = cv2.absdiff(previous_frame, frame_resized_grayscale)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
    # imutils.grab_contours keeps this compatible with both the OpenCV 3 and OpenCV 4 return signatures
    cnts = imutils.grab_contours(cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
temp = 0
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) > min_area:
temp = 1
return temp
if __name__ == '__main__':
"""
main function
"""
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--videos", required=True, help="path to videos directory")
args = vars(ap.parse_args())
path = args["videos"]
for f in os.listdir(path):
list_of_videos = glob.glob(os.path.join(os.path.abspath(path), f))
print(os.path.join(os.path.abspath(path), f) + "*.mp4")
print(list_of_videos)
if os.path.exists("model.yaml"):
recognizer.read("model.yaml")
for video in list_of_videos:
camera = cv2.VideoCapture(os.path.join(path, video))
grabbed, frame = camera.read()
print(frame.shape)
frame_resized = imutils.resize(frame, width=min(800, frame.shape[1]))
frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)
print(frame_resized.shape)
        # defining min cutoff area
min_area = (3000 / 800) * frame_resized.shape[1]
while True:
starttime = time.time()
previous_frame = frame_resized_grayscale
grabbed, frame = camera.read()
if not grabbed:
break
frame_resized = imutils.resize(frame, width=min(800, frame.shape[1]))
frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)
temp = background_subtraction(previous_frame, frame_resized_grayscale, min_area)
if temp == 1:
frame_processed = detect_people(frame_resized)
faces = detect_face(frame_resized_grayscale)
if len(faces) > 0:
frame_processed = draw_faces(frame_processed, faces)
label = recognize_face(frame_resized, faces)
frame_processed = put_label_on_face(frame_processed, faces, label)
cv2.imshow("Detected Human and face", frame_processed)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
endtime = time.time()
print("Time to process a frame: " + str(starttime - endtime))
else:
count = count + 1
print("Number of frame skipped in the video= " + str(count))
camera.release()
cv2.destroyAllWindows()
else:
print("model file not found")
list_of_videos = []
|
py | b403b5891d0a38a45c06137563ce371184e32aff | from .Data import Data
from .Widget import Widget
class RaceDash:
    def __init__(self, scale, opacity):
        self.modules = []
        self.intervals = []
        self.scale = scale or 1
        self.opacity = opacity or 1
    def add(self, module, options, interval):
        options.x = options.x or 0
        options.y = options.y or 0
        options.scale = options.scale or self.scale
        interval = interval or 0.1
        widget = Widget(options.x, options.y)
        instance = module(widget, Data, options)
        instance.render()
        self.modules.append({'interval': interval, 'module': instance})
        # one timer per registered module
        self.intervals = [{'interval': m['interval'], 'timer': 0} for m in self.modules]
    def update(self, deltaT):
        # advance every timer; re-render the modules whose interval has elapsed
        for i in self.intervals:
            i['timer'] += deltaT
            if i['timer'] > i['interval']:
                i['timer'] = 0
                for m in self.modules:
                    if m['interval'] == i['interval']:
                        m['module'].update()
    def save(self):
        pass |
py | b403b5c552b7de95d5065270a558f2dfad841f62 | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Getting a input function that will give input and label tensors."""
from tensor2tensor import problems
import tensorflow as tf
def get_input(
data_dir,
batch_size=50,
augmented=False,
data='cifar10',
mode=tf.estimator.ModeKeys.TRAIN,
repeat_num=None,
data_format='HWC'):
"""Returns a input function for the estimator framework.
Args:
batch_size: batch size for training or testing
augmented: whether data augmentation is used
data: a string that specifies the dataset, must be cifar10
or cifar100
mode: indicates whether the input is for training or testing,
needs to be a member of tf.estimator.ModeKeys
repeat_num: how many times the dataset is repeated
data_format: order of the data's axis
Returns:
an input function
"""
assert data == 'cifar10' or data == 'cifar100'
class_num = 10 if data == 'cifar10' else 100
data = 'image_' + data
if mode != tf.estimator.ModeKeys.TRAIN:
repeat_num = 1
problem_name = data
if data == 'image_cifar10' and not augmented:
problem_name = 'image_cifar10_plain'
def preprocess(example):
"""Perform per image standardization on a single image."""
image = example['inputs']
image.set_shape([32, 32, 3])
image = tf.cast(image, tf.float32)
example['inputs'] = tf.image.per_image_standardization(image)
return example
def input_data():
"""Input function to be returned."""
prob = problems.problem(problem_name)
if data == 'image_cifar100':
dataset = prob.dataset(mode, data_dir, preprocess=augmented)
if not augmented: dataset = dataset.map(map_func=preprocess)
else:
dataset = prob.dataset(mode, data_dir, preprocess=False)
dataset = dataset.map(map_func=preprocess)
dataset = dataset.batch(batch_size)
dataset = dataset.repeat(repeat_num)
dataset = dataset.make_one_shot_iterator().get_next()
if data_format == 'CHW':
dataset['inputs'] = tf.transpose(dataset['inputs'], (0, 3, 1, 2))
return dataset['inputs'], tf.squeeze(tf.one_hot(dataset['targets'],
class_num))
return input_data
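# Illustrative usage sketch (not part of the original file; the data directory and
# batch size below are assumptions for demonstration only):
#   input_fn = get_input('/tmp/t2t_cifar10', batch_size=128, augmented=False,
#                        data='cifar10', mode=tf.estimator.ModeKeys.TRAIN,
#                        data_format='CHW')
#   images, labels = input_fn()  # image batch and one-hot labels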
|
py | b403b67a2f263d24818d29bd8d3b256105601bc0 | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.extractor import FrontExtractorOp
from mo.front.onnx.extractors.utils import onnx_attr
from mo.ops.softmax import SoftmaxONNX
from mo.ops.log_softmax import LogSoftmaxONNX
class SoftmaxExtractor(FrontExtractorOp):
op = 'Softmax'
enabled = True
@classmethod
def extract(cls, node):
axis = onnx_attr(node, 'axis', 'i', default=1)
SoftmaxONNX.update_node_stat(node, {'axis': axis})
return cls.enabled
class LogSoftmaxExtractor(FrontExtractorOp):
op = 'LogSoftmax'
enabled = True
@classmethod
def extract(cls, node):
axis = onnx_attr(node, 'axis', 'i', default=1)
LogSoftmaxONNX.update_node_stat(node, {'axis': axis})
return cls.enabled
|
py | b403b692f11a7439c66eee15141d30d874b1082c | from . import CSVReader
|
py | b403b6dbfc41c88948cd9db0f72dbde372a255f4 | from .enso_lib import ( # noqa
AddParserArgument,
CLIVAR_LargeEnsemble_Variables,
find_realm,
get_file,
match_obs_name,
metrics_to_json,
sort_human,
tree,
)
|
py | b403b70992cf2e9c614c27bb87b48be85c410e0c | import time
import numpy as np
import pygame
from rl.env.bird_env import BirdEnv
class MCBirdEnv(BirdEnv):
def init_bird(self):
self.states = [i for i in range(100)]
self.values = [0 for i in range(100)]
self.actions = ['e', 's', 'w', 'n']
self.q_values = np.zeros((100, 4))
self.gamma = 0.9
def transform(self, state, action):
current_position = self.state_to_position(state)
next_position = [0, 0]
is_collision = self.collision_detection(current_position)
is_destination = self.destination_detection(current_position)
if is_collision or is_destination:
return state, 0, True
if action == 'e':
next_position[0] = current_position[0] + 120
next_position[1] = current_position[1]
if action == 's':
next_position[0] = current_position[0]
next_position[1] = current_position[1] + 90
if action == 'w':
next_position[0] = current_position[0] - 120
next_position[1] = current_position[1]
if action == 'n':
next_position[0] = current_position[0]
next_position[1] = current_position[1] - 90
if self.collision_detection(next_position):
return self.position_to_state(current_position), -10, True
if self.destination_detection(next_position):
return self.position_to_state(next_position), 100, True
return self.position_to_state(next_position), -1, False
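    # Illustrative sketch (variable names assumed): stepping the environment returns
    # (next_state, reward, done), e.g.
    #   next_state, reward, done = env.transform(0, 'e')
    # where reward is -1 for a normal move, -10 for a collision and 100 on
    # reaching the destination.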
def render(self, path):
self.init_render()
        # draw the action-value function for every state
for i in range(100):
x = int(i / 10)
y = i % 10
surface = self.textFont.render(str(self.q_values[i, 0]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 85, 90 * x + 45))
surface = self.textFont.render(str(self.q_values[i, 1]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 50, 90 * x + 75))
surface = self.textFont.render(str(self.q_values[i, 2]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 10, 90 * x + 45))
surface = self.textFont.render(str(self.q_values[i, 3]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 50, 90 * x + 5))
for i in range(len(path)):
            # draw the bird
state = path[i]
self.current_position = self.state_to_position(state)
self.screen.blit(self.bird, self.current_position)
            # draw the red box and the path index
pygame.draw.rect(self.screen, [255, 0, 0], [self.current_position[0], self.current_position[1], 120, 90], 2)
surface = self.textFont.render(str(i), True, (255, 0, 0))
self.screen.blit(surface, (self.current_position[0] + 5, self.current_position[1] + 5))
            # draw the action values for the current state
x = int(path[i] / 10)
y = path[i] % 10
surface = self.textFont.render(str(self.q_values[path[i], 0]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 85, 90 * x + 45))
surface = self.textFont.render(str(self.q_values[path[i], 1]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 50, 90 * x + 75))
surface = self.textFont.render(str(self.q_values[path[i], 2]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 10, 90 * x + 45))
surface = self.textFont.render(str(self.q_values[path[i], 3]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 50, 90 * x + 5))
            # clean up the previous step
if i >= 1:
last_state = path[i - 1]
last_position = self.state_to_position(last_state)
                # repaint the previous cell with a filled green rectangle and a red border
pygame.draw.rect(self.screen, [0, 180, 0], [last_position[0], last_position[1], 120, 90], 0)
pygame.draw.rect(self.screen, [255, 0, 0], [last_position[0], last_position[1], 120, 90], 2)
                # redraw the index of the previous step
surface = self.textFont.render(str(i - 1), True, (255, 0, 0))
self.screen.blit(surface, (last_position[0] + 5, last_position[1] + 5))
                # redraw the action values of the previous state
x = int(last_state / 10)
y = last_state % 10
surface = self.textFont.render(str(self.q_values[path[i - 1], 0]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 85, 90 * x + 45))
surface = self.textFont.render(str(self.q_values[path[i - 1], 1]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 50, 90 * x + 75))
surface = self.textFont.render(str(self.q_values[path[i - 1], 2]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 10, 90 * x + 45))
surface = self.textFont.render(str(self.q_values[path[i - 1], 3]), True, (0, 0, 0))
self.screen.blit(surface, (120 * y + 50, 90 * x + 5))
time.sleep(0.5)
pygame.time.Clock().tick(30)
pygame.display.update()
self.game_over()
while True:
self.game_over()
|
py | b403b875c93aed45f5231e7cda77ded8595c4d79 | # 0705.py
import cv2
import numpy as np
#1
src1 = cv2.imread('./data/hand.jpg')
hsv1 = cv2.cvtColor(src1, cv2.COLOR_BGR2HSV)
lowerb1 = (0, 40, 0)
upperb1 = (20, 180, 255)
dst1 = cv2.inRange(hsv1, lowerb1, upperb1)
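# dst1 is a single-channel binary mask: 255 where the HSV pixel lies inside
# [lowerb1, upperb1] on every channel, 0 elsewhere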
#2
src2 = cv2.imread('./data/flower.jpg')
hsv2 = cv2.cvtColor(src2, cv2.COLOR_RGB2HSV)
lowerb2 = (15, 100, 100)
upperb2 = (180, 255, 255)
dst2 = cv2.inRange(hsv2, lowerb2, upperb2)
#3
cv2.imshow('src1', src1)
cv2.imshow('dst1', dst1)
cv2.imshow('src2', src2)
cv2.imshow('dst2', dst2)
cv2.waitKey()
cv2.destroyAllWindows() |
py | b403b9637a889fd3051943ebbf15d753ea8d50ec | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
#
# Copyright (c) 2017 <> All Rights Reserved
#
#
# Author: Hai Liang Wang
# Date: 2018-06-04:18:56:20
#
#===============================================================================
"""
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Xu Ming Lin<>, Hai Liang Wang<[email protected]>,"
__date__ = "2018-06-04:18:56:20"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
if sys.version_info[0] < 3:
stdout = sys.stdout
reload(sys)
sys.stdout = stdout
else:
unicode = str
# Get ENV
ENVIRON = os.environ.copy()
# In[1]:
import os
import sys
try:
    import cPickle as pkl  # Python 2
except ImportError:
    import pickle as pkl
from collections import Counter
from nltk import sent_tokenize, word_tokenize
from nltk.corpus import stopwords, wordnet
from nltk.stem import WordNetLemmatizer
import jieba
# jieba.enable_parallel(8)
lemma = WordNetLemmatizer()
raw_data_path = './data/WikiQA/raw'
processed_data_path = './data/WikiQA/processed'
if not os.path.exists(processed_data_path):
os.mkdir(processed_data_path)
# In[8]:
# tokenize and (optionally) lemmatize the raw WikiQA files
def segment(filename, use_lemma=True):
processed_qa = []
count = 0
with open(os.path.join(raw_data_path, filename), 'r') as fr:
fr.readline()
for line in fr:
items = line.strip().split('\t')
qid, q, aid, a, label = items[0], items[1], items[4], items[5], items[6]
if use_lemma:
q = ' '.join([lemma.lemmatize(_) for _ in jieba.cut(q)]).lower()
a = ' '.join([lemma.lemmatize(_) for _ in jieba.cut(a)]).lower()
else:
q = ' '.join(jieba.cut(q)).lower()
                a = ' '.join(jieba.cut(a)).lower()
processed_qa.append('\t'.join([qid, q, aid, a, label]))
count += 1
if count % 1000 == 0:
print('Finished {}'.format(count))
return processed_qa
# build the vocabulary
def build_vocab(corpus, topk=None):
vocab = Counter()
for line in corpus:
qid, q, aid, a, label = line.strip().split('\t')
vocab.update(q.split())
vocab.update(a.split())
if topk:
vocab = vocab.most_common(topk)
else:
vocab = dict(vocab.most_common()).keys()
vocab = {_ : i+2 for i, _ in enumerate(vocab)}
vocab['<PAD>'] = 0
vocab['<UNK>'] = 1
reverse_vocab = dict(zip(vocab.values(), vocab.keys()))
return vocab, reverse_vocab
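# Illustrative example (ids depend on word counts; shown here only as a sketch):
#   build_vocab(['q1\thow are you\ta1\tfine thanks\t1'])
#   -> ({'<PAD>': 0, '<UNK>': 1, 'how': 2, 'are': 3, ...},
#       {0: '<PAD>', 1: '<UNK>', 2: 'how', 3: 'are', ...})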
# map every word to its id in the vocabulary
def transform(corpus, word2id, unk_id=1):
transformed_corpus = []
for line in corpus:
qid, q, aid, a, label = line.strip().split('\t')
q = [word2id.get(w, unk_id) for w in q.split()]
a = [word2id.get(w, unk_id) for w in a.split()]
transformed_corpus.append([qid, q, aid, a, int(label)])
return transformed_corpus
# build pointwise data, i.e. (Q, A, label) tuples
def pointwise_data(corpus, keep_ids=False):
# (q, a, label)
pointwise_corpus = []
for sample in corpus:
qid, q, aid, a, label = sample
if keep_ids:
pointwise_corpus.append((qid, q, aid, a, label))
else:
pointwise_corpus.append((q, a, label))
return pointwise_corpus
# build pairwise data, i.e. (Q, positive A, negative A) triples
def pairwise_data(corpus):
# (q, a_pos, a_neg), two answers must from the same q
# once a question contains no positive answers, we discard this sample.
pairwise_corpus = dict()
for sample in corpus:
qid, q, aid, a, label = sample
pairwise_corpus.setdefault(qid, dict())
pairwise_corpus[qid].setdefault('pos', list())
pairwise_corpus[qid].setdefault('neg', list())
pairwise_corpus[qid]['q'] = q
if label == 0:
pairwise_corpus[qid]['neg'].append(a)
else:
pairwise_corpus[qid]['pos'].append(a)
real_pairwise_corpus = []
for qid in pairwise_corpus:
q = pairwise_corpus[qid]['q']
for pos in pairwise_corpus[qid]['pos']:
for neg in pairwise_corpus[qid]['neg']:
real_pairwise_corpus.append((q, pos, neg))
return real_pairwise_corpus
# build listwise data, i.e. (Q, all answers related to this Q)
def listwise_data(corpus):
# (q, a_list)
listwise_corpus = dict()
for sample in corpus:
qid, q, aid, a, label = sample
listwise_corpus.setdefault(qid, dict())
listwise_corpus[qid].setdefault('a', list())
listwise_corpus[qid]['q'] = q
listwise_corpus[qid]['a'].append(a)
real_listwise_corpus = []
for qid in listwise_corpus:
q = listwise_corpus[qid]['q']
alist = listwise_corpus[qid]['a']
real_listwise_corpus.append((q, alist))
return real_listwise_corpus
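# Quick sketch of the three formats produced below (values are illustrative):
#   pointwise (keep_ids=True): ('Q1', [3, 7, 9], 'A1', [4, 8], 1)
#   pairwise:                  ([3, 7, 9], [4, 8], [5, 6])    # (q, a_pos, a_neg)
#   listwise:                  ([3, 7, 9], [[4, 8], [5, 6]])  # (q, list of answers)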
train_processed_qa = segment('WikiQA-train.tsv')
val_processed_qa = segment('WikiQA-dev.tsv')
test_processed_qa = segment('WikiQA-test.tsv')
word2id, id2word = build_vocab(train_processed_qa)
transformed_train_corpus = transform(train_processed_qa, word2id)
pointwise_train_corpus = pointwise_data(transformed_train_corpus, keep_ids=True)
pairwise_train_corpus = pairwise_data(transformed_train_corpus)
listwise_train_corpus = listwise_data(transformed_train_corpus)
transformed_val_corpus = transform(val_processed_qa, word2id)
pointwise_val_corpus = pointwise_data(transformed_val_corpus, keep_ids=True)
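# note: the "pairwise" val/test corpora below are built with pointwise_data (keeping ids),
# so only the training split is converted into true (q, a_pos, a_neg) pairs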
pairwise_val_corpus = pointwise_data(transformed_val_corpus, keep_ids=True)
listwise_val_corpus = listwise_data(transformed_val_corpus)
transformed_test_corpus = transform(test_processed_qa, word2id)
pointwise_test_corpus = pointwise_data(transformed_test_corpus, keep_ids=True)
pairwise_test_corpus = pointwise_data(transformed_test_corpus, keep_ids=True)
listwise_test_corpus = listwise_data(transformed_test_corpus)
with open(os.path.join(processed_data_path, 'vocab.pkl'), 'wb') as fw:
    pkl.dump([word2id, id2word], fw)
with open(os.path.join(processed_data_path, 'pointwise_corpus.pkl'), 'wb') as fw:
    pkl.dump([pointwise_train_corpus, pointwise_val_corpus, pointwise_test_corpus], fw)
with open(os.path.join(processed_data_path, 'pairwise_corpus.pkl'), 'wb') as fw:
    pkl.dump([pairwise_train_corpus, pairwise_val_corpus, pairwise_test_corpus], fw)
with open(os.path.join(processed_data_path, 'listwise_corpus.pkl'), 'wb') as fw:
    pkl.dump([listwise_train_corpus, listwise_val_corpus, listwise_test_corpus], fw)
print('done!')
|
py | b403ba2c354d16cf9b0dddd75b8dce09842532a6 | # WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
class CustomerNotification(object):
"""A thin wrapper around a customer_notification, providing easy access to its
attributes.
Example:
customer_notification = client.customer_notifications.get()
customer_notification.id
"""
def __init__(self, attributes, api_response):
self.attributes = attributes
self.api_response = api_response
@property
def action_taken(self):
return self.attributes.get('action_taken')
@property
def action_taken_at(self):
return self.attributes.get('action_taken_at')
@property
def action_taken_by(self):
return self.attributes.get('action_taken_by')
@property
def id(self):
return self.attributes.get('id')
@property
def links(self):
return self.Links(self.attributes.get('links'))
@property
def type(self):
return self.attributes.get('type')
class Links(object):
"""Wrapper for the response's 'links' attribute."""
def __init__(self, attributes):
self.attributes = attributes
@property
def customer(self):
return self.attributes.get('customer')
@property
def event(self):
return self.attributes.get('event')
@property
def mandate(self):
return self.attributes.get('mandate')
@property
def payment(self):
return self.attributes.get('payment')
@property
def refund(self):
return self.attributes.get('refund')
@property
def subscription(self):
return self.attributes.get('subscription')
|
py | b403ba37359e605443ff0be16819676ff8028d83 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite Python Interface: Sanity check."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.lite.python import convert
from tensorflow.contrib.lite.python import lite_constants
from tensorflow.contrib.lite.python import op_hint
from tensorflow.contrib.lite.python.interpreter import Interpreter
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes
from tensorflow.python.framework.graph_util_impl import _extract_graph_summary
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ConvertTest(test_util.TensorFlowTestCase):
def testBasic(self):
in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Try running on valid graph
tflite_model = convert.toco_convert(sess.graph_def, [in_tensor],
[out_tensor])
self.assertTrue(tflite_model)
# TODO(aselle): remove tests that fail (we must get TOCO to not fatal
# all the time).
# Try running on identity graph (known fail)
# with self.assertRaisesRegexp(RuntimeError, "!model->operators.empty()"):
# result = convert.toco_convert(sess.graph_def, [in_tensor], [in_tensor])
def testQuantization(self):
in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(in_tensor + in_tensor,
min=0., max=1.)
sess = session.Session()
tflite_model = convert.toco_convert(
sess.graph_def, [in_tensor], [out_tensor],
inference_type=lite_constants.QUANTIZED_UINT8,
quantized_input_stats=[(0., 1.)])
self.assertTrue(tflite_model)
def testGraphDefBasic(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="input")
_ = in_tensor + in_tensor
sess = session.Session()
tflite_model = convert.toco_convert_graph_def(
sess.graph_def, [("input", [1, 16, 16, 3])], ["add"],
inference_type=lite_constants.FLOAT)
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual("input", input_details[0]["name"])
self.assertEqual(np.float32, input_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
self.assertEqual((0., 0.), input_details[0]["quantization"])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual("add", output_details[0]["name"])
self.assertEqual(np.float32, output_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
self.assertEqual((0., 0.), output_details[0]["quantization"])
def testGraphDefQuantization(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA")
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB")
_ = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name="output")
sess = session.Session()
input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])]
output_arrays = ["output"]
tflite_model = convert.toco_convert_graph_def(
sess.graph_def,
input_arrays_map,
output_arrays,
inference_type=lite_constants.QUANTIZED_UINT8,
quantized_input_stats=[(0., 1.), (0., 1.)])
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual("inputA", input_details[0]["name"])
self.assertEqual(np.uint8, input_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
self.assertEqual((1., 0.),
input_details[0]["quantization"]) # scale, zero_point
self.assertEqual("inputB", input_details[1]["name"])
self.assertEqual(np.uint8, input_details[1]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[1]["shape"]).all())
self.assertEqual((1., 0.),
input_details[1]["quantization"]) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual("output", output_details[0]["name"])
self.assertEqual(np.uint8, output_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
self.assertTrue(output_details[0]["quantization"][0] > 0) # scale
class ConvertTestOpHint(test_util.TensorFlowTestCase):
"""Test the hint to stub functionality."""
def _getGraphOpTypes(self, graphdef, output_nodes):
"""Returns used op types in `graphdef` reachable from `output_nodes`.
This is used to check that after the stub transformation the expected
nodes are there.
    NOTE: this is not an exact test that the graph is the correct output, but
it balances compact expressibility of test with sanity checking.
Args:
graphdef: TensorFlow proto graphdef.
output_nodes: A list of output node names that we need to reach.
Returns:
A set of node types reachable from `output_nodes`.
"""
name_to_input_name, name_to_node, _ = (
_extract_graph_summary(graphdef))
# Find all nodes that are needed by the outputs
used_node_names = _bfs_for_reachable_nodes(output_nodes, name_to_input_name)
return set([name_to_node[node_name].op for node_name in used_node_names])
def _countIdentities(self, nodes):
"""Count the number of "Identity" op types in the list of proto nodes.
Args:
nodes: NodeDefs of the graph.
Returns:
The number of nodes with op type "Identity" found.
"""
return len([x for x in nodes if x.op == "Identity"])
def testSwishLiteHint(self):
"""Makes a custom op swish and makes sure it gets converted as a unit."""
image = array_ops.constant([1., 2., 3., 4.])
swish_scale = array_ops.constant(1.0)
def _swish(input_tensor, scale):
custom = op_hint.OpHint("cool_activation")
input_tensor, scale = custom.add_inputs(input_tensor, scale)
output = math_ops.sigmoid(input_tensor) * input_tensor * scale
output, = custom.add_outputs(output)
return output
output = array_ops.identity(_swish(image, swish_scale), name="ModelOutput")
with self.cached_session() as sess:
# check if identities have been put into the graph (2 input, 1 output,
# and 1 final output).
self.assertEqual(self._countIdentities(sess.graph_def.node), 4)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["cool_activation", "Const", "Identity"]))
def testScaleAndBiasAndIdentity(self):
"""This tests a scaled add which has 3 inputs and 2 outputs."""
a = array_ops.constant(1.)
x = array_ops.constant([2., 3.])
b = array_ops.constant([4., 5.])
def _scaled_and_bias_and_identity(a, x, b):
custom = op_hint.OpHint("scale_and_bias_and_identity")
a, x, b = custom.add_inputs(a, x, b)
return custom.add_outputs(a * x + b, x)
output = array_ops.identity(_scaled_and_bias_and_identity(a, x, b),
name="ModelOutput")
with self.cached_session() as sess:
# make sure one identity for each input (3) and output (2) => 3 + 2 = 5
# +1 for the final output
self.assertEqual(self._countIdentities(sess.graph_def.node), 6)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["scale_and_bias_and_identity", "Const", "Identity", "Pack"]))
def testTwoFunctions(self):
"""Tests if two functions are converted correctly."""
a = array_ops.constant([1.])
b = array_ops.constant([1.])
def _double_values(x):
custom = op_hint.OpHint("add_test")
x, = custom.add_inputs(x)
output = math_ops.multiply(x, x)
output, = custom.add_outputs(output)
return output
output = array_ops.identity(
math_ops.add(_double_values(a), _double_values(b)), name="ModelOutput")
with self.cached_session() as sess:
# make sure one identity for each input (2) and output (2) => 2 + 2
# +1 for the final output
self.assertEqual(self._countIdentities(sess.graph_def.node), 5)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["add_test", "Const", "Identity", "Add"]))
def _get_input_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_INPUT_INDEX_ATTR].i
def _get_output_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i
def _get_sort_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_SORT_INDEX_ATTR].i
def testTags(self):
"""Test if multiple args with the same tag are grouped."""
a = array_ops.constant([1.])
b = array_ops.constant([2.])
c = array_ops.constant([3.])
d = array_ops.constant([4.])
custom = op_hint.OpHint("test_tag")
a = custom.add_input(a, tag="mytag",
aggregate=op_hint.OpHint.AGGREGATE_STACK)
b, = custom.add_inputs(b)
c = custom.add_input(c, tag="mytag",
aggregate=op_hint.OpHint.AGGREGATE_STACK)
d = custom.add_input(d, tag="mytag2",
aggregate=op_hint.OpHint.AGGREGATE_STACK)
res = math_ops.add(math_ops.mul(a, b), math_ops.mul(c, b))
custom.add_outputs([res])
with self.cached_session():
self.assertEqual(self._get_input_index(a), 0)
self.assertEqual(self._get_sort_index(a), 0)
self.assertEqual(self._get_input_index(b), 1)
self.assertEqual(self._get_input_index(c), 0)
self.assertEqual(self._get_sort_index(c), 1)
def testOverrideIndex(self):
a = array_ops.constant([1.])
b = array_ops.constant([2.])
c = array_ops.constant([3.])
custom = op_hint.OpHint("test_override")
b = custom.add_input(b) # should auto assign 0
a = custom.add_input(a, index_override=1)
c = custom.add_input(c) # should auto assign 2
with self.cached_session():
self.assertEqual(self._get_input_index(a), 1)
self.assertEqual(self._get_input_index(b), 0)
self.assertEqual(self._get_input_index(c), 2)
def testAggregate(self):
a = array_ops.constant([3., 4.])
b = array_ops.constant([5., 6.])
hint = op_hint.OpHint("agg")
a0, a1 = array_ops.unstack(a)
b0, b1 = array_ops.unstack(b)
a0 = hint.add_input(a0, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b0 = hint.add_input(b0, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)
a1 = hint.add_input(a1, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b1 = hint.add_input(b1, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)
c0 = math_ops.add(a0, b0, name="addleft")
c1 = math_ops.add(a1, b1, name="addright")
c0 = hint.add_output(
c0, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK)
c1 = hint.add_output(
c1, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK)
curr = array_ops.stack([c0, c1])
output = array_ops.identity(curr, name="FINAL_OUTPUT")
with self.cached_session() as sess:
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["agg", "Const", "Identity"]))
if __name__ == "__main__":
test.main()
|
py | b403bb0f749e607d2304659b4ef5715de58bbd45 |
r"""
The torch package contains data structures for multi-dimensional
tensors and defines mathematical operations over these tensors.
Additionally, it provides many utilities for efficient serializing of
Tensors and arbitrary types, and other useful utilities.
It has a CUDA counterpart, that enables you to run your tensor computations
on an NVIDIA GPU with compute capability >= 3.0.
"""
import os
import sys
import platform
import textwrap
import ctypes
import warnings
if sys.version_info < (3,):
raise Exception("Python 2 has reached end-of-life and is no longer supported by PyTorch.")
from ._utils import _import_dotted_name, classproperty
from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
# TODO(torch_deploy) figure out how to freeze version.py in fbcode build
if sys.executable == 'torch_deploy':
__version__ = "torch-deploy-1.8"
else:
from .torch_version import __version__ as __version__
from ._six import string_classes as _string_classes
from typing import Set, Type, TYPE_CHECKING
__all__ = [
'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
'lobpcg', 'use_deterministic_algorithms',
'are_deterministic_algorithms_enabled',
'set_warn_always', 'is_warn_always_enabled',
]
################################################################################
# Load the extension module
################################################################################
if sys.platform == 'win32':
pfiles_path = os.getenv('ProgramFiles', 'C:\\Program Files')
py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin')
th_dll_path = os.path.join(os.path.dirname(__file__), 'lib')
# When users create a virtualenv that inherits the base environment,
# we will need to add the corresponding library directory into
# DLL search directories. Otherwise, it will rely on `PATH` which
# is dependent on user settings.
if sys.exec_prefix != sys.base_exec_prefix:
base_py_dll_path = os.path.join(sys.base_exec_prefix, 'Library', 'bin')
else:
base_py_dll_path = ''
dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, base_py_dll_path]))
if all([not os.path.exists(os.path.join(p, 'nvToolsExt64_1.dll')) for p in dll_paths]):
nvtoolsext_dll_path = os.path.join(
os.getenv('NVTOOLSEXT_PATH', os.path.join(pfiles_path, 'NVIDIA Corporation', 'NvToolsExt')), 'bin', 'x64')
else:
nvtoolsext_dll_path = ''
from .version import cuda as cuda_version
import glob
if cuda_version and all([not glob.glob(os.path.join(p, 'cudart64*.dll')) for p in dll_paths]):
cuda_version_1 = cuda_version.replace('.', '_')
cuda_path_var = 'CUDA_PATH_V' + cuda_version_1
default_path = os.path.join(pfiles_path, 'NVIDIA GPU Computing Toolkit', 'CUDA', 'v' + cuda_version)
cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 'bin')
else:
cuda_path = ''
dll_paths.extend(filter(os.path.exists, [nvtoolsext_dll_path, cuda_path]))
kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
prev_error_mode = kernel32.SetErrorMode(0x0001)
kernel32.LoadLibraryW.restype = ctypes.c_void_p
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
kernel32.LoadLibraryExW.restype = ctypes.c_void_p
for dll_path in dll_paths:
if sys.version_info >= (3, 8):
os.add_dll_directory(dll_path)
elif with_load_library_flags:
res = kernel32.AddDllDirectory(dll_path)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error adding "{dll_path}" to the DLL directories.'
raise err
try:
ctypes.CDLL('vcruntime140.dll')
ctypes.CDLL('msvcp140.dll')
ctypes.CDLL('vcruntime140_1.dll')
except OSError:
print('''Microsoft Visual C++ Redistributable is not installed, this may lead to the DLL load failure.
It can be downloaded at https://aka.ms/vs/16/release/vc_redist.x64.exe''')
dlls = glob.glob(os.path.join(th_dll_path, '*.dll'))
path_patched = False
for dll in dlls:
is_loaded = False
if with_load_library_flags:
res = kernel32.LoadLibraryExW(dll, None, 0x00001100)
last_error = ctypes.get_last_error()
if res is None and last_error != 126:
err = ctypes.WinError(last_error)
err.strerror += f' Error loading "{dll}" or one of its dependencies.'
raise err
elif res is not None:
is_loaded = True
if not is_loaded:
if not path_patched:
os.environ['PATH'] = ';'.join(dll_paths + [os.environ['PATH']])
path_patched = True
res = kernel32.LoadLibraryW(dll)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error loading "{dll}" or one of its dependencies.'
raise err
kernel32.SetErrorMode(prev_error_mode)
# See Note [Global dependencies]
def _load_global_deps():
if platform.system() == 'Windows' or sys.executable == 'torch_deploy':
return
lib_name = 'libtorch_global_deps' + ('.dylib' if platform.system() == 'Darwin' else '.so')
here = os.path.abspath(__file__)
lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name)
ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
platform.system() != 'Windows':
# Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a
# few circumstances:
#
# 1. You're in a build environment (e.g., fbcode) where
# libtorch_global_deps is not available, but you still need
# to get mkl to link in with RTLD_GLOBAL or it will just
# not work.
#
# 2. You're trying to run PyTorch under UBSAN and you need
# to ensure that only one copy of libtorch is loaded, so
# vptr checks work properly
#
# If you're using this setting, you must verify that all the libraries
# you load consistently use the same libstdc++, or you may have
# mysterious segfaults.
#
import os as _dl_flags
if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_LAZY'):
try:
# next try if DLFCN exists
import DLFCN as _dl_flags # type: ignore[import, no-redef]
except ImportError:
# as a last attempt, use compile-time constants
import torch._dl as _dl_flags # type: ignore[import, no-redef]
old_flags = sys.getdlopenflags()
sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_LAZY)
from torch._C import * # noqa: F403
sys.setdlopenflags(old_flags)
del old_flags
del _dl_flags
else:
# Easy way. You want this most of the time, because it will prevent
# C++ symbols from libtorch clobbering C++ symbols from other
# libraries, leading to mysterious segfaults.
#
# If building in an environment where libtorch_global_deps isn't available
# like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will
# want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
#
# See Note [Global dependencies]
if USE_GLOBAL_DEPS:
_load_global_deps()
from torch._C import * # noqa: F403
# Appease the type checker; ordinarily this binding is inserted by the
# torch._C module initialization code in C
if TYPE_CHECKING:
import torch._C as _C
# Check to see if we can load C extensions, and if not provide some guidance
# on what the problem might be.
try:
# _initExtension is chosen (arbitrarily) as a sentinel.
from torch._C import _initExtension
except ImportError:
import torch._C as _C_for_compiled_check
# The __file__ check only works for Python 3.7 and above.
if sys.version_info >= (3, 7) and _C_for_compiled_check.__file__ is None:
raise ImportError(textwrap.dedent('''
Failed to load PyTorch C extensions:
It appears that PyTorch has loaded the `torch/_C` folder
of the PyTorch repository rather than the C extensions which
are expected in the `torch._C` namespace. This can occur when
using the `install` workflow. e.g.
$ python setup.py install && python -c "import torch"
This error can generally be solved using the `develop` workflow
$ python setup.py develop && python -c "import torch" # This should succeed
or by running Python from a different directory.
''').strip()) from None
raise # If __file__ is not None the cause is unknown, so just re-raise.
__all__ += [name for name in dir(_C)
if name[0] != '_' and
not name.endswith('Base')]
if not TYPE_CHECKING:
# issue 38137 and python issue 43367. Submodules of a C extension are
# non-standard, and attributes of those submodules cannot be pickled since
# pickle expect to be able to import them as "from _C.sub import attr"
# which fails with "_C is not a package
for attr in dir(_C):
candidate = getattr(_C, attr)
if type(candidate) is type(_C):
# submodule
if f'torch._C.{attr}' not in sys.modules:
sys.modules[f'torch._C.{attr}'] = candidate
################################################################################
# Define basic utilities
################################################################################
def typename(o):
r"""Returns a string naming the type of ``o``: the tensor type string for
tensors (e.g. ``'torch.FloatTensor'``), otherwise the module-qualified name
of ``o`` (falling back to the name of its class)."""
if isinstance(o, torch.Tensor):
return o.type()
module = ''
class_name = ''
if hasattr(o, '__module__') and o.__module__ != 'builtins' \
and o.__module__ != '__builtin__' and o.__module__ is not None:
module = o.__module__ + '.'
if hasattr(o, '__qualname__'):
class_name = o.__qualname__
elif hasattr(o, '__name__'):
class_name = o.__name__
else:
class_name = o.__class__.__name__
return module + class_name
def is_tensor(obj):
r"""Returns True if `obj` is a PyTorch tensor.
Note that this function is simply doing ``isinstance(obj, Tensor)``.
Using that ``isinstance`` check is better for typechecking with mypy,
and more explicit - so it's recommended to use that instead of
``is_tensor``.
Args:
obj (Object): Object to test
Example::
>>> x=torch.tensor([1,2,3])
>>> torch.is_tensor(x)
True
"""
return isinstance(obj, torch.Tensor)
def is_storage(obj):
r"""Returns True if `obj` is a PyTorch storage object.
Args:
obj (Object): Object to test
"""
return type(obj) in _storage_classes
def set_default_tensor_type(t):
r"""Sets the default ``torch.Tensor`` type to floating point tensor type
``t``. This type will also be used as default floating point type for
type inference in :func:`torch.tensor`.
The default floating point tensor type is initially ``torch.FloatTensor``.
Args:
t (type or string): the floating point tensor type or its name
Example::
>>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32
torch.float32
>>> torch.set_default_tensor_type(torch.DoubleTensor)
>>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
torch.float64
"""
if isinstance(t, _string_classes):
t = _import_dotted_name(t)
_C._set_default_tensor_type(t)
def set_default_dtype(d):
r"""
Sets the default floating point dtype to :attr:`d`. Supports torch.float32
and torch.float64 as inputs. Other dtypes may be accepted without complaint
but are not supported and are unlikely to work as expected.
When PyTorch is initialized its default floating point dtype is torch.float32,
and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like
type inference. The default floating point dtype is used to:
1. Implicitly determine the default complex dtype. When the default floating point
type is float32 the default complex dtype is complex64, and when the default
floating point type is float64 the default complex type is complex128.
2. Infer the dtype for tensors constructed using Python floats or complex Python
numbers. See examples below.
3. Determine the result of type promotion between bool and integer tensors and
Python floats and complex Python numbers.
Args:
d (:class:`torch.dtype`): the floating point dtype to make the default.
Either torch.float32 or torch.float64.
Example:
>>> # initial default for floating point is torch.float32
>>> # Python floats are interpreted as float32
>>> torch.tensor([1.2, 3]).dtype
torch.float32
>>> # the corresponding initial default complex dtype is torch.complex64
>>> # Complex Python numbers are interpreted as complex64
>>> torch.tensor([1.2, 3j]).dtype
torch.complex64
>>> torch.set_default_dtype(torch.float64)
>>> # Python floats are now interpreted as float64
>>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
torch.float64
>>> # Complex Python numbers are now interpreted as complex128
>>> torch.tensor([1.2, 3j]).dtype # a new complex tensor
torch.complex128
"""
_C._set_default_dtype(d)
def use_deterministic_algorithms(mode, *, warn_only=False):
r""" Sets whether PyTorch operations must use "deterministic"
algorithms. That is, algorithms which, given the same input, and when
run on the same software and hardware, always produce the same output.
When enabled, operations will use deterministic algorithms when available,
and if only nondeterministic algorithms are available they will throw a
:class:`RuntimeError` when called.
The following normally-nondeterministic operations will act
deterministically when ``mode=True``:
* :class:`torch.nn.Conv1d` when called on a CUDA tensor
* :class:`torch.nn.Conv2d` when called on a CUDA tensor
* :class:`torch.nn.Conv3d` when called on a CUDA tensor
* :class:`torch.nn.ConvTranspose1d` when called on a CUDA tensor
* :class:`torch.nn.ConvTranspose2d` when called on a CUDA tensor
* :class:`torch.nn.ConvTranspose3d` when called on a CUDA tensor
* :func:`torch.bmm` when called on sparse-dense CUDA tensors
* :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor
and the index is a list of tensors
* :func:`torch.Tensor.index_put` with ``accumulate=False``
* :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU
tensor
* :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU
tensor
* :func:`torch.Tensor.scatter_add_` when ``input`` dimension is one and called
on a CUDA tensor
* :func:`torch.gather` when ``input`` dimension is one and called
on a CUDA tensor that requires grad
* :func:`torch.index_add` when called on a CUDA tensor
* :func:`torch.index_select` when attempting to differentiate a CUDA tensor
* :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor
* :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor
The following normally-nondeterministic operations will throw a
:class:`RuntimeError` when ``mode=True``:
* :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor
* :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor
and one of the following modes is used:
- ``linear``
- ``bilinear``
- ``bicubic``
- ``trilinear``
* :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.NLLLoss` when called on a CUDA tensor
* :class:`torch.nn.CTCLoss` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor when
``mode='max'``
* :func:`torch.Tensor.scatter_add_` when ``input`` dimension is larger than one
and called on a CUDA tensor
* :func:`torch.gather` when ``input`` dimension is larger than one
and called on a CUDA tensor that requires grad
* :func:`torch.Tensor.put_` when ``accumulate=False``
* :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA tensor
* :func:`torch.histc` when called on a CUDA tensor
* :func:`torch.bincount` when called on a CUDA tensor
* :func:`torch.kthvalue` when called on a CUDA tensor
* :func:`torch.median` with indices output when called on a CUDA tensor
* :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor
A handful of CUDA operations are nondeterministic if the CUDA version is
10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8``
or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more
details: `<https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility>`_
If one of these environment variable configurations is not set, a :class:`RuntimeError`
will be raised from these operations when called with CUDA tensors:
* :func:`torch.mm`
* :func:`torch.mv`
* :func:`torch.bmm`
Note that deterministic operations tend to have worse performance than
nondeterministic operations.
.. note::
This flag does not detect or prevent nondeterministic behavior caused
by calling an inplace operation on a tensor with an internal memory
overlap or by giving such a tensor as the :attr:`out` argument for an
operation. In these cases, multiple writes of different data may target
a single memory location, and the order of writes is not guaranteed.
Args:
mode (:class:`bool`): If True, makes potentially nondeterministic
operations switch to a deterministic algorithm or throw a runtime
error. If False, allows nondeterministic operations.
Keyword args:
warn_only (:class:`bool`, optional): If True, operations that do not
have a deterministic implementation will throw a warning instead of
an error. Default: ``False``
Example::
>>> torch.use_deterministic_algorithms(True)
# Forward mode nondeterministic error
>>> torch.randn(10).index_copy(0, torch.tensor([0]), torch.randn(1))
...
RuntimeError: index_copy does not have a deterministic implementation...
# Backward mode nondeterministic error
>>> torch.randn(10, requires_grad=True, device='cuda').index_select(0, torch.tensor([0], device='cuda')).backward()
...
RuntimeError: index_add_cuda_ does not have a deterministic implementation...
"""
_C._set_deterministic_algorithms(mode, warn_only=warn_only)
def are_deterministic_algorithms_enabled():
r"""Returns True if the global deterministic flag is turned on. Refer to
:func:`torch.use_deterministic_algorithms` documentation for more details.
"""
return _C._get_deterministic_algorithms()
def is_deterministic_algorithms_warn_only_enabled():
r"""Returns True if the global deterministic flag is set to warn only.
Refer to :func:`torch.use_deterministic_algorithms` documentation for more
details.
"""
return _C._get_deterministic_algorithms_warn_only()
def set_warn_always(b):
r"""When this flag is False (default) then some PyTorch warnings may only
appear once per process. This helps avoid excessive warning information.
Setting it to True causes these warnings to always appear, which may be
helpful when debugging.
Args:
b (:class:`bool`): If True, force warnings to always be emitted
If False, set to the default behaviour
"""
_C._set_warnAlways(b)
def is_warn_always_enabled():
r"""Returns True if the global warn_always flag is turned on. Refer to
:func:`torch.set_warn_always` documentation for more details.
"""
return _C._get_warnAlways()
################################################################################
# Define numeric constants
################################################################################
# For Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and
# NumPy consistency (https://numpy.org/devdocs/reference/constants.html)
from math import e, nan, inf, pi
__all__.extend(['e', 'pi', 'nan', 'inf'])
################################################################################
# Define Storage and Tensor classes
################################################################################
from ._tensor import Tensor
from .storage import _StorageBase, TypedStorage
# NOTE: New <type>Storage classes should never be added. When adding a new
# dtype, use torch.storage.TypedStorage directly.
class UntypedStorage(_C.ByteStorageBase, _StorageBase):
pass
class ByteStorage(TypedStorage):
@classproperty
def dtype(self):
return torch.uint8
class DoubleStorage(TypedStorage):
@classproperty
def dtype(self):
return torch.double
class FloatStorage(TypedStorage):
@classproperty
def dtype(self):
return torch.float
class HalfStorage(TypedStorage):
@classproperty
def dtype(self):
return torch.half
class LongStorage(TypedStorage):
@classproperty
def dtype(self):
return torch.long
class IntStorage(TypedStorage):
@classproperty
def dtype(self):
return torch.int
class ShortStorage(TypedStorage):
@classproperty
def dtype(self):
return torch.short
class CharStorage(TypedStorage):
@classproperty
def dtype(self):
return torch.int8
class BoolStorage(TypedStorage):
@classproperty
def dtype(self):
return torch.bool
class BFloat16Storage(TypedStorage):
@classproperty
def dtype(self):
return torch.bfloat16
class ComplexDoubleStorage(TypedStorage):
@classproperty
def dtype(self):
return torch.cdouble
class ComplexFloatStorage(TypedStorage):
@classproperty
def dtype(self):
return torch.cfloat
class QUInt8Storage(TypedStorage):
@classproperty
def dtype(self):
return torch.quint8
class QInt8Storage(TypedStorage):
@classproperty
def dtype(self):
return torch.qint8
class QInt32Storage(TypedStorage):
@classproperty
def dtype(self):
return torch.qint32
class QUInt4x2Storage(TypedStorage):
@classproperty
def dtype(self):
return torch.quint4x2
class QUInt2x4Storage(TypedStorage):
@classproperty
def dtype(self):
return torch.quint2x4
_storage_classes = {
UntypedStorage, DoubleStorage, FloatStorage, LongStorage, IntStorage,
ShortStorage, CharStorage, ByteStorage, HalfStorage, BoolStorage,
QUInt8Storage, QInt8Storage, QInt32Storage, BFloat16Storage,
ComplexFloatStorage, ComplexDoubleStorage, QUInt4x2Storage, QUInt2x4Storage,
}
# The _tensor_classes set is initialized by the call to _C._initialize_tensor_type_bindings()
_tensor_classes: Set[Type] = set()
# If you edit these imports, please update torch/__init__.py.in as well
from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed
from .serialization import save, load
from ._tensor_str import set_printoptions
################################################################################
# Initialize extension
################################################################################
def manager_path():
if platform.system() == 'Windows' or sys.executable == 'torch_deploy':
return b""
path = get_file_path('torch', 'bin', 'torch_shm_manager')
prepare_multiprocessing_environment(get_file_path('torch'))
if not os.path.exists(path):
raise RuntimeError("Unable to find torch_shm_manager at " + path)
return path.encode('utf-8')
from .autocast_mode import autocast
# Shared memory manager needs to know the exact location of manager executable
_C._initExtension(manager_path())
del manager_path
# Appease the type checker: it can't deal with direct setting of globals().
# Note that we will see "too many" functions when reexporting this way; there
# is not a good way to fix this problem. Perhaps VariableFunctions could be
# redesigned so that this import is good enough
if TYPE_CHECKING:
# Some type signatures pulled in from _VariableFunctions here clash with
# signatures already imported. For now these clashes are ignored; see
# PR #43339 for details.
from torch._C._VariableFunctions import * # type: ignore[misc] # noqa: F403
# Ops not to be exposed in `torch` namespace,
# mostly helper ops.
PRIVATE_OPS = (
'unique_dim',
)
for name in dir(_C._VariableFunctions):
if name.startswith('__') or name in PRIVATE_OPS:
continue
globals()[name] = getattr(_C._VariableFunctions, name)
__all__.append(name)
################################################################################
# Import interface functions defined in Python
################################################################################
# needs to be after the above ATen bindings so we can overwrite from Python side
from .functional import * # noqa: F403
################################################################################
# Remove unnecessary members
################################################################################
del ByteStorageBase
################################################################################
# Define _assert
################################################################################
# needs to be before the submodule imports to avoid circular dependencies
def _assert(condition, message):
r"""A wrapper around Python's assert which is symbolically traceable.
"""
from .overrides import has_torch_function, handle_torch_function
if type(condition) is not torch.Tensor and has_torch_function((condition,)):
return handle_torch_function(_assert, (condition,), condition, message)
assert condition, message
################################################################################
# Import most common subpackages
################################################################################
# Use the redundant form so that type checkers know that these are a part of
# the public API. The "regular" import lines are there solely for the runtime
# side effect of adding to the imported module's members for other users.
from torch import cuda as cuda
from torch import cpu as cpu
from torch import autograd as autograd
from torch.autograd import (
no_grad as no_grad,
enable_grad as enable_grad,
set_grad_enabled as set_grad_enabled,
inference_mode as inference_mode,
)
from torch import fft as fft
from torch import futures as futures
from torch import nn as nn
import torch.nn.intrinsic
import torch.nn.quantizable
import torch.nn.quantized
# AO depends on nn, as well as quantized stuff -- so should be after those.
from torch import ao as ao
from torch import optim as optim
import torch.optim._multi_tensor
from torch import multiprocessing as multiprocessing
from torch import sparse as sparse
from torch import special as special
import torch.utils.backcompat
from torch import onnx as onnx
from torch import jit as jit
from torch import linalg as linalg
from torch import hub as hub
from torch import random as random
from torch import distributions as distributions
from torch import testing as testing
import torch.backends.cuda
import torch.backends.mkl
import torch.backends.mkldnn
import torch.backends.openmp
import torch.backends.quantized
import torch.utils.data
from torch import __config__ as __config__
from torch import __future__ as __future__
from torch import profiler as profiler
_C._init_names(list(torch._storage_classes))
# attach docstrings to torch and tensor functions
from . import _torch_docs, _tensor_docs, _storage_docs
del _torch_docs, _tensor_docs, _storage_docs
def compiled_with_cxx11_abi():
r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""
return _C._GLIBCXX_USE_CXX11_ABI
# Import the ops "namespace"
from torch._ops import ops
from torch._classes import classes
# quantization depends on torch.fx
# Import quantization
from torch import quantization as quantization
# Import the quasi random sampler
from torch import quasirandom as quasirandom
# If you are seeing this, it means that this call site was not checked to see
# whether the memory format could be preserved, and it was switched to the old
# default behaviour of contiguous
legacy_contiguous_format = contiguous_format
# Register fork handler to initialize OpenMP in child processes (see gh-28389)
from torch.multiprocessing._atfork import register_after_fork
register_after_fork(torch.get_num_threads)
del register_after_fork
# Import tools that require fully imported torch (for applying
# torch.jit.script as a decorator, for instance):
from ._lobpcg import lobpcg as lobpcg
from ._vmap_internals import vmap as vmap
# These were previously defined in native_functions.yaml and appeared on the
# `torch` namespace, but we moved them to c10 dispatch to facilitate custom
# class usage. We add these lines here to preserve backward compatibility.
quantized_lstm = torch.ops.aten.quantized_lstm
quantized_gru = torch.ops.aten.quantized_gru
from torch.utils.dlpack import from_dlpack, to_dlpack
# Import experimental masked operations support. See
# [RFC-0016](https://github.com/pytorch/rfcs/pull/27) for more
# information.
from . import _masked
def _register_device_module(device_type, module):
r"""Register an external runtime module of the specific :attr:`device_type`
supported by torch.
After the :attr:`module` is registered correctly, the user can refer to
the external runtime module as part of torch with the attribute torch.xxx.
"""
# Make sure the device_type represent a supported device type for torch.
device_type = torch.device(device_type).type
m = sys.modules[__name__]
if hasattr(m, device_type):
raise RuntimeError("The runtime module of '{}' has already "
"been registered with '{}'".format(device_type, getattr(m, device_type)))
setattr(m, device_type, module)
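# A minimal usage sketch of _register_device_module, kept as comments so that importing
# torch is unchanged. The device name 'xpu' and ``my_backend`` are assumptions for
# illustration only, not modules shipped with PyTorch:
#
#     import my_backend                                  # hypothetical extension module
#     torch._register_device_module('xpu', my_backend)   # now reachable as torch.xpu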
|
py | b403bbc0f5d7e001e1b53c42c5b099f83bcf9b7f | """
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import googletest
from tcav.model import ModelWrapper
class ModelTest_model(ModelWrapper):
"""A mock model of model class for ModelTest class."""
def __init__(self, model_path=None, node_dict=None):
super(ModelTest_model, self).__init__(
model_path=model_path, node_dict=node_dict)
class ModelTest(googletest.TestCase):
def setUp(self):
# Create an execution graph
x = tf.placeholder(dtype=tf.float64, shape=[], name='input')
a = tf.Variable(111, name='var1', dtype=tf.float64)
y = tf.math.multiply(x, a, name='output')
self.ckpt_dir = '/tmp/ckpts/'
self.saved_model_dir = '/tmp/saved_model/'
self.frozen_graph_dir = '/tmp/frozen_graph/'
self.tmp_dirs = [self.ckpt_dir, self.saved_model_dir, self.frozen_graph_dir]
for d in self.tmp_dirs:
if tf.gfile.Exists(d):
tf.gfile.DeleteRecursively(d)
tf.gfile.MakeDirs(d)
with tf.Session() as sess:
tf.initialize_all_variables().run()
# Save as checkpoint
saver = tf.train.Saver()
saver.save(sess, self.ckpt_dir + 'model.ckpt', write_meta_graph=True)
# Save as SavedModel
tf.saved_model.simple_save(
sess,
self.saved_model_dir,
inputs={'input': x},
outputs={'output': y})
graph = sess.graph
input_graph_def = graph.as_graph_def()
output_node_names = ['output']
output_graph_def = graph_util.convert_variables_to_constants(
sess, input_graph_def, output_node_names)
# Save as binary graph
tf.io.write_graph(
output_graph_def, self.frozen_graph_dir, 'graph.pb', as_text=False)
# Save as text graph
tf.io.write_graph(
output_graph_def, self.frozen_graph_dir, 'graph.pbtxt', as_text=True)
def tearDown(self):
for d in self.tmp_dirs:
tf.gfile.DeleteRecursively(d)
def _check_output_and_gradient(self, model_path, import_prefix=False):
model = ModelTest_model(model_path=model_path, node_dict={'v1': 'var1'})
input_name = 'input:0'
output_name = 'output:0'
if import_prefix:
input_name = 'import/' + input_name
output_name = 'import/' + output_name
out = model.sess.run(output_name, feed_dict={input_name: 3})
self.assertEqual(out, 333.0)
model.loss = model.sess.graph.get_tensor_by_name(output_name)
# Make sure that loaded graph can be modified
model._make_gradient_tensors()
grad = model.sess.run(
model.bottlenecks_gradients['v1'], feed_dict={input_name: 555})
self.assertEqual(grad, 555.0)
def test_try_loading_model_from_ckpt(self):
self._check_output_and_gradient(self.ckpt_dir)
def test_try_loading_model_from_saved_model(self):
self._check_output_and_gradient(self.saved_model_dir)
def test_try_loading_model_from_frozen_pb(self):
model_path = self.frozen_graph_dir + 'graph.pb'
self._check_output_and_gradient(model_path, import_prefix=True)
def test_try_loading_model_from_frozen_txt(self):
model_path = self.frozen_graph_dir + 'graph.pbtxt'
self._check_output_and_gradient(model_path, import_prefix=True)
if __name__ == '__main__':
googletest.main()
|
py | b403bc0a59c5bafc4092a4c299a55fc4a87fef8d | #!/usr/bin/env python3
import os
import sys
import pickle
from functools import partial
from string import ascii_lowercase
import jax
import torch
sys.path.append('..')
from ti_mps import TI_MPS
from train_tools import init_strset, to_string
samp_lens = [1, 16, 50] # What lengths we want to sample at
samp_size = 1000 # Number of samples to draw
comp_exp = False # Whether to run completion or
# sampling task
dataset = 'brackets' # Dataset models were trained on
save_name = ".motzkin_exp.record" # Where the record is saved
ALPHABET = {'brackets': ['(', ')', '*'],
'tomita': ['0', '1'],
'bos_eos': ['^', '$'],
}
alph_noends = ALPHABET[dataset]
alph_ends = alph_noends + ALPHABET['bos_eos']
if dataset == 'brackets':
from toy_datasets import brackets_dataset
from toy_datasets import score_brackets as score_fun
elif dataset == 'tomita':
from toy_datasets import score_tomita as tom_score
def is_lstm(model):
assert isinstance(model, (torch.nn.Module, TI_MPS))
return isinstance(model, torch.nn.Module)
def mps_sample_fun(rng_key, mps, target_lens, score_fun, ref_sets=None):
"""Draw samples from MPS model within JAX"""
from sampler import draw_samples, fill_in_blanks
bi_exp = ref_sets is not None
examp_samps = {}
if bi_exp:
corr_frac = {}
for samp_l in target_lens:
ref_s = ref_sets[samp_l]
ref_strs = to_string(ref_s, alph_noends)
rng_key, key = jax.random.split(rng_key)
samp_chars = fill_in_blanks(key, mps, alphabet=alph_noends,
ref_strset=ref_s)
# TODO: Fold this code into fill_in_blanks
# Generate validation strings with each character replaced by
# suggested character from samp_chars
samples = [s[:i] + c + s[i+1:] for s, cs in zip(ref_strs, samp_chars)
for i, c in enumerate(cs)]
corr_frac[samp_l] = 100 * score_fun(samples)
examp_samps[samp_l] = samples[:10]
print(f"Correct frac len={samp_l}: {corr_frac[samp_l]:.1f}%")
print(f"Replacement examples: {samples[:10]}\n")
else:
corr_frac = {}
for samp_l in target_lens:
rng_key, key = jax.random.split(rng_key)
samples = draw_samples(key, mps, alphabet=alph_noends,
num_samps=samp_size, samp_len=samp_l)
score = score_fun(samples)
corr_frac[samp_l] = 100 * score
examp_samps[samp_l] = samples[:10]
print(f"Correct frac len={samp_l}: {100 * score:.1f}%")
print(f"Example samples: {samples[:10]}\n")
return corr_frac
def lstm_sample_fun(rng_key, lstm, target_lens, score_fun, ref_sets=None):
"""Draw samples from LSTM model within Pytorch"""
samp_mode = 'fixed'
bi_exp = lstm.bi_dir
lstm = lstm.eval()
examp_samps = {}
if bi_exp:
corr_frac = {}
for samp_l in target_lens:
ref_s = ref_sets[samp_l]
rng_key, key = jax.random.split(rng_key)
# TODO: Finish up better bidirectional sampling code, including
# (a) deal with BOS/EOS, (b) properly put samp_chars in
# ref_set strings
ref_strs = [s[1:-1] for s in to_string(ref_s, alph_ends)]
samples = lstm.sample(key, alph_ends,
samp_mode='completion', ref_strset=ref_s)
# BOS and EOS should never be sampled, so replace those with
# incorrect strings
assert not any(('^' in s or '$' in s) for s in samples)
# samples = [')(' if ('^' in s or '$' in s) else s for s in samples]
corr_frac[samp_l] = 100 * score_fun(samples)
examp_samps[samp_l] = samples[:10]
print(f"Correct frac len={samp_l}: {corr_frac[samp_l]:.1f}%")
print(f"Replacement examples:{examp_samps[samp_l]}\n")
else:
corr_frac = {}
for samp_l in target_lens:
rng_key, key = jax.random.split(rng_key)
samples = lstm.sample(key, alph_ends, samp_mode=samp_mode,
num_samps=samp_size, samp_len=samp_l)
score = score_fun(samples)
corr_frac[samp_l] = 100 * score
examp_samps[samp_l] = samples[:10]
print(f"Correct frac len={samp_l}: {100 * score:.1f}%")
print(f"Example samples: {examp_samps[samp_l]}\n")
return corr_frac
cf_form = "corr_frac_bi_{}" if comp_exp else "corr_frac_{}"
rng_key = jax.random.PRNGKey(0)
# Load the data record we're interested in
full_record = pickle.load(open(save_name, 'rb'))
# Get a StrSet containing brackets of interest
if comp_exp:
ref_sets_ends = {}
ref_sets_noends = {}
for samp_l in samp_lens:
rng_key, key = jax.random.split(rng_key)
min_l = samp_l if samp_l < 18 else 1
try:
ref_se = brackets_dataset(rng_key=key,
data_split=samp_size,
max_len=samp_l,
min_len=min_l,
add_ends=True)
ref_sne = brackets_dataset(rng_key=key,
data_split=samp_size,
max_len=samp_l,
min_len=min_l,
add_ends=False)
except:
assert samp_l == 1
ref_se = brackets_dataset(rng_key=key,
data_split=0.99999,
max_len=samp_l,
min_len=min_l,
add_ends=True)
ref_sne = brackets_dataset(rng_key=key,
data_split=0.99999,
max_len=samp_l,
min_len=min_l,
add_ends=False)
ref_se, ref_sne = ref_se * samp_size, ref_sne * samp_size
ref_sets_ends[samp_l] = init_strset(ref_se, alph_ends)
ref_sets_noends[samp_l] = init_strset(ref_sne, alph_noends)
else:
ref_sets_ends = None
ref_sets_noends = None
# Go through each experimental setting and resample with trained model
for setting, global_rec in full_record.items():
# Get relevant data for this experimental setting
print(setting)
_, _, model = setting[:3]
assert model in ['mps', 'lstm']
assert len(setting) in [3, 4]
samp_fun = lstm_sample_fun if model == 'lstm' else mps_sample_fun
best_model = global_rec['best_model']
best_epoch = global_rec['best_epoch']
local_rec = global_rec['local_recs'][best_epoch]
# Figure out which lengths haven't been sampled yet
these_lens = [l for l in samp_lens if cf_form.format(l) not in local_rec]
if these_lens == []: continue
# Perform the resampling and add results to local_rec
rng_key, key = jax.random.split(rng_key)
corr_frac = samp_fun(key, best_model, these_lens, score_fun, ref_sets=(
ref_sets_ends if model=='lstm' else ref_sets_noends))
for s_len, score in corr_frac.items():
lookup = cf_form.format(s_len)
if lookup in local_rec:
print(f"Already have samples from len {s_len}")
continue
local_rec[lookup] = score
# Put this back in full_record and save
global_rec['local_recs'][best_epoch] = local_rec
full_record[setting] = global_rec
pickle.dump(full_record, open(save_name, 'wb')) |
py | b403bc10c5ab63679e620fbbe29ac4f73318a98d | from osmgraph.importer import GraphImporter
def test_importer():
gi = GraphImporter()
coords = [
(0, 1.0, 2.0),
(1, 3.0, 4.0),
(2, 5.0, 6.0),
(3, 7.0, 8.0),
(4, 9.0, 10.0),
(5, 11.0, 12.0),
(6, 13.0, 14.0),
(7, 15.0, 16.0),
(8, 17.0, 18.0),
(9, 19.0, 20.0),
]
gi.coords_callback(coords)
nodes = [
(1, {'key1': 'value1'}, (None, None)),
(3, {'key2': 'value2'}, (None, None)),
]
gi.nodes_callback(nodes)
ways = [
(1, {'key3': 'value3'}, [0, 1, 2]),
(2, {}, [8, 9]), # no tags, should be absent from graph
(3, {'key4': 'value4', 'oneway': 'yes'}, [2, 3, 4, 5]),
(4, {'oneway': '-1'}, [7, 6, 5]),
]
gi.ways_callback(ways)
g = gi.get_graph()
expected_nodes = [
(0, {'coordinate': (1.0, 2.0)}),
(1, {'coordinate': (3.0, 4.0), 'key1': 'value1'}),
(2, {'coordinate': (5.0, 6.0)}),
(3, {'coordinate': (7.0, 8.0), 'key2': 'value2'}),
(4, {'coordinate': (9.0, 10.0)}),
(5, {'coordinate': (11.0, 12.0)}),
(6, {'coordinate': (13.0, 14.0)}),
(7, {'coordinate': (15.0, 16.0)}),
]
expected_edges = [
(0, 1, {'key3': 'value3'}),
(1, 0, {'key3': 'value3'}),
(1, 2, {'key3': 'value3'}),
(2, 1, {'key3': 'value3'}),
(2, 3, {'key4': 'value4', 'oneway': 'yes'}),
(3, 4, {'key4': 'value4', 'oneway': 'yes'}),
(4, 5, {'key4': 'value4', 'oneway': 'yes'}),
(5, 6, {'oneway': 'yes'}),
(6, 7, {'oneway': 'yes'}),
]
assert sorted(g.nodes(data=True)) == expected_nodes
assert sorted(g.edges(data=True)) == expected_edges
def test_u_v_edges():
gi = GraphImporter()
coords = [
(0, 1.0, 2.0),
(1, 3.0, 4.0),
]
gi.coords_callback(coords)
ways = [
(1, {'u': 'value1', 'v': 'value2'}, [0, 1]),
]
gi.ways_callback(ways)
g = gi.get_graph()
expected_edges = [
(0, 1, {'u': 'value1', 'v': 'value2'}),
(1, 0, {'u': 'value1', 'v': 'value2'})
]
assert sorted(g.edges(data=True)) == expected_edges
def test_parse_direction():
gi = GraphImporter()
coords = [
(0, 1.0, 2.0),
(1, 3.0, 4.0),
]
gi.coords_callback(coords)
ways = [
(1, {'u': 'value1', 'v': 'value2'}, [0, 1]),
]
gi.ways_callback(ways)
g = gi.get_graph(parse_direction=True)
assert g[0][1]['_direction'] == 'forward'
assert g[1][0]['_direction'] == 'backward'
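# A minimal usage sketch (not a test): the helper below mirrors the callback sequence used
# above to build a tiny one-way graph. The function name and tag values are illustrative.
def _example_minimal_usage():
    gi = GraphImporter()
    gi.coords_callback([(0, 1.0, 2.0), (1, 3.0, 4.0)])
    gi.ways_callback([(1, {'highway': 'residential', 'oneway': 'yes'}, [0, 1])])
    # With oneway='yes', only the forward edge 0 -> 1 is expected in the graph.
    return sorted(gi.get_graph().edges(data=True))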
|
py | b403bc9a12593a9520d1092272f46b033f4ad567 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
import seaborn
import datetime
def h2o_f2_f7(dat):
fig,axes = plt.subplots(2,2,figsize=(16,12),gridspec_kw={"width_ratios":[1,3]})
#c = plt.rcParams['axes.prop_cycle'].by_key()['color']
#c = ["#FF1F58","#009ADE","#FFC61E"]
c = ["#009ADE","#FF1F58","k"]
ms = 4.0
flno = [2,3,4,6,7,8]
maxlag = [0,0,5,10,10,20]
# add cloudy flag
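# (a sample is flagged cloudy when NICE > 0 or MASBR >= 1.2; the lagged-maximum loop below
#  then carries the flag forward by up to maxlag-1 samples within each flight)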
dat['CLOUDY'] = ((dat['NICE'] > 0) | (dat['MASBR'] >= 1.2)).astype(int)
for i,f in enumerate(flno):
for lag in np.arange(1,maxlag[i]):
dat.loc[(dat['FLIGHT'] == f),'CLOUDY'] = np.maximum(dat.loc[(dat['FLIGHT'] == f),'CLOUDY'],
dat[(dat['FLIGHT'] == f)].shift(periods=lag, fill_value=0.0)['CLOUDY'])
# add ascent/descent flag
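# (flag samples whose 20-point smoothed vertical speed |dz/dt| exceeds 10 m/s, or which lie
#  below 12 km altitude, so they can be excluded later)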
dz = (dat['ALT'] - dat.shift(periods=1)['ALT'])*1e3
dt = dat['TIME'] - dat.shift(periods=1)['TIME']
vert = np.abs(dz / dt)
vert_avg = vert.rolling(window=20).mean()
dat['ASCENT_FLAG'] = ((vert_avg > 10) | (dat['ALT'] < 12)).astype(int)
# add chiwis flag
dat['CELL_FLAG'] = ((dat['PRES_CELL'] < 30.0) | (dat['PRES_CELL'] > 45.0) | (dat['FLAG'] == 1)).astype(int)
dat['CELL_OOR'] = ((dat['PRES_CELL'] < 20.0) | (dat['PRES_CELL'] > 30.0) | (dat['FLAG'] == 1)).astype(int)
# FL7 dive flag
dat['F7_DIVE'] = ((dat['FLIGHT'] == 7) & (dat['TIME'] > 19.9e3) & (dat['TIME'] < 20.2e3)).astype('int')
for f in [2,7]:
if f == 2:
ax0 = axes[0,0]
ax1 = axes[0,1]
sub0 = "a"
sub1 = "b"
t1 = 9.15
t2 = 10.9
t2 = 12.5  # note: overrides the t2 = 10.9 assignment above
ptlim = [360,480]
wvlim = [3,14]
wvtks = [4,8,12]
maxlag = 0
if f == 7:
ax0 = axes[1,0]
ax1 = axes[1,1]
sub0 = "c"
sub1 = "d"
t1 = 10.58
t2 = 12.2
ptlim = [360,440]
wvlim = [3,10]
wvtks = [4,6,8]
maxlag = 10
dati = dat[(dat['FLIGHT'] == f) & (dat['ASCENT_FLAG'] == 0)]
dat_clr_fish = dat[(dat['FLIGHT'] == f) & (dat['CLOUDY'] == 0) & (dat['ASCENT_FLAG'] == 0)]
dat_flash = dat[(dat['FLIGHT'] == f) & (dat['ASCENT_FLAG'] == 0) & (dat['F7_DIVE'] == 0)
& ((dat['TIME']/3600.+5.75 > t1) | (dat['TIME']/3600.+5.75 < t2))]
dat_chiwis = dat[(dat['FLIGHT'] == f) & (dat['CELL_FLAG'] == 0) & (dat['ASCENT_FLAG'] == 0)]
dat_chiwis_oor = dat[(dat['FLIGHT'] == f) & (dat['CELL_OOR'] == 0) & (dat['ASCENT_FLAG'] == 0)]
ax0.plot(dat_clr_fish['FIH2O'],dat_clr_fish['PT'],'.', ms=ms, color=c[1], label="clear-sky FISH")
ax0.plot(dat_flash['FLH2O'],dat_flash['PT'],'.', ms=ms, color=c[0], label="FLASH")
ax0.plot(dat_chiwis['H2O'],dat_chiwis['PT'],'.', ms=ms, color=c[2], label="ChiWIS")
ax0.plot(dat_chiwis_oor['H2O'], dat_chiwis_oor['PT'],'.', ms=ms, color='grey')
ax0.grid(which='major',linestyle=':')
ax0.set_xlim(wvlim)
ax0.set_xticks(wvtks)
if f == 7:
ax0.set_xlabel(r"H$_2$O (ppmv)")
ax0.set_ylim(ptlim)
ax0.set_ylabel("Potential Temperature (K)")
ax0.set_title(sub0,weight="bold",loc="left")
if f == 2:
ax0.legend(loc=1, markerscale=3.0, labelspacing=0.4, handletextpad=0.1, fontsize=15)
ax0.set_title("Flight "+str(f))
ax1.plot(dat_clr_fish['TIME']/3600.+5.75,dat_clr_fish['FIH2O'],'.', ms=ms, color=c[1],label="clear-sky FISH")
ax1.plot(dat_flash['TIME']/3600.+5.75,dat_flash['FLH2O'],'.', ms=ms, color=c[0],label="FLASH")
ax1.plot(dat_chiwis['TIME']/3600.+5.75,dat_chiwis['H2O'],'.', ms=ms, color=c[2],label="ChiWIS")
ax1.plot(dat_chiwis_oor['TIME']/3600.+5.75, dat_chiwis_oor['H2O'], '.', ms=ms, color="grey")
if f == 7:
ax1.set_xlabel("Kathmandu Local Time")
ax1.set_xlim([t1,t2])
locs = ax1.get_xticks()
labels = [str(datetime.timedelta(hours=x)).rsplit(':',1)[0] for x in locs]
ax1.set_xticklabels(labels)
ax1.set_ylabel(r"H$_2$O (ppmv)")
ax1.set_ylim(wvlim)
ax1.grid(which='major',linestyle=':')
ax1.set_title(sub1,weight="bold",loc="left")
ax1.set_title("Flight "+str(f))
ax2 = ax1.twinx()
dat_alt = dat[(dat['FLIGHT'] == f) & (dat['ALT'] > 10)]
ax2.plot(dat_alt['TIME']/3600.+5.75,dat_alt['ALT'],"-",color="green",lw=2)
ax2.set_ylabel("Altitude (km)",color="green")
ax2.tick_params(axis='y', colors="green")
ax2.set_yticks([10,12,14,16,18,20])
plt.rcParams.update({'font.size': 20})
fig.tight_layout()
plt.savefig("./Paper-Figures/fig5-f2f7.png",dpi=300)
plt.show() |
py | b403bcdc654b7e54505e2b2efa9fd5f25365def1 | '''MNIST test using keras'''
from keras.datasets import mnist
from keras.utils import to_categorical
from keras import models
from keras import layers
# Load data
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# print("Train")
# print(train_images.shape)
# print(len(train_labels))
# print(train_labels)
# print("Test")
# print(test_images.shape)
# print(len(test_labels))
# print(test_labels)
# Define our neural network
network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28, )))
network.add(layers.Dense(10, activation='softmax'))
# Compile the network
network.compile(
optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# Reshape data for training
train_images = train_images.reshape((60000, 28 * 28))
train_images = train_images.astype('float32') / 255
# Reshape data for testing
test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32') / 255
# Change labels to categorical for the model
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
# Actually train the model
network.fit(train_images, train_labels, epochs=5, batch_size=128)
# Test the model
test_loss, test_acc = network.evaluate(test_images, test_labels)
print("Accuracy: ", test_acc)
|
py | b403bd076412ede234c726c7d063dd46db5e6d7c | """restembed URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from core.views import save_embed
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', save_embed),
]
|
py | b403bd9ece3e6357616a16390e4d0f0c65f5c259 | #!/usr/bin/env python
"""
coding=utf-8
Build model for a dataset by identifying type of column along with its
respective parameters.
"""
from __future__ import print_function
from __future__ import division
from future.utils import with_metaclass
import copy
import time
import abc
import warnings
import numpy as np
from . import histogram_utils
from .base_column_profilers import BaseColumnProfiler
from .profiler_options import NumericalOptions
class abstractstaticmethod(staticmethod):
__slots__ = ()
def __init__(self, function):
super(abstractstaticmethod, self).__init__(function)
function.__isabstractmethod__ = True
__isabstractmethod__ = True
class NumericStatsMixin(with_metaclass(abc.ABCMeta, object)):
"""
Abstract mixin that provides numerical statistics for column profilers built
on BaseColumnProfiler. Represents a column in the dataset which contains
numerical values. Has subclasses itself.
"""
col_type = None
def __init__(self, options=None):
"""
Initialization of column base properties and itself.
:param options: Options for the numerical stats.
:type options: NumericalOptions
"""
self.options = None
if options and isinstance(options, NumericalOptions):
self.options = options
self.min = None
self.max = None
self.sum = 0
self.variance = 0
self.max_histogram_bin = 10000
self.histogram_bin_method_names = ['auto', 'fd', 'doane', 'scott',
'rice', 'sturges', 'sqrt']
self.histogram_methods = {}
for method in self.histogram_bin_method_names:
self.histogram_methods[method] = {
'total_loss': 0,
'current_loss': 0,
'histogram': {
'bin_counts': None,
'bin_edges': None
}
}
self.histogram_selection = None
self.quantiles = {
bin_num: None for bin_num in range(1000)
}
self.__calculations = {
"min": NumericStatsMixin._get_min,
"max": NumericStatsMixin._get_max,
"sum": NumericStatsMixin._get_sum,
"variance": NumericStatsMixin._get_variance,
"histogram_and_quantiles":
NumericStatsMixin._get_histogram_and_quantiles
}
self._filter_properties_w_options(self.__calculations, options)
def __getattribute__(self, name):
return super(NumericStatsMixin, self).__getattribute__(name)
def __getitem__(self, item):
return super(NumericStatsMixin, self).__getitem__(item)
@BaseColumnProfiler._timeit(name="histogram_and_quantiles")
def _add_helper_merge_profile_histograms(self, other1, other2):
"""
Adds histogram of two profiles together
:param other1: profile1 being added to self
:type other1: BaseColumnProfiler
:param other2: profile2 being added to self
:type other2: BaseColumnProfiler
:return: None
"""
# get available bin methods and set to current
bin_methods = list(set(other1.histogram_bin_method_names) &
set(other2.histogram_bin_method_names))
if not bin_methods:
raise ValueError('Profiles have no overlapping bin methods and '
'therefore cannot be added together.')
self.histogram_bin_method_names = bin_methods
for i, method in enumerate(self.histogram_bin_method_names):
combined_values = other1._histogram_to_array(
method) + other2._histogram_to_array(method)
bin_counts, bin_edges = self._get_histogram(
combined_values, method)
self.histogram_methods[method]['histogram']['bin_counts'] = \
bin_counts
self.histogram_methods[method]['histogram']['bin_edges'] = bin_edges
# Select histogram: always choose first profile selected method
# Either both profiles have the same selection or you at least use one
# of the profiles selected method
self.histogram_selection = other1.histogram_selection
self._get_quantiles()
def _add_helper(self, other1, other2):
"""
Helper function for merging profiles.
:param other1: profile1 being added to self
:param other2: profile2 being added to self
:return: None
"""
BaseColumnProfiler._merge_calculations(
self._NumericStatsMixin__calculations,
other1._NumericStatsMixin__calculations,
other2._NumericStatsMixin__calculations)
# Merge variance, histogram, min, max, and sum
if "variance" in self.__calculations.keys():
self.variance = self._merge_variance(
other1.match_count, other1.variance, other1.mean,
other2.match_count, other2.variance, other2.mean)
if "histogram_and_quantiles" in self.__calculations.keys():
if other1.histogram_selection is not None and \
other2.histogram_selection is not None:
self._add_helper_merge_profile_histograms(other1, other2)
elif other2.histogram_selection is None:
self.histogram_methods = other1.histogram_methods
self.quantiles = other1.quantiles
else:
self.histogram_methods = other2.histogram_methods
self.quantiles = other2.quantiles
if "min" in self.__calculations.keys():
if other1.min is not None and other2.min is not None:
self.min = min(other1.min, other2.min)
elif other2.min is None:
self.min = other1.min
else:
self.min = other2.min
if "max" in self.__calculations.keys():
if other1.max is not None and other2.max is not None:
self.max = max(other1.max, other2.max)
elif other2.max is None:
self.max = other1.max
else:
self.max = other2.max
if "sum" in self.__calculations.keys():
self.sum = other1.sum + other2.sum
@property
def mean(self):
if self.match_count == 0:
return 0
return float(self.sum) / self.match_count
@property
def stddev(self):
if self.match_count == 0:
return np.nan
return np.sqrt(self.variance)
def _update_variance(self, batch_mean, batch_var, batch_count):
"""
Calculate the combined variance of the current values and new dataset.
:param batch_mean: mean of new chunk
:param batch_var: variance of new chunk
:param batch_count: number of samples in new chunk
:return: combined variance
:rtype: float
"""
return self._merge_variance(self.match_count, self.variance, self.mean,
batch_count, batch_var, batch_mean)
@staticmethod
def _merge_variance(match_count1, variance1, mean1,
match_count2, variance2, mean2):
"""
Calculate the combined variance of the current values and new dataset.
:param match_count1: number of samples in chunk 1
:param mean1: mean of chunk 1
:param variance1: variance of chunk 1
:param match_count2: number of samples in chunk 2
:param mean2: mean of chunk 2
:param variance2: variance of chunk 2
:return: combined variance
:rtype: float
"""
if np.isnan(variance1):
variance1 = 0
if np.isnan(variance2):
variance2 = 0
if match_count1 < 1:
return variance2
elif match_count2 < 1:
return variance1
curr_count = match_count1
delta = mean2 - mean1
m_curr = variance1 * (curr_count - 1)
m_batch = variance2 * (match_count2 - 1)
M2 = m_curr + m_batch + delta ** 2 * curr_count * match_count2 / \
(curr_count + match_count2)
new_variance = M2 / (curr_count + match_count2 - 1)
return new_variance
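# Quick numeric check of the merge formula above (illustrative): merging chunk A = [1, 2, 3]
# (n=3, mean=2, var=1) with chunk B = [4, 5, 6, 7] (n=4, mean=5.5, var=5/3) gives
#     M2 = 1*2 + (5/3)*3 + 3.5**2 * (3*4/7) = 2 + 5 + 21 = 28
#     variance = 28 / (3 + 4 - 1) = 4.666...
# which matches the sample variance of the concatenated data [1, 2, 3, 4, 5, 6, 7].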
def _estimate_stats_from_histogram(self, method):
# test estimated mean and var
bin_counts = self.histogram_methods[method]['histogram']['bin_counts']
bin_edges = self.histogram_methods[method]['histogram']['bin_edges']
mids = 0.5 * (bin_edges[1:] + bin_edges[:-1])
mean = np.average(mids, weights=bin_counts)
var = np.average((mids - mean) ** 2, weights=bin_counts)
std = np.sqrt(var)
return mean, var, std
def _total_histogram_bin_variance(self, input_array, method):
# calculate total variance over all bins of a histogram
bin_edges = self.histogram_methods[method]['histogram']['bin_edges']
inds = np.digitize(input_array, bin_edges)
sum_var = 0
for i in range(1, len(bin_edges)):
elements_in_bin = input_array[inds == i]
bin_var = elements_in_bin.var() if len(elements_in_bin) > 0 else 0
sum_var += bin_var
return sum_var
@staticmethod
def _histogram_loss(diff_var, avg_diffvar, total_var,
avg_totalvar, run_time, avg_runtime):
norm_diff_var, norm_total_var, norm_runtime = 0, 0, 0
if avg_diffvar > 0:
norm_diff_var = float(diff_var - avg_diffvar) / avg_diffvar
if avg_totalvar > 0:
norm_total_var = float(total_var - avg_totalvar) / avg_totalvar
penalized_time = 1 # currently set as 1s
if (run_time - avg_runtime) >= penalized_time:
norm_runtime = float(run_time - avg_runtime) / avg_runtime
return norm_diff_var + norm_total_var + norm_runtime
def _select_method_for_histogram(self, current_exact_var, current_est_var,
current_total_var, current_run_time):
current_diff_var = np.abs(current_exact_var - current_est_var)
current_avg_diff_var = current_diff_var.mean()
current_avg_total_var = current_total_var.mean()
current_avg_run_time = current_run_time.mean()
min_total_loss = np.inf
selected_method = ''
for method_id, method in enumerate(self.histogram_bin_method_names):
self.histogram_methods[method]['current_loss'] = \
self._histogram_loss(current_diff_var[method_id],
current_avg_diff_var,
current_total_var[method_id],
current_avg_total_var,
current_run_time[method_id],
current_avg_run_time)
self.histogram_methods[method]['total_loss'] += \
self.histogram_methods[method]['current_loss']
if min_total_loss > self.histogram_methods[method]['total_loss']:
min_total_loss = self.histogram_methods[method]['total_loss']
selected_method = method
return selected_method
def _histogram_to_array(self, bins):
# Extend histogram to array format
bin_counts = self.histogram_methods[bins]['histogram']['bin_counts']
bin_edges = self.histogram_methods[bins]['histogram']['bin_edges']
hist_to_array = [[bin_edge] * bin_count for bin_count, bin_edge in
zip(bin_counts[:-1], bin_edges[:-2])]
hist_to_array.append([bin_edges[-2]] * int(bin_counts[-1] / 2))
hist_to_array.append([bin_edges[-1]] *
(bin_counts[-1] - int(bin_counts[-1] / 2)))
array_flatten = [element for sublist in hist_to_array for
element in sublist]
return array_flatten
def _get_histogram(self, values, bin_method):
"""
Get histogram from values and bin method, using np.histogram
:param values: input values
:type values: np.array or pd.Series
:param bin_method: bin method, e.g., sqrt, rice, etc
:type bin_method: str
:return: bin edges and bin counts
"""
if len(np.unique(values)) == 1:
bin_counts = np.array([len(values)])
if isinstance(values, (np.ndarray, list)):
unique_value = values[0]
else:
unique_value = values.iloc[0]
bin_edges = np.array([unique_value, unique_value])
else:
values, weights = histogram_utils._ravel_and_check_weights(
values, None)
_, n_equal_bins = histogram_utils._get_bin_edges(
values, bin_method, None, None)
n_equal_bins = min(n_equal_bins, self.max_histogram_bin)
bin_counts, bin_edges = np.histogram(values, bins=n_equal_bins)
return bin_counts, bin_edges
def _merge_histogram(self, values, bins):
# values is the current array of values,
# that needs to be updated to the accumulated histogram
combined_values = values + self._histogram_to_array(bins)
bin_counts, bin_edges = self._get_histogram(combined_values, bins)
self.histogram_methods[bins]['histogram']['bin_counts'] = bin_counts
self.histogram_methods[bins]['histogram']['bin_edges'] = bin_edges
def _update_histogram(self, df_series):
"""
Update histogram for each method and the combined method. The algorithm
'Follow the best expert' is applied to select the combined method:
N. Cesa-Bianchi and G. Lugosi, Prediction, learning, and games.
Cambridge University Press, 2006.
R. D. Kleinberg, A. Niculescu-Mizil, and Y. Sharma, "Regret bounds
for sleeping experts and bandits," in Proceedings of the 21st Annual
Conference on Learning Theory - COLT 2008, Helsinki, Finland, 2008,
pp. 425–436.
The idea is to select the current best method based on accumulated
losses up to the current time: all methods are compared using the
accumulated losses, and the best method with minimal loss is picked
:param df_series: a given column
:type df_series: pandas.core.series.Series
:return:
"""
df_series = df_series.replace([np.inf, -np.inf], np.nan).dropna()
if df_series.empty:
return
current_est_var = np.zeros(len(self.histogram_bin_method_names))
current_exact_var = np.zeros(len(self.histogram_bin_method_names))
current_total_var = np.zeros(len(self.histogram_bin_method_names))
current_run_time = np.zeros(len(self.histogram_bin_method_names))
for i, method in enumerate(self.histogram_bin_method_names):
# update histogram for the method
start_time = time.time()
bin_counts, bin_edges = self._get_histogram(df_series, method)
if self.histogram_methods[method]['histogram']['bin_counts'] is None:
self.histogram_methods[method]['histogram']['bin_counts'] = bin_counts
self.histogram_methods[method]['histogram']['bin_edges'] = bin_edges
else:
self._merge_histogram(df_series.tolist(), bins=method)
run_time = time.time() - start_time
# update loss for the method
current_est_var[i] = self._estimate_stats_from_histogram(method)[1]
current_exact_var = df_series.values.var()
current_total_var[i] = self._total_histogram_bin_variance(
df_series.values, method)
current_run_time[i] = run_time
# select the best method and update the total loss
selected_method = self._select_method_for_histogram(
current_exact_var, current_est_var,
current_total_var, current_run_time)
self.histogram_selection = selected_method
def _get_percentile(self, percentile):
"""
Get the value below which the given percentage of values in the
distribution fall.
:param percentile: percentage of values that should fall below the
returned value
:type percentile: float
:return: value below which the given percentage of the distribution falls
"""
selected_method = self.histogram_selection
bin_counts = \
self.histogram_methods[selected_method]['histogram']['bin_counts']
bin_edges = \
self.histogram_methods[selected_method]['histogram']['bin_edges']
num_edges = len(bin_edges)
if percentile == 100:
return bin_edges[-1]
percentile = float(percentile) / 100
accumulated_count = 0
bin_counts = bin_counts.astype(float)
normalized_bin_counts = bin_counts / np.sum(bin_counts)
bin_id = -1
# keep updating the total counts until it is
# close to the designated percentile
while accumulated_count < percentile:
bin_id += 1
accumulated_count += normalized_bin_counts[bin_id]
if accumulated_count == percentile:
if (num_edges % 2) == 0:
return 0.5 * (bin_edges[bin_id] + bin_edges[bin_id + 1])
else:
return bin_edges[bin_id + 1]
else:
if bin_id == 0:
return 0.5 * (bin_edges[0] + bin_edges[1])
if (num_edges % 2) == 0:
return 0.5 * (bin_edges[bin_id - 1] + bin_edges[bin_id])
else:
return bin_edges[bin_id]
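# Worked illustration of the walk above (assumed values): with bin_edges = [0, 1, 2] and
# bin_counts = [2, 8], the normalized counts are [0.2, 0.8]; for percentile=50 the loop
# stops at bin_id=1 with accumulated_count=1.0, and since num_edges=3 is odd the method
# returns bin_edges[1] = 1.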
def _get_quantiles(self):
"""
Retrieves the quantile set based on the specified number of quantiles
in self.quantiles and stores the computed values back into self.quantiles.
:return: None
"""
size_bins = 100 / len(self.quantiles)
for bin_num in range(len(self.quantiles) - 1):
self.quantiles[bin_num] = self._get_percentile(
percentile=((bin_num + 1) * size_bins))
def _update_helper(self, df_series_clean, profile):
"""
Method for updating the base numerical profile properties with a cleaned
dataset and the known null parameters of the dataset.
:param df_series_clean: df series with nulls removed
:type df_series_clean: pandas.core.series.Series
:param profile: numerical profile dictionary
:type profile: dict
:return: None
"""
if df_series_clean.empty:
return
prev_dependent_properties = {"mean": self.mean}
subset_properties = copy.deepcopy(profile)
df_series_clean = df_series_clean.astype(float)
super(NumericStatsMixin, self)._perform_property_calcs(self.__calculations,
df_series=df_series_clean,
prev_dependent_properties=prev_dependent_properties,
subset_properties=subset_properties)
@BaseColumnProfiler._timeit(name="min")
def _get_min(self, df_series, prev_dependent_properties,
subset_properties):
min_value = df_series.min()
self.min = min_value if not self.min else min(self.min, min_value)
subset_properties["min"] = min_value
@BaseColumnProfiler._timeit(name="max")
def _get_max(self, df_series, prev_dependent_properties,
subset_properties):
max_value = df_series.max()
self.max = max_value if not self.max else max(self.max, max_value)
subset_properties["max"] = max_value
@BaseColumnProfiler._timeit(name="sum")
def _get_sum(self, df_series, prev_dependent_properties,
subset_properties):
sum_value = df_series.sum()
subset_properties["sum"] = sum_value
self.sum = self.sum + sum_value
@BaseColumnProfiler._timeit(name="variance")
def _get_variance(self, df_series, prev_dependent_properties,
subset_properties):
variance = df_series.var()
subset_properties["variance"] = variance
sum_value = subset_properties["sum"]
batch_count = subset_properties["match_count"]
batch_mean = 0. if not batch_count else \
float(sum_value) / batch_count
self.variance = self._merge_variance(self.match_count, self.variance,
prev_dependent_properties["mean"],
batch_count,
variance,
batch_mean)
@BaseColumnProfiler._timeit(name="histogram_and_quantiles")
def _get_histogram_and_quantiles(self, df_series,
prev_dependent_properties,
subset_properties):
try:
self._update_histogram(df_series)
if self.histogram_selection is not None:
self._get_quantiles()
except BaseException:
warnings.warn(
'Histogram error. Histogram and quantile results will not be '
'available')
@abc.abstractmethod
def update(self, df_series):
"""
Abstract Method for updating the numerical profile properties with an
uncleaned dataset.
:param df_series: df series with nulls removed
:type df_series: pandas.core.series.Series
:return: None
"""
raise NotImplementedError()
@staticmethod
def is_float(x):
"""
For "0.80" this function returns True
For "1.00" this function returns True
For "1" this function returns True
:param x: string to test
:type x: str
:return: if is float or not
:rtype: bool
"""
try:
float(x)
except ValueError:
return False
else:
return True
@staticmethod
def is_int(x):
"""
For "0.80" This function returns False
For "1.00" This function returns True
For "1" this function returns True
:param x: string to test
:type x: str
:return: if is integer or not
:rtype: bool
"""
try:
a = float(x)
b = int(a)
except (ValueError, OverflowError, TypeError):
return False
else:
return a == b
|
py | b403bdb183d540bd49b2953287d3504c4423a42c | from keras import Model
from keras.optimizers import SGD, Adam
from keras.layers import Input, Conv2D, Dense, Flatten
import tensorflow as tf
import numpy as np
from collections import deque
import random
class DQNAgent:
def __init__(self, state_size, action_size):
# self.rows = rows
# self.cols = cols
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=100000)
self.gamma = 0.95 #discount rate
self.epsilon = 1.0 #exploration rate
self.epsilon_min = 0.001
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.model = self.create_model()
def create_model(self):
inputs = Input(shape=(self.state_size,))
hidden = Dense(128, activation='relu')(inputs)
outputs = Dense(self.action_size, activation='softmax')(hidden)
model = Model(inputs=inputs, outputs=outputs)
# inputs = Input(shape=(3, self.rows, self.cols))
# conv1 = Conv2D(16, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu', data_format='channels_first')(inputs)
# #conv2 = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu', data_format='channels_first')(conv1)
# flat = Flatten()(conv1)
# hidden = Dense(128, activation='relu')(flat)
# outputs = Dense(self.action_size, activation='softmax')(hidden)
# model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=Adam(lr=self.learning_rate), loss='mse')
model.summary()
return model
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def get_action(self, state):
if np.random.rand() <= self.epsilon:
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
idx = random.randrange(self.action_size)
act_values = [0 for i in range(self.action_size)]
act_values[idx] = 1
return [act_values]
return self.model.predict(state)
def train_step(self, states, actions, rewards, next_states, dones):
targets = self.model.predict(states)
for i in range(len(dones)):
q_new = rewards[i]
if not dones[i]:
next_state = np.expand_dims(next_states[i], axis=0)
q_new = (rewards[i] + self.gamma * np.amax(self.model.predict(next_state)[0]))
targets[i][np.argmax(actions[i])] = q_new
history = self.model.fit(states, targets, epochs=1, verbose=0)
loss = history.history['loss'][0]
return loss
def train_short_memory(self, state, action, reward, next_state, done):
return self.train_step(state, action, reward, next_state, done)
def train_long_memory(self, batch_size):
minibatch = random.sample(self.memory, min(len(self.memory), batch_size))
states, actions, rewards, next_states, dones = zip(*minibatch)
states = np.squeeze(states)
actions = np.squeeze(actions)
rewards = np.squeeze(rewards)
next_states = np.squeeze(next_states)
dones = np.squeeze(dones)
return self.train_step(states, actions, rewards, next_states, dones)
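    # A minimal, illustrative training-loop sketch for this agent. `env` and
    # its reset()/step() API are assumptions (any Gym-style environment with
    # discrete actions would fit); nothing below is required by the class:
    #
    #     agent = DQNAgent(state_size=11, action_size=3)
    #     for episode in range(1000):
    #         state = env.reset()
    #         done = False
    #         while not done:
    #             action = agent.get_action(np.expand_dims(state, axis=0))
    #             next_state, reward, done, _ = env.step(np.argmax(action))
    #             agent.remember(state, action, reward, next_state, done)
    #             state = next_state
    #         agent.train_long_memory(batch_size=64)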
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name) |
py | b403bde36419b459b626804c5ca8918d64c2d701 | import copy
import itertools
import logging
from collections import namedtuple
from galaxy import (
exceptions,
model,
util,
)
from galaxy.model.dataset_collections import (
matching,
subcollections,
)
from galaxy.util import permutations
from . import visit_input_values
log = logging.getLogger(__name__)
WorkflowParameterExpansion = namedtuple(
"WorkflowParameterExpansion", ["param_combinations", "param_keys", "input_combinations"]
)
class ParamKey:
def __init__(self, step_id, key):
self.step_id = step_id
self.key = key
class InputKey:
def __init__(self, input_id):
self.input_id = input_id
def expand_workflow_inputs(param_inputs, inputs=None):
"""
Expands incoming encoded multiple payloads, into the set of all individual payload combinations
>>> expansion = expand_workflow_inputs({'1': {'input': {'batch': True, 'product': True, 'values': [{'hid': '1'}, {'hid': '2'}] }}})
>>> print(["%s" % (p['1']['input']['hid']) for p in expansion.param_combinations])
['1', '2']
>>> expansion = expand_workflow_inputs({'1': {'input': {'batch': True, 'values': [{'hid': '1'}, {'hid': '2'}] }}})
>>> print(["%s" % (p['1']['input']['hid']) for p in expansion.param_combinations])
['1', '2']
>>> expansion = expand_workflow_inputs({'1': {'input': {'batch': True, 'values': [{'hid': '1'}, {'hid': '2'}] }}, '2': {'input': {'batch': True, 'values': [{'hid': '3'}, {'hid': '4'}] }}})
>>> print(["%s%s" % (p['1']['input']['hid'], p['2']['input']['hid']) for p in expansion.param_combinations])
['13', '24']
>>> expansion = expand_workflow_inputs({'1': {'input': {'batch': True, 'product': True, 'values': [{'hid': '1'}, {'hid': '2'}] }}, '2': {'input': {'batch': True, 'values': [{'hid': '3'}, {'hid': '4'}, {'hid': '5'}] }}})
>>> print(["%s%s" % (p['1']['input']['hid'], p['2']['input']['hid']) for p in expansion.param_combinations])
['13', '23', '14', '24', '15', '25']
>>> expansion = expand_workflow_inputs({'1': {'input': {'batch': True, 'product': True, 'values': [{'hid': '1'}, {'hid': '2'}] }}, '2': {'input': {'batch': True, 'product': True, 'values': [{'hid': '3'}, {'hid': '4'}, {'hid': '5'}] }}, '3': {'input': {'batch': True, 'product': True, 'values': [{'hid': '6'}, {'hid': '7'}, {'hid': '8'}] }}})
>>> print(["%s%s%s" % (p['1']['input']['hid'], p['2']['input']['hid'], p['3']['input']['hid']) for p in expansion.param_combinations])
['136', '137', '138', '146', '147', '148', '156', '157', '158', '236', '237', '238', '246', '247', '248', '256', '257', '258']
>>> expansion = expand_workflow_inputs(None, inputs={'myinput': {'batch': True, 'product': True, 'values': [{'hid': '1'}, {'hid': '2'}] }})
>>> print(["%s" % (p['myinput']['hid']) for p in expansion.input_combinations])
['1', '2']
"""
param_inputs = param_inputs or {}
inputs = inputs or {}
linked_n = None
linked = []
product = []
linked_keys = []
product_keys = []
def is_batch(value):
return (
isinstance(value, dict)
and "batch" in value
and value["batch"] is True
and "values" in value
and isinstance(value["values"], list)
)
for step_id, step in sorted(param_inputs.items()):
for key, value in sorted(step.items()):
if is_batch(value):
nval = len(value["values"])
if "product" in value and value["product"] is True:
product.append(value["values"])
product_keys.append(ParamKey(step_id, key))
else:
if linked_n is None:
linked_n = nval
elif linked_n != nval or nval == 0:
raise exceptions.RequestParameterInvalidException(
"Failed to match linked batch selections. Please select equal number of data files."
)
linked.append(value["values"])
linked_keys.append(ParamKey(step_id, key))
# Force it to a list to allow modification...
input_items = list(inputs.items())
for input_id, value in input_items:
if is_batch(value):
nval = len(value["values"])
if "product" in value and value["product"] is True:
product.append(value["values"])
product_keys.append(InputKey(input_id))
else:
if linked_n is None:
linked_n = nval
elif linked_n != nval or nval == 0:
raise exceptions.RequestParameterInvalidException(
"Failed to match linked batch selections. Please select equal number of data files."
)
linked.append(value["values"])
linked_keys.append(InputKey(input_id))
elif isinstance(value, dict) and "batch" in value:
                # remove the batch wrapper and render the simplified input form
                # that the rest of the workflow code expects
inputs[input_id] = value["values"][0]
param_combinations = []
input_combinations = []
params_keys = []
linked = linked or [[None]]
product = product or [[None]]
linked_keys = linked_keys or [None]
product_keys = product_keys or [None]
for linked_values, product_values in itertools.product(zip(*linked), itertools.product(*product)):
new_params = copy.deepcopy(param_inputs)
new_inputs = copy.deepcopy(inputs)
new_keys = []
for input_key, value in list(zip(linked_keys, linked_values)) + list(zip(product_keys, product_values)):
if input_key:
if isinstance(input_key, ParamKey):
step_id = input_key.step_id
key = input_key.key
assert step_id is not None
new_params[step_id][key] = value
if "hid" in value:
new_keys.append(str(value["hid"]))
else:
input_id = input_key.input_id
assert input_id is not None
new_inputs[input_id] = value
if "hid" in value:
new_keys.append(str(value["hid"]))
params_keys.append(new_keys)
param_combinations.append(new_params)
input_combinations.append(new_inputs)
return WorkflowParameterExpansion(param_combinations, params_keys, input_combinations)
def process_key(incoming_key, incoming_value, d):
key_parts = incoming_key.split("|")
if len(key_parts) == 1:
# Regular parameter
if incoming_key in d and not incoming_value:
# In case we get an empty repeat after we already filled in a repeat element
return
d[incoming_key] = incoming_value
elif key_parts[0].rsplit("_", 1)[-1].isdigit():
# Repeat
input_name, index = key_parts[0].rsplit("_", 1)
index = int(index)
d.setdefault(input_name, [])
newlist = [{} for _ in range(index + 1)]
d[input_name].extend(newlist[len(d[input_name]) :])
subdict = d[input_name][index]
process_key("|".join(key_parts[1:]), incoming_value=incoming_value, d=subdict)
else:
# Section / Conditional
input_name = key_parts[0]
subdict = {}
d[input_name] = subdict
process_key("|".join(key_parts[1:]), incoming_value=incoming_value, d=subdict)
def expand_meta_parameters(trans, tool, incoming):
"""
Take in a dictionary of raw incoming parameters and expand to a list
of expanded incoming parameters (one set of parameters per tool
execution).
"""
for key in list(incoming.keys()):
if key.endswith("|__identifier__"):
incoming.pop(key)
# If we're going to multiply input dataset combinations
# order matters, so the following reorders incoming
# according to tool.inputs (which is ordered).
incoming_copy = incoming.copy()
nested_dict = {}
for incoming_key, incoming_value in incoming_copy.items():
if not incoming_key.startswith("__"):
process_key(incoming_key, incoming_value=incoming_value, d=nested_dict)
reordered_incoming = {}
def visitor(input, value, prefix, prefixed_name, prefixed_label, error, **kwargs):
if prefixed_name in incoming_copy:
reordered_incoming[prefixed_name] = incoming_copy[prefixed_name]
del incoming_copy[prefixed_name]
visit_input_values(inputs=tool.inputs, input_values=nested_dict, callback=visitor)
reordered_incoming.update(incoming_copy)
def classifier(input_key):
value = incoming[input_key]
if isinstance(value, dict) and "values" in value:
# Explicit meta wrapper for inputs...
is_batch = value.get("batch", False)
is_linked = value.get("linked", True)
if is_batch and is_linked:
classification = permutations.input_classification.MATCHED
elif is_batch:
classification = permutations.input_classification.MULTIPLIED
else:
classification = permutations.input_classification.SINGLE
if __collection_multirun_parameter(value):
collection_value = value["values"][0]
values = __expand_collection_parameter(
trans, input_key, collection_value, collections_to_match, linked=is_linked
)
else:
values = value["values"]
else:
classification = permutations.input_classification.SINGLE
values = value
return classification, values
collections_to_match = matching.CollectionsToMatch()
    # Stick in an unexpanded version of multirun keys so they can be replaced
    # by expand_multi_inputs.
incoming_template = reordered_incoming
expanded_incomings = permutations.expand_multi_inputs(incoming_template, classifier)
if collections_to_match.has_collections():
collection_info = trans.app.dataset_collection_manager.match_collections(collections_to_match)
else:
collection_info = None
return expanded_incomings, collection_info
def __expand_collection_parameter(trans, input_key, incoming_val, collections_to_match, linked=False):
    # If subcollection multirun of data_collection param - value will
# be "hdca_id|subcollection_type" else it will just be hdca_id
if "|" in incoming_val:
encoded_hdc_id, subcollection_type = incoming_val.split("|", 1)
else:
try:
src = incoming_val["src"]
if src != "hdca":
raise exceptions.ToolMetaParameterException(f"Invalid dataset collection source type {src}")
encoded_hdc_id = incoming_val["id"]
subcollection_type = incoming_val.get("map_over_type", None)
except TypeError:
encoded_hdc_id = incoming_val
subcollection_type = None
hdc_id = trans.app.security.decode_id(encoded_hdc_id)
hdc = trans.sa_session.query(model.HistoryDatasetCollectionAssociation).get(hdc_id)
collections_to_match.add(input_key, hdc, subcollection_type=subcollection_type, linked=linked)
if subcollection_type is not None:
subcollection_elements = subcollections.split_dataset_collection_instance(hdc, subcollection_type)
return subcollection_elements
else:
hdas = []
for element in hdc.collection.dataset_elements:
hda = element.dataset_instance
hda.element_identifier = element.element_identifier
hdas.append(hda)
return hdas
def __collection_multirun_parameter(value):
is_batch = value.get("batch", False)
if not is_batch:
return False
batch_values = util.listify(value["values"])
if len(batch_values) == 1:
batch_over = batch_values[0]
if isinstance(batch_over, dict) and ("src" in batch_over) and (batch_over["src"] in {"hdca", "dce"}):
return True
return False
|
py | b403be5c1bd3a4ff15a2210e9a3a1c050763d435 | # -*- coding:utf-8 -*-
"""
Binance Trade module.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md
Author: HuangTao
Date: 2018/08/09
Email: [email protected]
"""
import json
import copy
import hmac
import hashlib
from urllib.parse import urljoin
from aioquant.error import Error
from aioquant.utils import tools
from aioquant.utils import logger
from aioquant.order import Order
from aioquant.tasks import SingleTask, LoopRunTask
from aioquant.utils.decorator import async_method_locker
from aioquant.utils.web import Websocket, AsyncHttpRequests
from aioquant.order import ORDER_ACTION_SELL, ORDER_ACTION_BUY, ORDER_TYPE_LIMIT, ORDER_TYPE_MARKET
from aioquant.order import ORDER_STATUS_SUBMITTED, ORDER_STATUS_PARTIAL_FILLED, ORDER_STATUS_FILLED, \
ORDER_STATUS_CANCELED, ORDER_STATUS_FAILED
__all__ = ("BinanceRestAPI", "BinanceTrade", )
class BinanceRestAPI:
"""Binance REST API client.
Attributes:
access_key: Account's ACCESS KEY.
secret_key: Account's SECRET KEY.
host: HTTP request host, default `https://api.binance.com`.
"""
def __init__(self, access_key, secret_key, host=None):
"""Initialize REST API client."""
self._host = host or "https://api.binance.com"
self._access_key = access_key
self._secret_key = secret_key
async def ping(self):
"""Test connectivity.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/ping"
success, error = await self.request("GET", uri)
return success, error
async def get_server_time(self):
"""Get server time.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/time"
success, error = await self.request("GET", uri)
return success, error
async def get_exchange_info(self):
"""Get exchange information.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/exchangeInfo"
success, error = await self.request("GET", uri)
return success, error
async def get_orderbook(self, symbol, limit=10):
"""Get latest orderbook information.
Args:
symbol: Symbol name, e.g. `BTCUSDT`.
limit: Number of results per request. (default 10, max 5000.)
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/depth"
params = {
"symbol": symbol,
"limit": limit
}
success, error = await self.request("GET", uri, params=params)
return success, error
async def get_trade(self, symbol, limit=500):
"""Get latest trade information.
Args:
symbol: Symbol name, e.g. `BTCUSDT`.
limit: Number of results per request. (Default 500, max 1000.)
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/trades"
params = {
"symbol": symbol,
"limit": limit
}
success, error = await self.request("GET", uri, params=params)
return success, error
async def get_kline(self, symbol, interval="1m", start=None, end=None, limit=500):
"""Get kline information.
Args:
symbol: Symbol name, e.g. `BTCUSDT`.
interval: Kline interval type, valid values: 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w, 1M
start: Start timestamp(millisecond).
end: End timestamp(millisecond).
limit: Number of results per request. (Default 500, max 1000.)
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
Notes:
If start and end are not sent, the most recent klines are returned.
"""
uri = "/api/v3/klines"
params = {
"symbol": symbol,
"interval": interval,
"limit": limit
}
if start and end:
params["startTime"] = start
params["endTime"] = end
success, error = await self.request("GET", uri, params=params)
return success, error
async def get_average_price(self, symbol):
"""Current average price for a symbol.
Args:
symbol: Symbol name, e.g. `BTCUSDT`.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/avgPrice"
params = {
"symbol": symbol
}
success, error = await self.request("GET", uri, params=params)
return success, error
async def get_user_account(self):
"""Get user account information.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/account"
ts = tools.get_cur_timestamp_ms()
params = {
"timestamp": str(ts)
}
success, error = await self.request("GET", uri, params, auth=True)
return success, error
async def create_order(self, action, symbol, price, quantity, client_order_id=None):
"""Create an order.
Args:
action: Trade direction, `BUY` or `SELL`.
symbol: Symbol name, e.g. `BTCUSDT`.
price: Price of each contract.
quantity: The buying or selling quantity.
client_order_id: Client order id.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/order"
data = {
"symbol": symbol,
"side": action,
"type": "LIMIT",
"timeInForce": "GTC",
"quantity": quantity,
"price": price,
"recvWindow": "5000",
"newOrderRespType": "FULL",
"timestamp": tools.get_cur_timestamp_ms()
}
if client_order_id:
data["newClientOrderId"] = client_order_id
success, error = await self.request("POST", uri, body=data, auth=True)
return success, error
async def revoke_order(self, symbol, order_id, client_order_id=None):
"""Cancelling an unfilled order.
Args:
symbol: Symbol name, e.g. `BTCUSDT`.
order_id: Order id.
client_order_id: Client order id.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/order"
params = {
"symbol": symbol,
"orderId": order_id,
"timestamp": tools.get_cur_timestamp_ms()
}
if client_order_id:
params["origClientOrderId"] = client_order_id
success, error = await self.request("DELETE", uri, params=params, auth=True)
return success, error
async def get_order_status(self, symbol, order_id, client_order_id):
"""Get order details by order id.
Args:
symbol: Symbol name, e.g. `BTCUSDT`.
order_id: Order id.
client_order_id: Client order id.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/order"
params = {
"symbol": symbol,
"orderId": str(order_id),
"origClientOrderId": client_order_id,
"timestamp": tools.get_cur_timestamp_ms()
}
success, error = await self.request("GET", uri, params=params, auth=True)
return success, error
async def get_all_orders(self, symbol):
"""Get all account orders; active, canceled, or filled.
Args:
symbol: Symbol name, e.g. `BTCUSDT`.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/allOrders"
params = {
"symbol": symbol,
"timestamp": tools.get_cur_timestamp_ms()
}
success, error = await self.request("GET", uri, params=params, auth=True)
return success, error
async def get_open_orders(self, symbol):
"""Get all open order information.
Args:
symbol: Symbol name, e.g. `BTCUSDT`.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/openOrders"
params = {
"symbol": symbol,
"timestamp": tools.get_cur_timestamp_ms()
}
success, error = await self.request("GET", uri, params=params, auth=True)
return success, error
async def get_listen_key(self):
"""Get listen key, start a new user data stream.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/userDataStream"
success, error = await self.request("POST", uri)
return success, error
async def put_listen_key(self, listen_key):
"""Keepalive a user data stream to prevent a time out.
Args:
listen_key: Listen key.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/userDataStream"
params = {
"listenKey": listen_key
}
success, error = await self.request("PUT", uri, params=params)
return success, error
async def delete_listen_key(self, listen_key):
"""Delete a listen key.
Args:
listen_key: Listen key.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/v3/userDataStream"
params = {
"listenKey": listen_key
}
success, error = await self.request("DELETE", uri, params=params)
return success, error
async def request(self, method, uri, params=None, body=None, headers=None, auth=False):
"""Do HTTP request.
Args:
method: HTTP request method. `GET` / `POST` / `DELETE` / `PUT`.
uri: HTTP request uri.
params: HTTP query params.
body: HTTP request body.
headers: HTTP request headers.
auth: If this request requires authentication.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
url = urljoin(self._host, uri)
data = {}
if params:
data.update(params)
if body:
data.update(body)
if data:
query = "&".join(["=".join([str(k), str(v)]) for k, v in data.items()])
else:
query = ""
if auth and query:
signature = hmac.new(self._secret_key.encode(), query.encode(), hashlib.sha256).hexdigest()
query += "&signature={s}".format(s=signature)
if query:
url += ("?" + query)
if not headers:
headers = {}
headers["X-MBX-APIKEY"] = self._access_key
_, success, error = await AsyncHttpRequests.fetch(method, url, headers=headers, timeout=10, verify_ssl=False)
return success, error
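# A hedged usage sketch for BinanceRestAPI; the keys are placeholders and the
# calls simply mirror the methods defined above (they would normally be awaited
# inside an asyncio event loop or an aioquant task):
#
#     api = BinanceRestAPI(access_key="YOUR_KEY", secret_key="YOUR_SECRET")
#     success, error = await api.get_orderbook("BTCUSDT", limit=10)
#     if not error:
#         success, error = await api.create_order("BUY", "BTCUSDT", "30000", "0.001")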
class BinanceTrade:
"""Binance Trade module. You can initialize trade object with some attributes in kwargs.
Attributes:
account: Account name for this trade exchange.
        strategy: The name you want to give this strategy.
symbol: Symbol name for your trade.
host: HTTP request host. (default "https://api.binance.com")
wss: Websocket address. (default "wss://stream.binance.com:9443")
access_key: Account's ACCESS KEY.
        secret_key: Account's SECRET KEY.
        order_update_callback: You can use this param to specify an async callback function when initializing the Trade
            module. `order_update_callback` is like `async def on_order_update_callback(order: Order): pass` and this
            callback function will be executed asynchronously when an order's state is updated.
        init_callback: You can use this param to specify an async callback function when initializing the Trade
            module. `init_callback` is like `async def on_init_callback(success: bool, **kwargs): pass`
            and this callback function will be executed asynchronously after the Trade module object is initialized.
        error_callback: You can use this param to specify an async callback function when initializing the Trade
            module. `error_callback` is like `async def on_error_callback(error: Error, **kwargs): pass`
            and this callback function will be executed asynchronously when an error occurs while the trade module is running.
"""
def __init__(self, **kwargs):
"""Initialize Trade module."""
e = None
if not kwargs.get("account"):
e = Error("param account miss")
if not kwargs.get("strategy"):
e = Error("param strategy miss")
if not kwargs.get("symbol"):
e = Error("param symbol miss")
if not kwargs.get("host"):
kwargs["host"] = "https://api.binance.com"
if not kwargs.get("wss"):
kwargs["wss"] = "wss://stream.binance.com:9443"
if not kwargs.get("access_key"):
e = Error("param access_key miss")
if not kwargs.get("secret_key"):
e = Error("param secret_key miss")
if e:
logger.error(e, caller=self)
SingleTask.run(kwargs["error_callback"], e)
SingleTask.run(kwargs["init_callback"], False)
return
self._account = kwargs["account"]
self._strategy = kwargs["strategy"]
self._platform = kwargs["platform"]
self._symbol = kwargs["symbol"]
self._host = kwargs["host"]
self._wss = kwargs["wss"]
self._access_key = kwargs["access_key"]
self._secret_key = kwargs["secret_key"]
self._order_update_callback = kwargs.get("order_update_callback")
self._init_callback = kwargs.get("init_callback")
self._error_callback = kwargs.get("error_callback")
        self._raw_symbol = self._symbol.replace("/", "") # Raw symbol name, same as Binance Exchange.
self._listen_key = None # Listen key for Websocket authentication.
self._assets = {} # Asset data. e.g. {"BTC": {"free": "1.1", "locked": "2.2", "total": "3.3"}, ... }
self._orders = {} # Order data. e.g. {order_id: order, ... }
# Initialize our REST API client.
self._rest_api = BinanceRestAPI(self._access_key, self._secret_key, self._host)
# Create a loop run task to reset listen key every 30 minutes.
LoopRunTask.register(self._reset_listen_key, 60 * 30)
# Create a coroutine to initialize Websocket connection.
SingleTask.run(self._init_websocket)
LoopRunTask.register(self._send_heartbeat_msg, 10)
async def _send_heartbeat_msg(self, *args, **kwargs):
await self._ws.ping()
@property
def orders(self):
return copy.copy(self._orders)
@property
def rest_api(self):
return self._rest_api
async def _init_websocket(self):
"""Initialize Websocket connection."""
# Get listen key first.
success, error = await self._rest_api.get_listen_key()
if error:
e = Error("get listen key failed: {}".format(error))
logger.error(e, caller=self)
SingleTask.run(self._error_callback, e)
SingleTask.run(self._init_callback, False)
return
self._listen_key = success["listenKey"]
uri = "/ws/" + self._listen_key
url = urljoin(self._wss, uri)
self._ws = Websocket(url, self.connected_callback, process_callback=self.process)
async def _reset_listen_key(self, *args, **kwargs):
"""Reset listen key."""
if not self._listen_key:
logger.error("listen key not initialized!", caller=self)
return
await self._rest_api.put_listen_key(self._listen_key)
logger.info("reset listen key success!", caller=self)
async def connected_callback(self):
"""After websocket connection created successfully, pull back all open order information."""
logger.info("Websocket connection authorized successfully.", caller=self)
order_infos, error = await self._rest_api.get_open_orders(self._raw_symbol)
if error:
e = Error("get open orders error: {}".format(error))
SingleTask.run(self._error_callback, e)
SingleTask.run(self._init_callback, False)
return
for order_info in order_infos:
if order_info["status"] == "NEW":
status = ORDER_STATUS_SUBMITTED
elif order_info["status"] == "PARTIALLY_FILLED":
status = ORDER_STATUS_PARTIAL_FILLED
elif order_info["status"] == "FILLED":
status = ORDER_STATUS_FILLED
elif order_info["status"] == "CANCELED":
status = ORDER_STATUS_CANCELED
elif order_info["status"] == "REJECTED":
status = ORDER_STATUS_FAILED
elif order_info["status"] == "EXPIRED":
status = ORDER_STATUS_FAILED
else:
logger.warn("unknown status:", order_info, caller=self)
SingleTask.run(self._error_callback, "order status error.")
continue
order_id = str(order_info["orderId"])
info = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"order_id": order_id,
"client_order_id": order_info["clientOrderId"],
"action": ORDER_ACTION_BUY if order_info["side"] == "BUY" else ORDER_ACTION_SELL,
"order_type": ORDER_TYPE_LIMIT if order_info["type"] == "LIMIT" else ORDER_TYPE_MARKET,
"symbol": self._symbol,
"price": order_info["price"],
"quantity": order_info["origQty"],
"remain": float(order_info["origQty"]) - float(order_info["executedQty"]),
"status": status,
"avg_price": order_info["price"],
"ctime": order_info["time"],
"utime": order_info["updateTime"]
}
order = Order(**info)
self._orders[order_id] = order
SingleTask.run(self._order_update_callback, copy.copy(order))
SingleTask.run(self._init_callback, True)
async def create_order(self, action, price, quantity, *args, **kwargs):
"""Create an order.
Args:
action: Trade direction, `BUY` or `SELL`.
price: Price of each order.
quantity: The buying or selling quantity.
Returns:
order_id: Order id if created successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
client_order_id = kwargs["client_order_id"]
result, error = await self._rest_api.create_order(action, self._raw_symbol, price, quantity, client_order_id)
if error:
SingleTask.run(self._error_callback, error)
return None, error
order_id = str(result["orderId"])
return order_id, None
async def revoke_order(self, *order_ids):
"""Revoke (an) order(s).
Args:
            order_ids: Order id list; you can pass zero, one, or multiple ids. If you pass no ids, all open orders
                for this symbol (initialized in the Trade object) are cancelled. If you pass one id, that order is
                cancelled. If you pass multiple ids, those orders are cancelled. Do not pass more than 100 ids.
        Returns:
            Success or error, see below.
"""
# If len(order_ids) == 0, you will cancel all orders for this symbol(initialized in Trade object).
if len(order_ids) == 0:
order_infos, error = await self._rest_api.get_open_orders(self._raw_symbol)
if error:
SingleTask.run(self._error_callback, error)
return False, error
for order_info in order_infos:
_, error = await self._rest_api.revoke_order(self._raw_symbol, order_info["orderId"])
if error:
SingleTask.run(self._error_callback, error)
return False, error
return True, None
# If len(order_ids) == 1, you will cancel an order.
if len(order_ids) == 1:
success, error = await self._rest_api.revoke_order(self._raw_symbol, order_ids[0])
if error:
SingleTask.run(self._error_callback, error)
return order_ids[0], error
else:
return order_ids[0], None
# If len(order_ids) > 1, you will cancel multiple orders.
if len(order_ids) > 1:
success, error = [], []
for order_id in order_ids:
_, e = await self._rest_api.revoke_order(self._raw_symbol, order_id)
if e:
SingleTask.run(self._error_callback, e)
error.append((order_id, e))
else:
success.append(order_id)
return success, error
async def get_open_order_ids(self):
"""Get open order id list.
"""
success, error = await self._rest_api.get_open_orders(self._raw_symbol)
if error:
SingleTask.run(self._error_callback, error)
return None, error
else:
order_ids = []
for order_info in success:
order_id = str(order_info["orderId"])
order_ids.append(order_id)
return order_ids, None
@async_method_locker("BinanceTrade.process.locker")
async def process(self, msg):
"""Process message that received from Websocket connection.
Args:
msg: message received from Websocket connection.
"""
logger.debug("msg:", json.dumps(msg), caller=self)
e = msg.get("e")
if e == "executionReport": # Order update.
if msg["s"] != self._raw_symbol:
return
order_id = str(msg["i"])
if msg["X"] == "NEW":
status = ORDER_STATUS_SUBMITTED
elif msg["X"] == "PARTIALLY_FILLED":
status = ORDER_STATUS_PARTIAL_FILLED
elif msg["X"] == "FILLED":
status = ORDER_STATUS_FILLED
elif msg["X"] == "CANCELED":
status = ORDER_STATUS_CANCELED
elif msg["X"] == "REJECTED":
status = ORDER_STATUS_FAILED
elif msg["X"] == "EXPIRED":
status = ORDER_STATUS_FAILED
else:
logger.warn("unknown status:", msg, caller=self)
SingleTask.run(self._error_callback, "order status error.")
return
order = self._orders.get(order_id)
if not order:
info = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"order_id": order_id,
"client_order_id": msg["c"],
"action": ORDER_ACTION_BUY if msg["S"] == "BUY" else ORDER_ACTION_SELL,
"order_type": ORDER_TYPE_LIMIT if msg["o"] == "LIMIT" else ORDER_TYPE_MARKET,
"symbol": self._symbol,
"price": msg["p"],
"quantity": msg["q"],
"ctime": msg["O"]
}
order = Order(**info)
self._orders[order_id] = order
order.remain = float(msg["q"]) - float(msg["z"])
order.status = status
order.avg_price = msg["L"]
order.utime = msg["T"]
SingleTask.run(self._order_update_callback, copy.copy(order))
if status in [ORDER_STATUS_FAILED, ORDER_STATUS_CANCELED, ORDER_STATUS_FILLED]:
self._orders.pop(order_id)
|
py | b403c0aa84fbb1704cffe86d869e9536a7cf8318 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-scheduling a Neural Network for NVIDIA GPU
===============================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_
Auto-tuning for specific devices and workloads is critical for getting the
best performance. This is a tutorial on how to tune a whole neural
network for NVIDIA GPU with the auto-scheduler.
To auto-tune a neural network, we partition the network into small subgraphs and
tune them independently. Each subgraph is treated as one search task.
A task scheduler slices the time and dynamically allocates time resources to
these tasks. The task scheduler predicts the impact of each task on the end-to-end
execution time and prioritizes the one that can reduce the execution time the most.
For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to
get the computational DAG in the tensor expression form.
We then use the auto-scheduler to construct a search space of this DAG and search
for good schedules (low-level optimizations).
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any
schedule templates. In other words, the auto-scheduler only uses the compute declarations
in :code:`tvm/python/topi` and does not use existing schedule templates.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
import numpy as np
import tvm
from tvm import relay, auto_scheduler
import tvm.relay.testing
from tvm.contrib import graph_executor
#################################################################
# Define a Network
# ----------------
# First, we need to define the network with relay frontend API.
# We can load some pre-defined network from :code:`tvm.relay.testing`.
# We can also load models from MXNet, ONNX, PyTorch, and TensorFlow
# (see :ref:`front end tutorials<tutorial-frontend>`).
#
# For convolutional neural networks, although auto-scheduler can work correctly
# with any layout, we found the best performance is typically achieved with NHWC layout.
# We also implemented more optimizations for NHWC layout with the auto-scheduler.
# So it is recommended to convert your models to NHWC layout to use the auto-scheduler.
# You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM.
def get_network(name, batch_size, layout="NHWC", dtype="float32"):
"""Get the symbol definition and random weight of a network"""
# auto-scheduler prefers NHWC layout
if layout == "NHWC":
image_shape = (224, 224, 3)
elif layout == "NCHW":
image_shape = (3, 224, 224)
else:
raise ValueError("Invalid layout: " + layout)
input_shape = (batch_size,) + image_shape
output_shape = (batch_size, 1000)
if name.startswith("resnet-"):
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer,
batch_size=batch_size,
layout=layout,
dtype=dtype,
image_shape=image_shape,
)
elif name.startswith("resnet3d-"):
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer,
batch_size=batch_size,
layout=layout,
dtype=dtype,
image_shape=image_shape,
)
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(
batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape
)
elif name == "squeezenet_v1.1":
assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout"
mod, params = relay.testing.squeezenet.get_workload(
version="1.1",
batch_size=batch_size,
dtype=dtype,
image_shape=image_shape,
)
elif name == "inception_v3":
input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
assert layout == "NCHW"
block = get_model("resnet18_v1", pretrained=True)
mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
net = mod["main"]
net = relay.Function(
net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
)
mod = tvm.IRModule.from_expr(net)
return mod, params, input_shape, output_shape
# Define the neural network and compilation target
network = "resnet-18"
batch_size = 1
layout = "NHWC"
target = tvm.target.Target("cuda")
dtype = "float32"
log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name)
#################################################################
# Extract Search Tasks
# --------------------
# Next, we extract the search tasks and their weights from a network.
# The weight of a task is the number of appearances of the task's subgraph
# in the whole network.
# By using the weight, we can approximate the end-to-end latency of the network
# as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the
# latency of a task and :code:`weight[t]` is the weight of the task.
# The task scheduler will just optimize this objective.
# Extract tasks from the network
print("Extract tasks...")
mod, params, input_shape, output_shape = get_network(network, batch_size, layout, dtype=dtype)
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
for idx, task in enumerate(tasks):
print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key))
print(task.compute_dag)
#################################################################
# Begin Tuning
# ------------
# Now, we set some options for tuning and launch the search tasks
#
# * :code:`measure_ctx` launches a different process for measurement to
# provide isolation. It can protect the main process from GPU crashes
# during measurement and avoid other runtime conflicts.
# * :code:`min_repeat_ms` defines the minimum duration of one "repeat" in every measurement.
#   This can warm up the GPU, which is necessary to get accurate measurement results.
# Typically, we recommend a value >= 300 ms.
# * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning.
# You can set it to a small number (e.g., 200) for a fast demonstrative run.
# In practice, we recommend setting it around :code:`900 * len(tasks)`,
# which is typically enough for the search to converge.
# For example, there are 24 tasks in resnet-18, so we can set it as 20000.
# You can adjust this parameter according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a log file,
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions`,
# :any:`auto_scheduler.LocalRPCMeasureContext` for more parameters.
#
def run_tuning():
print("Begin tuning...")
measure_ctx = auto_scheduler.LocalRPCMeasureContext(repeat=1, min_repeat_ms=300, timeout=10)
tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=200, # change this to 20000 to achieve the best performance
runner=measure_ctx.runner,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
tuner.tune(tune_option)
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# run_tuning()
######################################################################
# .. note:: Explain the printed information during tuning
#
# During the tuning, a lot of information will be printed on the console.
# They are used for debugging purposes. The most important info is the output
# of the task scheduler. The following table is a sample output.
#
# .. code-block:: c
#
# ----------------------------------------------------------------------
# ------------------------------ [ Task Scheduler ]
# ----------------------------------------------------------------------
# | ID | Latency (ms) | Speed (GFLOPS) | Trials |
# -------------------------------------------------
# | 0 | 0.005 | 0.88 | 64 |
# | 1 | 0.010 | 99.10 | 64 |
# | 2 | 0.006 | 0.00 | 64 |
# | 3 | 0.145 | 979.78 | 384 |
# | 4 | 0.130 | 1097.02 | 384 |
# | 5 | 0.143 | 992.69 | 384 |
# | 6 | 0.076 | 1526.86 | 192 |
# | 7 | 0.115 | 999.44 | 320 |
# | 8 | 0.079 | 1449.39 | 320 |
# | 9 | 0.122 | 938.73 | 384 |
# | 10 | 0.063 | 1832.98 | 192 |
# | 11 | 0.072 | 1763.62 | 256 |
# | 12 | 0.062 | 2036.40 | 192 |
# | 13 | 0.068 | 1874.44 | 192 |
# | 14 | 0.049 | 2346.50 | 128 |
# | 15 | 0.076 | 1694.31 | 256 |
# | 16 | 0.067 | 1933.30 | 448 |
# | 17 | 0.076 | 1680.90 | 256 |
# | 18 | 0.022 | 98.43 | 64 |
# | 19 | 0.076 | 3112.55 | 192 |
# | 20 | 0.013 | 2026.44 | 64 |
# | 21 | 0.011 | 1136.69 | 64 |
# | 22 | 0.013 | 992.47 | 64 |
# | 23 | 0.020 | 627.56 | 64 |
# -------------------------------------------------
# Estimated total latency: 1.587 ms Trials: 4992 Used time : 13296 s Next ID: 3
#
# This table lists the latency and (estimated) speed of all tasks.
# It also lists the allocation of measurement trials for all tasks.
# The last line prints the total weighted latency of these tasks,
# which can be a rough estimation of the end-to-end execution time
# of the network.
# The last line also prints the total number of measurement trials,
# total time spent on auto-tuning and the id of the next task to tune.
#
# There will also be some "tvm::Error"s and CUDA errors, because the
# auto-scheduler will try some invalid schedules.
# You can safely ignore them if the tuning can continue, because these
# errors are isolated from the main process.
#
######################################################################
# .. note:: Terminate the tuning earlier
#
# You can terminate the tuning earlier by forcibly killing this process.
# As long as you get at least one valid schedule for each task in the log file,
#   you should be able to do the compilation (the section below).
#
#################################################################
# Compile and Evaluate
# --------------------
# After auto-tuning, we can compile the network with the best schedules we found.
# All measurement records are dumped into the log file during auto-tuning,
# so we can read the log file and load the best schedules.
# Compile with the history best
print("Compile...")
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(opt_level=3, config={"relay.backend.use_auto_scheduler": True}):
lib = relay.build(mod, target=target, params=params)
# Create graph executor
dev = tvm.device(str(target), 0)
module = graph_executor.GraphModule(lib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
# Evaluate
print("Evaluate inference time cost...")
print(module.benchmark(dev, repeat=3, min_repeat_ms=500))
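######################################################################
# .. note:: Export the compiled module (optional)
#
#   If you want to reuse this tuned build outside of this script, one option
#   is to export the compiled library and load it back later, e.g. with
#   :code:`lib.export_library("deploy_lib.so")` followed by
#   :code:`tvm.runtime.load_module("deploy_lib.so")`. The file name is only
#   an illustration.
#
# lib.export_library("deploy_lib.so")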
#################################################################
# Other Tips
# ----------
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract feature from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`
# to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
# add a new argument :code:`load_log_file` when creating the task scheduler
# in function :code:`run_tuning`. Say,
# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target GPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
|
py | b403c15580ff9cc75db73e775d0ae89f2c6c47ed | """Definitions for the primitive `gather`."""
from .. import lib, xtype
from ..lib import SHAPE, TYPE, bprop_to_grad_transform, standard_prim
from ..operations import scatter_add, zeros_like
from . import primitives as P
@standard_prim(P.gather)
async def infer_gather(
self,
engine,
input: lib.AbstractArray,
dim: xtype.UInt[64],
index: lib.AbstractArray,
):
"""Infer the return type of primitive `gather`."""
return type(input)(
input.element, {SHAPE: index.xshape(), TYPE: input.xtype()}
)
@bprop_to_grad_transform(P.gather)
def bprop_gather(x, dim, index, out, dout):
"""Backpropagator for primitive `gather`."""
z = zeros_like(x)
z = scatter_add(z, dim, index, dout)
return (z, zeros_like(dim), zeros_like(index))
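# Illustrative note on why the backward of `gather` is a `scatter_add`: for
# dim=1, out[i][j] is read from x[i][index[i][j]], so each incoming gradient
# dout[i][j] must be accumulated back into that source position, and positions
# gathered more than once receive the sum of their gradients. For example:
#
#     x = [[1., 2., 3.]], dim = 1, index = [[0, 0, 2]]
#     gather(x, dim, index) -> [[1., 1., 3.]]
#     with dout = [[g0, g1, g2]], bprop scatters to [[g0 + g1, 0., g2]]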
__operation_defaults__ = {
"name": "gather",
"registered_name": "gather",
"mapping": P.gather,
"python_implementation": None,
}
__primitive_defaults__ = {
"name": "gather",
"registered_name": "gather",
"type": "backend",
"python_implementation": None,
"inferrer_constructor": infer_gather,
"grad_transform": bprop_gather,
}
|
py | b403c15b222135163debb373d9313508c21e951c | from flask import Flask, request
app = Flask(__name__)
@app.route('/url-params/<int:age>/<string:country>/<float:salary>')
def url_params(age: int, country: str, salary: float):
return "URL Parameter data are: " + str(age) + ", " + country + ", " + str(salary)
@app.route('/query-arguments')
def query_arguments():
# Get all argument
all_argument = request.args
# array arguments
data_type = None
argument_array = request.args.getlist("skill", data_type)
# Get specific data from dictionary
age = -1
if "age" in request.args:
age = request.args["age"]
# Get specific data by method
salary = request.args.get("salary") # if key doesn't exist, returns None
# if key doesn't exist, returns a 400, bad request error
country = request.args['country']
print(all_argument)
print(argument_array)
return "Query Arguments Data: " + str(age) + ", " + country + ", " + str(salary)
@app.route('/form')
def form():
return '''
<form action="/form-data" method="POST">
<div><label>Age:</label><input type="number" name="age"></div>
<div><label>Country:</label><input type="text" name="country"></div>
<div><label>Salary:</label><input type="number" name="salary"></div>
<div><label>Skill:</label>
<select name="skill" multiple>
<option value="python">Python</option>
<option value="typescript">Typescript</option>
<option value="react">React</option>
<option value="flask">Flask</option>
</select>
</div>
<input type="submit" value="Submit">
</form>
'''
@app.route('/form-data', methods=["POST"])
def form_data():
form_all_data = request.form
default_value = None
data_type = None
age = request.form.get("age", default_value, data_type)
argument_array = request.form.getlist("skill", data_type)
country = ''
if "country" in request.form:
country = request.form["country"]
salary = request.form.get("salary", 0.0, float)
print(form_all_data)
print(argument_array)
return "Form Data: " + str(age) + ", " + country + ", " + str(salary)
@app.route('/file-submission')
def file_submission():
return '''
<form action="/file-data" method="POST" enctype="multipart/form-data">
<div><label>File:</label><input type="file" name="file"></div>
<input type="submit" value="Submit">
</form>
'''
@app.route('/file-data', methods=["POST"])
def file_data():
form_files = request.files
file_list = []
for file_name in form_files:
file = form_files[file_name]
file_list.append({
"inputName": file.name,
"filename": file.filename,
"contentType": file.content_type,
"mimetype": file.mimetype,
})
return {"fileInfoList": file_list}
@app.route('/json-data', methods=["POST"])
def json_data():
json_data_object = request.get_json()
age = 0
if "age" in json_data_object:
age = json_data_object["age"]
country = ''
if "country" in json_data_object:
country = json_data_object["country"]
salary = 0.0
if "salary" in json_data_object:
salary = json_data_object["salary"]
argument_array = []
if "skill" in json_data_object:
argument_array = json_data_object["skill"]
print(json_data_object)
print(argument_array)
return "JSON Data: " + str(age) + ", " + country + ", " + str(salary)
@app.route('/get-url-info')
def get_url_info():
url_info = {
'relative_url': str(request.url_rule),
'relative_url_with_param': str(request.full_path),
'host_with_port': str(request.host),
'method': str(request.method),
'charset': str(request.url_charset)
}
return url_info
@app.route('/get-header')
def get_header():
default_value = None
auth = request.headers.get("auth", default_value)
return "Header : " + auth
if __name__ == '__main__':
app.run()
|
py | b403c191925d3078a71fdea3a7c6c3b6b47a86d6 | """
Allows utilizing telegram webhooks.
See https://core.telegram.org/bots/webhooks for details
about webhooks.
"""
import asyncio
import logging
from ipaddress import ip_network
import voluptuous as vol
from homeassistant.const import (
HTTP_BAD_REQUEST, HTTP_UNAUTHORIZED)
import homeassistant.helpers.config_validation as cv
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import CONF_API_KEY
from homeassistant.components.http.util import get_real_ip
DOMAIN = 'telegram_webhooks'
DEPENDENCIES = ['http']
REQUIREMENTS = ['python-telegram-bot==5.3.0']
_LOGGER = logging.getLogger(__name__)
EVENT_TELEGRAM_COMMAND = 'telegram.command'
TELEGRAM_HANDLER_URL = '/api/telegram_webhooks'
CONF_USER_ID = 'user_id'
CONF_TRUSTED_NETWORKS = 'trusted_networks'
DEFAULT_TRUSTED_NETWORKS = [
ip_network('149.154.167.197/32'),
ip_network('149.154.167.198/31'),
ip_network('149.154.167.200/29'),
ip_network('149.154.167.208/28'),
ip_network('149.154.167.224/29'),
ip_network('149.154.167.232/31')
]
ATTR_COMMAND = 'command'
ATTR_USER_ID = 'user_id'
ATTR_ARGS = 'args'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_API_KEY): cv.string,
vol.Optional(CONF_TRUSTED_NETWORKS, default=DEFAULT_TRUSTED_NETWORKS):
vol.All(cv.ensure_list, [ip_network]),
vol.Required(CONF_USER_ID): {cv.string: cv.positive_int},
}),
}, extra=vol.ALLOW_EXTRA)
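# An illustrative validation of the schema above; the values are placeholders:
#
#     CONFIG_SCHEMA({
#         DOMAIN: {
#             'api_key': 'YOUR_TELEGRAM_BOT_API_KEY',
#             'user_id': {'some_device': 123456789},
#         }
#     })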
def setup(hass, config):
"""Setup the telegram_webhooks component.
register webhook if API_KEY is specified
register /api/telegram_webhooks as web service for telegram bot
"""
import telegram
conf = config[DOMAIN]
if CONF_API_KEY in conf:
bot = telegram.Bot(conf[CONF_API_KEY])
current_status = bot.getWebhookInfo()
_LOGGER.debug("telegram webhook status: %s", current_status)
handler_url = "{0}{1}".format(hass.config.api.base_url,
TELEGRAM_HANDLER_URL)
if current_status and current_status['url'] != handler_url:
if bot.setWebhook(handler_url):
_LOGGER.info("set new telegram webhook %s", handler_url)
else:
_LOGGER.error("set telegram webhook failed %s", handler_url)
hass.http.register_view(BotPushReceiver(conf[CONF_USER_ID],
conf[CONF_TRUSTED_NETWORKS]))
return True
class BotPushReceiver(HomeAssistantView):
"""Handle pushes from telegram."""
requires_auth = False
url = TELEGRAM_HANDLER_URL
name = "telegram_webhooks"
def __init__(self, user_id_array, trusted_networks):
"""Initialize users allowed to send messages to bot."""
self.trusted_networks = trusted_networks
self.users = {user_id: dev_id for dev_id, user_id in
user_id_array.items()}
_LOGGER.debug("users allowed: %s", self.users)
@asyncio.coroutine
def post(self, request):
"""Accept the POST from telegram."""
real_ip = get_real_ip(request)
if not any(real_ip in net for net in self.trusted_networks):
_LOGGER.warning("Access denied from %s", real_ip)
return self.json_message('Access denied', HTTP_UNAUTHORIZED)
try:
data = yield from request.json()
except ValueError:
_LOGGER.error("Received telegram data: %s", data)
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
# check for basic message rules
data = data.get('message')
if not data or 'from' not in data or 'text' not in data:
return self.json({})
if data['from'].get('id') not in self.users:
_LOGGER.warning("User not allowed")
return self.json_message('Invalid user', HTTP_BAD_REQUEST)
_LOGGER.debug("Received telegram data: %s", data)
if not data['text'] or data['text'][:1] != '/':
_LOGGER.warning('no command')
return self.json({})
pieces = data['text'].split(' ')
request.app['hass'].bus.async_fire(EVENT_TELEGRAM_COMMAND, {
ATTR_COMMAND: pieces[0],
ATTR_ARGS: " ".join(pieces[1:]),
ATTR_USER_ID: data['from']['id'],
})
return self.json({})
|
py | b403c378fb77ce092971e51a33df329a081e7913 | from functools import partial
# Django
from django import forms
# needed for RawIdWidget
from django.contrib.admin.sites import site
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.core.exceptions import ValidationError
from django.template.loader import render_to_string
from django.utils import timezone
# Models
from ..models import Postavka
# Widgets
from eda5.arhiv.widgets import ArhiviranjeManyToManyRawIdWidget
from eda5.partnerji.widgets import PartnerForeignKeyRawIdWidget
DateInput = partial(forms.DateInput, {'class': 'datepicker'})
TimeInput = partial(forms.TimeInput, {'class': 'timepicker'})
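# functools.partial pre-binds the widget's first positional argument (attrs),
# so DateInput() behaves like forms.DateInput(attrs={'class': 'datepicker'})
# and TimeInput() like forms.TimeInput(attrs={'class': 'timepicker'}).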
class PostavkaCreateForm(forms.ModelForm):
class Meta:
model = Postavka
fields = (
'oznaka',
'opis',
)
class PostavkaUpdateForm(forms.ModelForm):
class Meta:
model = Postavka
fields = (
'oznaka',
'opis',
'priloge',
)
widgets = {
'priloge': ArhiviranjeManyToManyRawIdWidget(model._meta.get_field('priloge').rel, site),
}
|
py | b403c561a4fed55140eac77659d1d648b7e6f6c1 | from django.apps import AppConfig
class ApplicationsConfig(AppConfig):
name = "api.audit_trail"
def ready(self):
import api.audit_trail.signals # noqa
|
py | b403c790586e35975c5315fed72f7165f0cdef9d | import uuid
from django.db import models
from django.urls import reverse
EXPENSE_TYPES = (
('unknown', 'UNKNOWN'),
('charity', 'Charity'),
('pr', 'PR'),
('p2p', 'P2P'),
('it', 'IT'),
)
class Proposal(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# time when the proposal was first added to our database
inserted_at = models.DateTimeField(auto_now_add=True)
    # governance object id in the blockchain
# see "biblepay-cli gobject get [gobjectid]" for details about this proposal
gobjectid = models.CharField(max_length=100)
    # all details about the proposal in the form of a hex-encoded string.
    # You will find all of this information decoded below. We keep the string
    # in case we missed decoding anything, so that we can decode it later
hex_string = models.TextField(default="")
# what network is for this
network = models.CharField(max_length=20, default="unknown")
    # name of the proposal, chosen by the user
    name = models.CharField(max_length=250, default="")
    # the user's biblepay address, used to send the coins to when the proposal is accepted
receive_address = models.CharField(max_length=100, default="")
# amount requested by the user. Can not be changed later
amount = models.DecimalField(max_digits=14, decimal_places=4, default=0)
# discussion/detail url for this proposal, every proposal should have one
url = models.CharField(max_length=250, default="")
# the expense type can be:
expense_type = models.CharField(max_length=10, choices=EXPENSE_TYPES, default="unknown")
    # in theory, proposals could start and end at different times, but we don't use that
# right now
unix_starttime = models.IntegerField(default=0)
unix_endtime = models.IntegerField(default=0)
    # times of the main pool related to the submission of the proposal
prepare_time = models.DateTimeField(null=True, default=None)
submit_time = models.DateTimeField(null=True, default=None)
trigger_time = models.DateTimeField(null=True, default=None)
    # when the proposal was paid from the sanctuaries
paid_time = models.DateTimeField(null=True, default=None)
# unclear, always empty
funded_time = models.DateTimeField(null=True, default=None)
# unclear
prepare_txid = models.CharField(max_length=100, default="")
# unclear, seems to be a copy of the gobjectid
submit_txid = models.CharField(max_length=100, default="")
# id of the new height/block that is the superblock
    # that paid the proposal. It is empty for unpaid proposals
superblock_txid = models.CharField(max_length=100, default="")
# the height of the superblock that paid the proposal
height = models.IntegerField(null=True, default=None)
# unclear
trigger_txid = models.CharField(max_length=100)
    # information on whether the proposal was committed from the main pool
# to the blockchain
prepared = models.BooleanField(default=False)
submitted = models.BooleanField(default=False)
    # how many sanctuaries voted and what they voted
yes_count = models.IntegerField(default=0)
no_count = models.IntegerField(default=0)
abstain_count = models.IntegerField(default=0)
# yes_count - no_count = absolute_yes_count
absolute_yes_count = models.IntegerField(default=0)
    # masternode count at the time of this proposal, relevant for the
    # absolute_yes_count: the absolute_yes_count must be at least 10% of the
    # masternode_count, or the proposal is not
    # accepted in the next superblock
masternode_count = models.IntegerField(default=0)
# used to disable entries that got removed from the main pool, but we want to keep
# them
active = models.BooleanField(default=True)
def get_absolute_url(self):
return reverse('proposals')
def __str__(self):
        return '%s (%s)' % (self.name, self.expense_type)
def is_fundable(self):
""" returns true if the amount of absolute_yes_count is at least 10%
of the max vote count (masternode_count) """
if self.absolute_yes_count >= (self.masternode_count / 100) * 10:
return True
return False
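# Worked example of the fundability rule above (illustrative numbers only, not
# taken from real chain data): with masternode_count = 200 the threshold is
# (200 / 100) * 10 = 20, so a proposal with absolute_yes_count = 25 is
# fundable, while one with absolute_yes_count = 19 is not.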
|
py | b403c90dd3b39b52cb76fd063204f4052cbf60cd | # -*- coding: utf-8 -*-
class GenerateConnectURLRequestDataAndPayments(object):
"""Implementation of the 'Generate Connect URL Request (Data and Payments)' model.
TODO: type model description here.
Attributes:
partner_id (string): The partner id you can obtain from your Finicity
developer dashboard
customer_id (string): Finicity’s customer ID. Obtained from the Add
Customer call.
mtype (FinicityConnectTypeEnum): The type of connect flow you want for
the customer/consumer. See Finicity Connect Type For Definitions.
institution_id (int): ID of the specific Institution login flow to
present to the Customer/Consumer. For type "aggregation" this will
present this institution by default as the first institution to
add and then the customer can move on to other institutions to add
after.
redirect_uri (string): The url that customers will be redirected to
after completing Finicity Connect. <br> *Required unless Connect
is embedded inside your application. (iframe)*
webhook (string): The publicly available URL you wish to be notified
with events as the user progresses through the application. See
[Connect Webhook
Event](https://docs.finicity.com/connect-webhooks/) for event
details.
webhook_content_type (string): The Content Type The Webhooks Events
Will Be Sent In. Supported Types `application/json` and
`application/xml`
webhook_data (object): Allows additional identifiable information to
be inserted into the payload of connect webhook events. See this
article for
[Details](https://docs.finicity.com/connect-custom-webhook-data-and
-headers/).
webhook_headers (object): Allows additional identifiable information
to be included as headers of connect webhook event. See this
article for
[Details](https://docs.finicity.com/connect-custom-webhook-data-and
-headers/).
institution_settings (object): Advanced options for configuration of
which institutions to display in. See this article for
[Details](https://docs.finicity.com/connect-institution-settings/)
analytics (string): Google Analytics or Adobe Analytics can be used
with Connect to provide an additional layer of transparency of end
user engagement. This is optional.
"""
# Create a mapping from Model property names to API property names
_names = {
"partner_id":'partnerId',
"customer_id":'customerId',
"mtype":'type',
"institution_id":'institutionId',
"redirect_uri":'redirectUri',
"webhook":'webhook',
"webhook_content_type":'webhookContentType',
"webhook_data":'webhookData',
"webhook_headers":'webhookHeaders',
"institution_settings":'institutionSettings',
"analytics":'analytics'
}
def __init__(self,
partner_id=None,
customer_id=None,
mtype=None,
institution_id=None,
redirect_uri=None,
webhook=None,
webhook_content_type='application/json',
webhook_data=None,
webhook_headers=None,
institution_settings=None,
analytics=None,
                 additional_properties=None):
"""Constructor for the GenerateConnectURLRequestDataAndPayments class"""
# Initialize members of the class
self.partner_id = partner_id
self.customer_id = customer_id
self.mtype = mtype
self.institution_id = institution_id
self.redirect_uri = redirect_uri
self.webhook = webhook
self.webhook_content_type = webhook_content_type
self.webhook_data = webhook_data
self.webhook_headers = webhook_headers
self.institution_settings = institution_settings
self.analytics = analytics
# Add additional model properties to the instance
        self.additional_properties = (
            additional_properties if additional_properties is not None else {})
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
partner_id = dictionary.get('partnerId')
customer_id = dictionary.get('customerId')
mtype = dictionary.get('type')
institution_id = dictionary.get('institutionId')
redirect_uri = dictionary.get('redirectUri')
webhook = dictionary.get('webhook')
webhook_content_type = dictionary.get("webhookContentType") if dictionary.get("webhookContentType") else 'application/json'
webhook_data = dictionary.get('webhookData')
webhook_headers = dictionary.get('webhookHeaders')
institution_settings = dictionary.get('institutionSettings')
analytics = dictionary.get('analytics')
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(partner_id,
customer_id,
mtype,
institution_id,
redirect_uri,
webhook,
webhook_content_type,
webhook_data,
webhook_headers,
institution_settings,
analytics,
dictionary)
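# Minimal usage sketch (an addition for illustration, not part of the original
# SDK file). All ids and URLs below are placeholders, not real Finicity values.
if __name__ == '__main__':
    example_payload = {
        'partnerId': 'PARTNER_ID_PLACEHOLDER',
        'customerId': 'CUSTOMER_ID_PLACEHOLDER',
        'type': 'aggregation',
        'redirectUri': 'https://example.com/connect-done'
    }
    request_model = GenerateConnectURLRequestDataAndPayments.from_dictionary(
        example_payload)
    # webhookContentType was not supplied, so it falls back to the
    # 'application/json' default set in the constructor
    print(request_model.partner_id, request_model.mtype,
          request_model.webhook_content_type)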
|
py | b403c941cfcf5b5aaac8a53012716c3da4973c64 | from django.urls import path
from .views import (PostListView,
# CreateCommentView,
PostDeleteView,
PostDetailView,
UserPostListView)
from . import views
urlpatterns = [
path('', PostListView.as_view(), name='blog-home'),
path('user/<str:username>/', UserPostListView.as_view(), name='user-posts'),
path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
# path('post/<int:pk>/comment',CreateCommentView.as_view(), name='create-comment'),
path('post/new/', views.createPost, name='post-create'),
path('post/<int:pk>/update/', views.updatePost, name='post-update'),
path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
path('about/', views.about, name='blog-about'),
] |
py | b403c9ca446fe3f9e46575ad9f9990c4927e32c5 | """
WSGI config for aaw project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aaw.settings')
application = get_wsgi_application()
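# Deployment example (an assumption, not part of the original file): a WSGI
# server such as gunicorn can serve this callable with
#   gunicorn aaw.wsgi:application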
|
py | b403c9f8bcd480c1f8894ce4ebfc1a5b33f5c6b4 | # -*- coding: utf-8 -*-
"""DNA Center Create Device Credentials data model.
Copyright (c) 2019-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorFbb95B37484A9Fce(object):
"""Create Device Credentials request schema definition."""
def __init__(self):
super(JSONSchemaValidatorFbb95B37484A9Fce, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"properties": {
"settings": {
"description":
"Settings",
"properties": {
"cliCredential": {
"description":
"Cli Credential",
"items": {
"properties": {
"description":
{
"description":
"Description",
"type": [
"string",
"null"
]
},
"enablePassword": {
"description":
"Enable Password",
"type": [
"string",
"null"
]
},
"password": {
"description":
"Password",
"type": [
"string",
"null"
]
},
"username": {
"description":
"Username",
"type": [
"string",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": [
"array",
"null"
]
},
"httpsRead": {
"description":
"Https Read",
"items": {
"properties": {
"name": {
"description":
"Name",
"type": [
"string",
"null"
]
},
"password": {
"description":
"Password",
"type": [
"string",
"null"
]
},
"port": {
"type": [
"number",
"null"
]
},
"username": {
"description":
"Username",
"type": [
"string",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": [
"array",
"null"
]
},
"httpsWrite": {
"description":
"Https Write",
"items": {
"properties": {
"name": {
"description":
"Name",
"type": [
"string",
"null"
]
},
"password": {
"description":
"Password",
"type": [
"string",
"null"
]
},
"port": {
"type": [
"number",
"null"
]
},
"username": {
"description":
"Username",
"type": [
"string",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": [
"array",
"null"
]
},
"snmpV2cRead": {
"description":
"Snmp V2c Read",
"items": {
"properties": {
"description":
{
"description":
"Description",
"type": [
"string",
"null"
]
},
"readCommunity": {
"description":
"Read Community",
"type": [
"string",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": [
"array",
"null"
]
},
"snmpV2cWrite": {
"description":
"Snmp V2c Write",
"items": {
"properties": {
"description":
{
"description":
"Description",
"type": [
"string",
"null"
]
},
"writeCommunity": {
"description":
"Write Community",
"type": [
"string",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": [
"array",
"null"
]
},
"snmpV3": {
"description":
"Snmp V3",
"items": {
"properties": {
"authPassword": {
"description":
"Auth Password",
"type": [
"string",
"null"
]
},
"authType": {
"description":
"Auth Type",
"enum": [
"SHA",
"MD5",
null
],
"type": [
"string",
"null"
]
},
"description":
{
"description":
"Description",
"type": [
"string",
"null"
]
},
"privacyPassword": {
"description":
"Privacy Password",
"type": [
"string",
"null"
]
},
"privacyType": {
"description":
"Privacy Type",
"enum": [
"AES128",
"DES",
null
],
"type": [
"string",
"null"
]
},
"snmpMode": {
"description":
"Snmp Mode",
"enum": [
"AUTHPRIV",
"AUTHNOPRIV",
"NOAUTHNOPRIV",
null
],
"type": [
"string",
"null"
]
},
"username": {
"description":
"Username",
"type": [
"string",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": [
"array",
"null"
]
}
},
"type": [
"object"
]
}
},
"required": [
"settings"
],
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
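# Minimal usage sketch (an addition for illustration, not part of the original
# generated file). The schema only requires a 'settings' object, so an empty
# settings dict passes validation, while a missing 'settings' key raises
# MalformedRequest.
if __name__ == '__main__':
    validator = JSONSchemaValidatorFbb95B37484A9Fce()
    validator.validate({'settings': {}})   # passes
    try:
        validator.validate({})             # missing required 'settings' key
    except MalformedRequest as error:
        print(error)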
|
py | b403cbfcc4c7caf53474401a2eeacb5b20763917 | import copy
import json
import logging
import multiprocessing
import sys
from datetime import datetime
from types import FrameType
from typing import Dict, List, Optional
import pika.exceptions
from pika.adapters.blocking_connection import BlockingChannel
from src.abstract.publisher_subscriber import PublisherSubscriberComponent
from src.channels_manager.handlers.starters import (
start_telegram_alerts_handler, start_telegram_commands_handler,
start_slack_alerts_handler, start_slack_commands_handler,
start_twilio_alerts_handler, start_console_alerts_handler,
start_log_alerts_handler, start_email_alerts_handler,
start_pagerduty_alerts_handler, start_opsgenie_alerts_handler)
from src.message_broker.rabbitmq import RabbitMQApi
from src.utils import env
from src.utils.configs import (get_newly_added_configs, get_modified_configs,
get_removed_configs)
from src.utils.constants.names import (
TELEGRAM_ALERTS_HANDLER_NAME_TEMPLATE,
TELEGRAM_COMMANDS_HANDLER_NAME_TEMPLATE,
SLACK_ALERTS_HANDLER_NAME_TEMPLATE, SLACK_COMMANDS_HANDLER_NAME_TEMPLATE,
TWILIO_ALERTS_HANDLER_NAME_TEMPLATE, EMAIL_ALERTS_HANDLER_NAME_TEMPLATE,
PAGERDUTY_ALERTS_HANDLER_NAME_TEMPLATE,
OPSGENIE_ALERTS_HANDLER_NAME_TEMPLATE, CONSOLE_ALERTS_HANDLER_NAME_TEMPLATE,
LOG_ALERTS_HANDLER_NAME_TEMPLATE, CONSOLE_CHANNEL_ID, CONSOLE_CHANNEL_NAME,
LOG_CHANNEL_ID, LOG_CHANNEL_NAME,
)
from src.utils.constants.rabbitmq import (HEALTH_CHECK_EXCHANGE,
CONFIG_EXCHANGE,
HEARTBEAT_OUTPUT_MANAGER_ROUTING_KEY,
PING_ROUTING_KEY,
CHANNELS_MANAGER_HEARTBEAT_QUEUE_NAME,
CHANNELS_MANAGER_CONFIGS_QUEUE_NAME,
CHANNELS_MANAGER_CONFIGS_ROUTING_KEY,
TOPIC)
from src.utils.exceptions import MessageWasNotDeliveredException
from src.utils.logging import log_and_print
from src.utils.types import (str_to_bool, ChannelTypes, ChannelHandlerTypes,
convert_to_int)
class ChannelsManager(PublisherSubscriberComponent):
def __init__(self, logger: logging.Logger, name: str,
rabbitmq: RabbitMQApi) -> None:
self._name = name
self._channel_configs = {}
self._channel_process_dict = {}
super().__init__(logger, rabbitmq)
def __str__(self) -> str:
return self.name
@property
def name(self) -> str:
return self._name
@property
def channel_configs(self) -> Dict:
return self._channel_configs
@property
def channel_process_dict(self) -> Dict:
return self._channel_process_dict
def _initialise_rabbitmq(self) -> None:
self.rabbitmq.connect_till_successful()
# Declare consuming intentions
self.logger.info("Creating '%s' exchange", HEALTH_CHECK_EXCHANGE)
self.rabbitmq.exchange_declare(HEALTH_CHECK_EXCHANGE, TOPIC, False,
True, False, False)
self.logger.info("Creating queue '%s'",
CHANNELS_MANAGER_HEARTBEAT_QUEUE_NAME)
self.rabbitmq.queue_declare(CHANNELS_MANAGER_HEARTBEAT_QUEUE_NAME,
False, True, False, False)
self.logger.info("Binding queue '%s' to exchange '%s' with routing key "
"'%s'", CHANNELS_MANAGER_HEARTBEAT_QUEUE_NAME,
HEALTH_CHECK_EXCHANGE, PING_ROUTING_KEY)
self.rabbitmq.queue_bind(CHANNELS_MANAGER_HEARTBEAT_QUEUE_NAME,
HEALTH_CHECK_EXCHANGE, PING_ROUTING_KEY)
self.logger.debug("Declaring consuming intentions on '%s'",
CHANNELS_MANAGER_HEARTBEAT_QUEUE_NAME)
self.rabbitmq.basic_consume(CHANNELS_MANAGER_HEARTBEAT_QUEUE_NAME,
self._process_ping, True, False, None)
self.logger.info("Creating exchange '%s'", CONFIG_EXCHANGE)
self.rabbitmq.exchange_declare(CONFIG_EXCHANGE, TOPIC, False, True,
False, False)
self.logger.info("Creating queue '%s'",
CHANNELS_MANAGER_CONFIGS_QUEUE_NAME)
self.rabbitmq.queue_declare(CHANNELS_MANAGER_CONFIGS_QUEUE_NAME,
False, True, False, False)
self.logger.info("Binding queue '%s' to exchange '%s' with routing key "
"'%s'", CHANNELS_MANAGER_CONFIGS_QUEUE_NAME,
CONFIG_EXCHANGE, CHANNELS_MANAGER_CONFIGS_ROUTING_KEY)
self.rabbitmq.queue_bind(CHANNELS_MANAGER_CONFIGS_QUEUE_NAME,
CONFIG_EXCHANGE,
CHANNELS_MANAGER_CONFIGS_ROUTING_KEY)
self.logger.debug("Declaring consuming intentions on %s",
CHANNELS_MANAGER_CONFIGS_QUEUE_NAME)
self.rabbitmq.basic_consume(CHANNELS_MANAGER_CONFIGS_QUEUE_NAME,
self._process_configs, False, False, None)
# Declare publishing intentions
self.logger.info("Setting delivery confirmation on RabbitMQ channel")
self.rabbitmq.confirm_delivery()
def _listen_for_data(self) -> None:
self.rabbitmq.start_consuming()
def _send_heartbeat(self, data_to_send: Dict) -> None:
self.rabbitmq.basic_publish_confirm(
exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_MANAGER_ROUTING_KEY, body=data_to_send,
is_body_dict=True, properties=pika.BasicProperties(delivery_mode=2),
mandatory=True)
self.logger.debug("Sent heartbeat to '%s' exchange",
HEALTH_CHECK_EXCHANGE)
def _create_and_start_telegram_alerts_handler(
self, bot_token: str, bot_chat_id: str, channel_id: str,
channel_name: str) -> None:
process = multiprocessing.Process(target=start_telegram_alerts_handler,
args=(bot_token, bot_chat_id,
channel_id, channel_name))
process.daemon = True
log_and_print("Creating a new process for the alerts handler of "
"Telegram channel {}".format(channel_name), self.logger)
process.start()
if channel_id not in self._channel_process_dict:
self._channel_process_dict[channel_id] = {}
handler_type = ChannelHandlerTypes.ALERTS.value
self._channel_process_dict[channel_id][handler_type] = {}
process_details = self._channel_process_dict[channel_id][handler_type]
process_details['component_name'] = \
TELEGRAM_ALERTS_HANDLER_NAME_TEMPLATE.format(channel_name)
process_details['process'] = process
process_details['bot_token'] = bot_token
process_details['bot_chat_id'] = bot_chat_id
process_details['channel_id'] = channel_id
process_details['channel_name'] = channel_name
process_details['channel_type'] = ChannelTypes.TELEGRAM.value
def _create_and_start_telegram_cmds_handler(
self, bot_token: str, bot_chat_id: str, channel_id: str,
channel_name: str, associated_chains: Dict) -> None:
process = multiprocessing.Process(
target=start_telegram_commands_handler,
args=(bot_token, bot_chat_id, channel_id, channel_name,
associated_chains))
process.daemon = True
log_and_print("Creating a new process for the commands handler of "
"Telegram channel {}".format(channel_name), self.logger)
process.start()
if channel_id not in self._channel_process_dict:
self._channel_process_dict[channel_id] = {}
commands_handler_type = ChannelHandlerTypes.COMMANDS.value
self._channel_process_dict[channel_id][commands_handler_type] = {}
process_details = self._channel_process_dict[channel_id][
commands_handler_type]
process_details['component_name'] = \
TELEGRAM_COMMANDS_HANDLER_NAME_TEMPLATE.format(channel_name)
process_details['process'] = process
process_details['bot_token'] = bot_token
process_details['bot_chat_id'] = bot_chat_id
process_details['channel_id'] = channel_id
process_details['channel_name'] = channel_name
process_details['associated_chains'] = associated_chains
process_details['channel_type'] = ChannelTypes.TELEGRAM.value
def _create_and_start_slack_alerts_handler(
self, bot_token: str, app_token: str, bot_channel_id: str,
channel_id: str, channel_name: str) -> None:
process = multiprocessing.Process(target=start_slack_alerts_handler,
args=(bot_token, app_token,
bot_channel_id, channel_id,
channel_name))
process.daemon = True
log_and_print("Creating a new process for the alerts handler of "
"Slack channel {}".format(channel_name), self.logger)
process.start()
if channel_id not in self._channel_process_dict:
self._channel_process_dict[channel_id] = {}
handler_type = ChannelHandlerTypes.ALERTS.value
self._channel_process_dict[channel_id][handler_type] = {}
process_details = self._channel_process_dict[channel_id][handler_type]
process_details['component_name'] = \
SLACK_ALERTS_HANDLER_NAME_TEMPLATE.format(channel_name)
process_details['process'] = process
process_details['bot_token'] = bot_token
process_details['app_token'] = app_token
process_details['bot_channel_id'] = bot_channel_id
process_details['channel_id'] = channel_id
process_details['channel_name'] = channel_name
process_details['channel_type'] = ChannelTypes.SLACK.value
def _create_and_start_slack_cmds_handler(
self, bot_token: str, app_token: str, bot_channel_id: str,
channel_id: str, channel_name: str, associated_chains: Dict) -> \
None:
process = multiprocessing.Process(
target=start_slack_commands_handler,
args=(bot_token, app_token, bot_channel_id, channel_id,
channel_name, associated_chains))
process.daemon = True
log_and_print("Creating a new process for the commands handler of "
"Slack channel {}".format(channel_name), self.logger)
process.start()
if channel_id not in self._channel_process_dict:
self._channel_process_dict[channel_id] = {}
commands_handler_type = ChannelHandlerTypes.COMMANDS.value
self._channel_process_dict[channel_id][commands_handler_type] = {}
process_details = self._channel_process_dict[channel_id][
commands_handler_type]
process_details['component_name'] = \
SLACK_COMMANDS_HANDLER_NAME_TEMPLATE.format(channel_name)
process_details['process'] = process
process_details['bot_token'] = bot_token
process_details['app_token'] = app_token
process_details['bot_channel_id'] = bot_channel_id
process_details['channel_id'] = channel_id
process_details['channel_name'] = channel_name
process_details['associated_chains'] = associated_chains
process_details['channel_type'] = ChannelTypes.SLACK.value
def _create_and_start_twilio_alerts_handler(
self, account_sid: str, auth_token: str, channel_id: str,
channel_name: str, call_from: str, call_to: List[str], twiml: str,
twiml_is_url: bool) -> None:
process = multiprocessing.Process(
target=start_twilio_alerts_handler,
args=(account_sid, auth_token, channel_id, channel_name, call_from,
call_to, twiml, twiml_is_url))
process.daemon = True
log_and_print("Creating a new process for the alerts handler of "
"Twilio channel {}".format(channel_name), self.logger)
process.start()
if channel_id not in self._channel_process_dict:
self._channel_process_dict[channel_id] = {}
handler_type = ChannelHandlerTypes.ALERTS.value
self._channel_process_dict[channel_id][handler_type] = {}
process_details = self._channel_process_dict[channel_id][handler_type]
process_details['component_name'] = \
TWILIO_ALERTS_HANDLER_NAME_TEMPLATE.format(channel_name)
process_details['process'] = process
process_details['account_sid'] = account_sid
process_details['auth_token'] = auth_token
process_details['channel_id'] = channel_id
process_details['channel_name'] = channel_name
process_details['call_from'] = call_from
process_details['call_to'] = call_to
process_details['twiml'] = twiml
process_details['twiml_is_url'] = twiml_is_url
process_details['channel_type'] = ChannelTypes.TWILIO.value
def _create_and_start_email_alerts_handler(
self, smtp: str, email_from: str, emails_to: List[str],
channel_id: str, channel_name: str, username: Optional[str],
password: Optional[str], port: int = 0) -> None:
process = multiprocessing.Process(
target=start_email_alerts_handler,
args=(smtp, email_from, emails_to, channel_id, channel_name,
username, password, port))
process.daemon = True
log_and_print("Creating a new process for the alerts handler of "
"e-mail channel {}".format(channel_name), self.logger)
process.start()
if channel_id not in self._channel_process_dict:
self._channel_process_dict[channel_id] = {}
handler_type = ChannelHandlerTypes.ALERTS.value
self._channel_process_dict[channel_id][handler_type] = {}
process_details = self._channel_process_dict[channel_id][handler_type]
process_details['component_name'] = \
EMAIL_ALERTS_HANDLER_NAME_TEMPLATE.format(channel_name)
process_details['process'] = process
process_details['smtp'] = smtp
process_details['email_from'] = email_from
process_details['emails_to'] = emails_to
process_details['channel_id'] = channel_id
process_details['channel_name'] = channel_name
process_details['username'] = username
process_details['password'] = password
process_details['channel_type'] = ChannelTypes.EMAIL.value
process_details['port'] = port
def _create_and_start_pagerduty_alerts_handler(
self, integration_key: str, channel_id: str, channel_name: str) \
-> None:
process = multiprocessing.Process(
target=start_pagerduty_alerts_handler,
args=(integration_key, channel_id, channel_name))
process.daemon = True
log_and_print("Creating a new process for the alerts handler of "
"PagerDuty channel {}".format(channel_name), self.logger)
process.start()
if channel_id not in self._channel_process_dict:
self._channel_process_dict[channel_id] = {}
handler_type = ChannelHandlerTypes.ALERTS.value
self._channel_process_dict[channel_id][handler_type] = {}
process_details = self._channel_process_dict[channel_id][handler_type]
process_details['component_name'] = \
PAGERDUTY_ALERTS_HANDLER_NAME_TEMPLATE.format(channel_name)
process_details['process'] = process
process_details['integration_key'] = integration_key
process_details['channel_id'] = channel_id
process_details['channel_name'] = channel_name
process_details['channel_type'] = ChannelTypes.PAGERDUTY.value
def _create_and_start_opsgenie_alerts_handler(
self, api_key: str, eu_host: bool, channel_id: str,
channel_name: str) -> None:
process = multiprocessing.Process(
target=start_opsgenie_alerts_handler,
args=(api_key, eu_host, channel_id, channel_name))
process.daemon = True
log_and_print("Creating a new process for the alerts handler of "
"Opsgenie channel {}".format(channel_name), self.logger)
process.start()
if channel_id not in self._channel_process_dict:
self._channel_process_dict[channel_id] = {}
handler_type = ChannelHandlerTypes.ALERTS.value
self._channel_process_dict[channel_id][handler_type] = {}
process_details = self._channel_process_dict[channel_id][handler_type]
process_details['component_name'] = \
OPSGENIE_ALERTS_HANDLER_NAME_TEMPLATE.format(channel_name)
process_details['process'] = process
process_details['api_key'] = api_key
process_details['channel_id'] = channel_id
process_details['channel_name'] = channel_name
process_details['eu_host'] = eu_host
process_details['channel_type'] = ChannelTypes.OPSGENIE.value
def _create_and_start_console_alerts_handler(
self, channel_id: str, channel_name: str) -> None:
process = multiprocessing.Process(target=start_console_alerts_handler,
args=(channel_id, channel_name))
process.daemon = True
log_and_print("Creating a new process for the alerts handler of "
"console channel {}".format(channel_name), self.logger)
process.start()
if channel_id not in self._channel_process_dict:
self._channel_process_dict[channel_id] = {}
handler_type = ChannelHandlerTypes.ALERTS.value
self._channel_process_dict[channel_id][handler_type] = {}
process_details = self._channel_process_dict[channel_id][handler_type]
process_details['component_name'] = \
CONSOLE_ALERTS_HANDLER_NAME_TEMPLATE.format(channel_name)
process_details['process'] = process
process_details['channel_id'] = channel_id
process_details['channel_name'] = channel_name
process_details['channel_type'] = ChannelTypes.CONSOLE.value
def _create_and_start_log_alerts_handler(self, channel_id: str,
channel_name: str) -> None:
process = multiprocessing.Process(target=start_log_alerts_handler,
args=(channel_id, channel_name))
process.daemon = True
log_and_print("Creating a new process for the alerts handler of "
"log channel {}".format(channel_name), self.logger)
process.start()
if channel_id not in self._channel_process_dict:
self._channel_process_dict[channel_id] = {}
handler_type = ChannelHandlerTypes.ALERTS.value
self._channel_process_dict[channel_id][handler_type] = {}
process_details = self._channel_process_dict[channel_id][handler_type]
process_details['component_name'] = \
LOG_ALERTS_HANDLER_NAME_TEMPLATE.format(channel_name)
process_details['process'] = process
process_details['channel_id'] = channel_id
process_details['channel_name'] = channel_name
process_details['channel_type'] = ChannelTypes.LOG.value
def _start_persistent_channels(self) -> None:
# Start the console channel in a separate process if it is not yet
# started or it is not alive. This must be done in case of a restart of
# the manager.
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if CONSOLE_CHANNEL_ID not in self._channel_process_dict or \
not self.channel_process_dict[CONSOLE_CHANNEL_ID][
alerts_handler_type]['process'].is_alive():
self._create_and_start_console_alerts_handler(CONSOLE_CHANNEL_ID,
CONSOLE_CHANNEL_NAME)
# Start the LOG channel in a separate process if it is not yet started
# or it is not alive. This must be done in case of a restart of the
# manager.
if LOG_CHANNEL_ID not in self._channel_process_dict or \
not self.channel_process_dict[LOG_CHANNEL_ID][
alerts_handler_type]['process'].is_alive():
self._create_and_start_log_alerts_handler(LOG_CHANNEL_ID,
LOG_CHANNEL_NAME)
def _process_telegram_configs(self, sent_configs: Dict) -> Dict:
if ChannelTypes.TELEGRAM.value in self.channel_configs:
current_configs = self.channel_configs[ChannelTypes.TELEGRAM.value]
else:
current_configs = {}
# This contains all the correct latest channel configs. All current
# configs are correct configs, therefore start from the current and
# modify as we go along according to the updates. This is done just in
# case an error occurs.
correct_configs = copy.deepcopy(current_configs)
try:
new_configs = get_newly_added_configs(
sent_configs, current_configs)
for config_id in new_configs:
config = new_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
bot_token = config['bot_token']
chat_id = config['chat_id']
alerts = str_to_bool(config['alerts'])
commands = str_to_bool(config['commands'])
parent_ids = config['parent_ids'].split(',')
chain_names = config['parent_names'].split(',')
associated_chains = dict(zip(parent_ids, chain_names))
# If Telegram Alerts are enabled on this channel, start an
# alerts handler for this channel
if alerts:
self._create_and_start_telegram_alerts_handler(
bot_token, chat_id, channel_id, channel_name)
correct_configs[config_id] = config
# If Telegram Commands are enabled on this channel, start a
# commands handler for this channel
if commands:
self._create_and_start_telegram_cmds_handler(
bot_token, chat_id, channel_id, channel_name,
associated_chains)
correct_configs[config_id] = config
modified_configs = get_modified_configs(sent_configs,
current_configs)
for config_id in modified_configs:
# Get the latest updates
config = sent_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
bot_token = config['bot_token']
chat_id = config['chat_id']
alerts = str_to_bool(config['alerts'])
commands = str_to_bool(config['commands'])
parent_ids = config['parent_ids'].split(',')
chain_names = config['parent_names'].split(',')
associated_chains = dict(zip(parent_ids, chain_names))
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
if not alerts:
del self.channel_process_dict[channel_id][
alerts_handler_type]
log_and_print("Killed the alerts handler of {} "
.format(channel_name), self.logger)
else:
log_and_print(
"Restarting the alerts handler of {} with latest "
"configuration".format(channel_name), self.logger)
self._create_and_start_telegram_alerts_handler(
bot_token, chat_id, channel_id, channel_name)
else:
if alerts:
log_and_print(
"Starting a new alerts handler for {}.".format(
channel_name), self.logger)
self._create_and_start_telegram_alerts_handler(
bot_token, chat_id, channel_id, channel_name)
commands_handler_type = ChannelHandlerTypes.COMMANDS.value
if commands_handler_type in \
self.channel_process_dict[channel_id]:
previous_commands_process = self.channel_process_dict[
channel_id][commands_handler_type]['process']
previous_commands_process.terminate()
previous_commands_process.join()
if not commands:
del self.channel_process_dict[channel_id][
commands_handler_type]
log_and_print("Killed the commands handler of {} "
.format(channel_name), self.logger)
else:
log_and_print(
"Restarting the commands handler of {} with latest "
"configuration".format(channel_name), self.logger)
self._create_and_start_telegram_cmds_handler(
bot_token, chat_id, channel_id, channel_name,
associated_chains)
else:
if commands:
log_and_print(
"Starting a new commands handler for {}.".format(
channel_name), self.logger)
self._create_and_start_telegram_cmds_handler(
bot_token, chat_id, channel_id, channel_name,
associated_chains)
# Delete the state entries if both commands and alerts are
# disabled on the Telegram channel. Otherwise, save the config
# as a process must be running
if not commands and not alerts:
del self.channel_process_dict[channel_id]
del correct_configs[config_id]
else:
correct_configs[config_id] = config
removed_configs = get_removed_configs(
sent_configs, current_configs)
for config_id in removed_configs:
config = removed_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
log_and_print("Killed the alerts handler of {} ".format(
channel_name), self.logger)
commands_handler_type = ChannelHandlerTypes.COMMANDS.value
if commands_handler_type in \
self.channel_process_dict[channel_id]:
previous_commands_process = self.channel_process_dict[
channel_id][commands_handler_type]['process']
previous_commands_process.terminate()
previous_commands_process.join()
log_and_print("Killed the commands handler of {} ".format(
channel_name), self.logger)
del self.channel_process_dict[channel_id]
del correct_configs[config_id]
except Exception as e:
# If we encounter an error during processing, this error must be
# logged and the message must be acknowledged so that it is removed
# from the queue
self.logger.error("Error when processing {}".format(sent_configs))
self.logger.exception(e)
return correct_configs
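    # Illustrative shape of `sent_configs` for a single Telegram channel
    # (hypothetical values; real configs arrive on the config exchange and the
    # exact key/value formats are assumptions based on the fields read above):
    #
    # {
    #     'channel_1': {
    #         'id': 'channel_1',
    #         'channel_name': 'dev_team_telegram',
    #         'bot_token': '<bot token>',
    #         'chat_id': '<chat id>',
    #         'alerts': 'true',
    #         'commands': 'false',
    #         'parent_ids': 'chain_1,chain_2',
    #         'parent_names': 'cosmos,polkadot'
    #     }
    # }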
def _process_slack_configs(self, sent_configs: Dict) -> Dict:
if ChannelTypes.SLACK.value in self.channel_configs:
current_configs = self.channel_configs[ChannelTypes.SLACK.value]
else:
current_configs = {}
# This contains all the correct latest channel configs. All current
# configs are correct configs, therefore start from the current and
# modify as we go along according to the updates. This is done just in
# case an error occurs.
correct_configs = copy.deepcopy(current_configs)
try:
new_configs = get_newly_added_configs(
sent_configs, current_configs)
for config_id in new_configs:
config = new_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
bot_token = config['bot_token']
app_token = config['app_token']
bot_channel_id = config['bot_channel_id']
alerts = str_to_bool(config['alerts'])
commands = str_to_bool(config['commands'])
parent_ids = config['parent_ids'].split(',')
chain_names = config['parent_names'].split(',')
associated_chains = dict(zip(parent_ids, chain_names))
# If Slack Alerts are enabled on this channel, start an
# alerts handler for this channel
if alerts:
self._create_and_start_slack_alerts_handler(
bot_token, app_token, bot_channel_id, channel_id,
channel_name)
correct_configs[config_id] = config
# If Slack Commands are enabled on this channel, start a
# commands handler for this channel
if commands:
self._create_and_start_slack_cmds_handler(
bot_token, app_token, bot_channel_id, channel_id,
channel_name, associated_chains)
correct_configs[config_id] = config
modified_configs = get_modified_configs(sent_configs,
current_configs)
for config_id in modified_configs:
# Get the latest updates
config = sent_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
bot_token = config['bot_token']
app_token = config['app_token']
bot_channel_id = config['bot_channel_id']
alerts = str_to_bool(config['alerts'])
commands = str_to_bool(config['commands'])
parent_ids = config['parent_ids'].split(',')
chain_names = config['parent_names'].split(',')
associated_chains = dict(zip(parent_ids, chain_names))
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
if not alerts:
del self.channel_process_dict[channel_id][
alerts_handler_type]
log_and_print("Killed the alerts handler of {} "
.format(channel_name), self.logger)
else:
log_and_print(
"Restarting the alerts handler of {} with latest "
"configuration".format(channel_name), self.logger)
self._create_and_start_slack_alerts_handler(
bot_token, app_token, bot_channel_id, channel_id,
channel_name)
else:
if alerts:
log_and_print(
"Starting a new alerts handler for {}.".format(
channel_name), self.logger)
self._create_and_start_slack_alerts_handler(
bot_token, app_token, bot_channel_id, channel_id,
channel_name)
commands_handler_type = ChannelHandlerTypes.COMMANDS.value
if commands_handler_type in \
self.channel_process_dict[channel_id]:
previous_commands_process = self.channel_process_dict[
channel_id][commands_handler_type]['process']
previous_commands_process.terminate()
previous_commands_process.join()
if not commands:
del self.channel_process_dict[channel_id][
commands_handler_type]
log_and_print("Killed the commands handler of {} "
.format(channel_name), self.logger)
else:
log_and_print(
"Restarting the commands handler of {} with latest "
"configuration".format(channel_name), self.logger)
self._create_and_start_slack_cmds_handler(
bot_token, app_token, bot_channel_id, channel_id,
channel_name, associated_chains)
else:
if commands:
log_and_print(
"Starting a new commands handler for {}.".format(
channel_name), self.logger)
self._create_and_start_slack_cmds_handler(
bot_token, app_token, bot_channel_id, channel_id,
channel_name, associated_chains)
# Delete the state entries if both commands and alerts are
# disabled on the Slack channel. Otherwise, save the config
# as a process must be running
if not commands and not alerts:
del self.channel_process_dict[channel_id]
del correct_configs[config_id]
else:
correct_configs[config_id] = config
removed_configs = get_removed_configs(
sent_configs, current_configs)
for config_id in removed_configs:
config = removed_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
log_and_print("Killed the alerts handler of {} ".format(
channel_name), self.logger)
commands_handler_type = ChannelHandlerTypes.COMMANDS.value
if commands_handler_type in \
self.channel_process_dict[channel_id]:
previous_commands_process = self.channel_process_dict[
channel_id][commands_handler_type]['process']
previous_commands_process.terminate()
previous_commands_process.join()
log_and_print("Killed the commands handler of {} ".format(
channel_name), self.logger)
del self.channel_process_dict[channel_id]
del correct_configs[config_id]
except Exception as e:
# If we encounter an error during processing, this error must be
# logged and the message must be acknowledged so that it is removed
# from the queue
self.logger.error("Error when processing {}".format(sent_configs))
self.logger.exception(e)
return correct_configs
def _process_twilio_configs(self, sent_configs: Dict) -> Dict:
if ChannelTypes.TWILIO.value in self.channel_configs:
current_configs = self.channel_configs[ChannelTypes.TWILIO.value]
else:
current_configs = {}
# This contains all the correct latest channel configs. All current
# configs are correct configs, therefore start from the current and
# modify as we go along according to the updates. This is done just in
# case an error occurs.
correct_configs = copy.deepcopy(current_configs)
try:
new_configs = get_newly_added_configs(
sent_configs, current_configs)
for config_id in new_configs:
config = new_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
account_sid = config['account_sid']
auth_token = config['auth_token']
twilio_phone_number = config['twilio_phone_no']
numbers_to_dial = config['twilio_phone_numbers_to_dial_valid'] \
.split(',')
twiml = env.TWIML
twiml_is_url = env.TWIML_IS_URL
self._create_and_start_twilio_alerts_handler(
account_sid, auth_token, channel_id, channel_name,
twilio_phone_number, numbers_to_dial, twiml, twiml_is_url)
correct_configs[config_id] = config
modified_configs = get_modified_configs(sent_configs,
current_configs)
for config_id in modified_configs:
# Get the latest updates
config = sent_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
account_sid = config['account_sid']
auth_token = config['auth_token']
twilio_phone_number = config['twilio_phone_no']
numbers_to_dial = config['twilio_phone_numbers_to_dial_valid'] \
.split(',')
twiml = env.TWIML
twiml_is_url = env.TWIML_IS_URL
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
log_and_print("Restarting the alerts handler of {} with "
"latest configuration".format(channel_name),
self.logger)
self._create_and_start_twilio_alerts_handler(
account_sid, auth_token, channel_id, channel_name,
twilio_phone_number, numbers_to_dial, twiml,
twiml_is_url)
correct_configs[config_id] = config
removed_configs = get_removed_configs(
sent_configs, current_configs)
for config_id in removed_configs:
config = removed_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
log_and_print("Killed the alerts handler of {} ".format(
channel_name), self.logger)
del self.channel_process_dict[channel_id]
del correct_configs[config_id]
except Exception as e:
# If we encounter an error during processing, this error must be
# logged and the message must be acknowledged so that it is removed
# from the queue
self.logger.error("Error when processing %s", sent_configs)
self.logger.exception(e)
return correct_configs
def _process_email_configs(self, sent_configs: Dict) -> Dict:
if ChannelTypes.EMAIL.value in self.channel_configs:
current_configs = self.channel_configs[ChannelTypes.EMAIL.value]
else:
current_configs = {}
# This contains all the correct latest channel configs. All current
# configs are correct configs, therefore start from the current and
# modify as we go along according to the updates. This is done just in
# case an error occurs.
correct_configs = copy.deepcopy(current_configs)
try:
new_configs = get_newly_added_configs(
sent_configs, current_configs)
for config_id in new_configs:
config = new_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
smtp = config['smtp']
email_from = config['email_from']
emails_to = config['emails_to'].split(',')
username = config['username']
password = config['password']
port = convert_to_int(config['port'], 0)
self._create_and_start_email_alerts_handler(
smtp, email_from, emails_to, channel_id, channel_name,
username, password, port)
correct_configs[config_id] = config
modified_configs = get_modified_configs(sent_configs,
current_configs)
for config_id in modified_configs:
# Get the latest updates
config = sent_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
smtp = config['smtp']
email_from = config['email_from']
emails_to = config['emails_to'].split(',')
username = config['username']
password = config['password']
port = convert_to_int(config['port'], 0)
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
log_and_print("Restarting the alerts handler of {} with "
"latest configuration".format(channel_name),
self.logger)
self._create_and_start_email_alerts_handler(
smtp, email_from, emails_to, channel_id, channel_name,
username, password, port)
correct_configs[config_id] = config
removed_configs = get_removed_configs(
sent_configs, current_configs)
for config_id in removed_configs:
config = removed_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
log_and_print("Killed the alerts handler of {} ".format(
channel_name), self.logger)
del self.channel_process_dict[channel_id]
del correct_configs[config_id]
except Exception as e:
# If we encounter an error during processing, this error must be
# logged and the message must be acknowledged so that it is removed
# from the queue
self.logger.error("Error when processing %s", sent_configs)
self.logger.exception(e)
return correct_configs
def _process_pagerduty_configs(self, sent_configs: Dict) -> Dict:
if ChannelTypes.PAGERDUTY.value in self.channel_configs:
current_configs = self.channel_configs[ChannelTypes.PAGERDUTY.value]
else:
current_configs = {}
# This contains all the correct latest channel configs. All current
# configs are correct configs, therefore start from the current and
# modify as we go along according to the updates. This is done just in
# case an error occurs.
correct_configs = copy.deepcopy(current_configs)
try:
new_configs = get_newly_added_configs(
sent_configs, current_configs)
for config_id in new_configs:
config = new_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
integration_key = config['integration_key']
self._create_and_start_pagerduty_alerts_handler(
integration_key, channel_id, channel_name)
correct_configs[config_id] = config
modified_configs = get_modified_configs(sent_configs,
current_configs)
for config_id in modified_configs:
# Get the latest updates
config = sent_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
integration_key = config['integration_key']
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
log_and_print("Restarting the alerts handler of {} with "
"latest configuration".format(channel_name),
self.logger)
self._create_and_start_pagerduty_alerts_handler(
integration_key, channel_id, channel_name)
correct_configs[config_id] = config
removed_configs = get_removed_configs(
sent_configs, current_configs)
for config_id in removed_configs:
config = removed_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
log_and_print("Killed the alerts handler of {} ".format(
channel_name), self.logger)
del self.channel_process_dict[channel_id]
del correct_configs[config_id]
except Exception as e:
# If we encounter an error during processing, this error must be
# logged and the message must be acknowledged so that it is removed
# from the queue
self.logger.error("Error when processing %s", sent_configs)
self.logger.exception(e)
return correct_configs
def _process_opsgenie_configs(self, sent_configs: Dict) -> Dict:
if ChannelTypes.OPSGENIE.value in self.channel_configs:
current_configs = self.channel_configs[ChannelTypes.OPSGENIE.value]
else:
current_configs = {}
# This contains all the correct latest channel configs. All current
# configs are correct configs, therefore start from the current and
# modify as we go along according to the updates. This is done just in
# case an error occurs.
correct_configs = copy.deepcopy(current_configs)
try:
new_configs = get_newly_added_configs(
sent_configs, current_configs)
for config_id in new_configs:
config = new_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
api_key = config['api_token']
eu_host = str_to_bool(config['eu'])
self._create_and_start_opsgenie_alerts_handler(
api_key, eu_host, channel_id, channel_name)
correct_configs[config_id] = config
modified_configs = get_modified_configs(sent_configs,
current_configs)
for config_id in modified_configs:
# Get the latest updates
config = sent_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
api_key = config['api_token']
eu_host = str_to_bool(config['eu'])
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
log_and_print("Restarting the alerts handler of {} with "
"latest configuration".format(channel_name),
self.logger)
self._create_and_start_opsgenie_alerts_handler(
api_key, eu_host, channel_id, channel_name)
correct_configs[config_id] = config
removed_configs = get_removed_configs(
sent_configs, current_configs)
for config_id in removed_configs:
config = removed_configs[config_id]
channel_id = config['id']
channel_name = config['channel_name']
alerts_handler_type = ChannelHandlerTypes.ALERTS.value
if alerts_handler_type in self.channel_process_dict[channel_id]:
previous_alerts_process = self.channel_process_dict[
channel_id][alerts_handler_type]['process']
previous_alerts_process.terminate()
previous_alerts_process.join()
log_and_print("Killed the alerts handler of {} ".format(
channel_name), self.logger)
del self.channel_process_dict[channel_id]
del correct_configs[config_id]
except Exception as e:
# If we encounter an error during processing, this error must be
# logged and the message must be acknowledged so that it is removed
# from the queue
self.logger.error("Error when processing %s", sent_configs)
self.logger.exception(e)
return correct_configs
def _process_configs(
self, ch: BlockingChannel, method: pika.spec.Basic.Deliver,
properties: pika.spec.BasicProperties, body: bytes) -> None:
sent_configs = json.loads(body)
self.logger.info("Received configs %s. Now processing.", sent_configs)
if 'DEFAULT' in sent_configs:
del sent_configs['DEFAULT']
if method.routing_key == 'channels.telegram_config':
updated_configs = self._process_telegram_configs(sent_configs)
self._channel_configs[ChannelTypes.TELEGRAM.value] = updated_configs
elif method.routing_key == 'channels.slack_config':
updated_configs = self._process_slack_configs(sent_configs)
self._channel_configs[ChannelTypes.SLACK.value] = updated_configs
elif method.routing_key == 'channels.twilio_config':
updated_configs = self._process_twilio_configs(sent_configs)
self._channel_configs[ChannelTypes.TWILIO.value] = updated_configs
elif method.routing_key == 'channels.email_config':
updated_configs = self._process_email_configs(sent_configs)
self._channel_configs[ChannelTypes.EMAIL.value] = updated_configs
elif method.routing_key == 'channels.pagerduty_config':
updated_configs = self._process_pagerduty_configs(sent_configs)
self._channel_configs[ChannelTypes.PAGERDUTY.value] = \
updated_configs
elif method.routing_key == 'channels.opsgenie_config':
updated_configs = self._process_opsgenie_configs(sent_configs)
self._channel_configs[ChannelTypes.OPSGENIE.value] = \
updated_configs
self.rabbitmq.basic_ack(method.delivery_tag, False)
def _process_ping(
self, ch: BlockingChannel, method: pika.spec.Basic.Deliver,
properties: pika.spec.BasicProperties, body: bytes) -> None:
data = body
self.logger.debug("Received %s", data)
heartbeat = {}
try:
heartbeat['component_name'] = self.name
heartbeat['running_processes'] = []
heartbeat['dead_processes'] = []
for channel_id, handlers in self.channel_process_dict.items():
for handler, process_details in handlers.items():
process = process_details['process']
component_name = process_details['component_name']
if process.is_alive():
heartbeat['running_processes'].append(component_name)
else:
heartbeat['dead_processes'].append(component_name)
process.join() # Just in case, to release resources
# Restart dead process
channel_type = process_details['channel_type']
if channel_type == ChannelTypes.TELEGRAM.value:
if handler == ChannelHandlerTypes.ALERTS.value:
self._create_and_start_telegram_alerts_handler(
process_details['bot_token'],
process_details['bot_chat_id'],
process_details['channel_id'],
process_details['channel_name'])
elif handler == ChannelHandlerTypes.COMMANDS.value:
self._create_and_start_telegram_cmds_handler(
process_details['bot_token'],
process_details['bot_chat_id'],
process_details['channel_id'],
process_details['channel_name'],
process_details['associated_chains'])
elif channel_type == ChannelTypes.SLACK.value:
if handler == ChannelHandlerTypes.ALERTS.value:
self._create_and_start_slack_alerts_handler(
process_details['bot_token'],
process_details['app_token'],
process_details['bot_channel_id'],
process_details['channel_id'],
process_details['channel_name'])
elif handler == ChannelHandlerTypes.COMMANDS.value:
self._create_and_start_slack_cmds_handler(
process_details['bot_token'],
process_details['app_token'],
process_details['bot_channel_id'],
process_details['channel_id'],
process_details['channel_name'],
process_details['associated_chains'])
elif channel_type == ChannelTypes.TWILIO.value:
self._create_and_start_twilio_alerts_handler(
process_details['account_sid'],
process_details['auth_token'],
process_details['channel_id'],
process_details['channel_name'],
process_details['call_from'],
process_details['call_to'],
process_details['twiml'],
process_details['twiml_is_url'])
elif channel_type == ChannelTypes.EMAIL.value:
self._create_and_start_email_alerts_handler(
process_details['smtp'],
process_details['email_from'],
process_details['emails_to'],
process_details['channel_id'],
process_details['channel_name'],
process_details['username'],
process_details['password'],
process_details['port']
)
elif channel_type == ChannelTypes.PAGERDUTY.value:
self._create_and_start_pagerduty_alerts_handler(
process_details['integration_key'],
process_details['channel_id'],
process_details['channel_name'],
)
elif channel_type == ChannelTypes.OPSGENIE.value:
self._create_and_start_opsgenie_alerts_handler(
process_details['api_key'],
process_details['eu_host'],
process_details['channel_id'],
process_details['channel_name'],
)
elif channel_type == ChannelTypes.CONSOLE.value:
self._create_and_start_console_alerts_handler(
process_details['channel_id'],
process_details['channel_name'])
elif channel_type == ChannelTypes.LOG.value:
self._create_and_start_log_alerts_handler(
process_details['channel_id'],
process_details['channel_name'])
heartbeat['timestamp'] = datetime.now().timestamp()
except Exception as e:
            # If we encounter an error during processing, log the error and
# return so that no heartbeat is sent
self.logger.error("Error when processing %s", data)
self.logger.exception(e)
return
# Send heartbeat if processing was successful
try:
self._send_heartbeat(heartbeat)
except MessageWasNotDeliveredException as e:
# Log the message and do not raise it as there is no use in
# re-trying to send a heartbeat
self.logger.exception(e)
except Exception as e:
# For any other exception raise it.
raise e
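    # Illustrative shape of the heartbeat built above (hypothetical values;
    # the component name strings come from the handler name templates):
    #
    # {
    #     'component_name': 'Channels Manager',
    #     'running_processes': ['<alerts handler component name>'],
    #     'dead_processes': [],
    #     'timestamp': 1625097600.123
    # }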
def start(self) -> None:
log_and_print("{} started.".format(self), self.logger)
self._initialise_rabbitmq()
while True:
try:
self._start_persistent_channels()
self._listen_for_data()
except (pika.exceptions.AMQPConnectionError,
pika.exceptions.AMQPChannelError) as e:
# If we have either a channel error or connection error, the
# channel is reset, therefore we need to re-initialise the
# connection or channel settings
raise e
except Exception as e:
self.logger.exception(e)
raise e
    # If termination signals are received, terminate all child processes and
# close the connection with rabbitmq before exiting
def _on_terminate(self, signum: int, stack: FrameType) -> None:
log_and_print(
"{} is terminating. Connections with RabbitMQ will be closed, and "
"any running channel handlers will be stopped gracefully. "
"Afterwards the {} process will exit.".format(self, self),
self.logger)
self.disconnect_from_rabbit()
for _, handlers in self.channel_process_dict.items():
for handler, process_details in handlers.items():
log_and_print("Terminating {}".format(
process_details['component_name']), self.logger)
process = process_details['process']
process.terminate()
process.join()
log_and_print("{} terminated.".format(self), self.logger)
sys.exit()
def _send_data(self, *args) -> None:
"""
The _send_data function is not implemented because, with respect to
RabbitMQ, the channels manager only sends heartbeats.
"""
pass
|
py | b403cca769263936ef962f6c46ebb6bbebf212aa | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm^33jy7w#*zm9-1=z$rh6n60=yq#r9!$jkfl73$cj6a()l4$o)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
AUTH_USER_MODEL = 'core.User'
|
py | b403cd36f9ca4aab7b7b6aded4b6ca897022a62a | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["digitalio", "busio"]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Adafruit BLE_Apple_Media Library'
copyright = u'2020 Scott Shawcroft'
author = u'Scott Shawcroft'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/favicon.ico'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdafruitBle_apple_mediaLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AdafruitBLE_Apple_MediaLibrary.tex', u'AdafruitBLE_Apple_Media Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'AdafruitBLE_Apple_Medialibrary', u'Adafruit BLE_Apple_Media Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AdafruitBLE_Apple_MediaLibrary', u'Adafruit BLE_Apple_Media Library Documentation',
author, 'AdafruitBLE_Apple_MediaLibrary', 'One line description of project.',
'Miscellaneous'),
]
|
py | b403cec2774c708f0e7dd48862eed2de0859637a | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import pdb
from torch.autograd import Variable
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn.utils.weight_norm as wtnrm
import numpy as np
from keras.preprocessing.sequence import pad_sequences
import sys
sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/MT_Transformer/')
from MT_TransV1.Trans_Decoder import Decoder
from MT_TransV1.Trans_Encoder import Encoder
#import sys
#sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/MT_Transformer/MT_Transformer')
#--------------------------------------------------------------------------
class Transformer(nn.Module):
"""An encoder-decoder framework only includes attention. """
def __init__(self,args):
super(Transformer, self).__init__()
self.label_smoothing = args.label_smoothing
self.encoder = Encoder(args=args,MT_flag=True)
self.decoder = Decoder(args=args)
#----------------------------------
def forward(self, padded_Src_seq,padded_Tgt_seq):
###conv layers
#General Transformer MT model
encoder_padded_outputs, *_ = self.encoder(padded_Src_seq)
#output_dict = self.decoder(padded_Tgt_seq, encoder_padded_outputs)
pred, gold = self.decoder(padded_Tgt_seq, encoder_padded_outputs)
#cost, CER = cal_performance(pred, gold,self.decoder.IGNORE_ID,normalize_length=False,smoothing=self.decoder.label_smoothing)
#breakpoint()
# output_dict={'cost':cost, 'CER':CER, 'smp_pred':pred,'smp_gold':gold}
#output_dict = {'cost':cost, 'dec_slf_attn_list':dec_slf_attn_list, 'dec_enc_attn_list':dec_enc_attn_list, 'Char_cer':CER, 'Word_cer':CER}
return pred, gold
#=============================================================================================================
#=============================================================================================================
#==============================================================================
def predict(self, Src_tokens,args):
#print("went to the decoder loop")
with torch.no_grad():
#### read feature matrices
smp_Src_labels = torch.LongTensor(Src_tokens)
smp_Src_labels = smp_Src_labels.cuda() if args.gpu else smp_Src_labels
smp_Src_labels = smp_Src_labels.unsqueeze(0)
#General Transformer ASR model
encoder_padded_outputs, *_ = self.encoder(smp_Src_labels)
nbest_hyps,scoring_list = self.decoder.recognize_batch_beam_autoreg_LM_multi_hyp(encoder_padded_outputs,args.beam,args.Am_weight,args.gamma,args.LM_model,args.len_pen,args)
#===================================================================================
beam_len = nbest_hyps.size(0)
hyp = {'score': 0.0, 'yseq': None,'state': None, 'alpha_i_list':None, 'Text_seq':None}
#===============================================
Output_dict=[]
for I in range(beam_len):
new_hyp={}
new_hyp['yseq'] = nbest_hyps[I]
new_hyp['score'] = scoring_list[I].sum()
#new_hyp['Text_seq'] = self.decoder.get_charecters_for_sequences(nbest_hyps[I].unsqueeze(0))
new_hyp['Text_seq'] = self.decoder.get_charecters_for_sequences(nbest_hyps[I].unsqueeze(0),self.decoder.Tgt_model,self.decoder.pad_index,self.decoder.eos_id,self.decoder.word_unk)
new_hyp['state'] = hyp['state']
new_hyp['alpha_i_list'] = hyp['alpha_i_list']
Output_dict.append(new_hyp)
return Output_dict
#----------------------------------------------------------------
#=============================================================================================================
#=============================================================================================================
#-------------------------------------------------------------------------------------------------------------
#=============================================================================================================
#-------------------------------------------------------------------------------------------------------------
class TransformerOptimizer(object):
"""A simple wrapper class for learning rate scheduling"""
def __init__(self, optimizer, k, d_model, step_num=0, warmup_steps=4000, warm_restart=200000):
self.optimizer = optimizer
self.optimizer_org = optimizer
self.k = k
#present_lr=[param_group['lr'] for param_group in self.optimizer.param_groups]
self.init_lr = d_model ** (-0.5)
self.warmup_steps = warmup_steps
self.step_num = step_num
self.reduction_factor=1
self.warm_restart = warm_restart
def zero_grad(self):
self.optimizer.zero_grad()
def step(self):
self._update_lr()
self.optimizer.step()
self.warm_restartfn()
def _update_lr(self):
self.step_num += 1
lr = self.k * self.init_lr * min(self.step_num ** (-0.5), self.step_num * (self.warmup_steps ** (-1.5)))
#print(lr,self.step_num ** (-0.5),self.step_num * self.warmup_steps ** (-1.5),self.reduction_factor)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def load_state_dict(self, state_dict):
self.optimizer.load_state_dict(state_dict)
def state_dict(self):
return self.optimizer.state_dict()
def set_k(self, k):
self.k = k
def set_step_num(self, step_num):
self.step_num=step_num
def reduce_learning_rate(self, k):
self.reduction_factor = self.reduction_factor*k
#print(self.reduction_factor)
def print_lr(self):
present_lr=[param_group['lr'] for param_group in self.optimizer.param_groups]
return present_lr[0]
def warm_restartfn(self):
if (self.step_num%self.warm_restart==0):
self.optimizer = self.optimizer_org
self.step_num = self.warm_restart
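# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module). The
# wrapper above implements the "Noam" schedule,
#   lr = k * d_model**(-0.5) * min(step**(-0.5), step * warmup_steps**(-1.5)),
# so it is normally wrapped around a plain torch optimizer. Assuming a model
# with d_model=512 and a hypothetical compute_loss helper:
#
#   base_opt = torch.optim.Adam(model.parameters(), lr=0.0, betas=(0.9, 0.98), eps=1e-9)
#   optimizer = TransformerOptimizer(base_opt, k=1.0, d_model=512, warmup_steps=4000)
#   for batch in loader:
#       optimizer.zero_grad()
#       loss = compute_loss(batch)
#       loss.backward()
#       optimizer.step()   # updates the lr first, then steps the wrapped optimizer
# ---------------------------------------------------------------------------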
#=============================================================================================================
#---------------------------------------------------------------------------------------------------------------
#===============================================================================================================
#===============================================================================================================
#===============================================================================================================
#===============================================================================================================
#===============================================================================================================
|
py | b403cec741a8a498284e6be804571ca94aeefc01 | # Paper: Link Prediction Based on Graph Neural Networks (NeurIPS 2018)
import math
import random
import os.path as osp
from itertools import chain
import numpy as np
from sklearn.metrics import roc_auc_score
from scipy.sparse.csgraph import shortest_path
import torch
import torch.nn.functional as F
from torch.nn import BCEWithLogitsLoss
from torch.nn import ModuleList, Linear, Conv1d, MaxPool1d
from torch_geometric.datasets import Planetoid
from torch_geometric.nn import GCNConv, global_sort_pool
from torch_geometric.data import Data, InMemoryDataset, DataLoader
from torch_geometric.utils import (negative_sampling, add_self_loops,
train_test_split_edges, k_hop_subgraph,
to_scipy_sparse_matrix)
class SEALDataset(InMemoryDataset):
def __init__(self, dataset, num_hops, split='train'):
self.data = dataset[0]
self.num_hops = num_hops
super(SEALDataset, self).__init__(dataset.root)
index = ['train', 'val', 'test'].index(split)
self.data, self.slices = torch.load(self.processed_paths[index])
@property
def processed_file_names(self):
return ['SEAL_train_data.pt', 'SEAL_val_data.pt', 'SEAL_test_data.pt']
def process(self):
random.seed(12345)
torch.manual_seed(12345)
data = train_test_split_edges(self.data)
edge_index, _ = add_self_loops(data.train_pos_edge_index)
data.train_neg_edge_index = negative_sampling(
edge_index, num_nodes=data.num_nodes,
num_neg_samples=data.train_pos_edge_index.size(1))
self.__max_z__ = 0
# Collect a list of subgraphs for training, validation and test.
train_pos_list = self.extract_enclosing_subgraphs(
data.train_pos_edge_index, data.train_pos_edge_index, 1)
train_neg_list = self.extract_enclosing_subgraphs(
data.train_neg_edge_index, data.train_pos_edge_index, 0)
val_pos_list = self.extract_enclosing_subgraphs(
data.val_pos_edge_index, data.train_pos_edge_index, 1)
val_neg_list = self.extract_enclosing_subgraphs(
data.val_neg_edge_index, data.train_pos_edge_index, 0)
test_pos_list = self.extract_enclosing_subgraphs(
data.test_pos_edge_index, data.train_pos_edge_index, 1)
test_neg_list = self.extract_enclosing_subgraphs(
data.test_neg_edge_index, data.train_pos_edge_index, 0)
# Convert labels to one-hot features.
for data in chain(train_pos_list, train_neg_list, val_pos_list,
val_neg_list, test_pos_list, test_neg_list):
data.x = F.one_hot(data.z, self.__max_z__ + 1).to(torch.float)
torch.save(self.collate(train_pos_list + train_neg_list),
self.processed_paths[0])
torch.save(self.collate(val_pos_list + val_neg_list),
self.processed_paths[1])
torch.save(self.collate(test_pos_list + test_neg_list),
self.processed_paths[2])
def extract_enclosing_subgraphs(self, link_index, edge_index, y):
data_list = []
for src, dst in link_index.t().tolist():
sub_nodes, sub_edge_index, mapping, _ = k_hop_subgraph(
[src, dst], self.num_hops, edge_index, relabel_nodes=True)
src, dst = mapping.tolist()
# Remove target link from the subgraph.
mask1 = (sub_edge_index[0] != src) | (sub_edge_index[1] != dst)
mask2 = (sub_edge_index[0] != dst) | (sub_edge_index[1] != src)
sub_edge_index = sub_edge_index[:, mask1 & mask2]
# Calculate node labeling.
z = self.drnl_node_labeling(sub_edge_index, src, dst,
num_nodes=sub_nodes.size(0))
data = Data(x=self.data.x[sub_nodes], z=z,
edge_index=sub_edge_index, y=y)
data_list.append(data)
return data_list
def drnl_node_labeling(self, edge_index, src, dst, num_nodes=None):
# Double-radius node labeling (DRNL).
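# Each node is labeled by its shortest-path distances to the two endpoints
# (src, dst) of the target link: with d = dist2src + dist2dst, the lines
# below compute z = 1 + min(dist2src, dist2dst) + (d//2)*((d//2) + d%2 - 1),
# the hashing function used by SEAL; unreachable nodes get label 0.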
src, dst = (dst, src) if src > dst else (src, dst)
adj = to_scipy_sparse_matrix(edge_index, num_nodes=num_nodes).tocsr()
idx = list(range(src)) + list(range(src + 1, adj.shape[0]))
adj_wo_src = adj[idx, :][:, idx]
idx = list(range(dst)) + list(range(dst + 1, adj.shape[0]))
adj_wo_dst = adj[idx, :][:, idx]
dist2src = shortest_path(adj_wo_dst, directed=False, unweighted=True,
indices=src)
dist2src = np.insert(dist2src, dst, 0, axis=0)
dist2src = torch.from_numpy(dist2src)
dist2dst = shortest_path(adj_wo_src, directed=False, unweighted=True,
indices=dst-1)
dist2dst = np.insert(dist2dst, src, 0, axis=0)
dist2dst = torch.from_numpy(dist2dst)
dist = dist2src + dist2dst
dist_over_2, dist_mod_2 = dist // 2, dist % 2
z = 1 + torch.min(dist2src, dist2dst)
z += dist_over_2 * (dist_over_2 + dist_mod_2 - 1)
z[src] = 1.
z[dst] = 1.
z[torch.isnan(z)] = 0.
self.__max_z__ = max(int(z.max()), self.__max_z__)
return z.to(torch.long)
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid')
dataset = Planetoid(path, 'Cora')
train_dataset = SEALDataset(dataset, num_hops=2, split='train')
val_dataset = SEALDataset(dataset, num_hops=2, split='val')
test_dataset = SEALDataset(dataset, num_hops=2, split='test')
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=32)
test_loader = DataLoader(test_dataset, batch_size=32)
class DGCNN(torch.nn.Module):
def __init__(self, hidden_channels, num_layers, GNN=GCNConv, k=0.6):
super(DGCNN, self).__init__()
if k < 1: # Transform percentile to number.
num_nodes = sorted([data.num_nodes for data in train_dataset])
k = num_nodes[int(math.ceil(k * len(num_nodes))) - 1]
k = max(10, k)
self.k = int(k)
self.convs = ModuleList()
self.convs.append(GNN(train_dataset.num_features, hidden_channels))
for i in range(0, num_layers - 1):
self.convs.append(GNN(hidden_channels, hidden_channels))
self.convs.append(GNN(hidden_channels, 1))
conv1d_channels = [16, 32]
total_latent_dim = hidden_channels * num_layers + 1
conv1d_kws = [total_latent_dim, 5]
self.conv1 = Conv1d(1, conv1d_channels[0], conv1d_kws[0],
conv1d_kws[0])
self.maxpool1d = MaxPool1d(2, 2)
self.conv2 = Conv1d(conv1d_channels[0], conv1d_channels[1],
conv1d_kws[1], 1)
dense_dim = int((self.k - 2) / 2 + 1)
dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]
self.lin1 = Linear(dense_dim, 128)
self.lin2 = Linear(128, 1)
def forward(self, x, edge_index, batch):
xs = [x]
for conv in self.convs:
xs += [torch.tanh(conv(xs[-1], edge_index))]
x = torch.cat(xs[1:], dim=-1)
# Global pooling.
x = global_sort_pool(x, batch, self.k)
x = x.unsqueeze(1) # [num_graphs, 1, k * hidden]
x = F.relu(self.conv1(x))
x = self.maxpool1d(x)
x = F.relu(self.conv2(x))
x = x.view(x.size(0), -1) # [num_graphs, dense_dim]
# MLP.
x = F.relu(self.lin1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.lin2(x)
return x
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DGCNN(hidden_channels=32, num_layers=3).to(device)
optimizer = torch.optim.Adam(params=model.parameters(), lr=0.0001)
def train():
model.train()
total_loss = 0
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
logits = model(data.x, data.edge_index, data.batch)
loss = BCEWithLogitsLoss()(logits.view(-1), data.y.to(torch.float))
loss.backward()
optimizer.step()
total_loss += loss.item() * data.num_graphs
return total_loss / len(train_dataset)
@torch.no_grad()
def test(loader):
model.eval()
y_pred, y_true = [], []
for data in loader:
data = data.to(device)
logits = model(data.x, data.edge_index, data.batch)
y_pred.append(logits.view(-1).cpu())
y_true.append(data.y.view(-1).cpu().to(torch.float))
return roc_auc_score(torch.cat(y_true), torch.cat(y_pred))
best_val_auc = test_auc = 0
for epoch in range(1, 51):
loss = train()
val_auc = test(val_loader)
if val_auc > best_val_auc:
best_val_auc = val_auc
test_auc = test(test_loader)
print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Val: {val_auc:.4f}, '
f'Test: {test_auc:.4f}')
|
py | b403d02926036bfc113d9a9915c927c30b2880a4 | #!/usr/bin/env python
'''
Spherical symmetry needs to be carefully treated in the atomic calculation.
The default initial guess may break the spherical symmetry. To preserve the
spherical symmetry in the atomic calculation, it is often needed to tune the
initial guess and SCF model.
See also 31-cr_atom_rohf_tune_init_guess.py
'''
import numpy
from pyscf import gto, scf
#
# Method 1: Construct the atomic initial guess from cation.
#
mol = gto.Mole()
mol.verbose = 4
mol.atom = 'V'
mol.basis = 'ccpvtz'
mol.symmetry = True
mol.spin = 0
mol.charge = 5
mol.build()
mf = scf.ROHF(mol)
mf.kernel()
# The output of .analyze() method can help to verify whether the spherical
# symmetry is conserved.
#mf.analyze()
# Restore the neutral atom
mol.spin = 5
mol.charge = 0
mf.irrep_nelec = mf.get_irrep_nelec()
mf.irrep_nelec['s+0'] = (3,3)
mf.irrep_nelec['d-2'] = (1,0)
mf.irrep_nelec['d-1'] = (1,0)
mf.irrep_nelec['d+0'] = (1,0)
mf.irrep_nelec['d+1'] = (1,0)
mf.irrep_nelec['d+2'] = (1,0)
dm = mf.make_rdm1()
mf.kernel(dm)
#mf.analyze()
#
# Regular SCF iterations sometimes break the spherical symmetry while the
# second order SCF method works slightly better.
#
mf = mf.newton()
mf.kernel(dm)
#mf.analyze()
#
# Method 2: Construct the atomic initial guess of large basis from a
# calculation of small basis.
#
mol = gto.Mole()
mol.verbose = 4
mol.atom = 'V'
mol.basis = 'minao'
mol.symmetry = True
mol.spin = 0
mol.charge = 5
mol.build()
mf = scf.ROHF(mol)
mf.kernel()
#
# Setup the system with large basis set
#
mol1 = gto.Mole()
mol1.verbose = 4
mol1.atom = 'V'
mol1.basis = 'ccpvtz'
mol1.symmetry = True
mol1.spin = 5
mol1.charge = 0
mol1.build()
dm = mf.make_rdm1()
dm = scf.addons.project_dm_nr2nr(mol, dm, mol1)
mf = scf.ROHF(mol1)
mf.kernel(dm)
#mf.analyze()
#
# Second order SCF can be applied on the project density matrix as well
#
mf = mf.newton()
mf.kernel(dm)
#mf.analyze()
########################################
# Spherical symmetry was not supported until PySCF-1.7.4. SO3 symmetry was
# recognized as Dooh. The code below is taken from old examples.
#
# Construct the atomic initial guess from cation.
#
mol = gto.Mole()
mol.verbose = 4
mol.atom = 'V'
mol.basis = 'ccpvtz'
mol.symmetry = 'Dooh'
mol.spin = 0
mol.charge = 5
mol.build()
mf = scf.ROHF(mol)
mf.kernel()
# Restore the neutral atom
mol.spin = 5
mol.charge = 0
mf.irrep_nelec = mf.get_irrep_nelec()
mf.irrep_nelec['A1g'] = (4,3)
mf.irrep_nelec['E1gx'] = (1,0)
mf.irrep_nelec['E1gy'] = (1,0)
mf.irrep_nelec['E2gx'] = (1,0)
mf.irrep_nelec['E2gy'] = (1,0)
dm = mf.make_rdm1()
mf.kernel(dm)
#mf.analyze()
|
py | b403d02e53b29fbc66f509be9f10a0e9c3e444d2 | import asyncio
async def run_command(*args):
# Create a subprocess
process = await asyncio.create_subprocess_exec(
*args,
# stdout must be a pipe to be accessible as process.stdout
stdout=asyncio.subprocess.PIPE)
# Wait for the subprocess to finish
stdout, stderr = await process.communicate()
# return stdout
return stdout.decode().strip()
loop = asyncio.get_event_loop()
# Gather uname and date commands
commands = asyncio.gather(
run_command('uname'),
run_command('date'))
# Run the commands
uname, date = loop.run_until_complete(commands)
# Print a report
print('uname: {}, date: {}'.format(uname, date))
loop.close()
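# Note (added remark, not part of the original example): on Python 3.7+ the
# explicit loop management above can be replaced by asyncio.run, e.g.
#
#   async def main():
#       return await asyncio.gather(run_command('uname'), run_command('date'))
#
#   uname, date = asyncio.run(main())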
|
py | b403d1492f901f3b96b5188e8c790a9b52897445 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Convolution Model"""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import numpy as np
from .core import CompoundModel
class Convolution(CompoundModel):
"""
Wrapper class for a convolution model.
Parameters
----------
operator: tuple
The SPECIAL_OPERATORS entry for the convolution being used.
model : Model
The model for the convolution.
kernel: Model
The kernel model for the convolution.
bounding_box : tuple
A bounding box to define the limits of the integration
approximation for the convolution.
resolution : float
The resolution for the approximation of the convolution.
cache : bool, optional
Allow convolution computation to be cached for reuse. This is
enabled by default.
Notes
-----
This wrapper is necessary to handle the limitations of the
pseudospectral convolution binary operator implemented in
astropy.convolution under `~astropy.convolution.convolve_fft`. In this
`~astropy.convolution.convolve_fft` it is assumed that the inputs ``array``
and ``kernel`` span a sufficient portion of the support of the functions of
the convolution. Consequently, the ``Compound`` created by the
`~astropy.convolution.convolve_models` function makes the assumption that
one should pass an input array that sufficiently spans this space. This means
that slightly different input arrays to this model will result in different
outputs, even on points of intersection between these arrays.
This issue is solved by requiring a ``bounding_box`` together with a
resolution so that one can pre-calculate the entire domain and then
(by default) cache the convolution values. The function then just
interpolates the results from this cache.
"""
def __init__(self, operator, model, kernel, bounding_box, resolution, cache=True):
super().__init__(operator, model, kernel)
self.bounding_box = bounding_box
self._resolution = resolution
self._cache_convolution = cache
self._kwargs = None
self._convolution = None
def clear_cache(self):
"""
Clears the cached convolution
"""
self._kwargs = None
self._convolution = None
def _get_convolution(self, **kwargs):
if (self._convolution is None) or (self._kwargs != kwargs):
domain = self.bounding_box.domain(self._resolution)
mesh = np.meshgrid(*domain)
data = super().__call__(*mesh, **kwargs)
from scipy.interpolate import RegularGridInterpolator
convolution = RegularGridInterpolator(domain, data)
if self._cache_convolution:
self._kwargs = kwargs
self._convolution = convolution
else:
convolution = self._convolution
return convolution
@staticmethod
def _convolution_inputs(*args):
not_scalar = np.where([not np.isscalar(arg) for arg in args])[0]
if len(not_scalar) == 0:
return np.array(args), (1,)
else:
output_shape = args[not_scalar[0]].shape
if not all(args[index].shape == output_shape for index in not_scalar):
raise ValueError('Values have differing shapes')
inputs = []
for arg in args:
if np.isscalar(arg):
inputs.append(np.full(output_shape, arg))
else:
inputs.append(arg)
return np.reshape(inputs, (len(inputs), -1)).T, output_shape
@staticmethod
def _convolution_outputs(outputs, output_shape):
return outputs.reshape(output_shape)
def __call__(self, *args, **kw):
inputs, output_shape = self._convolution_inputs(*args)
convolution = self._get_convolution(**kw)
outputs = convolution(inputs)
return self._convolution_outputs(outputs, output_shape)
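# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module, and
# the helper name below is an assumption about the public astropy API rather
# than something defined in this file). The class is normally built by a
# convenience function instead of being instantiated directly, roughly:
#
#   from astropy.convolution import convolve_models_fft
#   from astropy.modeling.models import Gaussian1D
#   model = Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0)
#   kernel = Gaussian1D(amplitude=1.0, mean=0.0, stddev=0.5)
#   conv = convolve_models_fft(model, kernel, (-5, 5), 0.01)  # (model, kernel, bounding_box, resolution)
#   y = conv([-1.0, 0.0, 1.0])   # interpolated from the cached, pre-computed grid
# ---------------------------------------------------------------------------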
|
py | b403d20ffcc5c4449058231c7dd0cfe5dd55c628 | import meaningcloud.Request
class SentimentRequest(meaningcloud.Request):
endpoint = 'sentiment-2.1'
otherparams = None
extraheaders = None
type_ = ""
def __init__(self, key, lang=None, txt=None, txtf='plain', url=None, doc=None, otherparams=None, extraheaders=None, server='https://api.meaningcloud.com/'):
"""
SentimentRequest constructor
:param key:
License key
:param lang:
Language used in the request
:param txt:
Text to use in the API calls
:param txtf:
Format of the text
:param url:
Url to use in the API calls
:param doc:
File to use in the API calls
:param otherparams:
Array where other params can be added to be used in the API call
:param extraheaders:
Array where other headers can be added to be used in the request
:param server:
String with the server the requests will be sent to
"""
if server[len(server)-1] != '/':
server += '/'
self._params = {}
meaningcloud.Request.__init__(self, (server + self.endpoint), key)
self.otherparams = otherparams
self.extraheaders = extraheaders
self._url = server + self.endpoint
self.addParam('key', key)
self.addParam('lang', lang)
self.addParam('txtf', txtf)
if txt:
type_ = 'txt'
elif doc:
type_ = 'doc'
elif url:
type_ = 'url'
else:
type_ = 'default'
options = {'doc': lambda: self.setContentFile(doc),
'url': lambda: self.setContentUrl(url),
'txt': lambda: self.setContentTxt(txt),
'default': lambda: self.setContentTxt(txt)
}
options[type_]()
if otherparams:
for key in otherparams:
self.addParam(key, otherparams[key])
def sendReq(self):
return self.sendRequest(self.extraheaders)
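# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module). A
# minimal call for a piece of English text, assuming a valid license key:
#
#   req = SentimentRequest('your-license-key', lang='en',
#                          txt='This library is really easy to use!')
#   response = req.sendReq()   # delegates to meaningcloud.Request.sendRequest
#
# What sendReq() returns is whatever the parent Request class produces, so any
# further handling follows the meaningcloud response conventions.
# ---------------------------------------------------------------------------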
|
py | b403d294a757f48bc0b4bfc9713a4b36862792dd | # Generated by Django 3.0.7 on 2021-03-28 04:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='language_category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
],
),
migrations.CreateModel(
name='Type_category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
],
),
migrations.CreateModel(
name='jobPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('job_title', models.CharField(max_length=200)),
('job_description', models.TextField()),
('roles_And_responsibilities', models.TextField()),
('no_of_position', models.IntegerField()),
('deadline', models.DateField(default=django.utils.timezone.now)),
('seniority_level', models.CharField(max_length=50)),
('employment_type', models.CharField(max_length=50)),
('salary', models.IntegerField(blank=True, null=True)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('skills', models.ManyToManyField(to='recurit.language_category')),
],
),
]
|
py | b403d2e434473deb6ea97e070e2eb6ef4e85f302 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: deeplabv2.py
# Author: Tao Hu <[email protected]>
import cv2
import tensorflow as tf
import argparse
from six.moves import zip
import os
import numpy as np
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.utils.gpu import get_nr_gpu
from tensorpack.utils.segmentation.segmentation import visualize_label, predict_scaler
from tensorpack.utils.stats import MIoUStatistics
from tensorpack.utils import logger
from tensorpack.dataflow.imgaug.misc import RandomCropWithPadding
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
import tensorpack.tfutils.symbolic_functions as symbf
from tqdm import tqdm
from seg_utils import RandomResize
from resnet_model import (
preresnet_group, preresnet_basicblock, preresnet_bottleneck,
resnet_group, resnet_basicblock, resnet_bottleneck, se_resnet_bottleneck,
resnet_backbone)
CLASS_NUM = 19
IMAGE_H = 1024
IMAGE_W = 2048
IGNORE_LABEL = 255
epoch_scale = 3
class Model(ModelDesc):
def _get_inputs(self):
## Set static shape so that tensorflow knows shape at compile time.
return [InputDesc(tf.float32, [None, IMAGE_H, IMAGE_W, 3], 'image'),
InputDesc(tf.int32, [None, IMAGE_H, IMAGE_W], 'gt')]
def _build_graph(self, inputs):
def resnet101(image):
mode = 'resnet'
depth = 101
basicblock = preresnet_basicblock if mode == 'preact' else resnet_basicblock
bottleneck = {
'resnet': resnet_bottleneck,
'preact': preresnet_bottleneck,
'se': se_resnet_bottleneck}[mode]
num_blocks, block_func = {
18: ([2, 2, 2, 2], basicblock),
34: ([3, 4, 6, 3], basicblock),
50: ([3, 4, 6, 3], bottleneck),
101: ([3, 4, 23, 3], bottleneck),
152: ([3, 8, 36, 3], bottleneck)
}[depth]
def get_logits(image):
with argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format="NHWC"):
return resnet_backbone(
image, num_blocks,
preresnet_group if mode == 'preact' else resnet_group, block_func, class_num = CLASS_NUM)
return get_logits(image)
image, label = inputs
image = image - tf.constant([104, 116, 122], dtype='float32')
label = tf.identity(label, name="label")
predict = resnet101(image)
costs = []
prob = tf.nn.softmax(predict, name='prob')
label4d = tf.expand_dims(label, 3, name='label4d')
new_size = prob.get_shape()[1:3]
cost = symbf.softmax_cross_entropy_with_ignore_label(logits=predict, label=label4d,
class_num=CLASS_NUM)
prediction = tf.argmax(prob, axis=-1,name="prediction")
cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss
costs.append(cost)
if get_current_tower_context().is_training:
wd_w = tf.train.exponential_decay(2e-4, get_global_step_var(),
80000, 0.7, True)
wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')
costs.append(wd_cost)
add_param_summary(('.*/W', ['histogram'])) # monitor W
self.cost = tf.add_n(costs, name='cost')
add_moving_summary(costs + [self.cost])
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2.5e-4, trainable=False)
opt = tf.train.AdamOptimizer(lr, epsilon=2.5e-4)
return optimizer.apply_grad_processors(
opt, [gradproc.ScaleGradient(
[('aspp.*_conv/W', 10),('aspp.*_conv/b',20)])])
def get_data(name, meta_dir, batch_size):
isTrain = name == 'train'
ds = dataset.Cityscapes(meta_dir, name, shuffle=True)
if isTrain:
ds = MapData(ds, RandomResize)
if isTrain:
shape_aug = [
RandomCropWithPadding((IMAGE_H,IMAGE_W),IGNORE_LABEL),
imgaug.Flip(horiz=True),
]
else:
shape_aug = []
ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)
if isTrain:
ds = BatchData(ds, batch_size)
ds = PrefetchDataZMQ(ds, 1)
else:
ds = BatchData(ds, 1)
return ds
def view_data( meta_dir, batch_size):
ds = RepeatedData(get_data('train',meta_dir, batch_size), -1)
ds.reset_state()
for ims, labels in ds.get_data():
for im, label in zip(ims, labels):
#aa = visualize_label(label)
#pass
cv2.imshow("im", im / 255.0)
cv2.imshow("raw-label", label)
cv2.imshow("color-label", visualize_label(label))
cv2.waitKey(0)
def get_config(meta_dir, batch_size):
logger.auto_set_dir()
nr_tower = max(get_nr_gpu(), 1)
dataset_train = get_data('train', meta_dir, batch_size)
steps_per_epoch = dataset_train.size() * epoch_scale
dataset_val = get_data('val', meta_dir, batch_size)
return TrainConfig(
dataflow=dataset_train,
callbacks=[
ModelSaver(),
ScheduledHyperParamSetter('learning_rate', [(2, 1e-4), (4, 1e-5), (6, 8e-6)]),
HumanHyperParamSetter('learning_rate'),
PeriodicTrigger(CalculateMIoU(CLASS_NUM), every_k_epochs=1),
ProgressBar(["cross_entropy_loss","cost","wd_cost"])#uncomment it to debug for every step
],
model=Model(),
steps_per_epoch=steps_per_epoch,
max_epoch=10,
nr_tower = nr_tower
)
def run(model_path, image_path, output):
pred_config = PredictConfig(
model=Model(),
session_init=get_model_loader(model_path),
input_names=['image'],
output_names=['output' + str(k) for k in range(1, 7)])
predictor = OfflinePredictor(pred_config)
im = cv2.imread(image_path)
assert im is not None
im = cv2.resize(
im, (im.shape[1] // 16 * 16, im.shape[0] // 16 * 16)
)[None, :, :, :].astype('float32')
outputs = predictor(im)
if output is None:
for k in range(6):
pred = outputs[k][0]
cv2.imwrite("out{}.png".format(
'-fused' if k == 5 else str(k + 1)), pred * 255)
else:
pred = outputs[5][0]
cv2.imwrite(output, pred * 255)
def proceed_validation(args, is_save = True, is_densecrf = False):
import cv2
name = "val"
ds = dataset.Cityscapes(args.meta_dir, name)
ds = BatchData(ds, 1)
pred_config = PredictConfig(
model=Model(),
session_init=get_model_loader(args.load),
input_names=['image'],
output_names=['prob'])
predictor = OfflinePredictor(pred_config)
from tensorpack.utils.fs import mkdir_p
result_dir = os.path.join("result_on_{}".format(name))
mkdir_p(result_dir)
i = 0
stat = MIoUStatistics(CLASS_NUM)
logger.info("start validation....")
for image, label in tqdm(ds.get_data()):
label = np.squeeze(label)
image = np.squeeze(image)
prediction = predict_scaler(image, predictor, scales=[0.9, 1, 1.1], classes=CLASS_NUM, tile_size=(IMAGE_H,IMAGE_W), is_densecrf = is_densecrf)
prediction = np.argmax(prediction, axis=2)
stat.feed(prediction, label)
if is_save:
cv2.imwrite(os.path.join(result_dir,"{}.png".format(i)), np.concatenate((image, visualize_label(label), visualize_label(prediction)), axis=1))
i += 1
logger.info("mIoU: {}".format(stat.mIoU))
logger.info("mean_accuracy: {}".format(stat.mean_accuracy))
logger.info("accuracy: {}".format(stat.accuracy))
class CalculateMIoU(Callback):
def __init__(self, nb_class):
self.nb_class = nb_class
def _setup_graph(self):
self.pred = self.trainer.get_predictor(
['image'], ['prob'])
def _before_train(self):
pass
def _trigger(self):
global args
self.val_ds = get_data('val', args.meta_dir, args.batch_size)
self.val_ds.reset_state()
self.stat = MIoUStatistics(self.nb_class)
for image, label in tqdm(self.val_ds.get_data()):
label = np.squeeze(label)
image = np.squeeze(image)
prediction = predict_scaler(image, self.pred, scales=[0.9, 1, 1.1], classes=CLASS_NUM, tile_size=(IMAGE_H,IMAGE_W),
is_densecrf=False)
prediction = np.argmax(prediction, axis=2)
self.stat.feed(prediction, label)
self.trainer.monitors.put_scalar("mIoU", self.stat.mIoU)
self.trainer.monitors.put_scalar("mean_accuracy", self.stat.mean_accuracy)
self.trainer.monitors.put_scalar("accuracy", self.stat.accuracy)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default="2", help='comma separated list of GPU(s) to use.')
parser.add_argument('--meta_dir', default="../metadata/cityscapes", help='meta dir')
parser.add_argument('--load', default="../resnet101.npz", help='load model')
#parser.add_argument('--load', default="train_log/deeplabv2.naked.cs/model-26712", help='load model')
parser.add_argument('--view', help='view dataset', action='store_true')
parser.add_argument('--run', help='run model on images')
parser.add_argument('--batch_size', type=int, default = 1, help='batch_size')
parser.add_argument('--output', help='fused output filename. default to out-fused.png')
parser.add_argument('--validation', action='store_true', help='validate model on validation images')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.view:
view_data(args.meta_dir,args.batch_size)
elif args.run:
run(args.load, args.run, args.output)
elif args.validation:
proceed_validation(args)
else:
config = get_config(args.meta_dir,args.batch_size)
if args.load:
config.session_init = get_model_loader(args.load)
launch_train_with_config(
config,
SyncMultiGPUTrainer(max(get_nr_gpu(), 1)))
|
py | b403d3a96599eef58f310bb1e9f95a5a14de0200 | """
Minimal example showing the use of the CaseConverterMode.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
import os
os.environ['QT_API'] = 'pyside2'
# os.environ['QT_API'] = 'pyqt5'
from pyqode.qt import QtWidgets
from pyqode.core.api import CodeEdit
from pyqode.core.backend import server
from pyqode.core.modes import CaseConverterMode
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
editor = CodeEdit()
editor.backend.start(server.__file__)
editor.resize(800, 600)
print(editor.modes.append(CaseConverterMode()))
editor.show()
editor.setPlainText(
'Press Ctrl+Shift+U to convert selected text to upper case\n'
'and Ctrl+U to convert the text to lower case.', '', '')
editor.selectAll()
app.exec_()
editor.close()
del editor
del app
|
py | b403d3f0a0760ca4db13556d107cdae18ceaf156 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""Module for analysis of RIXS spectra.
This module is based on three class, one for dealing with photon events lists,
one for dealing with single spectra, a another one to deal with many spectra at
a time.
.. autosummary::
photon_events
spectrum
spectra
"""
# standard libraries
import matplotlib.pyplot as plt
from pathlib import Path
import numpy as np
import copy
from matplotlib.transforms import Bbox
import warnings
from scipy.optimize import curve_fit
# backpack
from . import arraymanip as am
from .arraymanip import index
from . import filemanip as fm
from . import figmanip as figm
class photon_events():
"""Creates a ``photon_event`` class type object to deal with photon events lists.
Args:
filepath (string or pathlib.Path, optional): filepath to file. It overwrites
the ``data`` argument.
data (list or array, optional): three column list (or array) with photon
events. Column order should be x, y, and intensity.
delimiter (str, optional): The string used to separate values. If whitespaces are used,
consecutive whitespaces act as delimiter. Use ``\\t`` for tab. The default is comma (,).
x_max (float, optional): maximum x value. If ``None``, it will be inferred
from the data.
y_max (float, optional): maximum y value. If ``None``, it will be inferred
from the data.
"""
def __init__(self, filepath=None, data=None, delimiter=',', x_max=None, y_max=None):
# basic attr
self.data = None
self.filepath = None
self.x_max = None
self.y_max = None
# binning attr
self.hist = None
self.bins = None
self.bins_size = None
self.x_edges = None
self.y_edges = None
self.x_centers = None
self.y_centers = None
# offset attr
self.offsets = None
self.offsets_ref = None
self.offsets_ranges = None
self.offsets_func = None
self.offsets_par = None
# spectrum attr
self.spectrum = None
self.load(filepath=filepath, data=data, delimiter=delimiter, x_max=x_max, y_max=y_max)
self.set_binning(bins=1)
self.calculate_offsets()
self.fit_offsets(deg=0)
self.calculate_spectrum()
def load(self, filepath=None, data=None, delimiter=',', x_max=None, y_max=None):
"""Load photon events data from file or assign data directly.
The file/data must have three columns, x, y, and intensity.
One can pass the values for x_max and y_max through the file header by
using the tag ``# x_max <value>`` and ``# y_max <value>``, respectively.
args:
data (list or array, optional): three column list (or array) with photon
events. Column order should be x, y, and intensity.
filepath (string or pathlib.Path, optional): filepath to file. It overwrites
the ``data`` argument.
delimiter (str, optional): The string used to separate values. If whitespaces are used,
consecutive whitespaces act as delimiter. Use ``\\t`` for tab. The default is comma (,).
x_max (float, optional): maximum x value. If ``None``, it will be inferred
from the data.
y_max (float, optional): maximum y value. If ``None``, it will be inferred
from the data.
returns:
None
See Also:
:py:func:`photon_events.save`.
"""
x_max_flag = False
y_max_flag = False
if filepath is not None:
self.filepath = Path(filepath)
# check x_max and y_max
for row in fm.load_Comments(filepath=self.filepath):
if row.startswith('# x_max') or row.startswith('#x_max'):
self.x_max = float(row.split('x_max')[-1])
x_max_flag = True
elif row.startswith('# y_max') or row.startswith('#y_max'):
self.y_max = float(row.split('y_max')[-1])
y_max_flag = True
# get data
self.data = fm.load_data(filepath=Path(filepath), force_array=True, delimiter=delimiter)
else:
self.filepath = None
if data is None:
warnings.warn('No filepath or data to load.', stacklevel=2)
return
else:
self.data = copy.deepcopy(data)
# infer x_max and y_max if necessary
if x_max_flag is False:
if x_max is None:
self.x_max = max(self.data[:, 0])
else:
self.x_max = copy.deepcopy(x_max)
if y_max_flag is False:
if y_max is None:
self.y_max = max(self.data[:, 1])
else:
self.y_max = copy.deepcopy(y_max)
def save(self, filepath, delimiter=','):
"""Saves photon events data to a file.
args:
filepath (string or pathlib.Path, optional): filepath to file.
delimiter (str, optional): The string used to separate values. If whitespaces are used,
consecutive whitespaces act as delimiter. Use ``\\t`` for tab. The default is comma (,).
returns:
None
note:
x_max and y_max values are saved in the header.
See Also:
:py:func:`photon_events.load`.
"""
header = f'x_max {self.x_max}\n'
header += f'y_max {self.y_max}\n'
header += f'x y I'
fm.save_data(self.data, filepath=Path(filepath), delimiter=delimiter, header=header)
def set_binning(self, bins=None, bins_size=None):
"""Compute the histogram of the data set (binning of the data).
args:
bins (int or tuple, optional): number of bins. If one value is given,
this is used for both x and y directions. If two values are given,
they are used separately for the x and y directions, respectively.
bins_size (int or tuple, optional): size of the bins. This overwrites
the argument ``bins``. If one value is given,
this is used for both x and y directions. If two values are given,
they are used separately for the x and y directions, respectively.
return:
None
"""
if bins_size is not None:
try:
if len(bins_size) == 1:
x_bins_size, y_bins_size = bins_size[0], bins_size[0]
else:
x_bins_size, y_bins_size = bins_size[0], bins_size[1]
except TypeError:
x_bins_size, y_bins_size = bins_size, bins_size
x_bins = int(self.x_max/x_bins_size)
y_bins = int(self.y_max/y_bins_size)
else:
if bins is None:
warnings.warn('Bins not defined.', stacklevel=2)
return
else:
try:
if len(bins) == 1:
x_bins, y_bins = bins[0], bins[0]
else:
x_bins, y_bins = bins[0], bins[1]
except TypeError:
x_bins, y_bins = bins, bins
bins_size = (self.x_max/x_bins, self.y_max/y_bins)
self.bins = (x_bins, y_bins)
self.bins_size = copy.deepcopy(bins_size)
self.hist, self.x_edges, self.y_edges = np.histogram2d(self.data[:, 0],
self.data[:, 1],
bins=(x_bins, y_bins),
weights=self.data[:, 2],
range=((0, self.x_max), (0, self.y_max))
)
self.x_centers = am.movingaverage(self.x_edges, window_size=2, remove_boundary_effects=True)
self.y_centers = am.movingaverage(self.y_edges, window_size=2, remove_boundary_effects=True)
def calculate_offsets(self, ref=0, mode='cross-correlation', ranges=None):
"""Calculate the offset of each column relative to a reference column.
args:
ref (int, optional): reference column. The offset of all other columns
is calculated based on the reference column. Default is 0.
mode (string, optional): method used to calculate the offsets.
The current options are: 'cross-correlation', and 'max'.
ranges (list, optional): a pair of y values or a list of pairs. Each pair represents
the start and stop of a data range. If None, the whole data set is used.
returns:
None
"""
if self.hist is None:
warnings.warn('Data not binned yet. Use binning()', stacklevel=2)
return
if ranges is None:
ranges = [[0, self.y_max]]
# check ranges
for r in ranges:
if r[0] > max(self.y_centers) or r[-1] < min(self.y_centers):
raise ValueError('Selected ranges outside data range (y_centers).')
self.offsets = np.zeros(self.hist.shape[0])
y_centers, ref_column = am.extract(self.y_centers, self.hist[ref], ranges=ranges)
for i in range(self.hist.shape[0]):
y_centers, column = am.extract(self.y_centers, self.hist[i], ranges=ranges)
if mode == 'cross-correlation':
cross_correlation = np.correlate(column, ref_column, mode='same')
self.offsets[i] = y_centers[np.argmax(cross_correlation)]
elif mode == 'max':
self.offsets[i] = y_centers[np.argmax(column)]
# if mode == 'cross-correlation': self.offsets -= self.offsets[ref]
# if mode == 'max': self.offsets -= self.offsets[ref]
self.offsets -= self.offsets[ref]
self.offsets_ref = copy.copy(ref)
self.offsets_ranges = copy.copy(ranges)
def fit_offsets(self, deg=1, f=None):
"""Find the curve that fits the offset values.
args:
deg (int, optional): degree for the polynomial fit. The default is 1.
f (function, optional): a function y = f(x, a, b, ...) that returns the
value of y as a function of x and other parameters to be fitted.
This overwrites the polynomial fit based on the argument ``deg``.
returns:
None
"""
x2fit = self.x_centers
if x2fit is None:
warnings.warn('Data not binned yet. Use binning()', stacklevel=2)
return
y2fit = self.offsets
if y2fit is None:
warnings.warn('Offsets not defined. Use get_offsets().', stacklevel=2)
return
if f is None:
if deg < 0:
warnings.warn('deg must be a positive value or zero.', stacklevel=2)
return
popt = np.polyfit(x2fit, y2fit, deg=deg)
f = np.poly1d(popt)
else:
popt, pcov = curve_fit(f, x2fit, y2fit)
f_fitted = f  # bind under a new name so the lambda below does not call itself
f = lambda x: f_fitted(x, *popt)
self.offsets_func = lambda x: f(x)
self.offsets_par = popt
def offsets_correction(self):
"""Uses the offsets fitted curve to adjust the photon events."""
f = lambda x, y: (x, y-self.offsets_func(x))
self.apply_correction(f=f)
def apply_correction(self, f):
"""Changes the values of x, y based on a function.
args:
f (function): function ``x, y = f(x, y)`` that takes as input the
position of a photon event and returns its corrected values.
returns:
None
"""
self.data[:, 0], self.data[:, 1] = f(self.data[:, 0], self.data[:, 1])
self.x_max, self.y_max = f(self.x_max, self.y_max)
self.set_binning(bins=self.bins)
def calculate_spectrum(self, y_bins=None, y_bins_size=None):
"""Sum the photon events in the x direction.
args:
y_bins (int, optional): number of y bins. If None, the current binning is used.
y_bins_size (int, optional): size of the y bins. This overwrites
the argument ``y_bins``. If None, the current binning is used.
returns:
None
"""
if y_bins is None:
self.spectrum = spectrum(data=np.vstack((self.y_centers, sum(self.hist))).transpose())
else:
temp = photon_events(data=self.data)
if y_bins_size is not None:
temp.set_binning(bins_size=(self.x_max+1, y_bins_size))
elif y_bins is not None:
temp.set_binning(bins=(1, y_bins))
self.spectrum = spectrum(data=np.vstack((temp.y_centers, sum(temp.hist))).transpose())
def plot(self, ax=None, pointsize=1, show_bins=(False, False), show_offsets=False, show_offsets_fit=False, **kwargs):
"""Plot photon events.
args:
ax (matplotlib.axes, optional): axes for plotting on.
pointsize (int, optional): photon events point size. Default is 1.
show_bins (bool, optional): if True, bin edges are displayed in cyan.
A tuple of bools can also be used to display only x bins or y bins,
e. g., ``show_bins = (True, False)`` will display only x bins.
The default is (False, False).
show_offsets (bool, optional): if True, offsets are displayed in yellow
over its respectively bin. The ranges of data used to calculate
the offsets are marked by green and red lines.
show_offsets_fit (bool, optional): if True, the fit of the curve
defined by the offsets values is displayed in yellow.
The ranges of data used to calculate
the offsets are marked by green and red lines.
**kwargs: kwargs are passed to ``plt.plot()`` that plots the data (photon events).
returns:
matplotlib.axes
"""
if ax is None:
fig = figm.figure()
ax = fig.add_subplot(111)
ax.set_facecolor('black')
if 'marker' not in kwargs:
kwargs['marker'] = 'o'
if 'ms' not in kwargs:
kwargs['ms'] = pointsize
if 'mfc' not in kwargs:
kwargs['mfc'] = 'white'
if 'markeredgewidth' not in kwargs:
kwargs['markeredgewidth'] = 0
ax.plot(self.data[:, 0], self.data[:, 1],
linewidth=0,
**kwargs)
try:
if len(show_bins) == 1:
show_bins = (show_bins[0], show_bins[0])
except TypeError:
show_bins = (show_bins, show_bins)
if show_bins[0]:
plt.vlines(self.x_edges, 0, self.y_max, color='cyan', linewidth=0.8, zorder=3)
if show_bins[1]:
plt.hlines(self.y_edges, 0, self.x_max, color='cyan', linewidth=0.5, zorder=3)
if show_offsets:
if self.offsets is None:
warnings.warn('Offsets not defined. Use get_offsets().', stacklevel=2)
else:
c = self.y_centers[np.argmax(self.hist[0])]
self.plot_offsets(ax=ax, shift=(-self.offsets[0] + c), color='yellow', zorder=10)
if show_offsets_fit:
if self.offsets is None:
warnings.warn('Offsets not fitted. Use fit_offsets().', stacklevel=2)
else:
c = self.y_centers[np.argmax(self.hist[0])]
self.plot_offsets_fit(ax=ax, shift=(-self.offsets[0] + c), linewidth=1, color='yellow', zorder=10)
# x, y = self.offsets_f(np.linspace(0, self.x_max, 1000), np.zeros(1000))
# plt.plot(x, y-y[0]+c, linewidth=1, color='yellow')
if show_offsets or show_offsets_fit:
for r in self.offsets_ranges:
plt.axhline(r[0], color='green', linewidth=2, zorder=10)
plt.axhline(r[1], color='red', linewidth=2, zorder=10)
return ax
def plot_offsets(self, ax=None, shift=0, **kwargs):
"""Plot offsets as function of x values (center of x bins).
args:
ax (matplotlib.axes, optional): axes for plotting on.
shift (int, optional): vertical shift. Default is 0.
**kwargs: kwargs are passed to ``plt.scatter()`` that plots the data.
returns:
matplotlib.axes
"""
if ax is None:
fig = figm.figure()
ax = fig.add_subplot(111)
if self.offsets is None:
warnings.warn('Offsets not defined. Use get_offsets().', stacklevel=2)
else:
ax.scatter(self.x_centers, self.offsets+shift, **kwargs)
return ax
def plot_offsets_fit(self, ax=None, shift=0, **kwargs):
"""Plot the offsets fitted curve as function of x values.
args:
ax (matplotlib.axes, optional): axes for plotting on.
shift (int, optional): vertical shift. Default is 0.
**kwargs: kwargs are passed to ``plt.plot()`` that plots the data.
returns:
matplotlib.axes
"""
if ax is None:
fig = figm.figure()
ax = fig.add_subplot(111)
if self.offsets_func is None:
warnings.warn('Offsets not defined. Use get_offsets().', stacklevel=2)
else:
x = np.linspace(0, self.x_max, 200)
y = self.offsets_func(x)
ax.plot(x, y+shift, **kwargs)
return ax
def plot_columns(self, ax=None, columns='all', show_ranges=False, vertical_increment=0, **kwargs):
"""Plot columns (intensity as function of y values (center of y bins).
args:
ax (matplotlib.axes, optional): axes for plotting on.
columns (int, string or list, optional): number of the columns to plot.
It can be a single int or a list of ints. If
``columns = 'all'``, all columns are plotted.
vertical_increment (int, optional): if one column is plotted, it
adds a vertical offset to the plotted curve. If many columns are plotted,
``vertical_increment`` defines
the vertical offset between each curve. Default is 0.
show_ranges (bool, optional): show ranges in which offsets were calculated.
**kwargs: kwargs are passed to ``plt.plot()`` that plots the data.
returns:
matplotlib.axes
"""
if ax is None:
fig = figm.figure()
ax = fig.add_subplot(111)
if 'marker' not in kwargs:
kwargs['marker'] = 'o'
if 'ms' not in kwargs:
kwargs['ms'] = 5
if columns == 'all':
columns = np.arange(0, self.hist.shape[0])
i = 0
try:
if len(columns) == 1:
ax.plot(self.y_centers, self.hist[columns[0]]+vertical_increment, label=columns[0], **kwargs)
else:
for i in range(len(columns)):
ax.plot(self.y_centers, self.hist[columns[i]]-i*vertical_increment, label=columns[i], **kwargs)
except TypeError:
ax.plot(self.y_centers, self.hist[columns]+vertical_increment, label=columns,**kwargs)
plt.legend()
if show_ranges:
if self.offsets_func is None:
warnings.warn('Offsets range not defined. Use get_offsets().', stacklevel=2)
else:
for r in self.offsets_ranges:
plt.axvline(r[0], color='green', linewidth=1.2, zorder=10)
plt.axvline(r[1], color='red', linewidth=1.2, zorder=10)
return ax
# def calculate_overlaps(self, x_min_between_events=5e-6, y_min_between_events=5e-6):
#
# overlap_x = [abs(self.data[:, 0]-x)<x_min_between_events for x in self.data[:, 0]]
# overlap_y = [abs(self.data[:, 1]-y)<y_min_between_events for y in self.data[:, 1]]
#
# overlaps = [sum(x*y)>1 for x, y in zip(overlap_x, overlap_y)]
# self.n_overlaps = sum(overlaps)/2
#
# data_overlaped = []
# for idx, photon_event in enumerate(data):
# if overlaps[idx]:
# data_overlaped.append(photon_event)
# self.data_overlaped = np.array(data_overlaped)
#
# self.x_min_between_events = x_min_between_events
# self.y_min_between_events = y_min_between_events
# def plot_overlaped(self, ax=None, pointsize=5):
#
# if ax is None:
# fig = figm.figure()
# ax = fig.add_subplot(111)
# ax.set_facecolor('black')
#
# ax.errorbar(self.data_overlaped[:, 0]*10**3, self.data_overlaped[:, 1]*10**3,
# linewidth=0,
# fmt='o',
# mfc = 'red',
# elinewidth = 1,
# yerr=self.y_min_between_events *10**3,
# xerr=self.x_min_between_events *10**3,
# marker='o',
# ms=pointsize)
# return ax
# Attributes:
# data (list or array): three column list (or array) with photon
# events. Column order should be x, y, and intensity.
# filepath (string or pathlib.Path): filepath to file.
# x_max (float): maximum x value.
# y_max (float): maximum y value.
#
# hist (list): data histogram.
# bins (list): number of x, y bins.
# bins_size (list): size of x, y bins
# x_edges (list): edges of x bins.
# y_edges (list): edges of y bins.
# x_centers (list): center of x bins.
# y_centers (list): center of y bins.
#
# offsets = None
# offsets_ref = None
# offsets_ranges = None
# offsets_func = None
# offsets_par = None
#
# # spectrum attr
# spectrum = None
#
# The first thing that photon_events does is to call :py:func:`photon_events.load`,
# which loads photon events data from file or assign data directly. The
# file/data must have three columns, x, y, and intensity. One can pass the values
# for x_max and y_max through the file header by
# using the tag ``# x_max <value>`` and ``# y_max <value>``, respectively.
#
# note:
# One can assign the data directly by editing the ``data`` attribute, but
# it is advised to used the :py:func:`photon_events.load` method
# as it adjusts other related attributes (x_max, y_max, filepath).
class spectrum():
"""Creates a ``spectrum`` class type object to deal with (x, y) data types.
Args:
filepath (string or pathlib.Path, optional): filepath to file. It overwrites
the ``data`` argument.
data (list or array, optional): two column list (or array) with spectrum
data. Column order should be x (energy or distance) and intensity.
delimiter (str, optional): The string used to separate values. If whitespaces are used,
consecutive whitespaces act as delimiter. Use ``\\t`` for tab. The default is comma (,).
"""
def __init__(self, filepath=None, data=None, delimiter=','):
self.data = None
self.x = None
self.y = None
self.filepath = None
self.load(filepath=filepath, data=data, delimiter=delimiter)
def load(self, filepath=None, data=None, delimiter=','):
"""Load spectrum from file or assign data directly.
The file/data must have two columns, x (energy or distance) and intensity.
args:
filepath (string or pathlib.Path, optional): filepath to file. It overwrites
the ``data`` argument.
data (list or array, optional): two column list (or array) with spectrum
data. Column order should be x (energy or distance) and intensity.
delimiter (str, optional): The string used to separate values. If whitespaces are used,
consecutive whitespaces act as delimiter. Use ``\\t`` for tab. The default is comma (,).
returns:
None
See Also:
:py:func:`spectrum.save`.
"""
if filepath is not None:
self.filepath = Path(filepath)
self.data = fm.load_data(self.filepath, delimiter=delimiter, force_array=True)
else:
self.filepath = None
if data is None:
warnings.warn('No filepath or data to load.', stacklevel=2)
return
else:
self.data = copy.deepcopy(data)
self.x = self.data[:, 0]
self.y = self.data[:, 1]
def save(self, filepath, delimiter=',', header=None):
r"""Saves spectrum to a file.
args:
filepath (string or pathlib.Path, optional): filepath to file.
delimiter (str, optional): The string used to separate values.
If whitespaces are used, consecutive whitespaces act as delimiter. Use ``\\t`` for tab. The default is comma (,).
header (string, optional): text to add at the beginning of the file. Use ``\n`` for new line. Comment flag (#) is added automatically.
returns:
None
See Also:
:py:func:`spectrum.load`.
"""
fm.save_data(self.data, filepath=Path(filepath), delimiter=delimiter, header=header)
def apply_correction(self, f):
"""Changes the values of x, y based on a function.
args:
f (function): function ``x, y = f(x, y)`` that takes as input the
position of a photon event and returns its corrected values.
returns:
None
"""
self.data[:, 0], self.data[:, 1] = f(self.data[:, 0], self.data[:, 1])
def calib(self, dispersion, position_energy_pair=(0, 0), normalized=False):
"""Calibrate data (from length to energy).
args:
dispersion (number): dispersion of the diffraction grating in
units of [energy/length].
position_energy_pair (tuple, optional): a y position and its energy value
of the isoenergetic line at that position.
normalized (bool, optional): if True, spectrum is normalized by its
maximum value.
returns:
None
"""
const = position_energy_pair[1] - dispersion*position_energy_pair[0]
if normalized:
f = lambda x, y: (x*dispersion + const, y/max(y))
else:
f = lambda x, y: (x*dispersion + const, y)
self.apply_correction(f=f)
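# Hedged usage sketch of calib(); the dispersion and position/energy numbers below
# are made-up, not taken from any real instrument. Internally the method builds
# const = E0 - dispersion*x0 and maps x -> dispersion*x + const.
# >>> s = spectrum(data=np.column_stack((np.arange(100), np.random.rand(100))))
# >>> s.calib(dispersion=0.01, position_energy_pair=(50, 530.0))
# >>> # const = 530.0 - 0.01*50 = 529.5, so the new x axis is 0.01*x + 529.5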
def interp(self, x=None, start=None, stop=None, num=1000, step=None):
"""Interpolate data.
args:
x (list or array, optional): The x-coordinates at which to
evaluate the interpolated values. This overwrites all other arguments.
start (number, optional): The starting value of the sequence. If None,
the minimum x value will be used.
stop (number, optional): The end value of the sequence. If None,
the maximum x value will be used.
num (int, optional): Number of samples to generate.
step (number, optional): Spacing between values. This overwrites ``num``.
returns:
None
"""
if x is None:
if start is None:
start = min(self.data[:, 0])
if stop is None:
stop = max(self.data[:, 0])
if step is not None: # step overwrites num
# temp = np.arange(start, stop, step=step)
# temp = np.zeros((len(temp), self.data.shape[1]))
x = np.arange(start, stop, step=step)
else:
# temp = np.zeros((num, self.data.shape[1]))
x = np.linspace(start, stop, num=num)
# temp[:, 1] =
self.data = np.column_stack((x, np.interp(x, self.data[:, 0], self.data[:, 1])))
# return spectrum(data=temp)
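# Usage sketch for interp() (the numbers are arbitrary). Note the precedence:
# an explicit x grid overrides start/stop/num/step, and step overrides num.
# >>> s.interp(start=0, stop=10, num=1001)      # 1001 evenly spaced points
# >>> s.interp(start=0, stop=10, step=0.01)     # spacing of 0.01, num is ignored
# >>> s.interp(x=np.linspace(2, 8, 500))        # interpolate on an explicit grid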
def apply_shift(self, shift, mode='hard'):
"""Shift data.
Args:
shift (float or int): shift value.
mode (string, optional): If ``mode='x'`` or ``mode='hard'``, y is fully preserved
while x is shifted. If ``mode='y'``, ``'interp'``, or ``'soft'``, x is preserved
while y is interpolated with a shift. If ``mode='roll'``, x is also preserved
and y elements are rolled along the array (``shift`` value must be an integer).
Warning:
It is always better to use ``mode='hard'`` or ``'roll'`` since the form of y is fully
preserved (no interpolation). After applying a shift using ``mode='interp'``,
one can apply an 'inverse' shift to retrieve the original data. The difference
between the retrieved y data and the original data gives an idea of the
information loss caused by the interpolation.
Returns:
None
"""
# print(
# x = self.data[:, 0]
# y = self.data[:, 1]
#
# if mode == 'y' or mode == 'interp' or mode=='soft':
# y = np.interp(x, x + shift, y)
#
# elif mode == 'x' or mode == 'hard':
# x = np.array(x) + shift
#
# elif mode == 'roll' or mode == 'rotate':
# y = np.roll(y, shift)
# if shift > 0:
# y[:shift] = 0
# elif shift < 0:
# y[shift:] = 0
# self.apply_correction(self, f)
x, y = am.shift(self.data[:, 0], self.data[:, 1], shift=shift, mode=mode)
self.data = np.column_stack((x, y))
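# Illustrative examples of the shift modes (shift values are arbitrary):
# >>> s.apply_shift(1.5, mode='hard')    # x -> x + 1.5, y untouched
# >>> s.apply_shift(-1.5, mode='hard')   # exactly restores the previous x
# >>> s.apply_shift(3, mode='roll')      # y rolled by 3 points, x untouched
# >>> s.apply_shift(0.7, mode='soft')    # y re-interpolated (small information loss)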
def peak_fit(self, ranges=None, **kwargs):
r"""Fit a peak with a pseudo-voigt curve.
.. math:: y(x) = A \left[ m \frac{w^2}{w^2 + (x-c)^2} + (1-m) e^{-\frac{4 \ln(2) (x-c)^2}{w^2}} \right]
Args:
ranges (list): a pair of x values or a list of pairs. Each pair represents
the start and stop of a data range.
**kwargs: kwargs are passed to :py:func:`brixs.arraymanip.peak_fit`.
Note:
By default, peak asymmetry is taken into account.
Returns:
1) A 2-column (x, y) array with the "smoothed" fitted peak. This is just the
fitted peak evaluated with a linear interpolation on 100 times more data points.
2) An array with the optimized parameters for Amplitude, Center, FWHM and offset.
"""
if ranges is None:
ranges = [[min(self.data[:, 0]), max(self.data[:, 0])]]  # default to the full x range of the data
x, y = am.extract(self.data[:, 0], self.data[:, 1], ranges)
# guess
if 'guess_A' not in kwargs: kwargs['guess_A'] = max(y)
if 'guess_c' not in kwargs: kwargs['guess_c'] = x[am.index(y, max(y))]
if 'guess_w' not in kwargs: kwargs['guess_w'] = (x[1] - x[0])*10
if 'guess_offset' not in kwargs: kwargs['guess_offset'] = np.mean(y)
if 'asymmetry' not in kwargs: kwargs['asymmetry'] = True
if 'fixed_m' not in kwargs: kwargs['fixed_m'] = 0.5
_, arr100, popt_2 = am.peak_fit(x, y, **kwargs)
return arr100, popt_2
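# The pseudo-voigt profile from the docstring, written out as a minimal numpy
# sketch for reference only (the actual fit is delegated to
# brixs.arraymanip.peak_fit, whose internals are not shown here):
# >>> def pseudo_voigt(x, A, c, w, m, offset=0.0):
# ...     lorentz = w**2 / (w**2 + (x - c)**2)
# ...     gauss = np.exp(-4*np.log(2)*(x - c)**2 / w**2)
# ...     return A*(m*lorentz + (1 - m)*gauss) + offset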
def plot(self, ax=None, normalized=True, vertical_increment=0, shift=0, factor=1, **kwargs):
"""Plot spectrum.
args:
ax (matplotlib.axes, optional): axes for plotting on.
normalized (bool, optional): if True, spectrum is normalized by its
maximum value.
vertical_increment (float or int, optional): defines the vertical offset. Default is 0.
shift (float or int): horizontal shift value. Default is 0.
factor (float or int): multiplicative factor. Default is 1.
**kwargs: kwargs are passed to ``plt.plot()`` that plots the data.
returns:
matplotlib.axes
"""
if ax is None:
fig = figm.figure()
ax = fig.add_subplot(111)
if 'marker' not in kwargs:
kwargs['marker'] = 'o'
if 'ms' not in kwargs:
kwargs['ms'] = 5
if normalized:
ax.plot((self.data[:, 0] + shift), (self.data[:, 1]/max(self.data[:, 1]))*factor + vertical_increment, **kwargs)
else:
ax.plot((self.data[:, 0] + shift), self.data[:, 1]*factor + vertical_increment, **kwargs)
return ax
class spectra():
"""Creates a ``spectra`` class type object to deal with many spectrum at a time.
Args:
folderpath (string or pathlib.Path, optional): path to folder with
spectra data files. It overwrites the ``data`` argument.
data (list or array, optional): list of :py:class:`spectrum` objects.
delimiter (str, optional): The string used to separate values. If whitespaces are used,
consecutive whitespaces act as delimiter. Use ``\\t`` for tab. The default is comma (,).
"""
def __init__(self, folderpath=None, data=None, delimiter=','):
# basic attr
self.spectrum = []  # list of spectrum objects (append() requires a list)
self.folderpath = folderpath
# shift attr
self.shift_mode = None
self.shifts = None
self.shift_ranges = None
# sum attr
self.sum = None
self.load(folderpath=folderpath, data=data, delimiter=delimiter)
def get_spectra_count(self):
"""Returns the number of spectra."""
return len(self.spectrum)
def get_filelist(self):
"""Returns a filepath list of all spectra."""
return [x.filepath for x in self.spectrum]
def get_specrum_by_filename(self, filename):
"""Returns a idx list of spectra associated with filename.
"""
filelist = self.get_filelist()
filelist = [Path(x) if x is not None else None for x in filelist]
return [idx for idx, s in enumerate(filelist) if filename == filelist.name]
def append(self, filepath=None, data=None, delimiter=','):
"""Add spectrum to the spectrum list.
args:
filepath (string or pathlib.Path, optional): filepath to file. It overwrites
the ``data`` argument.
data (spectrum obj, optional): spectrum object to be added.
delimiter (str, optional): The string used to separate values. If whitespaces are used,
consecutive whitespaces act as delimiter. Use ``\\t`` for tab. The default is comma (,).
returns:
None
See Also:
:py:func:`spectra.exclude`.
"""
if filepath is not None:
self.spectrum.append(spectrum(filepath=filepath, delimiter=delimiter))
elif data is not None:
self.spectrum.append(data)
else:
warnings.warn('No filepath or data to load.', stacklevel=2)
return
def exclude(self, idx):
"""Exclude spectrum from the spectrum list.
args:
idx (int): index of the spectrum.
returns:
None
See Also:
:py:func:`spectra.append`.
"""
del self.spectrum[idx]
def save(self, folderpath=None, prefix='', suffix='_spectrum', delimiter=',', header=None):
r"""Saves spectra in a folder.
args:
folderpath (string or pathlib.Path, optional): path to folder.
delimiter (str, optional): The string used to separate values.
If whitespaces are used, consecutive whitespaces act as delimiter.
Use ``\\t`` for tab. The default is comma (,).
header (string, optional): text to add at the beginning of each file.
Use ``\n`` for new line. Comment flag (#) is added automatically.
returns:
None
See Also:
:py:func:`spectra.load`.
"""
n_digits = figm.n_digits(self.get_spectra_count()-1)[0]
for idx, s in enumerate(self.spectrum):
filename = f'{prefix}' + f'{idx}'.zfill(n_digits) + f'{suffix}'
s.save(filepath=Path(folderpath)/filename, delimiter=delimiter, header=header)
def load(self, folderpath=None, data=None, delimiter=','):
"""Load all spectra from folder or assign data directly.
Each file/data must have two columns, x (energy or distance) and intensity.
args:
folderpath (string or pathlib.Path, optional): path to folder with
spectra data files. It overwrites the ``data`` argument.
data (list or array, optional): list of :py:class:`spectrum` objects.
delimiter (str, optional): The string used to separate values. If whitespaces are used,
consecutive whitespaces act as delimiter. Use ``\\t`` for tab. The default is comma (,).
returns:
None
See Also:
:py:func:`spectra.save`.
"""
if folderpath is not None:
self.folderpath = Path(folderpath)
for filepath in fm.filelist(self.folderpath):
self.append(filepath=filepath, delimiter=delimiter)
else:
self.folderpath = None
if data is None:
warnings.warn('No filepath or data to load.', stacklevel=2)
return
else:
self.spectrum = copy.deepcopy(data)
def calib(self, dispersion, position_energy_pair=(0, 0), normalized=False):
"""Calibrate data (from length to energy).
args:
dispersion (number): dispersion of the diffraction grating in
units of [energy/length].
position_energy_pair (tuple, optional): a y position and its energy value
of the isoenergetic line at that position.
normalized (bool, optional): if True, spectrum is normalized by its
maximum value.
returns:
None
"""
for spectrum in self.spectrum:
spectrum.calib(dispersion=dispersion, position_energy_pair=position_energy_pair, normalized=normalized)
def interp(self, x=None, start=None, stop=None, num=1000, step=None):
"""Interpolate data.
args:
x (list or array, optional): The x-coordinates at which to
evaluate the interpolated values. This overwrites all other arguments.
start (number, optional): The starting value of the sequence. If None,
the minimum x value will be used.
stop (number, optional): The end value of the sequence. If None,
the maximum x value will be used.
num (int, optional): Number of samples to generate.
step (number, optional): Spacing between values. This overwrites ``num``.
returns:
None
"""
if x is None:
if start is None:
start = max([min(s.data[:, 0]) for s in self.spectrum])
if stop is None:
stop = min([max(s.data[:, 0]) for s in self.spectrum])
for spectrum in self.spectrum:
spectrum.interp(x=x, start=start, stop=stop, num=num, step=step)
def check_x(self, max_error=0.001):
"""Compare spectra to see if they have same x-coordinates.
args:
max_error (number, optional): percentage value of the max error.
Three checks are performed:
1) Checks if all spectra have the same length.
2) Checks if the x step between consecutive data points is uniform throughout the x-axis.
3) Checks if the max difference between the x arrays of two spectra is
less than ``max_error`` percent of the step between points.
raises:
ValueError: If the x-coordinates of any two spectra differ.
"""
# check length
for idx, spectrum in enumerate(self.spectrum):
try:
if len(spectrum.data) != len(self.spectrum[idx+1].data):
raise ValueError(f"Spectrum {idx} and {idx+1} have the different length.")
except IndexError:
pass
# check step
for idx, spectrum in enumerate(self.spectrum):
d = np.diff(spectrum.data[:, 0])
if (max(d) - min(d))*100/np.mean(np.diff(spectrum.data[:, 0])) > max_error:
raise ValueError(f"Step in the x-coordinate of spectrum {idx} seems not to be uniform.")
# check x
for idx, spectrum in enumerate(self.spectrum):
try:
step = spectrum.data[1, 0] - spectrum.data[0, 0]
if max(abs(spectrum.data[:, 0] - self.spectrum[idx+1].data[:, 0]))*100/step > max_error:
raise ValueError(f"Spectrum {idx} and {idx+1} seems to be different.")
# print(max(abs(spectrum.data[:, 0] - self.spectrum[idx+1].data[:, 0]))*100/step)
except IndexError:
pass
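# Usage sketch: check_x() is typically called before any element-wise operation
# on the spectra; it raises ValueError when the x grids are incompatible.
# >>> ss.check_x(max_error=0.001)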
def calculate_shifts(self, ref=0, mode='cross-correlation', ranges=None, check_x=True, verbose=True):
"""Calculate the shift of each spectrum relative to a reference spectrum.
args:
ref (int, optional): index of reference spectrum. The shift of all other spectra
is calculated based on the reference spectrum. Default is 0.
mode (string, optional): method used to calculate the shifts.
The current options are: 'cross-correlation' and 'fit'.
ranges (list, optional): a pair of x values or a list of pairs. Each pair represents
the start and stop of a data range. If None, the whole data set is used.
check_x (bool, optional): if True, it will check if the x-coordinate
of all spectra is the same.
verbose (bool,optional): turn verbose on/off.
returns:
None
"""
if ranges is None:
ranges = [[min(self.spectrum[ref].data[:, 0]), max(self.spectrum[ref].data[:, 0])]]
self.shifts = np.zeros(len(self.spectrum))  # used by both the 'cross-correlation' and 'fit' branches
if mode == 'cross-correlation':
if check_x:
self.check_x()
_, y_ref = am.extract(self.spectrum[ref].data[:, 0], self.spectrum[ref].data[:, 1], ranges=ranges)
for i, spectrum in enumerate(self.spectrum):
_, y = am.extract(spectrum.data[:, 0], spectrum.data[:, 1], ranges=ranges)
cross_correlation = np.correlate(y_ref, y, mode='same')
self.shifts[i] = np.argmax(cross_correlation)
if verbose:
print(f'spectrum {i} shift calculated!')
elif mode == 'fit':
for i, spectrum in enumerate(self.spectrum):
_, popt = spectrum.peak_fit(ranges)
self.shifts[i] = popt[1]
if verbose:
print(f'spectrum {i} shift calculated!')
self.shifts -= self.shifts[ref]
# print(self.shifts)
# self.shifts = self.shifts*(self.spectrum[ref].data[1, 0] - self.spectrum[ref].data[0, 0])
# print(self.shifts)
# print([int(x) for x in self.shifts/(self.spectrum[ref].data[1, 0] - self.spectrum[ref].data[0, 0])])
self.shift_mode = mode
self.shift_ranges = ranges
def shifts_correction(self, mode=None):
"""Shift data.
Args:
shift (float or int): shift value.
mode (string, optional): If None, the best mode will be selected.
If ``mode='x'`` or ``mode='hard'``, y is fully preserved
while x is shifted. If ``mode='y'``, ``'interp'``, or ``'soft'``, x is preserved
while y is interpolated with a shift. If ``mode='roll'``, x is also preserved
and y elements are rolled along the array (``shift`` value must be an integer).
Warning:
It is always better to use ``mode='hard'`` or ``'roll'`` since the form of y is fully
preserved (no interpolation). After applying a shift using ``mode='interp'``,
one can apply an 'inverse' shift to retrieve the original data. The difference
between the retrieved y data and the original data gives an idea of the
information loss caused by the interpolation.
Returns:
None
"""
if mode is None:
if self.shift_mode == 'cross-correlation':
mode = 'roll'
elif self.shift_mode == 'fit':
mode = 'soft'
else:
warnings.warn(f'Shift mode ({self.shift_mode}) not recognized.', stacklevel=2)
return
for i in range(self.get_spectra_count()):
self.spectrum[i].apply_shift(shift=self.shifts[i], mode=mode)
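# Minimal alignment sketch combining calculate_shifts() and shifts_correction()
# (the folder path is a placeholder):
# >>> ss = spectra(folderpath='data/spectra')
# >>> ss.interp(num=2000)                     # put all spectra on a common x grid
# >>> ss.calculate_shifts(ref=0, mode='cross-correlation')
# >>> ss.shifts_correction()                  # 'roll' is selected automatically
# >>> ss.calculate_sum()                      # sum the aligned spectra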
def crop(self, start=None, stop=None):
"""Crop spectra ends.
args:
start (number, optional): The starting value. If None,
the minimum x value will be used.
stop (number, optional): The end value. If None,
the maximum x value will be used.
returns:
None
"""
if start is None:
start = max([min(s.data[:, 0]) for s in self.spectrum])
if stop is None:
stop = min([max(s.data[:, 0]) for s in self.spectrum])
for spectrum in self.spectrum:
step = spectrum.data[1, 0] - spectrum.data[0, 0]
data_range = (start + step/2, stop - step/2)
a, b = am.extract(spectrum.data[:, 0], spectrum.data[:, 1], ranges=(data_range,) )
temp = np.zeros((len(a), 2))
temp[:, 0] = a
temp[:, 1] = b
spectrum.data = copy.deepcopy(temp)
def calculate_sum(self):
"""Sum all spectra."""
self.check_x()
temp = copy.deepcopy(self.spectrum[0])
for i in range(1, self.get_spectra_count()):
temp.data[:, 1] += self.spectrum[i].data[:, 1]
self.sum = spectrum(data=temp.data)
def plot(self, ax=None, idx='all', normalized=True, vertical_increment=0, shift=0, factor=1, show_ranges=False, **kwargs):
"""Plot spectra.
args:
ax (matplotlib.axes, optional): axes for plotting on.
normalized (bool, optional): if True, spectrum is normalized by its
maximum value.
vertical_increment (int, optional): if one spectrum is plotted, it
adds a vertical offset to the plotted curve. If many spectra are plotted,
``vertical_increment`` defines
the vertical offset between each curve. Default is 0.
shift (float or int): horizontal shift value. Default is 0.
factor (float or int): multiplicative factor. Default is 1.
show_ranges (bool, optional): show ranges in which shifts were calculated.
**kwargs: kwargs are passed to ``plt.plot()`` that plots the data.
returns:
matplotlib.axes
"""
if ax is None:
fig = figm.figure()
ax = fig.add_subplot(111)
if 'marker' not in kwargs:
kwargs['marker'] = 'o'
if 'ms' not in kwargs:
kwargs['ms'] = 5
if idx == 'all':
for i in range(len(self.spectrum)):
self.spectrum[i].plot(ax=ax, normalized=normalized, vertical_increment=-vertical_increment*i, shift=shift, factor=factor, label=i, **kwargs)
else:
try:
if len(idx) == 1:
self.spectrum[idx[0]].plot(ax=ax, normalized=normalized, vertical_increment=vertical_increment, shift=shift, factor=factor, label=idx[0], **kwargs)
else:
for i in range(len(idx)):
self.spectrum[idx[i]].plot(ax=ax, normalized=normalized, vertical_increment=-vertical_increment*i, shift=shift, factor=factor, label=idx[i], **kwargs)
except TypeError:
self.spectrum[idx].plot(ax=ax, normalized=normalized, vertical_increment=vertical_increment, shift=shift, factor=factor, label=idx, **kwargs)
plt.legend()
if show_ranges:
if self.shifts is None:
warnings.warn('Shift range not defined. Use calculate_shifts().', stacklevel=2)
else:
for r in self.shift_ranges:
plt.axvline(r[0], color='green', linewidth=1.2, zorder=10)
plt.axvline(r[1], color='red', linewidth=1.2, zorder=10)
return ax
#
# Example:
# Simple usage: Gets a photon event list and remove the rotation of the
# detector.
#
# >>> import brixs
# >>> import matplotlib.pyplot as plt
# >>> import numpy as np
# >>> plt.ion()
# >>> # simulating a generic spectrum
# >>> I = brixs.dummy_spectrum(0, 0.2, excitations=[[0.5, 2, 2], [0.5, 4, 2]])
# >>> # simulating the photon_event list
# >>> data = brixs.dummy_photon_events(I, noise=0.02, background=0.01, y_zero_energy=-20, angle=2)
# >>> # initializing photon_events object
# >>> p = brixs.photon_events(data=data)
# >>> # set binning
# >>> p.set_binning((10, 50))
# >>> p.plot(show_bins=True)
#
# .. image:: _figs/bins.png
# :target: _static/bins.png
# :width: 600
# :align: center
#
# >>> # plot columns
# >>> p.plot_columns(columns='all', shift=100)
#
# .. image:: _figs/columns.png
# :target: _static/columns.png
# :width: 600
# :align: center
#
# >>> # fitting offsets
# >>> p.set_binning((10, 1000))
# >>> p.calculate_offsets(ranges=[[0, 0.005]])
# >>> p.fit_offsets()
# >>> p.plot(show_offsets=True, show_offsets_fit=True)
#
# .. image:: _figs/offsets.png
# :target: _static/offsets.png
# :width: 600
# :align: center
#
# .. image:: _figs/offsets_zoom.png
# :target: _static/offsets_zoom.png
# :width: 600
# :align: center
#
# >>> # remove offsets
# >>> p.offsets_correction()
# >>> p.plot()
#
# .. image:: _figs/final.png
# :target: _static/final.png
# :width: 600
# :align: center
#
# .. image:: _figs/final_zoom.png
# :target: _static/final_zoom.png
# :width: 600
# :align: center
|
py | b403d4552e2c35c29e7011b0996cebf4ba9764c5 | from uuid import uuid4
def Multsplit(string: str, items_of_split: list, Return: type = list, Mapping: dict = {}):
if len(items_of_split) == 1:
return "need more than one item for splitting"
elif len(items_of_split) == 0:
return "items_of_split is empty"
else:
replace = str(uuid4()) + str(uuid4()) + str(uuid4()) + str(uuid4())
for x in range(len(items_of_split)):
if items_of_split[x] in Mapping.keys():
string = string.replace(items_of_split[x],Mapping[items_of_split[x]])
else:
string = string.replace(items_of_split[x],replace)
string = string.split(replace)
if Return == list:
return string
elif Return == str:
return "".join(string)
else:
return "cannot find type"
|
py | b403d5362fceaac0a118f4f29e2a4f848801a9a4 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from datetime import timedelta
from mock import MagicMock, patch
from airflow import DAG
from airflow.contrib.operators.emr_create_job_flow_operator import EmrCreateJobFlowOperator
from airflow.models import TaskInstance
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
RUN_JOB_FLOW_SUCCESS_RETURN = {
'ResponseMetadata': {
'HTTPStatusCode': 200
},
'JobFlowId': 'j-8989898989'
}
class TestEmrCreateJobFlowOperator(unittest.TestCase):
# When
_config = {
'Name': 'test_job_flow',
'ReleaseLabel': '5.11.0',
'Steps': [{
'Name': 'test_step',
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': [
'/usr/lib/spark/bin/run-example',
'{{ macros.ds_add(ds, -1) }}',
'{{ ds }}'
]
}
}]
}
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
# Mock out the emr_client (moto has incorrect response)
self.emr_client_mock = MagicMock()
self.operator = EmrCreateJobFlowOperator(
task_id='test_task',
aws_conn_id='aws_default',
emr_conn_id='emr_default',
job_flow_overrides=self._config,
region_name='ap-southeast-2',
dag=DAG('test_dag_id', default_args=args)
)
def test_init(self):
self.assertEqual(self.operator.aws_conn_id, 'aws_default')
self.assertEqual(self.operator.emr_conn_id, 'emr_default')
self.assertEqual(self.operator.region_name, 'ap-southeast-2')
def test_render_template(self):
ti = TaskInstance(self.operator, DEFAULT_DATE)
ti.render_templates()
expected_args = {
'Name': 'test_job_flow',
'ReleaseLabel': '5.11.0',
'Steps': [{
'Name': 'test_step',
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': [
'/usr/lib/spark/bin/run-example',
(DEFAULT_DATE - timedelta(days=1)).strftime("%Y-%m-%d"),
DEFAULT_DATE.strftime("%Y-%m-%d"),
]
}
}]
}
self.assertDictEqual(self.operator.job_flow_overrides, expected_args)
def test_execute_returns_job_id(self):
self.emr_client_mock.run_job_flow.return_value = RUN_JOB_FLOW_SUCCESS_RETURN
# Mock out the emr_client creator
emr_session_mock = MagicMock()
emr_session_mock.client.return_value = self.emr_client_mock
self.boto3_session_mock = MagicMock(return_value=emr_session_mock)
with patch('boto3.session.Session', self.boto3_session_mock):
self.assertEqual(self.operator.execute(None), 'j-8989898989')
if __name__ == '__main__':
unittest.main()
|
py | b403d5fde45deebb1c046328f4bb8394a8225600 | import datetime
import uuid
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import graphene
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models import Q
from ...checkout import AddressType, models
from ...checkout.complete_checkout import complete_checkout
from ...checkout.error_codes import CheckoutErrorCode
from ...checkout.fetch import (
CheckoutLineInfo,
fetch_checkout_info,
fetch_checkout_lines,
get_valid_collection_points_for_checkout_info,
get_valid_shipping_method_list_for_checkout_info,
)
from ...checkout.utils import (
add_promo_code_to_checkout,
add_variants_to_checkout,
calculate_checkout_quantity,
change_billing_address_in_checkout,
change_shipping_address_in_checkout,
clear_delivery_method,
is_shipping_required,
recalculate_checkout_discount,
remove_promo_code_from_checkout,
validate_variants_in_checkout_lines,
)
from ...core import analytics
from ...core.exceptions import InsufficientStock, PermissionDenied, ProductNotPublished
from ...core.permissions import AccountPermissions
from ...core.tracing import traced_atomic_transaction
from ...core.transactions import transaction_with_commit_on_errors
from ...order import models as order_models
from ...product import models as product_models
from ...product.models import ProductChannelListing
from ...shipping import models as shipping_models
from ...warehouse import models as warehouse_models
from ...warehouse.availability import check_stock_quantity_bulk
from ..account.i18n import I18nMixin
from ..account.types import AddressInput
from ..channel.utils import clean_channel
from ..core.descriptions import (
ADDED_IN_31,
DEPRECATED_IN_3X_FIELD,
DEPRECATED_IN_3X_INPUT,
)
from ..core.enums import LanguageCodeEnum
from ..core.mutations import BaseMutation, ModelMutation
from ..core.scalars import UUID
from ..core.types.common import CheckoutError
from ..core.utils import from_global_id_or_error
from ..core.validators import (
validate_one_of_args_is_in_mutation,
validate_variants_available_in_channel,
)
from ..order.types import Order
from ..product.types import ProductVariant
from ..shipping.types import ShippingMethod
from ..utils import get_user_or_app_from_context
from ..warehouse.types import Warehouse
from .types import Checkout, CheckoutLine
from .utils import prepare_insufficient_stock_checkout_validation_error
ERROR_DOES_NOT_SHIP = "This checkout doesn't need shipping"
if TYPE_CHECKING:
from ...account.models import Address
from ...checkout.fetch import CheckoutInfo
def clean_delivery_method(
checkout_info: "CheckoutInfo",
lines: Iterable[CheckoutLineInfo],
method: Optional[Union[models.ShippingMethod, warehouse_models.Warehouse]],
) -> bool:
"""Check if current shipping method is valid."""
if not method:
# no shipping method was provided, it is valid
return True
if not is_shipping_required(lines):
raise ValidationError(
ERROR_DOES_NOT_SHIP, code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED.value
)
if not checkout_info.shipping_address:
raise ValidationError(
"Cannot choose a shipping method for a checkout without the "
"shipping address.",
code=CheckoutErrorCode.SHIPPING_ADDRESS_NOT_SET.value,
)
valid_methods = checkout_info.valid_delivery_methods
return method in valid_methods
def update_checkout_shipping_method_if_invalid(
checkout_info: "CheckoutInfo", lines: Iterable[CheckoutLineInfo]
):
quantity = calculate_checkout_quantity(lines)
# remove shipping method when empty checkout
if quantity == 0 or not is_shipping_required(lines):
clear_delivery_method(checkout_info)
is_valid = clean_delivery_method(
checkout_info=checkout_info,
lines=lines,
method=checkout_info.delivery_method_info.delivery_method,
)
if not is_valid:
clear_delivery_method(checkout_info)
def check_lines_quantity(
variants,
quantities,
country,
channel_slug,
allow_zero_quantity=False,
existing_lines=None,
replace=False,
):
"""Clean quantities and check if stock is sufficient for each checkout line.
By default, a zero quantity is not allowed,
but if this validation is used for updating existing checkout lines,
allow_zero_quantity can be set to True
and checkout lines with this quantity can be removed later.
"""
for quantity in quantities:
if not allow_zero_quantity and quantity <= 0:
raise ValidationError(
{
"quantity": ValidationError(
"The quantity should be higher than zero.",
code=CheckoutErrorCode.ZERO_QUANTITY,
)
}
)
elif allow_zero_quantity and quantity < 0:
raise ValidationError(
{
"quantity": ValidationError(
"The quantity should be higher or equal zero.",
code=CheckoutErrorCode.ZERO_QUANTITY,
)
}
)
if quantity > settings.MAX_CHECKOUT_LINE_QUANTITY:
raise ValidationError(
{
"quantity": ValidationError(
"Cannot add more than %d times this item."
"" % settings.MAX_CHECKOUT_LINE_QUANTITY,
code=CheckoutErrorCode.QUANTITY_GREATER_THAN_LIMIT,
)
}
)
try:
check_stock_quantity_bulk(
variants,
country,
quantities,
channel_slug,
existing_lines=existing_lines,
replace=replace,
)
except InsufficientStock as e:
errors = [
ValidationError(
f"Could not add items {item.variant}. "
f"Only {item.available_quantity} remaining in stock.",
code=e.code,
)
for item in e.items
]
raise ValidationError({"quantity": errors})
def validate_variants_available_for_purchase(variants_id: set, channel_id: int):
today = datetime.date.today()
is_available_for_purchase = Q(
available_for_purchase__lte=today,
product__variants__id__in=variants_id,
channel_id=channel_id,
)
available_variants = ProductChannelListing.objects.filter(
is_available_for_purchase
).values_list("product__variants__id", flat=True)
not_available_variants = variants_id.difference(set(available_variants))
if not_available_variants:
variant_ids = [
graphene.Node.to_global_id("ProductVariant", pk)
for pk in not_available_variants
]
error_code = CheckoutErrorCode.PRODUCT_UNAVAILABLE_FOR_PURCHASE
raise ValidationError(
{
"lines": ValidationError(
"Cannot add lines for unavailable for purchase variants.",
code=error_code, # type: ignore
params={"variants": variant_ids},
)
}
)
def get_checkout_by_token(token: uuid.UUID, prefetch_lookups: Iterable[str] = []):
try:
checkout = models.Checkout.objects.prefetch_related(*prefetch_lookups).get(
token=token
)
except ObjectDoesNotExist:
raise ValidationError(
{
"token": ValidationError(
f"Couldn't resolve to a node: {token}.",
code=CheckoutErrorCode.NOT_FOUND.value,
)
}
)
return checkout
class CheckoutLineInput(graphene.InputObjectType):
quantity = graphene.Int(required=True, description="The number of items purchased.")
variant_id = graphene.ID(required=True, description="ID of the product variant.")
class CheckoutCreateInput(graphene.InputObjectType):
channel = graphene.String(
description="Slug of a channel in which to create a checkout."
)
lines = graphene.List(
CheckoutLineInput,
description=(
"A list of checkout lines, each containing information about "
"an item in the checkout."
),
required=True,
)
email = graphene.String(description="The customer's email address.")
shipping_address = AddressInput(
description=(
"The mailing address to where the checkout will be shipped. "
"Note: the address will be ignored if the checkout "
"doesn't contain shippable items."
)
)
billing_address = AddressInput(description="Billing address of the customer.")
language_code = graphene.Argument(
LanguageCodeEnum, required=False, description="Checkout language code."
)
class CheckoutCreate(ModelMutation, I18nMixin):
created = graphene.Field(
graphene.Boolean,
description=(
"Whether the checkout was created or the current active one was returned. "
"Refer to checkoutLinesAdd and checkoutLinesUpdate to merge a cart "
"with an active checkout."
),
deprecation_reason=f"{DEPRECATED_IN_3X_FIELD} Always returns `True`.",
)
class Arguments:
input = CheckoutCreateInput(
required=True, description="Fields required to create checkout."
)
class Meta:
description = "Create a new checkout."
model = models.Checkout
return_field_name = "checkout"
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def clean_checkout_lines(
cls, lines, country, channel
) -> Tuple[List[product_models.ProductVariant], List[int]]:
variant_ids = [line["variant_id"] for line in lines]
variants = cls.get_nodes_or_error(
variant_ids,
"variant_id",
ProductVariant,
qs=product_models.ProductVariant.objects.prefetch_related(
"product__product_type"
),
)
quantities = [line["quantity"] for line in lines]
variant_db_ids = {variant.id for variant in variants}
validate_variants_available_for_purchase(variant_db_ids, channel.id)
validate_variants_available_in_channel(
variant_db_ids, channel.id, CheckoutErrorCode.UNAVAILABLE_VARIANT_IN_CHANNEL
)
check_lines_quantity(variants, quantities, country, channel.slug)
return variants, quantities
@classmethod
def retrieve_shipping_address(cls, user, data: dict) -> Optional["Address"]:
if data.get("shipping_address") is not None:
return cls.validate_address(
data["shipping_address"], address_type=AddressType.SHIPPING
)
if user.is_authenticated:
return user.default_shipping_address
return None
@classmethod
def retrieve_billing_address(cls, user, data: dict) -> Optional["Address"]:
if data.get("billing_address") is not None:
return cls.validate_address(
data["billing_address"], address_type=AddressType.BILLING
)
if user.is_authenticated:
return user.default_billing_address
return None
@classmethod
def clean_input(cls, info, instance: models.Checkout, data, input_cls=None):
user = info.context.user
channel = data.pop("channel")
cleaned_input = super().clean_input(info, instance, data)
cleaned_input["channel"] = channel
cleaned_input["currency"] = channel.currency_code
shipping_address = cls.retrieve_shipping_address(user, data)
billing_address = cls.retrieve_billing_address(user, data)
if shipping_address:
country = shipping_address.country.code
else:
country = channel.default_country
# Resolve and process the lines, retrieving the variants and quantities
lines = data.pop("lines", None)
if lines:
(
cleaned_input["variants"],
cleaned_input["quantities"],
) = cls.clean_checkout_lines(lines, country, cleaned_input["channel"])
# Use authenticated user's email as default email
if user.is_authenticated:
email = data.pop("email", None)
cleaned_input["email"] = email or user.email
language_code = data.get("language_code", settings.LANGUAGE_CODE)
cleaned_input["language_code"] = language_code
cleaned_input["shipping_address"] = shipping_address
cleaned_input["billing_address"] = billing_address
cleaned_input["country"] = country
return cleaned_input
@classmethod
@traced_atomic_transaction()
def save(cls, info, instance: models.Checkout, cleaned_input):
channel = cleaned_input["channel"]
# Create the checkout object
instance.save()
# Set checkout country
country = cleaned_input["country"]
instance.set_country(country)
# Create checkout lines
variants = cleaned_input.get("variants")
quantities = cleaned_input.get("quantities")
if variants and quantities:
try:
add_variants_to_checkout(instance, variants, quantities, channel.slug)
except InsufficientStock as exc:
error = prepare_insufficient_stock_checkout_validation_error(exc)
raise ValidationError({"lines": error})
except ProductNotPublished as exc:
raise ValidationError(
"Can't create checkout with unpublished product.",
code=exc.code,
)
# Save addresses
shipping_address = cleaned_input.get("shipping_address")
if shipping_address and instance.is_shipping_required():
shipping_address.save()
instance.shipping_address = shipping_address.get_copy()
billing_address = cleaned_input.get("billing_address")
if billing_address:
billing_address.save()
instance.billing_address = billing_address.get_copy()
instance.save()
@classmethod
def get_instance(cls, info, **data):
instance = super().get_instance(info, **data)
user = info.context.user
if user.is_authenticated:
instance.user = user
return instance
@classmethod
def perform_mutation(cls, _root, info, **data):
channel_input = data.get("input", {}).get("channel")
channel = clean_channel(channel_input, error_class=CheckoutErrorCode)
if channel:
data["input"]["channel"] = channel
response = super().perform_mutation(_root, info, **data)
info.context.plugins.checkout_created(response.checkout)
response.created = True
return response
class CheckoutLinesAdd(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
required=False,
)
token = UUID(description="Checkout token.", required=False)
lines = graphene.List(
CheckoutLineInput,
required=True,
description=(
"A list of checkout lines, each containing information about "
"an item in the checkout."
),
)
class Meta:
description = (
"Adds a checkout line to the existing checkout."
"If line was already in checkout, its quantity will be increased."
)
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def validate_checkout_lines(
cls, variants, quantities, country, channel_slug, lines=None
):
check_lines_quantity(
variants, quantities, country, channel_slug, existing_lines=lines
)
@classmethod
def clean_input(
cls,
checkout,
variants,
quantities,
checkout_info,
lines,
manager,
discounts,
replace,
):
channel_slug = checkout_info.channel.slug
cls.validate_checkout_lines(
variants, quantities, checkout.get_country(), channel_slug, lines=lines
)
variants_db_ids = {variant.id for variant in variants}
validate_variants_available_for_purchase(variants_db_ids, checkout.channel_id)
validate_variants_available_in_channel(
variants_db_ids,
checkout.channel_id,
CheckoutErrorCode.UNAVAILABLE_VARIANT_IN_CHANNEL,
)
if variants and quantities:
try:
checkout = add_variants_to_checkout(
checkout,
variants,
quantities,
channel_slug,
skip_stock_check=True, # already checked by validate_checkout_lines
replace=replace,
)
except ProductNotPublished as exc:
raise ValidationError(
"Can't add unpublished product.",
code=exc.code,
)
lines = fetch_checkout_lines(checkout)
checkout_info.valid_shipping_methods = (
get_valid_shipping_method_list_for_checkout_info(
checkout_info, checkout_info.shipping_address, lines, discounts, manager
)
)
checkout_info.valid_pick_up_points = (
get_valid_collection_points_for_checkout_info(
checkout_info, checkout_info.shipping_address, lines
)
)
return lines
@classmethod
def perform_mutation(
cls, _root, info, lines, checkout_id=None, token=None, replace=False
):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info, checkout_id or token, only_type=Checkout, field="checkout_id"
)
discounts = info.context.discounts
manager = info.context.plugins
variant_ids = [line.get("variant_id") for line in lines]
variants = cls.get_nodes_or_error(variant_ids, "variant_id", ProductVariant)
quantities = [line.get("quantity") for line in lines]
checkout_info = fetch_checkout_info(checkout, [], discounts, manager)
lines = fetch_checkout_lines(checkout)
lines = cls.clean_input(
checkout,
variants,
quantities,
checkout_info,
lines,
manager,
discounts,
replace,
)
checkout_info.valid_shipping_methods = (
get_valid_shipping_method_list_for_checkout_info(
checkout_info, checkout_info.shipping_address, lines, discounts, manager
)
)
checkout_info.valid_pick_up_points = (
get_valid_collection_points_for_checkout_info(
checkout_info, checkout_info.shipping_address, lines
)
)
update_checkout_shipping_method_if_invalid(checkout_info, lines)
recalculate_checkout_discount(
manager, checkout_info, lines, info.context.discounts
)
manager.checkout_updated(checkout)
return CheckoutLinesAdd(checkout=checkout)
class CheckoutLinesUpdate(CheckoutLinesAdd):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Meta:
description = "Updates checkout line in the existing checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def validate_checkout_lines(
cls, variants, quantities, country, channel_slug, lines=None
):
check_lines_quantity(
variants,
quantities,
country,
channel_slug,
allow_zero_quantity=True,
existing_lines=lines,
replace=True,
)
@classmethod
def perform_mutation(cls, root, info, lines, checkout_id=None, token=None):
return super().perform_mutation(
root, info, lines, checkout_id, token, replace=True
)
class CheckoutLineDelete(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
required=False,
)
token = UUID(description="Checkout token.", required=False)
line_id = graphene.ID(description="ID of the checkout line to delete.")
class Meta:
description = "Deletes a CheckoutLine."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, line_id, checkout_id=None, token=None):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info, checkout_id or token, only_type=Checkout, field="checkout_id"
)
line = cls.get_node_or_error(
info, line_id, only_type=CheckoutLine, field="line_id"
)
if line and line in checkout.lines.all():
line.delete()
manager = info.context.plugins
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines, info.context.discounts, manager
)
update_checkout_shipping_method_if_invalid(checkout_info, lines)
recalculate_checkout_discount(
manager, checkout_info, lines, info.context.discounts
)
manager.checkout_updated(checkout)
return CheckoutLineDelete(checkout=checkout)
class CheckoutCustomerAttach(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(
required=False,
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
)
customer_id = graphene.ID(
required=False,
description=(
"ID of customer to attach to checkout. Can be used to attach customer "
"to checkout by staff or app. Requires IMPERSONATE_USER permission."
),
)
token = UUID(description="Checkout token.", required=False)
class Meta:
description = "Sets the customer as the owner of the checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def check_permissions(cls, context):
return context.user.is_authenticated or context.app
@classmethod
def perform_mutation(
cls, _root, info, checkout_id=None, token=None, customer_id=None
):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info, checkout_id or token, only_type=Checkout, field="checkout_id"
)
# Raise error when trying to attach a user to a checkout
# that is already owned by another user.
if checkout.user_id:
raise PermissionDenied()
if customer_id:
requestor = get_user_or_app_from_context(info.context)
if not requestor.has_perm(AccountPermissions.IMPERSONATE_USER):
raise PermissionDenied()
customer = cls.get_node_or_error(info, customer_id, only_type="User")
else:
customer = info.context.user
checkout.user = customer
checkout.email = customer.email
checkout.save(update_fields=["email", "user", "last_change"])
info.context.plugins.checkout_updated(checkout)
return CheckoutCustomerAttach(checkout=checkout)
class CheckoutCustomerDetach(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
required=False,
)
token = UUID(description="Checkout token.", required=False)
class Meta:
description = "Removes the user assigned as the owner of the checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def check_permissions(cls, context):
return context.user.is_authenticated or context.app
@classmethod
def perform_mutation(cls, _root, info, checkout_id=None, token=None):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info, checkout_id or token, only_type=Checkout, field="checkout_id"
)
requestor = get_user_or_app_from_context(info.context)
if not requestor.has_perm(AccountPermissions.IMPERSONATE_USER):
# Raise error if the current user doesn't own the checkout of the given ID.
if checkout.user and checkout.user != info.context.user:
raise PermissionDenied()
checkout.user = None
checkout.save(update_fields=["user", "last_change"])
info.context.plugins.checkout_updated(checkout)
return CheckoutCustomerDetach(checkout=checkout)
class CheckoutShippingAddressUpdate(BaseMutation, I18nMixin):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(
required=False,
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
)
token = UUID(description="Checkout token.", required=False)
shipping_address = AddressInput(
required=True,
description="The mailing address to where the checkout will be shipped.",
)
class Meta:
description = "Update shipping address in the existing checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def process_checkout_lines(
cls, lines: Iterable["CheckoutLineInfo"], country: str, channel_slug: str
) -> None:
variant_ids = [line_info.variant.id for line_info in lines]
variants = list(
product_models.ProductVariant.objects.filter(
id__in=variant_ids
).prefetch_related("product__product_type")
) # FIXME: is this prefetch needed?
quantities = [line_info.line.quantity for line_info in lines]
check_lines_quantity(variants, quantities, country, channel_slug)
@classmethod
def perform_mutation(
cls, _root, info, shipping_address, checkout_id=None, token=None
):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(
token, prefetch_lookups=["lines__variant__product__product_type"]
)
# DEPRECATED
if checkout_id:
pk = cls.get_global_id_or_error(
checkout_id, only_type=Checkout, field="checkout_id"
)
try:
checkout = models.Checkout.objects.prefetch_related(
"lines__variant__product__product_type"
).get(pk=pk)
except ObjectDoesNotExist:
raise ValidationError(
{
"checkout_id": ValidationError(
f"Couldn't resolve to a node: {checkout_id}",
code=CheckoutErrorCode.NOT_FOUND,
)
}
)
lines = fetch_checkout_lines(checkout)
if not is_shipping_required(lines):
raise ValidationError(
{
"shipping_address": ValidationError(
ERROR_DOES_NOT_SHIP,
code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
)
}
)
shipping_address = cls.validate_address(
shipping_address,
address_type=AddressType.SHIPPING,
instance=checkout.shipping_address,
info=info,
)
discounts = info.context.discounts
manager = info.context.plugins
checkout_info = fetch_checkout_info(checkout, lines, discounts, manager)
country = shipping_address.country.code
checkout.set_country(country, commit=True)
# Resolve and process the lines, validating variants quantities
if lines:
cls.process_checkout_lines(lines, country, checkout_info.channel.slug)
update_checkout_shipping_method_if_invalid(checkout_info, lines)
with traced_atomic_transaction():
shipping_address.save()
change_shipping_address_in_checkout(
checkout_info, shipping_address, lines, discounts, manager
)
recalculate_checkout_discount(manager, checkout_info, lines, discounts)
manager.checkout_updated(checkout)
return CheckoutShippingAddressUpdate(checkout=checkout)
class CheckoutBillingAddressUpdate(CheckoutShippingAddressUpdate):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(
required=False,
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} "
"Use token instead."
),
)
token = UUID(description="Checkout token.", required=False)
billing_address = AddressInput(
required=True, description="The billing address of the checkout."
)
class Meta:
description = "Update billing address in the existing checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(
cls, _root, info, billing_address, checkout_id=None, token=None
):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info, checkout_id or token, only_type=Checkout, field="checkout_id"
)
billing_address = cls.validate_address(
billing_address,
address_type=AddressType.BILLING,
instance=checkout.billing_address,
info=info,
)
with traced_atomic_transaction():
billing_address.save()
change_billing_address_in_checkout(checkout, billing_address)
info.context.plugins.checkout_updated(checkout)
return CheckoutBillingAddressUpdate(checkout=checkout)
class CheckoutLanguageCodeUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(
required=False,
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
)
token = UUID(description="Checkout token.", required=False)
language_code = graphene.Argument(
LanguageCodeEnum, required=True, description="New language code."
)
class Meta:
description = "Update language code in the existing checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, language_code, checkout_id=None, token=None):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info, checkout_id or token, only_type=Checkout, field="checkout_id"
)
checkout.language_code = language_code
checkout.save(update_fields=["language_code", "last_change"])
info.context.plugins.checkout_updated(checkout)
return CheckoutLanguageCodeUpdate(checkout=checkout)
class CheckoutEmailUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
required=False,
)
token = UUID(description="Checkout token.", required=False)
email = graphene.String(required=True, description="email.")
class Meta:
description = "Updates email address in the existing checkout object."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, email, checkout_id=None, token=None):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info, checkout_id or token, only_type=Checkout, field="checkout_id"
)
checkout.email = email
cls.clean_instance(info, checkout)
checkout.save(update_fields=["email", "last_change"])
info.context.plugins.checkout_updated(checkout)
return CheckoutEmailUpdate(checkout=checkout)
class CheckoutShippingMethodUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
checkout_id = graphene.ID(
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
required=False,
)
token = UUID(description="Checkout token.", required=False)
shipping_method_id = graphene.ID(required=True, description="Shipping method.")
class Meta:
description = "Updates the shipping address of the checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(
cls, _root, info, shipping_method_id, checkout_id=None, token=None
):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info, checkout_id or token, only_type=Checkout, field="checkout_id"
)
manager = info.context.plugins
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines, info.context.discounts, manager
)
if not is_shipping_required(lines):
raise ValidationError(
{
"shipping_method": ValidationError(
ERROR_DOES_NOT_SHIP,
code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
)
}
)
shipping_method = cls.get_node_or_error(
info,
shipping_method_id,
only_type=ShippingMethod,
field="shipping_method_id",
qs=shipping_models.ShippingMethod.objects.prefetch_related(
"postal_code_rules"
),
)
shipping_method_is_valid = clean_delivery_method(
checkout_info=checkout_info,
lines=lines,
method=shipping_method,
)
if not shipping_method_is_valid:
raise ValidationError(
{
"shipping_method": ValidationError(
"This shipping method is not applicable.",
code=CheckoutErrorCode.SHIPPING_METHOD_NOT_APPLICABLE,
)
}
)
checkout.shipping_method = shipping_method
checkout.save(update_fields=["shipping_method", "last_change"])
recalculate_checkout_discount(
manager, checkout_info, lines, info.context.discounts
)
manager.checkout_updated(checkout)
return CheckoutShippingMethodUpdate(checkout=checkout)
class CheckoutDeliveryMethodUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
token = UUID(description="Checkout token.", required=False)
delivery_method_id = graphene.ID(
description="Delivery Method ID (`Warehouse` ID or `ShippingMethod` ID).",
required=False,
)
class Meta:
description = (
f"{ADDED_IN_31} Updates the delivery method "
"(shipping method or pick up point) of the checkout."
)
error_type_class = CheckoutError
@classmethod
def perform_on_shipping_method(
cls, info, shipping_method_id, checkout_info, lines, checkout, manager
):
shipping_method = cls.get_node_or_error(
info,
shipping_method_id,
only_type=ShippingMethod,
field="delivery_method_id",
qs=shipping_models.ShippingMethod.objects.prefetch_related(
"postal_code_rules"
),
)
cls._check_delivery_method(
checkout_info, lines, shipping_method=shipping_method, collection_point=None
)
cls._update_delivery_method(
manager,
checkout,
shipping_method=shipping_method,
collection_point=None,
)
recalculate_checkout_discount(
manager, checkout_info, lines, info.context.discounts
)
return CheckoutDeliveryMethodUpdate(checkout=checkout)
@classmethod
def perform_on_collection_point(
cls, info, collection_point_id, checkout_info, lines, checkout, manager
):
collection_point = cls.get_node_or_error(
info,
collection_point_id,
only_type=Warehouse,
field="delivery_method_id",
qs=warehouse_models.Warehouse.objects.select_related("address"),
)
cls._check_delivery_method(
checkout_info,
lines,
shipping_method=None,
collection_point=collection_point,
)
cls._update_delivery_method(
manager, checkout, shipping_method=None, collection_point=collection_point
)
return CheckoutDeliveryMethodUpdate(checkout=checkout)
@staticmethod
def _check_delivery_method(
checkout_info,
lines,
*,
shipping_method: Optional[ShippingMethod],
collection_point: Optional[Warehouse]
) -> None:
delivery_method = shipping_method
error_msg = "This shipping method is not applicable."
if collection_point is not None:
delivery_method = collection_point
error_msg = "This pick up point is not applicable."
delivery_method_is_valid = clean_delivery_method(
checkout_info=checkout_info, lines=lines, method=delivery_method
)
if not delivery_method_is_valid:
raise ValidationError(
{
"delivery_method_id": ValidationError(
error_msg,
code=CheckoutErrorCode.DELIVERY_METHOD_NOT_APPLICABLE.value,
)
}
)
@staticmethod
def _update_delivery_method(
manager,
checkout: Checkout,
*,
shipping_method: Optional[ShippingMethod],
collection_point: Optional[Warehouse]
) -> None:
checkout.shipping_method = shipping_method
checkout.collection_point = collection_point
checkout.save(
update_fields=["shipping_method", "collection_point", "last_change"]
)
manager.checkout_updated(checkout)
@staticmethod
def _resolve_delivery_method_type(id_) -> Optional[str]:
if id_ is None:
return None
possible_types = ("Warehouse", "ShippingMethod")
type_, id_ = from_global_id_or_error(id_)
str_type = str(type_)
if str_type not in possible_types:
raise ValidationError(
{
"delivery_method_id": ValidationError(
"ID does not belong to Warehouse or ShippingMethod",
code=CheckoutErrorCode.INVALID.value,
)
}
)
return str_type
@classmethod
def perform_mutation(
cls,
_,
info,
token,
delivery_method_id=None,
):
checkout = get_checkout_by_token(token)
manager = info.context.plugins
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(
checkout, lines, info.context.discounts, manager
)
if not is_shipping_required(lines):
raise ValidationError(
{
"delivery_method": ValidationError(
ERROR_DOES_NOT_SHIP,
code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
)
}
)
type_name = cls._resolve_delivery_method_type(delivery_method_id)
if type_name == "Warehouse":
return cls.perform_on_collection_point(
info, delivery_method_id, checkout_info, lines, checkout, manager
)
return cls.perform_on_shipping_method(
info, delivery_method_id, checkout_info, lines, checkout, manager
)
class CheckoutComplete(BaseMutation):
order = graphene.Field(Order, description="Placed order.")
confirmation_needed = graphene.Boolean(
required=True,
default_value=False,
description=(
"Set to true if payment needs to be confirmed"
" before checkout is complete."
),
)
confirmation_data = graphene.JSONString(
required=False,
description=(
"Confirmation data used to process additional authorization steps."
),
)
class Arguments:
checkout_id = graphene.ID(
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
required=False,
)
token = UUID(description="Checkout token.", required=False)
store_source = graphene.Boolean(
default_value=False,
description=(
"Determines whether to store the payment source for future usage. "
f"{DEPRECATED_IN_3X_INPUT} Use checkoutPaymentCreate for this action."
),
)
redirect_url = graphene.String(
required=False,
description=(
"URL of a view where users should be redirected to "
"see the order details. URL in RFC 1808 format."
),
)
payment_data = graphene.JSONString(
required=False,
description=(
"Client-side generated data required to finalize the payment."
),
)
class Meta:
description = (
"Completes the checkout. As a result a new order is created and "
"a payment charge is made. This action requires a successful "
"payment before it can be performed. "
"In case additional confirmation step as 3D secure is required "
"confirmationNeeded flag will be set to True and no order created "
"until payment is confirmed with second call of this mutation."
)
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(
cls, _root, info, store_source, checkout_id=None, token=None, **data
):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
tracking_code = analytics.get_client_id(info.context)
with transaction_with_commit_on_errors():
try:
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info,
checkout_id or token,
only_type=Checkout,
field="checkout_id",
)
except ValidationError as e:
# DEPRECATED
if checkout_id:
token = cls.get_global_id_or_error(
checkout_id, only_type=Checkout, field="checkout_id"
)
order = order_models.Order.objects.get_by_checkout_token(token)
if order:
if not order.channel.is_active:
raise ValidationError(
{
"channel": ValidationError(
"Cannot complete checkout with inactive channel.",
code=CheckoutErrorCode.CHANNEL_INACTIVE.value,
)
}
)
# The order is already created. We return it as a success
# checkoutComplete response. Order is anonymized for not logged in
# user
return CheckoutComplete(
order=order, confirmation_needed=False, confirmation_data={}
)
raise e
manager = info.context.plugins
lines = fetch_checkout_lines(checkout)
validate_variants_in_checkout_lines(lines)
checkout_info = fetch_checkout_info(
checkout, lines, info.context.discounts, manager
)
requestor = get_user_or_app_from_context(info.context)
if requestor.has_perm(AccountPermissions.IMPERSONATE_USER):
# Allow impersonating user and process a checkout by using user details
# assigned to checkout.
customer = checkout.user or AnonymousUser()
else:
customer = info.context.user
order, action_required, action_data = complete_checkout(
manager=manager,
checkout_info=checkout_info,
lines=lines,
payment_data=data.get("payment_data", {}),
store_source=store_source,
discounts=info.context.discounts,
user=customer,
app=info.context.app,
site_settings=info.context.site.settings,
tracking_code=tracking_code,
redirect_url=data.get("redirect_url"),
)
# If gateway returns information that additional steps are required we need
# to inform the frontend and pass all required data
return CheckoutComplete(
order=order,
confirmation_needed=action_required,
confirmation_data=action_data,
)
class CheckoutAddPromoCode(BaseMutation):
checkout = graphene.Field(
Checkout, description="The checkout with the added gift card or voucher."
)
class Arguments:
checkout_id = graphene.ID(
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
required=False,
)
token = UUID(description="Checkout token.", required=False)
promo_code = graphene.String(
description="Gift card code or voucher code.", required=True
)
class Meta:
description = "Adds a gift card or a voucher to a checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, promo_code, checkout_id=None, token=None):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info, checkout_id or token, only_type=Checkout, field="checkout_id"
)
manager = info.context.plugins
discounts = info.context.discounts
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, discounts, manager)
add_promo_code_to_checkout(
manager,
checkout_info,
lines,
promo_code,
discounts,
)
checkout_info.valid_shipping_methods = (
get_valid_shipping_method_list_for_checkout_info(
checkout_info, checkout_info.shipping_address, lines, discounts, manager
)
)
update_checkout_shipping_method_if_invalid(checkout_info, lines)
manager.checkout_updated(checkout)
return CheckoutAddPromoCode(checkout=checkout)
class CheckoutRemovePromoCode(BaseMutation):
checkout = graphene.Field(
Checkout, description="The checkout with the removed gift card or voucher."
)
class Arguments:
checkout_id = graphene.ID(
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
required=False,
)
token = UUID(description="Checkout token.", required=False)
promo_code = graphene.String(
description="Gift card code or voucher code.", required=True
)
class Meta:
description = "Remove a gift card or a voucher from a checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, promo_code, checkout_id=None, token=None):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info, checkout_id or token, only_type=Checkout, field="checkout_id"
)
manager = info.context.plugins
checkout_info = fetch_checkout_info(
checkout, [], info.context.discounts, manager
)
remove_promo_code_from_checkout(checkout_info, promo_code)
manager.checkout_updated(checkout)
return CheckoutRemovePromoCode(checkout=checkout)
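# Illustrative usage example (not part of the original module): one of the
# mutations defined above as it would be issued over the GraphQL API; the token
# value below is a placeholder.
#
#   mutation {
#     checkoutEmailUpdate(token: "<checkout-token>", email: "customer@example.com") {
#       checkout { id email }
#       errors { field code }
#     }
#   }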
|
py | b403d76d315fe1aac1a694fa62085cd66bbdc4bc | import sys
from jetbot import *
# from jetbot import webcam
args = ["alset_train.py", sys.argv[1], sys.argv[2]]
r = Robot(args)
webcam_run(r)
|
py | b403d7766a34cd38d19be6aca089e0c8af26c273 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in WideNDeep model classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from keras import activations
from keras import backend as K
from keras import layers as layer_module
from keras.engine import base_layer
from keras.engine import data_adapter
from keras.engine import training as keras_training
from keras.utils import generic_utils
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.experimental.WideDeepModel')
class WideDeepModel(keras_training.Model):
r"""Wide & Deep Model for regression and classification problems.
  This model jointly trains a linear and a DNN model.
Example:
```python
linear_model = LinearModel()
dnn_model = keras.Sequential([keras.layers.Dense(units=64),
keras.layers.Dense(units=1)])
combined_model = WideDeepModel(linear_model, dnn_model)
  combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse'])
# define dnn_inputs and linear_inputs as separate numpy arrays or
# a single numpy array if dnn_inputs is same as linear_inputs.
  combined_model.fit([linear_inputs, dnn_inputs], y, epochs=epochs)
# or define a single `tf.data.Dataset` that contains a single tensor or
# separate tensors for dnn_inputs and linear_inputs.
dataset = tf.data.Dataset.from_tensors(([linear_inputs, dnn_inputs], y))
  combined_model.fit(dataset, epochs=epochs)
```
Both linear and dnn model can be pre-compiled and trained separately
before jointly training:
Example:
```python
linear_model = LinearModel()
linear_model.compile('adagrad', 'mse')
  linear_model.fit(linear_inputs, y, epochs=epochs)
dnn_model = keras.Sequential([keras.layers.Dense(units=1)])
dnn_model.compile('rmsprop', 'mse')
  dnn_model.fit(dnn_inputs, y, epochs=epochs)
combined_model = WideDeepModel(linear_model, dnn_model)
  combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse'])
  combined_model.fit([linear_inputs, dnn_inputs], y, epochs=epochs)
```
"""
def __init__(self, linear_model, dnn_model, activation=None, **kwargs):
"""Create a Wide & Deep Model.
Args:
linear_model: a premade LinearModel, its output must match the output of
the dnn model.
dnn_model: a `tf.keras.Model`, its output must match the output of the
linear model.
activation: Activation function. Set it to None to maintain a linear
activation.
**kwargs: The keyword arguments that are passed on to BaseLayer.__init__.
Allowed keyword arguments include `name`.
"""
super(WideDeepModel, self).__init__(**kwargs)
base_layer.keras_premade_model_gauge.get_cell('WideDeep').set(True)
self.linear_model = linear_model
self.dnn_model = dnn_model
self.activation = activations.get(activation)
def call(self, inputs, training=None):
if not isinstance(inputs, (tuple, list)) or len(inputs) != 2:
linear_inputs = dnn_inputs = inputs
else:
linear_inputs, dnn_inputs = inputs
linear_output = self.linear_model(linear_inputs)
# pylint: disable=protected-access
if self.dnn_model._expects_training_arg:
if training is None:
training = K.learning_phase()
dnn_output = self.dnn_model(dnn_inputs, training=training)
else:
dnn_output = self.dnn_model(dnn_inputs)
output = tf.nest.map_structure(lambda x, y: (x + y), linear_output, dnn_output)
if self.activation:
return tf.nest.map_structure(self.activation, output)
return output
# This does not support gradient scaling and LossScaleOptimizer.
def train_step(self, data):
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
x, y, sample_weight = data_adapter.expand_1d((x, y, sample_weight))
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
self.compiled_metrics.update_state(y, y_pred, sample_weight)
if isinstance(self.optimizer, (list, tuple)):
linear_vars = self.linear_model.trainable_variables
dnn_vars = self.dnn_model.trainable_variables
linear_grads, dnn_grads = tape.gradient(loss, (linear_vars, dnn_vars))
linear_optimizer = self.optimizer[0]
dnn_optimizer = self.optimizer[1]
linear_optimizer.apply_gradients(zip(linear_grads, linear_vars))
dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))
else:
trainable_variables = self.trainable_variables
grads = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(grads, trainable_variables))
return {m.name: m.result() for m in self.metrics}
def _make_train_function(self):
# Only needed for graph mode and model_to_estimator.
has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
self._check_trainable_weights_consistency()
# If we have re-compiled the loss/weighted metric sub-graphs then create
# train function even if one exists already. This is because
# `_feed_sample_weights` list has been updated on re-compile.
if getattr(self, 'train_function', None) is None or has_recompiled:
# Restore the compiled trainable state.
current_trainable_state = self._get_trainable_state()
self._set_trainable_state(self._compiled_trainable_state)
inputs = (
self._feed_inputs + self._feed_targets + self._feed_sample_weights)
if not isinstance(K.symbolic_learning_phase(), int):
inputs += [K.symbolic_learning_phase()]
if isinstance(self.optimizer, (list, tuple)):
linear_optimizer = self.optimizer[0]
dnn_optimizer = self.optimizer[1]
else:
linear_optimizer = self.optimizer
dnn_optimizer = self.optimizer
with K.get_graph().as_default():
with K.name_scope('training'):
# Training updates
updates = []
linear_updates = linear_optimizer.get_updates(
params=self.linear_model.trainable_weights, # pylint: disable=protected-access
loss=self.total_loss)
updates += linear_updates
dnn_updates = dnn_optimizer.get_updates(
params=self.dnn_model.trainable_weights, # pylint: disable=protected-access
loss=self.total_loss)
updates += dnn_updates
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self.inputs)
metrics = self._get_training_eval_metrics()
metrics_tensors = [
m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access
]
with K.name_scope('training'):
# Gets loss and metrics. Updates weights at each call.
fn = K.function(
inputs, [self.total_loss] + metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
setattr(self, 'train_function', fn)
# Restore the current trainable state
self._set_trainable_state(current_trainable_state)
def get_config(self):
linear_config = generic_utils.serialize_keras_object(self.linear_model)
dnn_config = generic_utils.serialize_keras_object(self.dnn_model)
config = {
'linear_model': linear_config,
'dnn_model': dnn_config,
'activation': activations.serialize(self.activation),
}
base_config = base_layer.Layer.get_config(self)
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
linear_config = config.pop('linear_model')
linear_model = layer_module.deserialize(linear_config, custom_objects)
dnn_config = config.pop('dnn_model')
dnn_model = layer_module.deserialize(dnn_config, custom_objects)
activation = activations.deserialize(
config.pop('activation', None), custom_objects=custom_objects)
return cls(
linear_model=linear_model,
dnn_model=dnn_model,
activation=activation,
**config)
|
py | b403d88666bcd9920c64babb8e9b8c0e1aed6367 | from collections import OrderedDict
from importlib import import_module
import logging
from django.utils.functional import SimpleLazyObject
from zentral.conf import settings
logger = logging.getLogger('zentral.core.secret_engines')
class SecretEngines:
separator = "$"
noop_engine_name = "noop"
noop_engine_backend = "zentral.core.secret_engines.backends.cleartext"
@staticmethod
def _get_secret_engine_class(module_path):
class_name = "SecretEngine"
module = import_module(module_path)
return getattr(module, class_name)
def load_config(self, config):
self.secret_engines = OrderedDict()
self.default_secret_engine = None
# add configured engines
for secret_engine_name, secret_engine_conf in config.items():
if secret_engine_name == self.noop_engine_name:
self.logger.error("'%s' is a reserved engine name. skipped!", self.noop_engine_name)
continue
if self.separator in secret_engine_name:
self.logger.error("'%' not allowed in secret engine name. skipped!", self.separator)
continue
secret_engine_conf = secret_engine_conf.copy()
secret_engine_conf['secret_engine_name'] = secret_engine_name
secret_engine_class = self._get_secret_engine_class(secret_engine_conf.pop('backend'))
secret_engine = secret_engine_class(secret_engine_conf)
self.secret_engines[secret_engine_name] = secret_engine
if secret_engine.default:
if self.default_secret_engine:
logger.error('Multiple default secret engines')
else:
self.default_secret_engine = secret_engine
# add noop engine
noop_secret_engine_class = self._get_secret_engine_class(self.noop_engine_backend)
noop_secret_engine = noop_secret_engine_class({'secret_engine_name': self.noop_engine_name})
self.secret_engines[self.noop_engine_name] = noop_secret_engine
# default default secret engine
if not self.default_secret_engine:
logger.info("No default secret engine")
for secret_engine_name, secret_engine in self.secret_engines.items():
logger.info("Use '%s' secret engine as default", secret_engine_name)
self.default_secret_engine = secret_engine
break
def __init__(self, settings):
self.load_config(settings.get("secret_engines", {}))
def __len__(self):
return len(self.secret_engines)
def get(self, secret_engine_name):
return self.secret_engines.get(secret_engine_name)
secret_engines = SimpleLazyObject(lambda: SecretEngines(settings))
class EncryptionError(Exception):
pass
class DecryptionError(Exception):
pass
def encrypt(data, **context):
if not isinstance(data, bytes):
raise TypeError("a bytes object is required")
default_secret_engine = secret_engines.default_secret_engine
try:
encoded_data = default_secret_engine.encrypt(data, **context)
except Exception as exc:
raise EncryptionError(f"Secret engine {default_secret_engine.name} encryption error") from exc
return "{}${}".format(default_secret_engine.name, encoded_data)
def encrypt_str(data, **context):
if not isinstance(data, str):
raise TypeError("a str object is required")
return encrypt(data.encode("utf-8"), **context)
def decrypt(token, **context):
secret_engine_name, data = token.split("$")
secret_engine = secret_engines.get(secret_engine_name)
if not secret_engine:
raise DecryptionError(f"Unknown secret engine: '{secret_engine_name}'")
try:
return secret_engine.decrypt(data, **context)
except Exception as exc:
raise DecryptionError("Secret engine decryption error") from exc
def decrypt_str(token, **context):
return decrypt(token, **context).decode("utf-8")
def rewrap(token, **context):
return encrypt(decrypt(token, **context), **context)
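# Illustrative sketch (not part of the original module): a round trip through
# the configured default engine. The context keys below are arbitrary example
# values; tokens have the form "<engine_name>$<encoded_data>".
#
#   token = encrypt_str("hunter2", model="instance", field="password")
#   assert decrypt_str(token, model="instance", field="password") == "hunter2"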
|
py | b403da26b179832a776d9558ff8f540175e4a4eb | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 14 19:20:26 2020
@authors: jacob vincent, isaac keohane, raul ferraz
"""
import os
import sys
import random
import numpy as np
import pandas as pd
module_path = os.path.abspath(os.path.join('../src'))
if module_path not in sys.path:
sys.path.append(module_path)
from matplotlib import pyplot as plt
from utils.file import load_from_json
import tensorflow.keras as ker
from misc_basicFuncs import getMaxIndex
# load configs
trans_configs = load_from_json("configs/athena-mnist.json")
model_configs = load_from_json("configs/model-mnist.json")
data_configs = load_from_json("configs/data-mnist.json")
verbose = 10 # print statements in this script
verModel = 0
activations = ["sigmoid","relu","elu"]
# set the activation for model training
activation = activations[2]
# set boolean to get individual evaluations or bulk for each category
getEachEval = True
getOverallEval = True
################################################################
def trainNewModel(inputData, trueData, epochs=7, verbose=2, active="relu"):
model = ker.models.Sequential([
ker.layers.Flatten(input_shape=(28, 28)),
ker.layers.Dense(128, activation=active),
ker.layers.Dense(128, activation=active),
ker.layers.Dense(10)
])
model.compile(optimizer='adam',
loss=ker.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
    model.fit(inputData, trueData, epochs=epochs)
return model
############################################################
# load data
cleanData = np.load(data_configs.get("bs_file"))
trueLabels = np.load(data_configs.get("label_file"))
ensPred = np.load("models/ensemPredic_benignInput_probs.npy")
ensPred_indexes = np.zeros(np.shape(ensPred)[0])
trueLabels_indexes = np.zeros(np.shape(trueLabels)[0])
for i in range(np.shape(ensPred)[0]):
pred = getMaxIndex(ensPred[i])
    trueLab = getMaxIndex(trueLabels[i])  # use the ground-truth label, not the ensemble prediction
ensPred_indexes[i]=pred
trueLabels_indexes[i]=trueLab
# Clean NaNs and extraneous values from arrays
nans = np.argwhere(np.isnan(ensPred_indexes))
cleanData = np.delete(cleanData,nans,0)
ensPred_indexes = np.delete(ensPred_indexes,nans,0)
trueLabels_indexes = np.delete(trueLabels_indexes,nans,0)
# Train ML Model
model = trainNewModel(cleanData[:8000,:,:,0], ensPred_indexes[:8000],active=activation)
if(verbose>4): print("finished training model")
# create dataframe to save evaluation results
cols=["ae_type","label","accuracy","loss"]
results = pd.DataFrame(columns=cols)
#### Evaluate benign samples
evalOut = model.evaluate(cleanData[8000:], trueLabels_indexes[8000:], verbose=verModel)
if(verbose>4): print("{} finished evaluating -- accuracy: {}".format("benigns",evalOut[1]))
# save to DataFrame
newRow = {cols[0]:"benign",cols[1]:"benign",cols[2]:evalOut[1],cols[3]:evalOut[0]}
results = results.append(newRow, ignore_index=True)
### Evaluate AE inputs that correspond to weak defenses
# load the filenames from configs
ae_dir = os.path.abspath(data_configs.get("ae_dir"))
ae_files = data_configs.get("ae_files_wds")
# create numpy array to add data from different AE files into for getting
# overall evaluation
allData = np.zeros((0,28,28,1))
allLabels = np.zeros((0))
# iterate through each file, evaluate, and save to dataframe
for file in ae_files:
# set filepaths
filePath = os.path.join(ae_dir,file)
aeLabel = file.replace(".npy","")
# load data
data = np.load(filePath)
#remove nan data from above to line up
data=np.delete(data,nans,0)
#get test data subset
data=data[8000:]
trueLabs = trueLabels_indexes[8000:]
# evaluate
if(getEachEval):
evalOut = model.evaluate(data, trueLabs , verbose=verModel)
# save results
if(verbose>9): print("{} finished evaluating -- accuracy: {}".format(aeLabel,evalOut[1]))
newRow = {cols[0]:"aes_wds",cols[1]:aeLabel,cols[2]:evalOut[1],cols[3]:evalOut[0]}
results = results.append(newRow, ignore_index=True)
if(getOverallEval):
allData = np.concatenate((allData,data),axis=0)
allLabels = np.concatenate((allLabels,trueLabs),axis=0)
if(verbose>4): print("finished evaluating ensemble defense AEs")
# if getOverallEval run "allData" through evaluation
if(getOverallEval):
evalOut = model.evaluate(allData, allLabels , verbose=verModel)
newRow = {cols[0]:"aes_wds_overall",cols[1]:"aes_wds_overall",cols[2]:evalOut[1],cols[3]:evalOut[0]}
results = results.append(newRow, ignore_index=True)
if(verbose>4): print("finished evaluating overall accuracy ensemble defense AEs")
if(verbose>4): print("overall accuracy = {}".format(evalOut[1]))
### Evaluate AE inputs generated in task 1
# load the filenames from configs
ae_files = data_configs.get("ae_files_task1")
# create numpy array to add data from different AE files into for getting
# overall evaluation
allData = np.zeros((0,28,28,1))
allLabels = np.zeros((0))
# iterate through each file, evaluate, and save to dataframe
for file in ae_files:
# set filepaths
filePath = os.path.join(ae_dir,file)
aeLabel = file.replace(".npy","")
# load data
data = np.load(filePath)
#remove nan data from above to line up
data=np.delete(data,nans,0)
# get test data subset
data = data[8000:]
trueLabs = trueLabels_indexes[8000:]
# evaluate
if(getEachEval):
evalOut = model.evaluate(data, trueLabs, verbose=verModel)
# save results
if(verbose>9): print("{} finished evaluating -- accuracy: {}".format(aeLabel,evalOut[1]))
newRow = {cols[0]:"aes_task1",cols[1]:aeLabel,cols[2]:evalOut[1],cols[3]:evalOut[0]}
results = results.append(newRow, ignore_index=True)
if(getOverallEval):
allData = np.concatenate((allData,data),axis=0)
allLabels = np.concatenate((allLabels,trueLabs),axis=0)
if(verbose>4): print("finished evaluating task1 AEs")
# if getOverallEval run "allData" through evaluation
if(getOverallEval):
evalOut = model.evaluate(allData, allLabels , verbose=verModel)
newRow = {cols[0]:"aes_task1_overall",cols[1]:"aes_task1_overall",cols[2]:evalOut[1],cols[3]:evalOut[0]}
results = results.append(newRow, ignore_index=True)
if(verbose>4): print("finished evaluating overall accuracy task1 AEs")
if(verbose>4): print("overall accuracy = {}".format(evalOut[1]))
# save data
results.to_csv("results/model_eval_results_{}.csv".format(activation))
if(verbose>0): print("finished running, saved evaluation output to model_eval_results_{}.csv".format(activation))
|
py | b403da5585f497e2d1671449bd918ab830561a59 | import math
import torch
from torch import nn, Tensor
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.nn.modules.utils import _pair
from torch.jit.annotations import Optional, Tuple
from torchvision.extension import _assert_has_ops
def deform_conv2d(
input: Tensor,
offset: Tensor,
weight: Tensor,
bias: Optional[Tensor] = None,
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
dilation: Tuple[int, int] = (1, 1),
) -> Tensor:
"""
Performs Deformable Convolution, described in Deformable Convolutional Networks
Arguments:
input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor
offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width,
out_height, out_width]): offsets to be applied for each position in the
convolution kernel.
weight (Tensor[out_channels, in_channels // groups, kernel_height, kernel_width]):
convolution weights, split into groups of size (in_channels // groups)
bias (Tensor[out_channels]): optional bias of shape (out_channels,). Default: None
stride (int or Tuple[int, int]): distance between convolution centers. Default: 1
padding (int or Tuple[int, int]): height/width of padding of zeroes around
each image. Default: 0
dilation (int or Tuple[int, int]): the spacing between kernel elements. Default: 1
Returns:
output (Tensor[batch_sz, out_channels, out_h, out_w]): result of convolution
Examples::
>>> input = torch.rand(4, 3, 10, 10)
>>> kh, kw = 3, 3
>>> weight = torch.rand(5, 3, kh, kw)
>>> # offset should have the same spatial size as the output
>>> # of the convolution. In this case, for an input of 10, stride of 1
>>> # and kernel size of 3, without padding, the output size is 8
>>> offset = torch.rand(4, 2 * kh * kw, 8, 8)
>>> out = deform_conv2d(input, offset, weight)
>>> print(out.shape)
>>> # returns
>>> torch.Size([4, 5, 8, 8])
"""
_assert_has_ops()
out_channels = weight.shape[0]
if bias is None:
bias = torch.zeros(out_channels, device=input.device, dtype=input.dtype)
stride_h, stride_w = _pair(stride)
pad_h, pad_w = _pair(padding)
dil_h, dil_w = _pair(dilation)
weights_h, weights_w = weight.shape[-2:]
_, n_in_channels, in_h, in_w = input.shape
n_offset_grps = offset.shape[1] // (2 * weights_h * weights_w)
n_weight_grps = n_in_channels // weight.shape[1]
if n_offset_grps == 0:
raise RuntimeError(
"the shape of the offset tensor at dimension 1 is not valid. It should "
"be a multiple of 2 * weight.size[2] * weight.size[3].\n"
"Got offset.shape[1]={}, while 2 * weight.size[2] * weight.size[3]={}".format(
offset.shape[1], 2 * weights_h * weights_w))
return torch.ops.torchvision.deform_conv2d(
input,
weight,
offset,
bias,
stride_h, stride_w,
pad_h, pad_w,
dil_h, dil_w,
n_weight_grps,
n_offset_grps)
class DeformConv2d(nn.Module):
"""
See deform_conv2d
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = True,
):
super(DeformConv2d, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.weight = Parameter(torch.empty(out_channels, in_channels // groups,
self.kernel_size[0], self.kernel_size[1]))
if bias:
self.bias = Parameter(torch.empty(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor, offset: Tensor) -> Tensor:
"""
Arguments:
input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor
offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width,
out_height, out_width]): offsets to be applied for each position in the
convolution kernel.
"""
return deform_conv2d(input, offset, self.weight, self.bias, stride=self.stride,
padding=self.padding, dilation=self.dilation)
def __repr__(self) -> str:
s = self.__class__.__name__ + '('
s += '{in_channels}'
s += ', {out_channels}'
s += ', kernel_size={kernel_size}'
s += ', stride={stride}'
s += ', padding={padding}' if self.padding != (0, 0) else ''
s += ', dilation={dilation}' if self.dilation != (1, 1) else ''
s += ', groups={groups}' if self.groups != 1 else ''
s += ', bias=False' if self.bias is None else ''
s += ')'
return s.format(**self.__dict__)
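# Minimal usage sketch (illustrative addition, not part of torchvision): the
# offset tensor is typically predicted by a plain convolution whose output has
# 2 * offset_groups * kernel_height * kernel_width channels and the same
# spatial size as the deformable convolution output.
if __name__ == "__main__":
    x = torch.rand(4, 3, 10, 10)
    offset_predictor = nn.Conv2d(3, 2 * 3 * 3, kernel_size=3, padding=1)
    deform = DeformConv2d(3, 5, kernel_size=3, padding=1)
    out = deform(x, offset_predictor(x))
    print(out.shape)  # expected: torch.Size([4, 5, 10, 10])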
|
py | b403db30258fb7de287ac90ed445a06dbbf73572 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 17:04:47 2018
-------------------------------------------------------------------------------
=============================== VarNet Library ================================
-------------------------------------------------------------------------------
Authors: Reza Khodayi-mehr and Michael M Zavlanos
[email protected]
http://people.duke.edu/~rk157/
Department of Mechanical Engineering and Materials Science,
Duke University, Durham, NC 27708, USA.
Copyright (c) 2019 Reza Khodayi-mehr - licensed under the MIT License
For a full copyright statement see the accompanying LICENSE.md file.
For theoretical derivations as well as numerical experiment results, see:
Reza Khodayi-mehr and Michael M Zavlanos. VarNet: Variational neural networks
for the solution of partial differential equations, 2019.
https://arxiv.org/pdf/1912.07443.pdf
To examine the functionalities of the VarNet library, see the accompanying
Operator files.
The code is fully functional with the following module versions:
- tensorflow: 1.10.0
- numpy: 1.16.4
- scipy: 1.2.1
- matplotlib: 3.0.3
-------------------------------------------------------------------------------
This file provides the classes for 2D contour plotting.
"""
#%% Modules:
import numpy as np
shape = np.shape
reshape = np.reshape
size = np.size
import matplotlib.pyplot as plt
from UtilityFunc import UF
uf = UF()
#%% Contour plot class:
class ContourPlot():
"""Class to plot the contours of a given function."""
def __init__(self,
domain,
tInterval=None,
discNum=51):
"""
Initializer for the contour plot.
Inputs:
            domain: an instance of Domain class containing domain information.
tInterval [1x2]: time interval (default: time-independent)
discNum: number of spatial discretization points
Attributes:
status: status of the problem whose plots are requested:
'1D-time': 1D time-dependent problem
'2D': 2D time-independent problem
'2D-time': 2D time-dependent problem
isOutside: True for points that do not lie inside domain
            x_coord: 1D discretization of the x-coordinate
            y_coord: 1D discretization of the y-coordinate
            X_coord: x-coordinate of the meshgrid stacked in a column
            Y_coord: y-coordinate of the meshgrid stacked in a column
(y-coordinate may refer to time or 2nd coordinate in 2D problems)
xx: x-coordinate in meshgrid format
yy: y-coordinate in meshgrid format
"""
dim = domain.dim
lim = domain.lim
hx = (lim[1,0] - lim[0,0])/(discNum-1) # element size
x_coord = np.linspace(lim[0,0], lim[1,0], discNum) # x-discretization
if dim==1 and uf.isnone(tInterval):
raise ValueError('contour plot unavailable for 1D, time-independent problems!')
elif dim==1:
status = '1D-time'
hy = (tInterval[1] - tInterval[0])/(discNum-1) # element size
y_coord = np.linspace(tInterval[0], tInterval[1], discNum) # t-discretization
if dim==2:
hy = (lim[1,1] - lim[0,1])/(discNum-1) # element size
y_coord = np.linspace(lim[0,1], lim[1,1], discNum) # y-discretization
if uf.isnone(tInterval):
status = '2D'
else:
status = '2D-time'
# Mesh grid:
xx, yy = np.meshgrid(x_coord, y_coord, sparse=False)
# Function input:
X_coord = np.tile(x_coord, discNum) # copy for y
X_coord = reshape(X_coord, [len(X_coord), 1])
Y_coord = np.repeat(y_coord, discNum) # copy for x
Y_coord = reshape(Y_coord, [len(Y_coord), 1])
# Determine the points that lie outside the domain:
if status=='1D-time':
isOutside = np.zeros(discNum**2, dtype=bool)
else:
Input = np.concatenate([X_coord, Y_coord], axis=1)
isOutside = np.logical_not(domain.isInside(Input))
# Store data:
self.status = status
self.discNum = discNum
self.tInterval = tInterval
self.he = np.array([hx, hy])
self.isOutside = isOutside
self.x_coord = reshape(x_coord, [discNum,1])
self.y_coord = reshape(y_coord, [discNum,1])
self.X_coord = X_coord
self.Y_coord = Y_coord
self.xx = xx
self.yy = yy
self.domain = domain
def conPlot(self, func, t=None, figNum=None, title=None, fill_val=0.):
"""
Function to plot the contour field.
Inputs:
func: callable function of (x,t)
t: time instance for 2D time-dependent problems
title [string]: contour plot title
figNum: figure number to draw on
fill_val: value to be used for obstacles
        Note that the function 'func' must handle the obstacles by assigning
neutral values to the grid over the obstacles.
"""
if not callable(func):
raise ValueError('field function must be callable!')
if self.status=='2D-time' and uf.isnone(t):
raise ValueError('time must be provided for 2D time-dependent problems!')
status = self.status
discNum = self.discNum
isOutside = self.isOutside
X_coord = self.X_coord
Y_coord = self.Y_coord
domain = self.domain
# Construct the field:
if status=='1D-time':
field = func(X_coord, Y_coord)
elif status=='2D':
Input = np.concatenate([X_coord, Y_coord], axis=1)
field = func(Input)
elif status=='2D-time':
Input = np.concatenate([X_coord, Y_coord], axis=1)
field = func(Input,t)
# Process the field:
if not shape(field)[0]==discNum**2:
raise ValueError('output of the function should be a column vector with size {}!'.format(discNum**2))
elif size(shape(field))==1:
field = reshape(field, [discNum**2,1])
field[isOutside,:] = fill_val
field = np.reshape(field, [discNum, discNum])
# Create the figure and plot the domain frame:
if uf.isnone(figNum):
figNum=0
plt.figure(figNum)
if domain.dim>1:
domain.domPlot(addDescription=False, figNum=figNum, frameColor='w')
# Plot the contour field:
cP = plt.contourf(self.xx, self.yy, field)
plt.colorbar(cP)
if status=='1D-time':
plt.xlabel('$x$')
plt.ylabel('time')
else:
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
if not uf.isnone(title):
plt.title(title)
plt.axis('scaled')
return field
def animPlot(self, func, t=[], figNum=None, title=None, fill_val=0.):
"""
Function to plot the animation of 2D time-dependent field.
Inputs:
func: callable function of (x,t)
t: time instance vector for 2D time-dependent problems
"""
# Error handling:
if not callable(func):
raise ValueError('field function must be callable!')
if self.status=='1D-time' or self.status=='2D':
raise ValueError('animation contour plot is only available for 2D time-dependent problems!')
if uf.isnone(figNum):
figNum=0
# Data:
discNum = self.discNum
X_coord = self.X_coord
Y_coord = self.Y_coord
isOutside = self.isOutside
domain = self.domain
Input = np.concatenate([X_coord, Y_coord], axis=1)
# If time sequence is not provided:
if np.size(t)==0:
tInterval = self.tInterval
t = np.linspace(tInterval[0], tInterval[1], num=5)
# Loop over time:
for ti in t:
plt.figure(figNum)
domain.domPlot(addDescription=False, figNum=figNum, frameColor='w')
field = func(Input, ti)
# Process the field:
if not shape(field)[0]==discNum**2:
raise ValueError('output of the function should be a column vector with size {}!'.format(discNum**2))
elif size(shape(field))==1:
field = reshape(field, [discNum**2,1])
field[isOutside,:] = fill_val
field = np.reshape(field, [discNum, discNum])
# Contour plot:
cP = plt.contourf(self.xx, self.yy, field)
plt.colorbar(cP)
titleT = 't = {0:.2f}s'.format(ti)
if not uf.isnone(title):
title2 = title + '-' + titleT
else:
title2 = titleT
plt.title(title2)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.axis('scaled')
plt.show()
plt.pause(1) # pause 1sec before plotting the next contour
def snap1Dt(self, func, t, lineOpt=None, figNum=None, title=None):
"""
Function to plot snapshots for 1D time-dependent function.
Inputs:
func: callable function of (x,t)
t: vector of time instances corresponding to the snapshot
lineOpt: line options to allow comparison between different functions
figNum: figure number to draw on
"""
# Error handling:
if not callable(func):
raise ValueError('field function must be callable!')
if not self.status=='1D-time':
raise ValueError('Function is specific to 1D time-dependent problems!')
x_coord = self.x_coord
field = func(x_coord, t)
if uf.isnone(figNum):
plt.figure()
else:
plt.figure(figNum)
if uf.isnone(lineOpt):
plt.plot(x_coord, field)
else:
plt.plot(x_coord, field, lineOpt)
plt.xlabel('$x$')
if not uf.isnone(title):
plt.title(title)
plt.grid(True)
|
py | b403db98cc2b55f2258d99357a4511d1d095b03c | from django.shortcuts import render
# Create your views here.
from django.views.generic import ListView, DetailView
from service.models import Service
# def service(request):
# return render(request, 'service/service_detail.html')
# Service detail view
class ServiceDetailView(DetailView):
model = Service
def get_context_data(self, **kwargs):
# pk = int(self.kwargs[self.pk_url_kwarg])
kwargs['service_list'] = Service.objects.all()
return super().get_context_data(**kwargs)
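# Illustrative URL wiring sketch (urls.py is not part of this file; the route
# and name below are assumptions). DetailView resolves the object via the
# default `pk` URL kwarg:
#
#   path('service/<int:pk>/', ServiceDetailView.as_view(), name='service_detail')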
|
py | b403df3169455e33f17651853c0dce51932670b3 | import json
from abc import abstractmethod, ABCMeta
from typing import Generic
import docker
from thriftybuilder._logging import create_logger
from thriftybuilder.build_configurations import BuildConfigurationType, DockerBuildConfiguration
from thriftybuilder.checksums import ChecksumCalculator, DockerChecksumCalculator
from thriftybuilder.common import ThriftyBuilderBaseError
from thriftybuilder.configuration import DockerRegistry
from thriftybuilder.storage import ChecksumStorage
logger = create_logger(__name__)
class UploadError(ThriftyBuilderBaseError):
"""
Error raised during build artifact upload.
"""
class ImageNotFoundError(UploadError):
"""
Error raised if image to be uploaded is not found.
"""
def __init__(self, name: str, tag: str):
self.name = name
self.tag = tag
super().__init__(f"Error uploading image: name={self.name}, tag={self.tag}")
class BuildArtifactUploader(Generic[BuildConfigurationType], metaclass=ABCMeta):
"""
Uploader of build artifacts resulting from a build to a remote repository.
"""
@abstractmethod
def _upload(self, build_configuration: BuildConfigurationType):
"""
Uploads the artifacts generated when the given configuration is built.
:param build_configuration: the configuration that has been built
"""
def __init__(self, checksum_storage: ChecksumStorage,
checksum_calculator: ChecksumCalculator[BuildConfigurationType]):
"""
Constructor.
:param checksum_storage: store of build artifact checksums
:param checksum_calculator: artifact checksum calculator
"""
self.checksum_storage = checksum_storage
self.checksum_calculator = checksum_calculator
def upload(self, build_configuration: BuildConfigurationType):
"""
Uploads the artifacts generated when the given configuration is built.
:param build_configuration: the configuration that has been built
"""
self._upload(build_configuration)
checksum = self.checksum_calculator.calculate_checksum(build_configuration)
self.checksum_storage.set_checksum(build_configuration.identifier, checksum)
class DockerUploader(BuildArtifactUploader[DockerBuildConfiguration]):
"""
Uploader of Docker images resulting from a build to a remote repository.
"""
DEFAULT_DOCKER_REGISTRY = DockerRegistry("docker.io")
_TEXT_ENCODING = "utf-8"
def __init__(self, checksum_storage: ChecksumStorage, docker_registry: DockerRegistry=DEFAULT_DOCKER_REGISTRY,
checksum_calculator: ChecksumCalculator[DockerBuildConfiguration]=None):
checksum_calculator = checksum_calculator if checksum_calculator is not None else DockerChecksumCalculator()
super().__init__(checksum_storage, checksum_calculator)
self.docker_registry = docker_registry
self._docker_client = docker.from_env()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
self._docker_client.close()
def _upload(self, build_configuration: DockerBuildConfiguration):
repository_location = self.docker_registry.get_repository_location(build_configuration.name)
auth_config = None
if self.docker_registry.username is not None and self.docker_registry.password is not None:
auth_config = {"username": self.docker_registry.username, "password": self.docker_registry.password}
for tag in build_configuration.tags:
# Docker is a bit odd in that it requires the image to be tagged to indicate where it is to be uploaded
logger.info(f"Tagging image {build_configuration.identifier} as {repository_location} with tag: {tag}")
self._docker_client.api.tag(build_configuration.identifier, repository=repository_location, tag=tag)
logger.info(f"Uploading image to {repository_location} with tag: {tag}")
upload_stream = self._docker_client.images.push(repository_location, tag, stream=True,
auth_config=auth_config)
for line in upload_stream:
line = line.decode(DockerUploader._TEXT_ENCODING)
for sub_line in line.split("\r\n"):
if len(sub_line) > 0:
parsed_sub_line = json.loads(sub_line.strip())
logger.debug(parsed_sub_line)
if "error" in parsed_sub_line:
if "image does not exist" in parsed_sub_line["error"]:
raise ImageNotFoundError(build_configuration.name, tag)
else:
raise UploadError(parsed_sub_line["error"])
|
py | b403e0818890f4bf22d9755880a7653170f9725e | # Hangman Game (Jogo da Forca)
# Object-Oriented Programming
# Developer: Natália Freitas Araújo
# Challenge - Lab 03 - Python Fundamentos para Análise de Dados - DSA
from game import Hangman
import random as rd
# Function to read a random word from the word bank
def rand_word():
with open("./data/words.txt", "rt") as f:
bank = f.readlines()
return bank[rd.randint(0, len(bank)-1)].strip()
# Main function - program execution
def main():
    # Game object
game = Hangman(rand_word())
    # While the game is not over, print the status, ask for a letter and read the character
check = False
while check is False:
game.print_game_status()
letter = input('\n\nInforme uma letra: ').lower()
game.guess(letter)
game.hide_word()
check = game.hangman_over()
    # Check the game status
game.print_game_status()
    # Depending on the status, print a message for the user
if game.hangman_won():
print('\nParabéns! Você venceu!!')
print('A palavra era ' + game.word)
else:
print('\nGame over! Você perdeu.')
print('A palavra era ' + game.word)
print('\nFoi bom jogar com você! Agora vá estudar!\n')
# Run the program
if __name__ == "__main__":
main()
|
py | b403e08fe1f16e48ae748a52276c71d3b333680a | import gzip
from urllib.parse import urlparse
import requests
from parsel import Selector
"""
This snippet example show how to download and parse sitemaps using `parsel` and `requests` packages.
"""
def download_sitemap(url):
response = requests.get(url)
text = response.text
# if url points to a gzipped file - decompress it
if urlparse(response.url).path.endswith('.gz'):
text = gzip.decompress(response.content).decode(response.encoding or 'utf8')
selector = Selector(text)
# urls are under <loc> tag
# if they are direct articles they have <url> tag parent
urls = selector.xpath('//url/loc/text()').extract()
if not urls:
urls = selector.xpath('//loc/text()').extract()
return urls
if __name__ == '__main__':
# for example try nytimes sitemaps!
urls = download_sitemap('https://www.nytimes.com/sitemaps/www.nytimes.com/sitemap.xml.gz')
print('\n'.join(urls[:4]))
|
py | b403e0a0aabd7cdab81e0b1432e186a7cd76de26 | from pathlib import Path
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
from rdflib import Graph, Literal, RDFS, RDF
from tccm_model.tccm_model import *
from tccm_api.namespaces import NAMESPACES, NCIT
from utils import curie_to_uri
ROOT = Path(__file__).parent.parent
def get_ncit():
resp = urlopen("https://evs.nci.nih.gov/ftp1/NCI_Thesaurus/Thesaurus.FLAT.zip")
zipfile = ZipFile(BytesIO(resp.read()))
graph = Graph()
graph.namespace_manager.bind('skos', SKOS)
graph.namespace_manager.bind('sh', SH)
graph.namespace_manager.bind('dc', DC)
graph.namespace_manager.bind('ncit', NCIT)
cs_uri = URIRef("http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl")
graph.add((cs_uri, RDF.type, SKOS.ConceptScheme))
graph.add((cs_uri, DC.description, Literal(
"A vocabulary for clinical care, translational and basic research, and public information and administrative activities.")))
graph.add((cs_uri, RDFS.seeAlso, Literal("https://ncithesaurus.nci.nih.gov/ncitbrowser/")))
graph.add((cs_uri, SH.namespace, URIRef("http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#")))
graph.add((cs_uri, SH.prefix, Literal("NCIT")))
for line in zipfile.open("Thesaurus.txt"):
tokens = line.decode("utf-8").split("\t")
uri = URIRef(tokens[1][1:-1])
graph.add((uri, RDF.type, SKOS.Concept))
graph.add((uri, SKOS.notation, Literal(tokens[0])))
graph.add((uri, SKOS.definition, Literal(tokens[4])))
graph.add((uri, SKOS.prefLabel, Literal(tokens[3].split("|")[0])))
if tokens[2]:
for code in tokens[2].split("|"):
code = code.strip()
sc_uri = URIRef(curie_to_uri(f"NCIT:{code}", NAMESPACES))
graph.add((uri, SKOS.broader, sc_uri))
see_also = f"https://ncit.nci.nih.gov/ncitbrowser/pages/concept_details.jsf?dictionary=NCI%20Thesaurus&code={tokens[0]}"
graph.add((uri, RDFS.seeAlso, Literal(see_also)))
graph.add((uri, SKOS.inScheme, cs_uri))
with open('ncit-termci.ttl', 'w') as file:
file.write(graph.serialize(format='turtle').decode('utf-8'))
if __name__ == '__main__':
get_ncit()
|
py | b403e247cc63d36d4709e1a1874bdcdd656d42c8 | import igraph
import json
import numpy as np
numNodes = 6
numComputers = 4
numEvents = 20
#### Topology
# create computer list
compList = []
for i in range(numComputers):
compList.append("comp" + str(i))
# create basic graph with 5 nodes
g = igraph.Graph(numNodes)
g.add_edges([(0, 4), (1, 4), (2, 4), (3, 4), (5, 4)])
# create node attributes and labels
g.vs['DataRate'] = '1000Mbps'
g.vs['MTU'] = 1500
g.vs['InterframeGap'] = '96ns'
for i in range(g.vcount()):
g.vs[i]['label'] = 'ID: {0}\n{1}'.format(i, g.vs[i]['DataRate'])
g.vs['size'] = 80
g.vs['color'] = 'green'
g.vs['Computer'] = ['{0}'.format(d) for d in compList]
g.vs['cost'] = 2
# create edge attributes and labels
g.es['Delay'] = ['{0}ms'.format(d) for d in [2, 10, 30, 200, 56]]
g.es['label'] = g.es['Delay']
# output graph as *.graphml
g.save('use_case1.graphml')
# plot graph as pdf
igraph.plot(g, 'use_case1_graphml.pdf', bbox=(0, 0, 1024, 768), margin=100)
#### Events
events = []
for i in range(numEvents):
e = {}
e['name'] = 'event{0}'.format(i)
e['start'] = str(np.random.randint(10000))
e['protocol'] = np.random.choice(["tcp", "udp", "udptest"])
e['datatransfer'] = '{0}'.format(np.random.randint(3 * 1024))
e['src'] = str(np.random.choice(compList))
e['dst'] = str(np.random.choice(compList))
while e['src'] == e['dst']:
e['dst'] = str(np.random.choice(compList))
events.append(e)
events_json = {'events' : events}
with open('use_case1_events.json', 'w') as ofile:
json.dump(events_json, ofile, sort_keys=True, indent=2)
|
py | b403e255b1b552f2ec67879ec90d6a37c615019f |
# We represent an n-bit string --- that is, an element of {0, 1}^n --- in Python as a tuple of 0s and 1s.
def string(n, m):
'''Converts a non-negative Python integer m to its corresponding bit string. As necessary, pads with leading 0s to bring the number of bits up to n.'''
s = ()
while m >= 1:
s = (m % 2,) + s
m = m // 2
s = (n - len(s)) * (0,) + s
return s
def integer(s):
'''Converts a bit string to its corresponding non-negative Python integer.'''
m = 0
for k in range(len(s)):
m = 2 * m + s[k]
return m
def next(s):
'''Given an n-bit string, returns the next n-bit string. The order is lexicographic, except that there is a string after 1...1, namely 0...0.'''
k = len(s) - 1
while k >= 0 and s[k] == 1:
k -= 1
if k < 0:
return len(s) * (0,)
else:
return s[:k] + (1,) + (len(s) - k - 1) * (0,)
def addition(s, t):
'''Returns the mod-2 sum of two n-bit strings.'''
return tuple([(s[i] + t[i]) % 2 for i in range(len(s))])
def dot(s, t):
'''Returns the mod-2 dot product of two n-bit strings.'''
return sum([s[i] * t[i] for i in range(len(s))]) % 2
def reduction(a):
'''A is a list of m >= 1 bit strings of equal dimension n >= 1. In other words, A is a non-empty m x n binary matrix. Returns the reduced row-echelon form of A. A itself is left unaltered.'''
b = a.copy()
m = len(b)
n = len(b[0])
rank = 0
for j in range(n):
# Try to swap two rows to make b[rank, j] a leading 1.
i = rank
while i < m and b[i][j] == 0:
i += 1
if i != m:
# Perform the swap.
temp = b[i]
b[i] = b[rank]
b[rank] = temp
# Reduce all leading 1s below the one we just made.
for i in range(rank + 1, m):
if b[i][j] == 1:
b[i] = addition(b[i], b[rank])
rank += 1
for j in range(n - 1, -1, -1):
# Try to find the leading 1 in column j.
i = m - 1
while i >= 0 and b[i][j] != 1:
i -= 1
if i >= 0:
# Use the leading 1 at b[i, j] to reduce 1s above it.
for k in range(i):
if b[k][j] == 1:
b[k] = addition(b[k], b[i])
return b
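# Small self-check sketch (added for illustration): exercises the helpers on
# 3-bit strings and row-reduces a tiny binary matrix over GF(2).
if __name__ == "__main__":
    s = string(3, 5)
    print(s, integer(s))                   # (1, 0, 1) 5
    print(next((1, 1, 1)))                 # wraps around to (0, 0, 0)
    print(addition((1, 0, 1), (0, 1, 1)))  # (1, 1, 0)
    print(dot((1, 0, 1), (1, 1, 1)))       # 0
    print(reduction([(1, 1, 0), (0, 1, 1), (1, 0, 1)]))  # last row reduces to all zeros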
|
py | b403e37face3573d16179936e0ccc0e9ce5e3220 | import sys
import argparse
from workflow import Workflow, ICON_WEB, ICON_WARNING, ICON_NOTE, web, PasswordNotFound, Workflow3
def main(wf):
def googleFilter(filename):
return 'google' in filename
def exchangeFilter(filename):
return 'exchange' in filename
import os
from workflow.notify import notify
key = os.environ['settings_value']
value = os.environ['value_to_store']
wf.logger.debug(" Key: %s", key)
wf.logger.debug(" Value: %s", value)
if key == 'password':
wf.save_password('today.workflow.password',value)
notify('Password updated')
else:
wf.settings[key] = {'value':value}
# wf.store_data(key, value)
text = os.environ['text_to_display']
if key == 'use_google':
wf.clear_cache(googleFilter)
if value == '0':
notify("Google Calendar Support", u'\u274C Disabled')
else:
notify("Google Calendar Support", u'\u2705 Enabled')
elif key == 'use_exchange':
wf.clear_cache(exchangeFilter)
if '0' == value:
notify("Exchange Server Support", u'\u274c Disabled')
else:
notify("Exchange Server Support", u'\u2705 Enabled')
elif key == 'use_ntlm':
def exchangeFilter(filename):
return 'exchange' in filename
# Clear outlook events because we are changing the auth type
wf.clear_cache(exchangeFilter)
if '0' == value:
notify("NTLM Authentication", u'\u274c Disabled')
else:
notify("NTLM Authentication", u'\u2705 Enabled')
elif key == 'use_ssl':
if '0' == value:
value = u'\u274c Disabled'
else:
value = u'\u2705 Enabled'
notify(text, value)
else:
notify('Updated ' + text, "To: " + value)
if __name__ == u"__main__":
wf = Workflow3(libraries=['./lib'])
wf.logger.debug(' _______________ ____ ______ ')
wf.logger.debug(' / ___/_ __/ __ \/ __ \/ ____/ ')
wf.logger.debug(' \__ \ / / / / / / /_/ / __/ ')
wf.logger.debug(' ___/ // / / /_/ / _, _/ /___ ')
wf.logger.debug(' /____//_/ \____/_/ |_/_____/ DATA ')
wf.logger.debug(' ')
sys.exit(wf.run(main)) |
py | b403e3b9fd23fac0054c1d2dd0101a614f411d1c | """
Large banner plugin models
"""
from django.db import models
from django.utils.text import Truncator
from django.utils.translation import gettext_lazy as _
from cms.models.pluginmodel import CMSPlugin
from filer.fields.image import FilerImageField
from .defaults import LARGEBANNER_TEMPLATES
# pylint: disable=model-no-explicit-unicode,line-too-long
#
# We choose to ignore the 'model-no-explicit-unicode' since we are running
# python 3, and pylint-django fails at detecting the python 2 compatibility
# layer [1] handled by Django CMS [2]. And we also ignore the 'line-too-long'
# warning caused by reference urls.
#
# [1] https://docs.djangoproject.com/en/1.11/ref/utils/#django.utils.encoding.python_2_unicode_compatible # noqa
# [2] https://github.com/divio/django-cms/blob/3.5.2/cms/models/pluginmodel.py#L171
class LargeBanner(CMSPlugin):
"""
Model to configure a home page banner with background image, logo and title.
"""
title = models.CharField(max_length=255)
background_image = FilerImageField(
related_name="background_image",
verbose_name=_("background image"),
on_delete=models.SET_NULL,
null=True,
blank=True,
)
logo = FilerImageField(
related_name="logo",
verbose_name=_("logo"),
on_delete=models.PROTECT,
null=True,
blank=True,
)
logo_alt_text = models.CharField(max_length=255, blank=True)
template = models.CharField(
_("Template"),
max_length=150,
choices=LARGEBANNER_TEMPLATES,
default=LARGEBANNER_TEMPLATES[0][0],
blank=False,
help_text=_("Choose template to render plugin."),
)
content = models.TextField(_("Content"), blank=True, default="")
def __str__(self):
return Truncator(self.title).words(6, truncate="...")
|
py | b403e3e4884c5afeccf821be84cf37ca264740d3 | from flask import Flask, render_template, request
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
app = Flask(__name__)
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
@app.route("/")
def home():
return render_template("index.html")
@app.route("/get")
def get_bot_response():
userText = request.args.get('msg')
    # encode the user input, add the eos_token and return a PyTorch tensor
    new_user_input_ids = tokenizer.encode(userText + tokenizer.eos_token, return_tensors='pt')
    # each request is stateless, so generate a single reply, limiting the total length to 1000 tokens
    chat_history_ids = model.generate(new_user_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # decode and return only the newly generated tokens (everything after the user's input)
    return str(tokenizer.decode(chat_history_ids[:, new_user_input_ids.shape[-1]:][0], skip_special_tokens=True))
if __name__ == "__main__":
app.run(debug=False, host='0.0.0.0', port=8080)
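# Illustrative usage once the server is running (endpoint and port as defined above):
#   curl "http://localhost:8080/get?msg=Hello"
# returns the model's reply as plain text.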
|
py | b403e44d1805d2113a3c9379f993d6757e9363e5 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import re
import os
import ssl
import sys
import traceback
import hashlib
from six.moves.urllib.request import urlopen, Request
from six.moves.urllib.error import URLError
from six.moves.urllib.parse import urljoin
import multiprocessing.pool
try:
# Python 2 had these in the HTMLParser package.
from HTMLParser import HTMLParser, HTMLParseError
except ImportError:
# In Python 3, things moved to html.parser
from html.parser import HTMLParser
# Also, HTMLParseError is deprecated and never raised.
class HTMLParseError(Exception):
pass
import llnl.util.tty as tty
import spack.config
import spack.cmd
import spack.url
import spack.stage
import spack.error
import spack.util.crypto
from spack.util.compression import ALLOWED_ARCHIVE_TYPES
# Timeout in seconds for web requests
_timeout = 10
class LinkParser(HTMLParser):
"""This parser just takes an HTML page and strips out the hrefs on the
links. Good enough for a really simple spider. """
def __init__(self):
HTMLParser.__init__(self)
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr, val in attrs:
if attr == 'href':
self.links.append(val)
class NonDaemonProcess(multiprocessing.Process):
"""Process tha allows sub-processes, so pools can have sub-pools."""
@property
def daemon(self):
return False
@daemon.setter
def daemon(self, value):
pass
if sys.version_info[0] < 3:
class NonDaemonPool(multiprocessing.pool.Pool):
"""Pool that uses non-daemon processes"""
Process = NonDaemonProcess
else:
class NonDaemonContext(type(multiprocessing.get_context())):
Process = NonDaemonProcess
class NonDaemonPool(multiprocessing.pool.Pool):
"""Pool that uses non-daemon processes"""
def __init__(self, *args, **kwargs):
kwargs['context'] = NonDaemonContext()
super(NonDaemonPool, self).__init__(*args, **kwargs)
def _spider(url, visited, root, depth, max_depth, raise_on_error):
"""Fetches URL and any pages it links to up to max_depth.
depth should initially be zero, and max_depth is the max depth of
links to follow from the root.
Prints out a warning only if the root can't be fetched; it ignores
errors with pages that the root links to.
Returns a tuple of:
- pages: dict of pages visited (URL) mapped to their full text.
- links: set of links encountered while visiting the pages.
"""
pages = {} # dict from page URL -> text content.
links = set() # set of all links seen on visited pages.
# root may end with index.html -- chop that off.
if root.endswith('/index.html'):
root = re.sub('/index.html$', '', root)
try:
context = None
verify_ssl = spack.config.get('config:verify_ssl')
pyver = sys.version_info
if (pyver < (2, 7, 9) or (3,) < pyver < (3, 4, 3)):
if verify_ssl:
tty.warn("Spack will not check SSL certificates. You need to "
"update your Python to enable certificate "
"verification.")
elif verify_ssl:
# We explicitly create default context to avoid error described in
# https://blog.sucuri.net/2016/03/beware-unverified-tls-certificates-php-python.html
context = ssl.create_default_context()
else:
context = ssl._create_unverified_context()
# Make a HEAD request first to check the content type. This lets
# us ignore tarballs and gigantic files.
# It would be nice to do this with the HTTP Accept header to avoid
# one round-trip. However, most servers seem to ignore the header
# if you ask for a tarball with Accept: text/html.
req = Request(url)
req.get_method = lambda: "HEAD"
resp = _urlopen(req, timeout=_timeout, context=context)
if "Content-type" not in resp.headers:
tty.debug("ignoring page " + url)
return pages, links
if not resp.headers["Content-type"].startswith('text/html'):
tty.debug("ignoring page " + url + " with content type " +
resp.headers["Content-type"])
return pages, links
# Do the real GET request when we know it's just HTML.
req.get_method = lambda: "GET"
response = _urlopen(req, timeout=_timeout, context=context)
response_url = response.geturl()
        # Read the page and stick it in the map we'll return
page = response.read().decode('utf-8')
pages[response_url] = page
# Parse out the links in the page
link_parser = LinkParser()
subcalls = []
link_parser.feed(page)
while link_parser.links:
raw_link = link_parser.links.pop()
abs_link = urljoin(response_url, raw_link.strip())
links.add(abs_link)
# Skip stuff that looks like an archive
if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES):
continue
# Skip things outside the root directory
if not abs_link.startswith(root):
continue
# Skip already-visited links
if abs_link in visited:
continue
# If we're not at max depth, follow links.
if depth < max_depth:
subcalls.append((abs_link, visited, root,
depth + 1, max_depth, raise_on_error))
visited.add(abs_link)
if subcalls:
pool = NonDaemonPool(processes=len(subcalls))
try:
results = pool.map(_spider_wrapper, subcalls)
for sub_pages, sub_links in results:
pages.update(sub_pages)
links.update(sub_links)
finally:
pool.terminate()
pool.join()
except URLError as e:
tty.debug(e)
if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError):
tty.warn("Spack was unable to fetch url list due to a certificate "
"verification problem. You can try running spack -k, "
"which will not check SSL certificates. Use this at your "
"own risk.")
if raise_on_error:
raise NoNetworkConnectionError(str(e), url)
except HTMLParseError as e:
# This error indicates that Python's HTML parser sucks.
msg = "Got an error parsing HTML."
# Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.
if sys.version_info[:3] < (2, 7, 3):
msg += " Use Python 2.7.3 or newer for better HTML parsing."
tty.warn(msg, url, "HTMLParseError: " + str(e))
except Exception as e:
# Other types of errors are completely ignored, except in debug mode.
tty.debug("Error in _spider: %s:%s" % (type(e), e),
traceback.format_exc())
return pages, links
def _spider_wrapper(args):
"""Wrapper for using spider with multiprocessing."""
return _spider(*args)
def _urlopen(*args, **kwargs):
"""Wrapper for compatibility with old versions of Python."""
    # We don't pass the 'context' parameter to urlopen because it
    # was only introduced in Python 2.7.9 and 3.4.3.
if 'context' in kwargs and kwargs['context'] is None:
del kwargs['context']
return urlopen(*args, **kwargs)
def spider(root_url, depth=0):
"""Gets web pages from a root URL.
If depth is specified (e.g., depth=2), then this will also follow
up to <depth> levels of links from the root.
This will spawn processes to fetch the children, for much improved
performance over a sequential fetch.
"""
pages, links = _spider(root_url, set(), root_url, 0, depth, False)
return pages, links
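# Illustrative usage (the URL is a placeholder):
#   pages, links = spider("https://example.com/downloads/", depth=1)
#   # 'pages' maps each visited URL to its HTML text; 'links' is every href seen.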
def find_versions_of_archive(archive_urls, list_url=None, list_depth=0):
"""Scrape web pages for new versions of a tarball.
Arguments:
archive_urls:
URL or sequence of URLs for different versions of a
package. Typically these are just the tarballs from the package
file itself. By default, this searches the parent directories
of archives.
Keyword Arguments:
list_url:
            URL for a listing of archives. Spack will scrape these
pages for download links that look like the archive URL.
list_depth:
Max depth to follow links on list_url pages. Default 0.
"""
if not isinstance(archive_urls, (list, tuple)):
archive_urls = [archive_urls]
# Generate a list of list_urls based on archive urls and any
# explicitly listed list_url in the package
list_urls = set()
if list_url:
list_urls.add(list_url)
for aurl in archive_urls:
list_urls.add(spack.url.find_list_url(aurl))
# Add '/' to the end of the URL. Some web servers require this.
additional_list_urls = set()
for lurl in list_urls:
if not lurl.endswith('/'):
additional_list_urls.add(lurl + '/')
list_urls.update(additional_list_urls)
# Grab some web pages to scrape.
pages = {}
links = set()
for lurl in list_urls:
pg, lnk = spider(lurl, depth=list_depth)
pages.update(pg)
links.update(lnk)
# Scrape them for archive URLs
regexes = []
for aurl in archive_urls:
# This creates a regex from the URL with a capture group for
# the version part of the URL. The capture group is converted
# to a generic wildcard, so we can use this to extract things
# on a page that look like archive URLs.
url_regex = spack.url.wildcard_version(aurl)
# We'll be a bit more liberal and just look for the archive
# part, not the full path.
url_regex = os.path.basename(url_regex)
# We need to add a / to the beginning of the regex to prevent
# Spack from picking up similarly named packages like:
# https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz
# https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz
# https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz
# https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz
url_regex = '/' + url_regex
# We need to add a $ anchor to the end of the regex to prevent
# Spack from picking up signature files like:
# .asc
# .md5
# .sha256
# .sig
# However, SourceForge downloads still need to end in '/download'.
url_regex += r'(\/download)?$'
regexes.append(url_regex)
# Build a dict version -> URL from any links that match the wildcards.
versions = {}
for url in links:
if any(re.search(r, url) for r in regexes):
try:
ver = spack.url.parse_version(url)
versions[ver] = url
except spack.url.UndetectableVersionError:
continue
return versions
def get_checksums_for_versions(
url_dict, name, first_stage_function=None, keep_stage=False):
"""Fetches and checksums archives from URLs.
This function is called by both ``spack checksum`` and ``spack
create``. The ``first_stage_function`` argument allows the caller to
inspect the first downloaded archive, e.g., to determine the build
system.
Args:
url_dict (dict): A dictionary of the form: version -> URL
name (str): The name of the package
first_stage_function (callable): function that takes a Stage and a URL;
this is run on the stage of the first URL downloaded
keep_stage (bool): whether to keep staging area when command completes
Returns:
(str): A multi-line string containing versions and corresponding hashes
"""
sorted_versions = sorted(url_dict.keys(), reverse=True)
# Find length of longest string in the list for padding
max_len = max(len(str(v)) for v in sorted_versions)
num_ver = len(sorted_versions)
tty.msg("Found {0} version{1} of {2}:".format(
num_ver, '' if num_ver == 1 else 's', name),
"",
*spack.cmd.elide_list(
["{0:{1}} {2}".format(str(v), max_len, url_dict[v])
for v in sorted_versions]))
print()
archives_to_fetch = tty.get_number(
"How many would you like to checksum?", default=1, abort='q')
if not archives_to_fetch:
tty.die("Aborted.")
versions = sorted_versions[:archives_to_fetch]
urls = [url_dict[v] for v in versions]
tty.msg("Downloading...")
version_hashes = []
i = 0
for url, version in zip(urls, versions):
try:
with spack.stage.Stage(url, keep=keep_stage) as stage:
# Fetch the archive
stage.fetch()
if i == 0 and first_stage_function:
# Only run first_stage_function the first time,
# no need to run it every time
first_stage_function(stage, url)
# Checksum the archive and add it to the list
version_hashes.append((version, spack.util.crypto.checksum(
hashlib.sha256, stage.archive_file)))
i += 1
except spack.stage.FailedDownloadError:
tty.msg("Failed to fetch {0}".format(url))
except Exception as e:
tty.msg("Something failed on {0}, skipping.".format(url),
" ({0})".format(e))
if not version_hashes:
tty.die("Could not fetch any versions for {0}".format(name))
# Find length of longest string in the list for padding
max_len = max(len(str(v)) for v, h in version_hashes)
# Generate the version directives to put in a package.py
version_lines = "\n".join([
" version('{0}', {1}sha256='{2}')".format(
v, ' ' * (max_len - len(str(v))), h) for v, h in version_hashes
])
num_hash = len(version_hashes)
tty.msg("Checksummed {0} version{1} of {2}".format(
num_hash, '' if num_hash == 1 else 's', name))
return version_lines
class SpackWebError(spack.error.SpackError):
"""Superclass for Spack web spidering errors."""
class VersionFetchError(SpackWebError):
"""Raised when we can't determine a URL to fetch a package."""
class NoNetworkConnectionError(SpackWebError):
"""Raised when an operation can't get an internet connection."""
def __init__(self, message, url):
super(NoNetworkConnectionError, self).__init__(
"No network connection: " + str(message),
"URL was: " + str(url))
self.url = url
|
py | b403e4fa9c667918809b117cc2c3de99af403b57 | # -*- coding: utf-8 -*-
#import numpy as np
#from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
#from matplotlib.figure import Figure
import matplotlib.pyplot as plt
labels = ('AD', 'NC')
y_pred = (0.78, 0.22)
fig, ax = plt.subplots(1, 1, figsize=(7, 5))  # create a 1x1 figure, 7 in (width) x 5 in (height)
ax.set_xlim([0.0, 0.99])
ax.xaxis.set_tick_params(labelsize=10)
ax.set_xlabel('Prediction Accuracy', fontsize=14)
h = plt.barh(labels, y_pred, color=['salmon', 'skyblue'])
plt.legend(h, labels)
for p in ax.patches:
    percentage = '{:.2f}%'.format(100 * p.get_width())
    x = p.get_x() + p.get_width() + 0.02
    y = p.get_y() + p.get_height() / 2
    ax.annotate(percentage, (x, y), fontsize=15)
plt.savefig('predict.png', format='png', dpi=300)  # save after annotating so the labels end up in the file
plt.show()
|
py | b403e5a0edac01c226a25b018c368b15258f5db6 | from ..oauth import OAuth2
class Amazon(OAuth2):
    # Login with Amazon OAuth2 endpoints
    auth_uri = 'https://www.amazon.com/ap/oa'
    token_uri = 'https://api.amazon.com/auth/o2/token'
|
py | b403e5f14d4de9a0e5e458146b4b7e4443277514 | # Generated by Django 2.0.13 on 2019-07-11 03:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('application_projects', '0004_project_rename_name_to_title'),
]
operations = [
migrations.AddField(
model_name='project',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='owned_projects', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='project',
name='lead',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='lead_projects', to=settings.AUTH_USER_MODEL),
),
]
|
py | b403e60fe55e72d4c9db303dadf359036ab6e702 | from bmi.formulas import calculate_bmi
from bmi.utils import get_count
import pandas as pd
import bmi.constants as const
import swifter # noqa: F401
def get_bmi(df):
return df.swifter.apply(
lambda row: calculate_bmi(row.WeightKg, row.HeightCm), axis=1
)
def bmi_cat_risk(bmi_value, bmi_table):
bmi_category, health_risk = [], []
for row in bmi_table.itertuples():
try:
if float(row.BmiRangeKg_m2[0]) <= bmi_value \
<= float(row.BmiRangeKg_m2[1]):
bmi_category.append(row.BmiCategory)
health_risk.append(row.HealthRisk)
except ValueError:
if 'below' == row.BmiRangeKg_m2[0]:
if bmi_value <= float(row.BmiRangeKg_m2[1]):
bmi_category.append(row.BmiCategory)
health_risk.append(row.HealthRisk)
if 'above' == row.BmiRangeKg_m2[1]:
if bmi_value >= float(row.BmiRangeKg_m2[0]):
bmi_category.append(row.BmiCategory)
health_risk.append(row.HealthRisk)
return pd.Series({'BmiCategory': "".join(bmi_category),
'HealthRisk': "".join(health_risk)})
def run(patient_data, bmi_category):
bmi_df = pd.DataFrame(patient_data)
bmi_df['Bmi'] = get_bmi(bmi_df)
bmi_table = pd.json_normalize(const.BMI_HEALTH_TABLE)
bmi_df[['BmiCategory', 'HealthRisk']] = bmi_df.swifter.apply(
lambda row: bmi_cat_risk(row.Bmi, bmi_table), axis=1
)
    # Per-patient BMI table
    print(bmi_df)
    # Count of patients in the requested BMI category
total_person = get_count(bmi_df, 'BmiCategory', bmi_category)
return "\nTotal number of {0} people are: {1}\n"\
.format(bmi_category, total_person)
if __name__ == '__main__': # pragma: no cover
print(run(const.PATIENT_DATA, const.BMI_CATEGORY))
|
py | b403e6d52719d83cd8505db3f5921dee7af13d92 | """Process Cascades
MIT License (MIT)
Copyright (c) 2015 Julien BLEGEAN <[email protected]>
"""
import networkx as nx
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import re
import datetime
import redis
import string
import numpy as np
import math
import Image
import community
import itertools
import os
from operator import itemgetter
import sys
from collections import Counter
import random
from correlations import correlations
import clusters
# add to graph
def process(current,rtlist) :
"""Process tweet to add his hashtags and user to the graphs
:param tid : tweet id
:param uid : user id
:param tags : hashtags list
"""
global graphs,found
orig = rtlist[-1]
rtlist.append(current)
if orig not in graphs :
graphs[orig] = nx.Graph()
for node in rtlist :
graphs[orig].add_node(node)
for i in range(len(rtlist)-1):
graphs[orig].add_edge(rtlist[i], rtlist[i+1])
# redis connection
rtc = redis.StrictRedis(host='localhost', port=6379, db=1)
r = redis.StrictRedis(host='localhost', port=6379, db=0)
found = 0
totalt = 0
# graph table
graphs = {}
tweets = {}
# input tweets
filepath = str(sys.argv[1])
# check time window
time = []
i = 0
for ar in sys.argv :
i = i + 1
if ar == "-h" :
time.append(sys.argv[i])
time.append(sys.argv[i+1])
if len(time) > 0 :
dfrom = datetime.datetime.strptime(time[0], '%H:%M')
dfrom = datetime.time(dfrom.hour,dfrom.minute)
dto = datetime.datetime.strptime(time[1], '%H:%M')
dto = datetime.time(dto.hour,dto.minute)
topusers = open("../../../data/users/users_fw_11_08_10msg+").readlines()
topusers = set([u.strip().split(",")[1] for u in topusers])
def username2id(uid) :
    user = r.get(uid)
if user == None :
return None
user = re.findall('"((?:(?!(?:",")).)*)"', user)
user = user[1].lower()
return user
rtclusters = {}
# counter
k = 0
topusers11 = ["michaelskolnik","antoniofrench","pzfeed","pdpj","youranonnews","khaledbeydoun","womenonthemove1"]
# iterate tweets
with open(filepath) as f:
for tweet in f:
if math.fmod(k,100000) == 0 :
print(k)
# get the content
tweet = re.findall('"((?:(?!(?:",")).)*)"', tweet)
# parse date
tdate = datetime.datetime.strptime(tweet[1], '%Y-%m-%d %H:%M:%S')
tdate = datetime.time(tdate.hour,tdate.minute)
# if the time is good
if len(time) == 0 or len(time) > 0 and tdate >= dfrom and tdate <= dto :
uname = username2id(tweet[2])
current = (uname,len(tweets))
tweets[(uname,tweet[3])] = current
# check if it is an RT
rtlist = []
rt = re.findall(r"RT @([a-zA-Z0-9-_]*):? (.*)",tweet[3])
while len(rt) > 0 :
rt = rt[0]
rtname = rt[0]
mess = rt[1]
if (rtname.lower(),mess) not in tweets :
tweets[(rtname.lower(),mess)] = (rtname.lower(),len(tweets))
rtlist.append(tweets[(rtname.lower(),mess)])
rt = re.findall(r"RT @([a-zA-Z0-9-_]*):? (.*)",mess)
if len(rtlist) > 0 :
# add to graph
if tweet[2] in topusers and rtlist[0][0] in topusers11 :
if rtlist[0][0] not in rtclusters :
rtclusters[rtlist[0][0]] = []
rtclusters[rtlist[0][0]].append(uname)
process(current,rtlist)
found += 1
k = k + 1
for c in rtclusters :
rtclusters[c] = set(rtclusters[c])
def distJ(a,b) :
return float(len(a & b)) / len(a | b)
distM = []
for u,i in zip(topusers11,range(len(topusers11))) :
tmp = []
for v,j in zip(topusers11,range(len(topusers11))) :
dd = distJ(rtclusters[u],rtclusters[v])
print(u)
print(v)
print(dd)
tmp.append(dd)
distM.append(tmp)
distM = np.array(distM)
print(distM)
#plt.clf()
#plt.figure(figsize=[7,7])
#plt.pcolor(distM,cmap=plt.cm.Blues)
#plt.colorbar(orientation='vertical')
#plt.savefig("heatmap11.png",dpi = 200)
graphs = sorted(graphs.items(), key=lambda x: len(x[1].nodes()), reverse = True)
if "-draw" in sys.argv :
#for g in graphs[:100] :
#print("%s : %s" %(g[0],len(g[1].nodes())))
tweets = {v: k for k, v in tweets.items()}
#graphs = graphs[:10]
#print([tuple(tweets[g[0]]) for g in graphs])
#G = nx.union_all([u[1] for u in graphs])
print(tweets[graphs[0][0]])
G = graphs[0][1]
print("now create layout..")
pos = nx.graphviz_layout(G,prog="sfdp")
nx.draw(G,pos, linewidths=0, node_size = 5, with_labels = False, alpha = 0.5,font_size = 6, node_color='#862B59', edge_color='#cccccc',arrows=True)
plt.legend(prop={'size':6})
plt.savefig("graph.png",dpi=200)
if "-counter" in sys.argv :
counter = Counter()
for g in graphs[:100] :
uname = g[0][0]
counter[uname] += 1
print(counter)
vals = Counter([v[1] for v in counter.most_common()]).most_common()
vals = sorted(vals,key=itemgetter(0), reverse=True)
x = [v[1] for v in vals]
y = [v[0] for v in vals]
plt.clf()
plt.figure(figsize=[7,7])
plt.plot(x,y,'-o')
plt.xlabel('number of users')
plt.ylabel('number of tweets')
plt.suptitle('User frequencies in the top 100 retweets')
plt.savefig("freqInfluentUsers.png",dpi = 200)
|
py | b403e7288a8ba978d4d81e1607669eafb477e373 | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.18.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1StatusCondition(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'V1Statuses',
'status': 'str',
'reason': 'str',
'message': 'str',
'last_update_time': 'datetime',
'last_transition_time': 'datetime'
}
attribute_map = {
'type': 'type',
'status': 'status',
'reason': 'reason',
'message': 'message',
'last_update_time': 'last_update_time',
'last_transition_time': 'last_transition_time'
}
def __init__(self, type=None, status=None, reason=None, message=None, last_update_time=None, last_transition_time=None, local_vars_configuration=None): # noqa: E501
"""V1StatusCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._status = None
self._reason = None
self._message = None
self._last_update_time = None
self._last_transition_time = None
self.discriminator = None
if type is not None:
self.type = type
if status is not None:
self.status = status
if reason is not None:
self.reason = reason
if message is not None:
self.message = message
if last_update_time is not None:
self.last_update_time = last_update_time
if last_transition_time is not None:
self.last_transition_time = last_transition_time
@property
def type(self):
"""Gets the type of this V1StatusCondition. # noqa: E501
:return: The type of this V1StatusCondition. # noqa: E501
:rtype: V1Statuses
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1StatusCondition.
:param type: The type of this V1StatusCondition. # noqa: E501
:type type: V1Statuses
"""
self._type = type
@property
def status(self):
"""Gets the status of this V1StatusCondition. # noqa: E501
:return: The status of this V1StatusCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1StatusCondition.
:param status: The status of this V1StatusCondition. # noqa: E501
:type status: str
"""
self._status = status
@property
def reason(self):
"""Gets the reason of this V1StatusCondition. # noqa: E501
:return: The reason of this V1StatusCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1StatusCondition.
:param reason: The reason of this V1StatusCondition. # noqa: E501
:type reason: str
"""
self._reason = reason
@property
def message(self):
"""Gets the message of this V1StatusCondition. # noqa: E501
:return: The message of this V1StatusCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1StatusCondition.
:param message: The message of this V1StatusCondition. # noqa: E501
:type message: str
"""
self._message = message
@property
def last_update_time(self):
"""Gets the last_update_time of this V1StatusCondition. # noqa: E501
:return: The last_update_time of this V1StatusCondition. # noqa: E501
:rtype: datetime
"""
return self._last_update_time
@last_update_time.setter
def last_update_time(self, last_update_time):
"""Sets the last_update_time of this V1StatusCondition.
:param last_update_time: The last_update_time of this V1StatusCondition. # noqa: E501
:type last_update_time: datetime
"""
self._last_update_time = last_update_time
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1StatusCondition. # noqa: E501
:return: The last_transition_time of this V1StatusCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1StatusCondition.
:param last_transition_time: The last_transition_time of this V1StatusCondition. # noqa: E501
:type last_transition_time: datetime
"""
self._last_transition_time = last_transition_time
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1StatusCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1StatusCondition):
return True
return self.to_dict() != other.to_dict()
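# Illustrative round-trip (field values are made up; 'type' normally carries a
# V1Statuses value):
#   cond = V1StatusCondition(type="succeeded", status="True", reason="Done")
#   assert cond == V1StatusCondition(**cond.to_dict())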
|
py | b403e8fa0ec43d7606b7af3c8cae0d9278e2b9cd | import argparse
import logging
from pathlib import Path
import tempfile
from .swaggertosdk.SwaggerToSdkNewCLI import (
build_project,
)
from .swaggertosdk.SwaggerToSdkCore import (
CONFIG_FILE,
read_config,
solve_relative_path,
extract_conf_from_readmes,
get_input_paths,
get_repo_tag_meta,
)
_LOGGER = logging.getLogger(__name__)
def generate(
config_path, sdk_folder, project_pattern, readme, restapi_git_folder, autorest_bin=None, force_generation=False
):
sdk_folder = Path(sdk_folder).expanduser()
config = read_config(sdk_folder, config_path)
global_conf = config["meta"]
repotag = get_repo_tag_meta(global_conf)
global_conf["autorest_options"] = solve_relative_path(global_conf.get("autorest_options", {}), sdk_folder)
global_conf["envs"] = solve_relative_path(global_conf.get("envs", {}), sdk_folder)
global_conf["advanced_options"] = solve_relative_path(global_conf.get("advanced_options", {}), sdk_folder)
if restapi_git_folder:
restapi_git_folder = Path(restapi_git_folder).expanduser()
# Look for configuration in Readme
if readme:
swagger_files_in_pr = [readme]
else:
if not restapi_git_folder:
raise ValueError("RestAPI folder must be set if you don't provide a readme.")
swagger_files_in_pr = list(restapi_git_folder.glob("specification/**/readme.md"))
_LOGGER.info(f"Readme files: {swagger_files_in_pr}")
extract_conf_from_readmes(
swagger_files_in_pr, restapi_git_folder, repotag, config, force_generation=force_generation
)
with tempfile.TemporaryDirectory() as temp_dir:
for project, local_conf in config.get("projects", {}).items():
if readme:
if str(readme) not in project:
_LOGGER.info("Skip project %s (readme was %s)", project, readme)
continue
else:
if project_pattern and not any(p in project for p in project_pattern):
_LOGGER.info("Skip project %s", project)
continue
local_conf["autorest_options"] = solve_relative_path(local_conf.get("autorest_options", {}), sdk_folder)
if readme and readme.startswith("http"):
                # Simplify here: do not support anything other than Readme.md
absolute_markdown_path = readme
_LOGGER.info(f"HTTP Markdown input: {absolute_markdown_path}")
else:
markdown_relative_path, optional_relative_paths = get_input_paths(global_conf, local_conf)
_LOGGER.info(f"Markdown input: {markdown_relative_path}")
_LOGGER.info(f"Optional inputs: {optional_relative_paths}")
absolute_markdown_path = None
if markdown_relative_path:
absolute_markdown_path = Path(restapi_git_folder or "", markdown_relative_path).resolve()
if optional_relative_paths:
local_conf.setdefault("autorest_options", {})["input-file"] = [
Path(restapi_git_folder or "", input_path).resolve() for input_path in optional_relative_paths
]
build_project(temp_dir, project, absolute_markdown_path, sdk_folder, global_conf, local_conf, autorest_bin)
return config
def generate_main():
"""Main method"""
parser = argparse.ArgumentParser(
description="Build SDK using Autorest, offline version.", formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
"--rest-folder",
"-r",
dest="restapi_git_folder",
default=None,
help="Rest API git folder. [default: %(default)s]",
)
parser.add_argument(
"--project",
"-p",
dest="project",
action="append",
help="Select a specific project. Do all by default. You can use a substring for several projects.",
)
parser.add_argument("--readme", "-m", dest="readme", help="Select a specific readme. Must be a path")
parser.add_argument(
"--config",
"-c",
dest="config_path",
default=CONFIG_FILE,
help="The JSON configuration format path [default: %(default)s]",
)
parser.add_argument(
"--autorest", dest="autorest_bin", help="Force the Autorest to be executed. Must be a executable command."
)
parser.add_argument(
"-f",
"--force",
dest="force",
action="store_true",
help="Should I force generation if SwaggerToSdk tag is not found",
)
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Verbosity in INFO mode")
parser.add_argument("--debug", dest="debug", action="store_true", help="Verbosity in DEBUG mode")
parser.add_argument(
"--sdk-folder", "-s", dest="sdk_folder", default=".", help="A Python SDK folder. [default: %(default)s]"
)
args = parser.parse_args()
main_logger = logging.getLogger()
if args.verbose or args.debug:
logging.basicConfig()
main_logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
generate(
args.config_path,
args.sdk_folder,
args.project,
args.readme,
args.restapi_git_folder,
args.autorest_bin,
args.force,
)
if __name__ == "__main__":
generate_main()
|
py | b403e9081c242cbfb893e7daacf6964525a53ac7 | import numpy as np
from .tensor import *
class ReLU(Operator):
def forward(self, x):
data = x.data
self.loc = data >= 0
return Tensor(data*self.loc)
def backward(self, x, precedents):
u, = precedents
u.grad += x.grad*self.loc
class LeakyReLU(Operator):
def __init__(self, leaky_rate=0.01):
self.leaky_rate = leaky_rate
def forward(self, x):
data = x.data
loc = data >= 0
self.effc = loc + self.leaky_rate*(1-loc)
return Tensor(data*self.effc)
def backward(self, x, precedents):
u, = precedents
u.grad += x.grad*self.effc
class Sigmoid(Operator):
def forward(self, x):
data = x.data
self.result = 1.0/(1.0+np.exp(-data))
return Tensor(self.result)
def backward(self, x, precedents):
u, = precedents
u.grad += x.grad*(self.result)*(1-self.result)
class Conv2d(Operator):
def __init__(self, padding=(0, 0), stride=(1, 1)):
self.padding = padding
self.stride = stride
def forward(self, t, weight):
t = t.data
w = weight.data
t = make_padding(t, self.padding)
B, C, iH, iW = t.shape
iC, oC, kH, kW = w.shape
        assert C == iC, 'Conv2d input channels do not match.'
return Tensor(batch_conv2d_f(t, w, self.stride))
def backward(self, x, precedents):
t, weight = precedents
t.grad += unwrap_padding(
batch_conv2d_im_backward_f(x.grad, weight.data, self.stride),
self.padding
)
weight.grad += batch_conv2d_weight_backward_f(
x.grad,
make_padding(t.data, self.padding),
self.stride
)
def batch_conv2d_f(x, kernel, stride=(1, 1)):
x = im2bchwkl(x, kernel.shape[-2:], stride)
return np.tensordot(x, kernel, [(1, 4, 5), (0, 2, 3)]).transpose(0, 3, 1, 2)
def batch_conv2d_weight_backward_f(kernel, input, stride=(1, 1)):
'''kernel is result tensor grad, input is original tensor'''
B, C, H, W = kernel.shape
x = im2bchwkl(input, kernel.shape[-2:], dilation=stride)
return np.tensordot(x, kernel, [(0, 4, 5), (0, 2, 3)]).transpose(0, 3, 1, 2)
def batch_conv2d_im_backward_f(x, kernel, stride=(1, 1)):
'''input is result tensor grad, kernel is weight tensor'''
ksize = kernel.shape
x = dilate_input(x, stride)
x = make_padding(x, ((ksize[2]-1), (ksize[3]-1)))
return batch_transposed_conv2d_f(x, kernel, invert=True)
def batch_transposed_conv2d_f(x, kernel, invert=False):
ksize = kernel.shape
x = transpose_kernel(
im2bchwkl(x, ksize[-2:])
)
i = 1 if invert else 0
return np.tensordot(x, kernel, [(1, 4, 5), (i, 2, 3)]).transpose(0, 3, 1, 2)
def im2bchwkl(input, ksize, stride=(1, 1), padding=(0, 0), dilation=(1, 1), writeable=False):
if padding != (0, 0):
        assert not writeable, 'Writable views are not supported in padding mode.'
input = make_padding(input, (padding[0], padding[1]))
isize = input.shape
istrides = input.strides
H = (isize[2]-(dilation[0]*(ksize[0]-1)+1))/(stride[0])+1
W = (isize[3]-(dilation[1]*(ksize[1]-1)+1))/(stride[1])+1
assert int(H) == H and int(W) == W, 'conv2d not aligned'
H = int(H)
W = int(W)
istrides = list(istrides+istrides[-2:])
istrides[2] *= stride[0]
istrides[3] *= stride[1]
istrides[4] *= dilation[0]
istrides[5] *= dilation[1]
return np.lib.stride_tricks.as_strided(input,
(isize[0], isize[1], H,
W, ksize[0], ksize[1]),
istrides,
writeable=writeable,
)
def make_padding(input, padding):
if padding == (0, 0):
return input
b, c, h, w = input.shape
p, q = padding
result = np.zeros((b, c, h+2*p, w+2*q), dtype=np.float32)
    result[:, :, p:p+h, q:q+w] = input
return result
def unwrap_padding(input, padding):
if padding == (0, 0):
return input
    p, q = padding
    h, w = input.shape[-2], input.shape[-1]
    return input[..., p:h-p, q:w-q]
def transpose_kernel(kernel):
return kernel[..., ::-1, ::-1]
def dilate_input(input, stride=(1, 1)):
if stride == (1, 1):
return input
isize = input.shape
x = np.zeros((isize[0], isize[1], (isize[2]-1) *
stride[0]+1, (isize[3]-1)*stride[1]+1), dtype=np.float32)
x[..., ::stride[0], ::stride[1]] = input
return x
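# Illustrative shape check for the helpers above (arrays are arbitrary examples):
#   x = np.random.randn(2, 3, 8, 8).astype(np.float32)  # batch, channels, H, W
#   w = np.random.randn(3, 4, 3, 3).astype(np.float32)  # in_ch, out_ch, kH, kW
#   batch_conv2d_f(x, w).shape == (2, 4, 6, 6)           # (8 - 3) + 1 = 6 per spatial dim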
|
py | b403e912cbddd2ba989e564b5a775b5a69f95c49 | import uvicorn
import pickle
from fastapi import FastAPI
from music import Music
app = FastAPI()
with open("model.pkl", "rb") as f:
model = pickle.load(f)
@app.get('/')
def index():
return {'message': 'This is the homepage of the API '}
@app.post('/predict')
def get_music_category(data: Music):
received = data.dict()
acousticness = received['acousticness']
danceability = received['danceability']
energy = received['energy']
instrumentalness = received['instrumentalness']
liveness = received['liveness']
speechiness = received['speechiness']
tempo = received['tempo']
valence = received['valence']
pred_name = model.predict([[acousticness, danceability, energy,
instrumentalness, liveness, speechiness, tempo, valence]]).tolist()[0]
return {'prediction': pred_name}
if __name__ == '__main__':
uvicorn.run(app, host='127.0.0.1', port=4000, debug=True)
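# Illustrative request (field names mirror those read in get_music_category; the
# Music model in music.py is assumed to define them):
#   curl -X POST "http://127.0.0.1:4000/predict" -H "Content-Type: application/json" \
#     -d '{"acousticness": 0.5, "danceability": 0.6, "energy": 0.7, "instrumentalness": 0.0,
#          "liveness": 0.1, "speechiness": 0.05, "tempo": 120.0, "valence": 0.4}'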
|
py | b403e93f2e82012eb4c49ef550854117832db4d7 | import datetime
from couchdbkit import ResourceNotFound
from django.utils.safestring import mark_safe
import logging
import numpy
import pytz
from corehq.apps.indicators.models import DynamicIndicatorDefinition, CombinedCouchViewIndicatorDefinition
from dimagi.utils.decorators.memoized import memoized
from mvp.models import MVP
from mvp.reports import MVPIndicatorReport
class HealthCoordinatorReport(MVPIndicatorReport):
"""
MVP Custom Report: MVIS Health Coordinator
"""
slug = "health_coordinator"
name = "MVIS Health Coordinator Report"
report_template_path = "mvp/reports/health_coordinator.html"
flush_layout = True
hide_filters = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.select.GroupFilter']
emailable = True
@property
def timezone(self):
return pytz.utc
@property
@memoized
def template_report(self):
if self.is_rendered_as_email:
self.report_template_path = "mvp/reports/health_coordinator_email.html"
return super(HealthCoordinatorReport, self).template_report
@property
def report_context(self):
report_matrix = []
month_headers = None
for category_group in self.indicator_slugs:
category_indicators = []
total_rowspan = 0
for slug in category_group['indicator_slugs']:
try:
indicator = DynamicIndicatorDefinition.get_current(MVP.NAMESPACE, self.domain, slug,
wrap_correctly=True)
if self.is_rendered_as_email:
retrospective = indicator.get_monthly_retrospective(user_ids=self.user_ids)
else:
retrospective = indicator.get_monthly_retrospective(return_only_dates=True)
if not month_headers:
month_headers = self.get_month_headers(retrospective)
if isinstance(indicator, CombinedCouchViewIndicatorDefinition):
table = self.get_indicator_table(retrospective)
indicator_rowspan = 3
else:
table = self.get_indicator_row(retrospective)
indicator_rowspan = 1
total_rowspan += indicator_rowspan + 1
category_indicators.append(dict(
title=indicator.description,
table=table,
load_url="%s?indicator=%s" % (self.get_url(self.domain, render_as='partial'), indicator.slug),
rowspan=indicator_rowspan
))
except (AttributeError, ResourceNotFound):
logging.info("Could not grab indicator %s in domain %s" % (slug, self.domain))
report_matrix.append(dict(
category_title=category_group['category_title'],
category_slug=category_group['category_slug'],
rowspan=total_rowspan,
indicators=category_indicators,
))
return dict(
months=month_headers,
report=report_matrix,
)
@property
def indicator_slugs(self):
return [
{
'category_title': "Vital Events",
'category_slug': 'vital_events',
'indicator_slugs': [
"num_births_occured",
"num_births_recorded",
"maternal_deaths",
"neonatal_deaths",
"infant_deaths",
"under5_deaths",
"over5_deaths",
]
},
{
'category_title': "Visits",
'category_slug': 'chw_visits',
'indicator_slugs': [
"households_routine_visit_past90days", # A1 - 23, all set
"households_routine_visit_past30days", # A1 - 44, all set
"pregnant_routine_visit_past30days", # A1 - 46
"pregnant_routine_checkup_proportion_6weeks",
"neonate_routine_visit_past7days", # A1 - 47
"newborn_7day_visit_proportion", # A2 - 6, denom slightly off
"under1_check_ups_proportion",
"under5_routine_visit_past30days", # A1 - 45
"urgent_referrals_proportion", # A2 - 13, updated to spec
]
},
{
'category_title': "Maternal Health",
'category_slug': 'maternal_health',
'indicator_slugs': [
"no_anc_proportion", # A3 - 2
"anc4_proportion", # A2 - 3
"facility_births_proportion", # A2 - 4
"low_birth_weight_proportion",
"family_planning_proportion", # A2 - 1
]
},
{
'category_title': "Child Health",
'category_slug': 'child_health',
'indicator_slugs': [
"muac_routine_proportion",
"muac_wasting_proportion",
"moderate_muac_wasting_proportion",
"severe_muac_wasting_proportion",
"under5_diarrhea_ors_proportion", # A2 - 37
"under5_diarrhea_zinc_proportion", # B - 38
"under5_complicated_fever_referred_proportion",
"under5_complicated_fever_facility_followup_proportion",
"under1_immunized_proportion", # A2 - 8
"under6month_exclusive_breastfeeding_proportion",
]
},
{
'category_title': "Malaria",
'category_slug': 'malaria',
'indicator_slugs': [
"under5_fever_rdt_proportion",
"under5_fever_rdt_positive_proportion",
"under5_fever_rdt_not_received_proportion",
"under5_fever_rdt_positive_medicated_proportion",
"under5_fever_rdt_negative_medicated_proportion",
"over5_positive_rdt_medicated_proportion",
]
},
{
'category_title': "Household health",
'category_slug': 'household_health',
'indicator_slugs': [
"functioning_bednet_proportion",
"handwashing_near_latrine_proportion",
]
}
]
def get_month_headers(self, retrospective):
headers = list()
month_fmt = "%b %Y"
num_months = len(retrospective)
for i, result in enumerate(retrospective):
month = result.get('date')
month_text = month.strftime(month_fmt) if isinstance(month, datetime.datetime) else "Unknown"
month_desc = "(-%d)" % (num_months-(i+1)) if (num_months-i) > 1 else "(Current)"
headers.append(mark_safe("%s<br />%s" % (month_text, month_desc)))
return headers
def get_indicator_table(self, retrospective):
n_row = [i.get('numerator', 0) for i in retrospective]
d_row = [i.get('denominator', 0) for i in retrospective]
r_row = [i.get('ratio') for i in retrospective]
n_stats = []
d_stats = []
r_stats = []
for i in range(len(retrospective)):
if r_row[i] is not None:
n_stats.append(n_row[i])
d_stats.append(d_row[i])
r_stats.append(r_row[i])
n_row.extend(self._get_statistics(n_stats))
d_row.extend(self._get_statistics(d_stats))
r_row.extend(self._get_statistics(r_stats))
return dict(
numerators=self._format_row(n_row),
denominators=self._format_row(d_row),
percentages=self._format_row(r_row, True)
)
def _format_row(self, row, as_percent=False):
formatted = list()
num_cols = len(row)
for i, val in enumerate(row):
if val is not None and not numpy.isnan(val):
text = "%.f%%" % (val*100) if as_percent else "%d" % int(val)
else:
text = "--"
if i == num_cols-4:
css = "current_month"
elif i > num_cols-4:
css = "summary"
else:
css = ""
formatted.append(dict(
raw_value=val,
text=text,
css=css
))
return formatted
def _get_statistics(self, nonzero_row):
if nonzero_row:
return [numpy.average(nonzero_row), numpy.median(nonzero_row), numpy.std(nonzero_row)]
return [None]*3
def get_indicator_row(self, retrospective):
row = [i.get('value', 0) for i in retrospective]
nonzero_row = [r for r in row if r]
row.extend(self._get_statistics(nonzero_row))
return dict(
numerators=self._format_row(row)
)
def get_response_for_indicator(self, indicator):
try:
retrospective = indicator.get_monthly_retrospective(user_ids=self.user_ids)
if isinstance(indicator, CombinedCouchViewIndicatorDefinition):
table = self.get_indicator_table(retrospective)
else:
table = self.get_indicator_row(retrospective)
return {
'table': table,
}
except AttributeError:
pass
return None
|
py | b403e9c711f2f8df001c2cc99dca9b3ea2c02d6e | # -*- coding: utf-8 -*-
"""
Tracing Span Sender Interface for both Clients.
@author Hao Song ([email protected])
"""
class WavefrontTracingSpanSender(object):
"""Tracing Span Sender Interface for both Clients."""
# pylint: disable=too-many-arguments
def send_span(self, name, start_millis, duration_millis, source, trace_id,
span_id, parents, follows_from, tags, span_logs):
"""
Send span data via proxy.
Wavefront Tracing Span Data format
<tracingSpanName> source=<source> [pointTags] <start_millis>
<duration_milli_seconds>
Example: "getAllUsers source=localhost
traceId=7b3bf470-9456-11e8-9eb6-529269fb1459
spanId=0313bafe-9457-11e8-9eb6-529269fb1459
parent=2f64e538-9457-11e8-9eb6-529269fb1459
application=Wavefront http.method=GET
1533531013 343500"
@param name: Span Name
@type name: str
@param start_millis: Start time
@type start_millis: long
@param duration_millis: Duration time
@type duration_millis: long
@param source: Source
@type source: str
@param trace_id: Trace ID
@type trace_id: UUID
@param span_id: Span ID
@type span_id: UUID
@param parents: Parents Span ID
@type parents: List of UUID
@param follows_from: Follows Span ID
@type follows_from: List of UUID
@param tags: Tags
@type tags: list
@param span_logs: Span Log
"""
raise NotImplementedError
def send_span_now(self, spans):
"""
Send a list of spans immediately.
        Have to construct the data manually by calling
        common.utils.tracing_span_to_line_data()
@param spans: List of string spans data
@type spans: list[str]
"""
raise NotImplementedError
|
py | b403e9ce83ecbdcadfc75f88e3b37ede9e605275 | from feaflow.airflow_config import AirflowSchedulerConfig
from feaflow.job_config import JobConfig
from feaflow.source.pandas import PandasDataFrameSourceConfig
def test_loop(project_misc):
jobs = project_misc.scan_jobs()
loop_data = [
{"name": "l1", "schedule_interval": "0 6 * * *"},
{"name": "l2", "schedule_interval": "1 7 * * *"},
{"name": "l3", "schedule_interval": "2 8 * * *"},
]
for loop_variables in loop_data:
job_name = "test_job4_" + loop_variables["name"]
conf: JobConfig = next(filter(lambda j: j.name == job_name, jobs))
assert conf.name == "test_job4_" + loop_variables["name"]
assert isinstance(conf.scheduler, AirflowSchedulerConfig)
assert conf.scheduler.schedule_interval == loop_variables["schedule_interval"]
assert len(conf.sources) == 1
assert isinstance(conf.sources[0], PandasDataFrameSourceConfig)
assert conf.sources[0].file.path == "{{ project_root }}/../data/pandas_df1.csv"
def test_variables(project_misc):
jobs = project_misc.scan_jobs()
l1_conf: JobConfig = next(filter(lambda j: j.name == "test_job4_l1", jobs))
assert l1_conf.variables == {"fields": ["field1", "field2"], "table": "var_table"}
assert l1_conf.loop_variables == {"name": "l1", "schedule_interval": "0 6 * * *"}
|
py | b403ea55059e8e9fe64d9c7414644ebbc97652bd | import dataclasses
from typing import List, Optional, Tuple
from blspy import AugSchemeMPL, G2Element
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import Program, INFINITE_COST
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.condition_opcodes import ConditionOpcode
from chia.types.spend_bundle import CoinSpend, SpendBundle
from chia.util.condition_tools import conditions_dict_for_solution
from chia.util.ints import uint64
from chia.wallet.puzzles.cc_loader import CC_MOD, LOCK_INNER_PUZZLE
from chia.wallet.puzzles.genesis_by_coin_id_with_0 import (
genesis_coin_id_for_genesis_coin_checker,
lineage_proof_for_coin,
lineage_proof_for_genesis,
lineage_proof_for_zero,
)
NULL_SIGNATURE = G2Element()
ANYONE_CAN_SPEND_PUZZLE = Program.to(1) # simply return the conditions
# information needed to spend a cc
# if we ever support more genesis conditions, like a re-issuable coin,
# we may need also to save the `genesis_coin_mod` or its hash
@dataclasses.dataclass
class SpendableCC:
coin: Coin
genesis_coin_id: bytes32
inner_puzzle: Program
lineage_proof: Program
def cc_puzzle_for_inner_puzzle(mod_code, genesis_coin_checker, inner_puzzle) -> Program:
"""
Given an inner puzzle, generate a puzzle program for a specific cc.
"""
return mod_code.curry(mod_code.get_tree_hash(), genesis_coin_checker, inner_puzzle)
# return mod_code.curry([mod_code.get_tree_hash(), genesis_coin_checker, inner_puzzle])
def cc_puzzle_hash_for_inner_puzzle_hash(mod_code, genesis_coin_checker, inner_puzzle_hash) -> bytes32:
"""
Given an inner puzzle hash, calculate a puzzle program hash for a specific cc.
"""
gcc_hash = genesis_coin_checker.get_tree_hash()
return mod_code.curry(mod_code.get_tree_hash(), gcc_hash, inner_puzzle_hash).get_tree_hash(
gcc_hash, inner_puzzle_hash
)
def lineage_proof_for_cc_parent(parent_coin: Coin, parent_inner_puzzle_hash: bytes32) -> Program:
return Program.to(
(
1,
[parent_coin.parent_coin_info, parent_inner_puzzle_hash, parent_coin.amount],
)
)
def subtotals_for_deltas(deltas) -> List[int]:
"""
Given a list of deltas corresponding to input coins, create the "subtotals" list
needed in solutions spending those coins.
"""
subtotals = []
subtotal = 0
for delta in deltas:
subtotals.append(subtotal)
subtotal += delta
# tweak the subtotals so the smallest value is 0
subtotal_offset = min(subtotals)
subtotals = [_ - subtotal_offset for _ in subtotals]
return subtotals
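# Illustrative example: deltas [-2, 1, 1] give running subtotals [0, -2, -1],
# which are then shifted so the smallest value is 0 -> [2, 0, 1].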
def coin_spend_for_lock_coin(
prev_coin: Coin,
subtotal: int,
coin: Coin,
) -> CoinSpend:
puzzle_reveal = LOCK_INNER_PUZZLE.curry(prev_coin.as_list(), subtotal)
coin = Coin(coin.name(), puzzle_reveal.get_tree_hash(), uint64(0))
coin_spend = CoinSpend(coin, puzzle_reveal, Program.to(0))
return coin_spend
def bundle_for_spendable_cc_list(spendable_cc: SpendableCC) -> Program:
pair = (spendable_cc.coin.as_list(), spendable_cc.lineage_proof)
return Program.to(pair)
def spend_bundle_for_spendable_ccs(
mod_code: Program,
genesis_coin_checker: Program,
spendable_cc_list: List[SpendableCC],
inner_solutions: List[Program],
    sigs: Optional[List[G2Element]] = None,
) -> SpendBundle:
"""
Given a list of `SpendableCC` objects and inner solutions for those objects, create a `SpendBundle`
    that spends all those coins. Note that the signature is not calculated here, so the caller is responsible
for fixing it.
"""
N = len(spendable_cc_list)
if len(inner_solutions) != N:
raise ValueError("spendable_cc_list and inner_solutions are different lengths")
input_coins = [_.coin for _ in spendable_cc_list]
# figure out what the output amounts are by running the inner puzzles & solutions
output_amounts = []
for cc_spend_info, inner_solution in zip(spendable_cc_list, inner_solutions):
error, conditions, cost = conditions_dict_for_solution(
cc_spend_info.inner_puzzle, inner_solution, INFINITE_COST
)
total = 0
if conditions:
for _ in conditions.get(ConditionOpcode.CREATE_COIN, []):
total += Program.to(_.vars[1]).as_int()
output_amounts.append(total)
coin_spends = []
deltas = [input_coins[_].amount - output_amounts[_] for _ in range(N)]
subtotals = subtotals_for_deltas(deltas)
if sum(deltas) != 0:
raise ValueError("input and output amounts don't match")
bundles = [bundle_for_spendable_cc_list(_) for _ in spendable_cc_list]
for index in range(N):
cc_spend_info = spendable_cc_list[index]
puzzle_reveal = cc_puzzle_for_inner_puzzle(mod_code, genesis_coin_checker, cc_spend_info.inner_puzzle)
prev_index = (index - 1) % N
next_index = (index + 1) % N
prev_bundle = bundles[prev_index]
my_bundle = bundles[index]
next_bundle = bundles[next_index]
solution = [
inner_solutions[index],
prev_bundle,
my_bundle,
next_bundle,
subtotals[index],
]
coin_spend = CoinSpend(input_coins[index], puzzle_reveal, Program.to(solution))
coin_spends.append(coin_spend)
if sigs is None or sigs == []:
return SpendBundle(coin_spends, NULL_SIGNATURE)
else:
return SpendBundle(coin_spends, AugSchemeMPL.aggregate(sigs))
def is_cc_mod(inner_f: Program):
"""
You may want to generalize this if different `CC_MOD` templates are supported.
"""
return inner_f == CC_MOD
def check_is_cc_puzzle(puzzle: Program):
r = puzzle.uncurry()
if r is None:
return False
inner_f, args = r
return is_cc_mod(inner_f)
def uncurry_cc(puzzle: Program) -> Optional[Tuple[Program, Program, Program]]:
"""
Take a puzzle and return `None` if it's not a `CC_MOD` cc, or
a triple of `mod_hash, genesis_coin_checker, inner_puzzle` if it is.
"""
r = puzzle.uncurry()
if r is None:
return r
inner_f, args = r
if not is_cc_mod(inner_f):
return None
mod_hash, genesis_coin_checker, inner_puzzle = list(args.as_iter())
return mod_hash, genesis_coin_checker, inner_puzzle
def get_lineage_proof_from_coin_and_puz(parent_coin, parent_puzzle):
r = uncurry_cc(parent_puzzle)
if r:
mod_hash, genesis_checker, inner_puzzle = r
lineage_proof = lineage_proof_for_cc_parent(parent_coin, inner_puzzle.get_tree_hash())
else:
if parent_coin.amount == 0:
lineage_proof = lineage_proof_for_zero(parent_coin)
else:
lineage_proof = lineage_proof_for_genesis(parent_coin)
return lineage_proof
def spendable_cc_list_from_coin_spend(coin_spend: CoinSpend, hash_to_puzzle_f) -> List[SpendableCC]:
"""
Given a `CoinSpend`, extract out a list of `SpendableCC` objects.
Since `SpendableCC` needs to track the inner puzzles and a `Coin` only includes
puzzle hash, we also need a `hash_to_puzzle_f` function that turns puzzle hashes into
the corresponding puzzles. This is generally either a `dict` or some kind of DB
(if it's large or persistent).
"""
spendable_cc_list = []
coin = coin_spend.coin
puzzle = Program.from_bytes(bytes(coin_spend.puzzle_reveal))
r = uncurry_cc(puzzle)
if r:
mod_hash, genesis_coin_checker, inner_puzzle = r
lineage_proof = lineage_proof_for_cc_parent(coin, inner_puzzle.get_tree_hash())
else:
lineage_proof = lineage_proof_for_coin(coin)
for new_coin in coin_spend.additions():
puzzle = hash_to_puzzle_f(new_coin.puzzle_hash)
if puzzle is None:
# we don't recognize this puzzle hash, skip it
continue
r = uncurry_cc(puzzle)
if r is None:
# this isn't a cc puzzle
continue
mod_hash, genesis_coin_checker, inner_puzzle = r
genesis_coin_id = genesis_coin_id_for_genesis_coin_checker(genesis_coin_checker)
cc_spend_info = SpendableCC(new_coin, genesis_coin_id, inner_puzzle, lineage_proof)
spendable_cc_list.append(cc_spend_info)
return spendable_cc_list
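# Illustrative 'hash_to_puzzle_f': a plain dict lookup over known cc puzzles, e.g.
#   known = {puz.get_tree_hash(): puz for puz in candidate_cc_puzzles}
#   spendable = spendable_cc_list_from_coin_spend(coin_spend, known.get)
# (candidate_cc_puzzles is a hypothetical collection of full cc puzzle reveals.)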
|
py | b403eaacde73a14d5adbd1afe027518ce39b2e86 | #!/usr/bin/env python3
# =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2 Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3 Neither the names of the copyright holders nor the names of the
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =======================================================================
#
"""presenter server module"""
import os
import sys
import signal
import argparse
import logging
WEB_SERVER = None
APP_SERVER = None
RUN_SERVER = None
SERVER_TYPE = ""
USAGE_INFO = "python3 prensenter_server.py [-h] --app \n\t\t\t\t{hand_write}"
HAND_WRITE_MAP = {"web_server": "hand_write.src.web",
"app_server": "hand_write.src.hand_write_server"
}
APP_CONF_MAP = {"hand_write": HAND_WRITE_MAP}
def arg_parse():
'''arg_parse'''
global WEB_SERVER
global APP_SERVER
global SERVER_TYPE
parser = argparse.ArgumentParser(usage=USAGE_INFO)
parser.add_argument('--app', type=str, required=True,
choices=['hand_write'],
help="Application type corresponding to Presenter Server.")
args = parser.parse_args()
SERVER_TYPE = args.app
app_conf = APP_CONF_MAP.get(SERVER_TYPE)
WEB_SERVER = __import__(app_conf.get("web_server"), fromlist=True)
APP_SERVER = __import__(app_conf.get("app_server"), fromlist=True)
def start_app():
global RUN_SERVER
# start socket server for presenter agent communication
RUN_SERVER = APP_SERVER.run()
if RUN_SERVER is None:
return False
logging.info("presenter server starting, type: %s", SERVER_TYPE)
# start web ui
return WEB_SERVER.start_webapp()
def stop_app():
WEB_SERVER.stop_webapp()
RUN_SERVER.stop_thread()
def close_all_thread(signum, frame):
'''close all thread of the process, and exit.'''
logging.info("receive signal, signum:%s, frame:%s", signum, frame)
stop_app()
logging.info("presenter server exit by Ctrl + c")
sys.exit()
def check_server_exist():
pid = os.getpid()
cmd = "ps -ef|grep -v {}|grep -w presenter_server|grep {}" \
.format(pid, SERVER_TYPE)
ret = os.system(cmd)
return ret
def main_process():
'''Main function entrance'''
arg_parse()
if check_server_exist() == 0:
print("Presenter Server type \"%s\" already exist!" %(SERVER_TYPE))
return True
# process signal, when receive "Ctrl + c" signal,
    # stop all threads and exit the process.
signal.signal(signal.SIGINT, close_all_thread)
signal.signal(signal.SIGTERM, close_all_thread)
start_app()
return True
if __name__ == "__main__":
main_process()
|
py | b403ebe3a659d1a3927dfc60eb0dd6542c171f31 | """Resolwe models validation utils."""
import json
import re
from pathlib import Path
import jsonschema
from django.contrib.staticfiles import finders
from django.core.exceptions import ValidationError
from resolwe.flow.utils import dict_dot, iterate_dict, iterate_fields, iterate_schema
class DirtyError(ValidationError):
"""Error raised when required fields missing."""
def validation_schema(name):
"""Return json schema for json validation."""
schemas = {
"processor": "processSchema.json",
"descriptor": "descriptorSchema.json",
"field": "fieldSchema.json",
"type": "typeSchema.json",
}
if name not in schemas:
raise ValueError()
field_schema_file = finders.find("flow/{}".format(schemas["field"]), all=True)[0]
with open(field_schema_file, "r") as fn:
field_schema = fn.read()
if name == "field":
return json.loads(field_schema.replace("{{PARENT}}", ""))
schema_file = finders.find("flow/{}".format(schemas[name]), all=True)[0]
with open(schema_file, "r") as fn:
schema = fn.read()
return json.loads(
schema.replace("{{FIELD}}", field_schema).replace("{{PARENT}}", "/field")
)
TYPE_SCHEMA = validation_schema("type")
def validate_schema(
instance, schema, test_required=True, data_location=None, skip_missing_data=False
):
"""Check if DictField values are consistent with our data types.
Perform basic JSON schema validation and our custom validations:
* check that required fields are given (if `test_required` is set
to ``True``)
* check if ``basic:file:`` and ``list:basic:file`` fields match
regex given in schema (only if ``validate_regex`` is defined in
      schema for corresponding fields) and exist (only if
``data_location`` is given)
* check if directories referenced in ``basic:dir:`` and
      ``list:basic:dir`` fields exist (only if ``data_location`` is
given)
* check that referenced ``Data`` objects (in ``data:<data_type>``
and ``list:data:<data_type>`` fields) exists and are of type
``<data_type>``
* check that referenced ``Storage`` objects (in ``basic:json``
fields) exists
:param list instance: Instance to be validated
:param list schema: Schema for validation
:param bool test_required: Flag for testing if all required fields
        are present. It is useful if validation is run before the ``Data``
        object is finished and some fields are still missing
        (default: ``True``)
:param :class:`~resolwe.storage.models.FileStorage` data_location:
data location used for checking if files and directories exist
(default: ``None``)
:param bool skip_missing_data: Don't raise an error if referenced
``Data`` object does not exist
:rtype: None
:raises ValidationError: if ``instance`` doesn't match schema
defined in ``schema``
"""
from ..storage import Storage # Prevent circular import.
path_prefix = None
if data_location:
path_prefix = Path(data_location.get_path())
def validate_refs(field):
"""Validate reference paths."""
for ref_filename in field.get("refs", []):
ref_path: Path = path_prefix / ref_filename
file_exists = ref_path.exists()
if not file_exists:
raise ValidationError(
"Path referenced in `refs` ({}) does not exist.".format(ref_path)
)
if not (ref_path.is_file() or ref_path.is_dir()):
raise ValidationError(
"Path referenced in `refs` ({}) is neither a file or directory.".format(
ref_path
)
)
def validate_file(field, regex):
"""Validate file name (and check that it exists)."""
filename = field["file"]
if regex and not re.search(regex, filename):
raise ValidationError(
"File name {} does not match regex {}".format(filename, regex)
)
if path_prefix:
path: Path = path_prefix / filename
if not path.exists():
raise ValidationError(
"Referenced path ({}) does not exist.".format(path)
)
if not path.is_file():
raise ValidationError(
"Referenced path ({}) is not a file.".format(path)
)
validate_refs(field)
def validate_dir(field):
"""Check that dirs and referenced files exists."""
dirname = field["dir"]
if path_prefix:
path: Path = path_prefix / dirname
if not path.exists():
raise ValidationError(
"Referenced path ({}) does not exist.".format(path)
)
if not path.is_dir():
raise ValidationError(
"Referenced path ({}) is not a directory.".format(path)
)
validate_refs(field)
def validate_data(data_pk, type_):
"""Check that `Data` objects exist and is of right type."""
from ..data import Data # prevent circular import
data_qs = Data.objects.filter(pk=data_pk).values("process__type")
if not data_qs.exists():
if skip_missing_data:
return
raise ValidationError(
"Referenced `Data` object does not exist (id:{})".format(data_pk)
)
data = data_qs.first()
if not data["process__type"].startswith(type_):
raise ValidationError(
"Data object of type `{}` is required, but type `{}` is given. "
"(id:{})".format(type_, data["process__type"], data_pk)
)
def validate_range(value, interval, name):
"""Check that given value is inside the specified range."""
if not interval:
return
if value < interval[0] or value > interval[1]:
raise ValidationError(
"Value of field '{}' is out of range. It should be between {} and {}.".format(
name, interval[0], interval[1]
)
)
is_dirty = False
dirty_fields = []
for _schema, _fields, _ in iterate_schema(instance, schema):
name = _schema["name"]
is_required = _schema.get("required", True)
if test_required and is_required and name not in _fields:
is_dirty = True
dirty_fields.append(name)
if name in _fields:
field = _fields[name]
type_ = _schema.get("type", "")
# Treat None as if the field is missing.
if not is_required and field is None:
continue
try:
jsonschema.validate([{"type": type_, "value": field}], TYPE_SCHEMA)
except jsonschema.exceptions.ValidationError as ex:
raise ValidationError(ex.message)
choices = [choice["value"] for choice in _schema.get("choices", [])]
allow_custom_choice = _schema.get("allow_custom_choice", False)
if choices and not allow_custom_choice and field not in choices:
raise ValidationError(
"Value of field '{}' must match one of predefined choices. "
"Current value: {}".format(name, field)
)
if type_ == "basic:file:":
validate_file(field, _schema.get("validate_regex"))
elif type_ == "list:basic:file:":
for obj in field:
validate_file(obj, _schema.get("validate_regex"))
elif type_ == "basic:dir:":
validate_dir(field)
elif type_ == "list:basic:dir:":
for obj in field:
validate_dir(obj)
elif (
type_ == "basic:json:" and not Storage.objects.filter(pk=field).exists()
):
raise ValidationError(
"Referenced `Storage` object does not exist (id:{})".format(field)
)
elif type_.startswith("data:"):
validate_data(field, type_)
elif type_.startswith("list:data:"):
for data_id in field:
validate_data(data_id, type_[5:]) # remove `list:` from type
elif type_ == "basic:integer:" or type_ == "basic:decimal:":
validate_range(field, _schema.get("range"), name)
elif type_ == "list:basic:integer:" or type_ == "list:basic:decimal:":
for obj in field:
validate_range(obj, _schema.get("range"), name)
try:
# Check that schema definitions exist for all fields
for _, _ in iterate_fields(instance, schema):
pass
except KeyError as ex:
raise ValidationError(str(ex))
if is_dirty:
dirty_fields = ['"{}"'.format(field) for field in dirty_fields]
raise DirtyError(
"Required fields {} not given.".format(", ".join(dirty_fields))
)
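# A minimal usage sketch for `validate_schema`: the field names, types and range below
# are assumed values chosen only for illustration. The call raises DirtyError or
# ValidationError on problems and returns None otherwise.
def _example_validate_schema():
    schema = [
        {"name": "reads", "type": "basic:integer:", "required": True, "range": [0, 100]},
        {"name": "note", "type": "basic:string:", "required": False},
    ]
    instance = {"reads": 42}
    validate_schema(instance, schema, test_required=True)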
def validate_data_object(data, skip_missing_data=False):
"""Validate data object.
Data object is validated only when worker is done with processing.
"""
validate_schema(
data.input, data.process.input_schema, skip_missing_data=skip_missing_data
)
validate_schema(
data.output, data.process.output_schema, data_location=data.location
)
def validate_process_subtype(supertype_name, supertype, subtype_name, subtype):
"""Perform process subtype validation.
:param supertype_name: Supertype name
:param supertype: Supertype schema
:param subtype_name: Subtype name
:param subtype: Subtype schema
:return: A list of validation error strings
"""
errors = []
for item in supertype:
# Ensure that the item exists in subtype and has the same schema.
for subitem in subtype:
if item["name"] != subitem["name"]:
continue
for key in set(item.keys()) | set(subitem.keys()):
if key in ("label", "description"):
# Label and description can differ.
continue
elif key == "required":
# A non-required item can be made required in subtype, but not the
# other way around.
item_required = item.get("required", True)
subitem_required = subitem.get("required", False)
if item_required and not subitem_required:
errors.append(
"Field '{}' is marked as required in '{}' and optional in '{}'.".format(
item["name"], supertype_name, subtype_name
)
)
elif item.get(key, None) != subitem.get(key, None):
errors.append(
"Schema for field '{}' in type '{}' does not match supertype '{}'.".format(
item["name"], subtype_name, supertype_name
)
)
break
else:
errors.append(
"Schema for type '{}' is missing supertype '{}' field '{}'.".format(
subtype_name, supertype_name, item["name"]
)
)
return errors
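# A minimal usage sketch for `validate_process_subtype` with hypothetical type names:
# the supertype leaves 'reads' optional, so the subtype may tighten it to required
# and no error is returned; relaxing a required field the other way around would be.
def _example_process_subtype_check():
    supertype = [{"name": "reads", "type": "basic:integer:", "required": False}]
    subtype = [{"name": "reads", "type": "basic:integer:", "required": True}]
    return validate_process_subtype("data:alignment", supertype, "data:alignment:bam", subtype)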
def validate_process_types(queryset=None):
"""Perform process type validation.
:param queryset: Optional process queryset to validate
:return: A list of validation error strings
"""
if not queryset:
from ..process import Process
queryset = Process.objects.all()
processes = {}
for process in queryset:
dict_dot(
processes,
process.type.replace(":", ".") + "__schema__",
process.output_schema,
)
errors = []
for path, key, value in iterate_dict(
processes, exclude=lambda key, value: key == "__schema__"
):
if "__schema__" not in value:
continue
# Validate with any parent types.
for length in range(len(path), 0, -1):
parent_type = ".".join(path[:length] + ["__schema__"])
try:
parent_schema = dict_dot(processes, parent_type)
except KeyError:
continue
errors += validate_process_subtype(
supertype_name=":".join(path[:length]),
supertype=parent_schema,
subtype_name=":".join(path + [key]),
subtype=value["__schema__"],
)
return errors
|
py | b403ec447c705780256ec494daa3a621ecef8d3b | from django import template
register = template.Library()
@register.simple_tag(name="formSubmit")
def form_submit(text):
first = f'<div class="submitLabel">{text}</div>'
second = '<img style="display:none;" class="waitSpin submitLoading" src="/static/base/IMG/loading.svg"/>'
return first + second |
py | b403ec451f198689a676d6ed64ee687048ccc142 |
import numpy as np
from kaldo.helpers.logger import get_logger
from sparse import COO
logging = get_logger()
def list_of_replicas(atoms, replicated_atoms):
n_atoms = atoms.positions.shape[0]
n_replicas = int(replicated_atoms.positions.shape[0] / n_atoms)
    # Cell offset of each replica, shape (n_replicas, 3): within a replica every atom
    # is displaced from its unit cell image by the same lattice vector.
    list_of_cells = (replicated_atoms.positions.reshape((n_replicas, n_atoms, 3)) - atoms.positions)[:, 0, :]
return list_of_cells
def calculate_gradient(x, input_atoms):
"""
    Compute the gradient of the potential for positions ``x`` with the given atoms object.
    A copy of the atoms object is kept so that the progress of the optimization stays traceable.
    The force is the negative of the gradient.
"""
atoms = input_atoms.copy()
input_atoms.positions = np.reshape(x, (int(x.size / 3.), 3))
gr = -1. * input_atoms.get_forces()
grad = np.reshape(gr, gr.size)
input_atoms.positions = atoms.positions
return grad
def calculate_single_second(replicated_atoms, atom_id, second_order_delta):
"""
    Compute the numerator of the approximated second order matrices
    (approximated force from the forward difference minus the
    approximated force from the backward difference).
"""
n_replicated_atoms = len(replicated_atoms.numbers)
second_per_atom = np.zeros((3, n_replicated_atoms * 3))
for alpha in range(3):
for move in (-1, 1):
shift = np.zeros((n_replicated_atoms, 3))
shift[atom_id, alpha] += move * second_order_delta
second_per_atom[alpha, :] += move * calculate_gradient(replicated_atoms.positions + shift,
replicated_atoms)
return second_per_atom
def calculate_second(atoms, replicated_atoms, second_order_delta, is_verbose=False):
# TODO: remove supercell
"""
Core method to compute second order force constant matrices
Approximate the second order force constant matrices
using central difference formula
"""
logging.info('Calculating second order potential derivatives, ' + 'finite difference displacement: %.3e angstrom'%second_order_delta)
n_unit_cell_atoms = len(atoms.numbers)
replicated_atoms = replicated_atoms
n_replicated_atoms = len(replicated_atoms.numbers)
n_atoms = n_unit_cell_atoms
n_replicas = int(n_replicated_atoms / n_unit_cell_atoms)
second = np.zeros((n_atoms, 3, n_replicated_atoms * 3))
for i in range(n_atoms):
if is_verbose:
logging.info('calculating forces on atom ' + str(i))
second[i] = calculate_single_second(replicated_atoms, i, second_order_delta)
second = second.reshape((1, n_unit_cell_atoms, 3, n_replicas, n_unit_cell_atoms, 3))
second = second / (2. * second_order_delta)
return second
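# A toy sketch of the same central difference stencil used above, applied to the scalar
# potential f(x) = x**4 whose gradient is 4*x**3; the exact second derivative at x = 1 is 12.
def _central_difference_example(delta=1e-4):
    def toy_gradient(x):
        return 4. * x ** 3
    x0 = 1.
    return (toy_gradient(x0 + delta) - toy_gradient(x0 - delta)) / (2. * delta)  # ~ 12.0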
def calculate_third(atoms, replicated_atoms, third_order_delta, distance_threshold=None, is_verbose=False):
"""
Compute third order force constant matrices by using the central
difference formula for the approximation
"""
logging.info('Calculating third order potential derivatives, ' + 'finite difference displacement: %.3e angstrom'%third_order_delta)
n_atoms = len(atoms.numbers)
replicated_atoms = replicated_atoms
n_replicas = int(replicated_atoms.positions.shape[0] / n_atoms)
i_at_sparse = []
i_coord_sparse = []
jat_sparse = []
j_coord_sparse = []
k_sparse = []
value_sparse = []
n_forces_to_calculate = n_replicas * (n_atoms * 3) ** 2
n_forces_done = 0
    n_forces_skipped = 0
    if distance_threshold is not None:
        # Precompute the per-replica cell offsets once; the bare function name
        # `list_of_replicas` cannot be indexed directly.
        replica_offsets = list_of_replicas(atoms, replicated_atoms)
    for iat in range(n_atoms):
        for jat in range(n_replicas * n_atoms):
            is_computing = True
            m, j_small = np.unravel_index(jat, (n_replicas, n_atoms))
            if (distance_threshold is not None):
                dxij = atoms.positions[iat] - (replica_offsets[m] + atoms.positions[j_small])
if (np.linalg.norm(dxij) > distance_threshold):
is_computing = False
n_forces_skipped += 9
if is_computing:
if is_verbose:
logging.info('calculating forces on atoms: ' + str(iat) + ',' + str(jat))
for icoord in range(3):
for jcoord in range(3):
value = calculate_single_third(atoms, replicated_atoms, iat, icoord, jat, jcoord,
third_order_delta)
for id in range(value.shape[0]):
i_at_sparse.append(iat)
i_coord_sparse.append(icoord)
jat_sparse.append(jat)
j_coord_sparse.append(jcoord)
k_sparse.append(id)
value_sparse.append(value[id])
n_forces_done += 9
            if (n_forces_done + n_forces_skipped) % 300 == 0:
logging.info('Calculate third derivatives ' + str
(int((n_forces_done + n_forces_skipped) / n_forces_to_calculate * 100)) + '%')
logging.info('total forces to calculate third : ' + str(n_forces_to_calculate))
logging.info('forces calculated : ' + str(n_forces_done))
logging.info('forces skipped (outside distance threshold) : ' + str(n_forces_skipped))
coords = np.array([i_at_sparse, i_coord_sparse, jat_sparse, j_coord_sparse, k_sparse])
shape = (n_atoms, 3, n_replicas * n_atoms, 3, n_replicas * n_atoms * 3)
phifull = COO(coords, np.array(value_sparse), shape)
phifull = phifull.reshape \
((n_atoms * 3, n_replicas * n_atoms * 3, n_replicas * n_atoms * 3))
return phifull
def calculate_single_third(atoms, replicated_atoms, iat, icoord, jat, jcoord, third_order_delta):
n_in_unit_cell = len(atoms.numbers)
n_replicated_atoms = len(replicated_atoms.numbers)
n_supercell = int(replicated_atoms.positions.shape[0] / n_in_unit_cell)
phi_partial = np.zeros((n_supercell * n_in_unit_cell * 3))
for isign in (1, -1):
for jsign in (1, -1):
shift = np.zeros((n_replicated_atoms, 3))
shift[iat, icoord] += isign * third_order_delta
shift[jat, jcoord] += jsign * third_order_delta
phi_partial[:] += isign * jsign * calculate_single_third_with_shift(atoms, replicated_atoms, shift)
return phi_partial / (4. * third_order_delta * third_order_delta)
def calculate_single_third_with_shift(atoms, replicated_atoms, shift):
n_in_unit_cell = len(atoms.numbers)
n_supercell = int(replicated_atoms.positions.shape[0] / n_in_unit_cell)
phi_partial = np.zeros((n_supercell * n_in_unit_cell * 3))
phi_partial[:] = (-1. * calculate_gradient(replicated_atoms.positions + shift, replicated_atoms))
return phi_partial
|
py | b403ec9ffdfb3e21349ac3d4b2ee0095783302b1 | import copy
import pytest
from .. import wide_dataframe_benchmark
from ..tests._asserts import assert_cli, assert_context
HELP = """
Usage: conbench wide-dataframe [OPTIONS]
Run wide-dataframe benchmark(s).
For each benchmark option, the first option value is the default.
Valid benchmark combinations:
--use-legacy-dataset=true
--use-legacy-dataset=false
To run all combinations:
$ conbench wide-dataframe --all=true
Options:
--use-legacy-dataset [false|true]
--all BOOLEAN [default: False]
--cpu-count INTEGER
--iterations INTEGER [default: 1]
--drop-caches BOOLEAN [default: False]
--gc-collect BOOLEAN [default: True]
--gc-disable BOOLEAN [default: True]
--show-result BOOLEAN [default: True]
--show-output BOOLEAN [default: False]
--run-id TEXT Group executions together with a run id.
--run-name TEXT Name of run (commit, pull request, etc).
--help Show this message and exit.
"""
benchmark = wide_dataframe_benchmark.WideDataframeBenchmark()
cases, case_ids = benchmark.cases, benchmark.case_ids
def assert_benchmark(result, case):
munged = copy.deepcopy(result)
assert munged["tags"] == {
"name": "wide-dataframe",
"cpu_count": None,
"use_legacy_dataset": case[0],
}
assert_context(munged)
@pytest.mark.parametrize("case", cases, ids=case_ids)
def test_wide_dataframe(case):
[(result, output)] = benchmark.run(case, iterations=1)
assert_benchmark(result, case)
assert "100 rows x 10000 columns" in str(output)
def test_wide_dataframe_cli():
command = ["conbench", "wide-dataframe", "--help"]
assert_cli(command, HELP)
|
py | b403ed75ffc5fadd03d8f88f762574336ff03041 | from .delhivery import Delhivery
from .ekart import Ekart
PROVIDERS = {
"delhivery": Delhivery,
"ekart": Ekart,
}
def get_track_data(provider: str, waybill: str) -> object:
provider_obj = PROVIDERS[provider](waybill)
return provider_obj.run()
def get_providers() -> list:
return list(PROVIDERS.keys())
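# A minimal usage sketch: dispatch through the registry by provider name. The waybill
# below is a placeholder value, not a real shipment.
def _example_track(waybill: str = "AWB0000000000") -> object:
    assert "delhivery" in get_providers()
    return get_track_data("delhivery", waybill)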
|
py | b403ed96777e337e7c9bee6c5c96d87131baf63a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = 'Emanuele Bertoldi <[email protected]>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.5'
from django.test import TestCase
from django.template import Context
from django.contrib.auth import get_user_model
from djangoerp.menus.models import Menu
class MenuPluggetFuncTestCase(TestCase):
def test_with_menu_id(self):
"""Tests context manipulation when "menu_id" is in the given context.
"""
# WARNING: Don't put this line at module level due to issues with
# plugget auto-discovering.
from ..pluggets import menu
m = Menu.objects.create(slug="test-menu")
context = menu(Context({"menu_id": m.pk}))
self.assertTrue("menu_id" not in context)
self.assertTrue("name" in context)
self.assertEqual(context['name'], "test-menu")
def test_without_menu_id(self):
"""Tests context manipulation when "menu_id" is not in the given context.
"""
# WARNING: Don't put this line at module level due to issues with
# plugget auto-discovering.
from ..pluggets import menu
context = menu(Context())
self.assertTrue("menu_id" not in context)
self.assertTrue("name" not in context)
class BookmarksMenuPluggetFuncTestCase(TestCase):
def test_with_user(self):
"""Tests context manipulation when "user" is in the given context.
"""
# WARNING: Don't put this line at module level due to issues with
# plugget auto-discovering.
from ..pluggets import bookmarks_menu
u = get_user_model().objects.create(username="u", password="p")
context = bookmarks_menu(Context({"user": u}))
self.assertTrue("menu_id" not in context)
self.assertTrue("name" in context)
self.assertEqual(context['name'], "user_%d_bookmarks" % u.pk)
def test_without_user(self):
"""Tests context manipulation when "user" is not in the given context.
"""
# WARNING: Don't put this line at module level due to issues with
# plugget auto-discovering.
from ..pluggets import bookmarks_menu
context = bookmarks_menu(Context())
self.assertTrue("menu_id" not in context)
self.assertTrue("name" not in context)
|
py | b403ee22631b34e4332cf12f6ffaec2e39adab36 | import cx_Oracle
from os import system
def cls(): return system('cls')
cls()
connection = cx_Oracle.connect(
user="demopython",
password="5678",
dsn="localhost/xepdb1"
)
print("Successfully connected to Oracle Database")
cursor = connection.cursor()
# Create a table
cursor.execute("""
begin
execute immediate 'drop table todoitem';
exception when others then if sqlcode <> -942 then raise; end if;
end;""")
cursor.execute("""
create table todoitem (
id number generated always as identity,
description varchar2(4000),
creation_ts timestamp with time zone default current_timestamp,
done number(1,0),
primary key (id))"""
)
# Insert some data
rows = [("Task 1", 0),
("Task 2", 0),
("Task 3", 1),
("Task 4", 0),
("Task 5", 1)
]
cursor.executemany(
"insert into todoitem (description, done) values(:1, :2)",
rows)
print(cursor.rowcount, "Rows Inserted")
connection.commit()
# Now query the rows back
for row in cursor.execute('select description, done from todoitem where done=1'):
if (row[1]):
print(row[0], "is done")
else:
print(row[0], "is NOT done")
|
py | b403eeca19e82cdd8763fa19136f920d2d3157f5 | class ReactionModel:
"""Base class for all reaction models.
All child classes must implement the following member functions:
* calc_source()
* calc_jacob_prim()
Refer to finite_rate_irrev_reaction.py for examples.
"""
def __init__(self):
pass
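# A minimal sketch of the contract described above: a trivial subclass providing the two
# required members. The argument lists are assumptions; see finite_rate_irrev_reaction.py
# for the real signatures.
class _NoReaction(ReactionModel):
    def calc_source(self, sol_domain):
        # No chemistry: zero source term.
        return 0.0
    def calc_jacob_prim(self, sol_domain):
        # No chemistry: zero Jacobian contribution.
        return 0.0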
|
py | b403ef8802b874a4c3d1caee29c641f63f9f94ed | from rest_framework.test import APITestCase, APIRequestFactory,\
force_authenticate
from api.v2.views import ImageViewSet as ViewSet
from api.tests.factories import UserFactory, AnonymousUserFactory, ImageFactory
from django.core.urlresolvers import reverse
from unittest import skip
# class GetListTests(APITestCase):
#
# def setUp(self):
# self.view = ViewSet.as_view({'get': 'list'})
# self.anonymous_user = AnonymousUserFactory()
# self.user = UserFactory.create()
# self.staff_user = UserFactory.create(is_staff=True)
#
# self.image = ImageFactory.create(created_by=self.user)
#
# factory = APIRequestFactory()
# url = reverse('api:v2:application-list')
# self.request = factory.get(url)
# force_authenticate(self.request, user=self.user)
# self.response = self.view(self.request)
#
# def test_is_public(self):
# force_authenticate(self.request, user=self.anonymous_user)
# response = self.view(self.request)
# self.assertEquals(response.status_code, 200)
#
# def test_is_visible_to_authenticated_user(self):
# force_authenticate(self.request, user=self.user)
# response = self.view(self.request)
# self.assertEquals(response.status_code, 200)
#
# def test_response_is_paginated(self):
# response = self.response
# self.assertIn('count', response.data)
# self.assertIn('results', response.data)
#
# def test_response_contains_expected_fields(self):
# force_authenticate(self.request, user=self.user)
# response = self.view(self.request)
# data = response.data.get('results')[0]
#
#         self.assertEquals(len(data), 12, "Unexpected # of arguments in API endpoint")
# self.assertIn('id', data)
# self.assertIn('url', data)
# self.assertIn('uuid', data)
# self.assertIn('name', data)
# self.assertIn('description', data)
# self.assertIn('is_public', data)
# self.assertIn('icon', data)
# self.assertIn('tags', data)
# self.assertIn('created_by', data)
# self.assertIn('start_date', data)
# self.assertIn('end_date', data)
#
#
# class GetDetailTests(APITestCase):
#
# def setUp(self):
# self.view = ViewSet.as_view({'get': 'retrieve'})
# self.anonymous_user = AnonymousUserFactory()
# self.user = UserFactory.create()
#
# self.image = ImageFactory.create(created_by=self.user)
#
# factory = APIRequestFactory()
# url = reverse('api:v2:application-detail', args=(self.user.id,))
# self.request = factory.get(url)
#
# @skip("Broken as of 30b3e784a0fdf82db51c0f0a08dd3b8c3a8d4aec")
# def test_is_public(self):
# force_authenticate(self.request, user=self.anonymous_user)
# response = self.view(self.request, pk=self.image.id)
# self.assertEquals(response.status_code, 200)
#
# def test_is_visible_to_authenticated_user(self):
# force_authenticate(self.request, user=self.user)
# response = self.view(self.request, pk=self.image.id)
# self.assertEquals(response.status_code, 200)
#
# def test_response_contains_expected_fields(self):
# force_authenticate(self.request, user=self.user)
# response = self.view(self.request, pk=self.image.id)
# data = response.data
#
#         self.assertEquals(len(data), 12, "Unexpected # of arguments in API endpoint")
# self.assertIn('id', data)
# self.assertIn('url', data)
# self.assertIn('uuid', data)
# self.assertIn('name', data)
# self.assertIn('description', data)
# self.assertIn('is_public', data)
# self.assertIn('icon', data)
# self.assertIn('tags', data)
# self.assertIn('created_by', data)
# self.assertIn('start_date', data)
# self.assertIn('end_date', data)
class CreateTests(APITestCase):
def test_endpoint_does_not_exist(self):
self.assertTrue('post' not in ViewSet.http_method_names)
class UpdateTests(APITestCase):
def test_endpoint_does_exist(self):
self.assertTrue('put' in ViewSet.http_method_names)
class DeleteTests(APITestCase):
def test_endpoint_does_not_exist(self):
self.assertTrue('delete' not in ViewSet.http_method_names)
|
py | b403efb24ab255578e89d2901abc0ff903f7bcab | import torch
import torch.nn.functional as F
from cogdl.layers import GCNLayer
from cogdl.trainers.gae_trainer import GAETrainer
from .. import BaseModel, register_model
from .gcn import TKipfGCN
@register_model("gae")
class GAE(TKipfGCN):
@classmethod
def build_model_from_args(cls, args):
return cls(args.num_features, args.hidden_size, args.num_layers, args.dropout)
def __init__(self, in_feats, hidden_size, num_layers, dropout):
super(GAE, self).__init__(in_feats, hidden_size, 1, num_layers, dropout)
def make_loss(self, data, adj):
embeddings = self.embed(data)
return (
F.binary_cross_entropy(F.softmax(torch.mm(embeddings, embeddings.t())), adj, reduction="sum")
/ data.x.shape[0]
)
def get_features(self, data):
return self.embed(data).detach()
@staticmethod
def get_trainer(args=None):
return GAETrainer
@register_model("vgae")
class VGAE(BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument("--num-features", type=int)
parser.add_argument("--hidden-size", type=int, default=64)
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(args.num_features, args.hidden_size)
def __init__(self, num_features, hidden_size):
super(VGAE, self).__init__()
self.num_features = num_features
self.hidden_size = hidden_size
self.conv1 = GCNLayer(self.num_features, self.hidden_size)
self.conv2_mean = GCNLayer(self.hidden_size, self.hidden_size)
self.conv2_var = GCNLayer(self.hidden_size, self.hidden_size)
def reparameterize(self, mean, log_var):
sigma = torch.exp(log_var)
z = mean + torch.randn_like(log_var) * sigma
return z
def encode(self, graph):
graph.add_remaining_self_loops()
graph.sym_norm()
h = graph.x
h = self.conv1(graph, h)
h = F.relu(h)
mean = self.conv2_mean(graph, h)
log_var = self.conv2_var(graph, h)
return mean, log_var
def decode(self, x):
return torch.sigmoid(torch.matmul(x, x.t()))
def forward(self, graph):
mean, log_var = self.encode(graph)
return self.reparameterize(mean, log_var)
def get_features(self, graph):
return self.forward(graph).detach()
def make_loss(self, data, adj):
mean, log_var = self.encode(data)
z = self.reparameterize(mean, log_var)
mat = self.decode(z)
recon_loss = F.binary_cross_entropy(mat, adj, reduction="sum")
var = torch.exp(log_var)
kl_loss = 0.5 * torch.mean(torch.sum(mean * mean + var - log_var - 1, dim=1))
print("recon_loss = %.3f, kl_loss = %.3f" % (recon_loss, kl_loss))
return recon_loss + kl_loss
@staticmethod
def get_trainer(args):
return GAETrainer
|
py | b403f15956d0c465a2d5dfbbefaef434aa08e0f3 | from django.contrib import admin
from .forms import UrlForm, UrlOverrideForm
from .models import Url, UrlOverride
from .urls import urlpatterns
__all__ = ["UrlAdmin", "UrlOverrideInlineAdmin"]
class UrlOverrideInlineAdmin(admin.StackedInline):
model = UrlOverride
form = UrlOverrideForm
extra = 0
@admin.register(Url)
class UrlAdmin(admin.ModelAdmin):
form = UrlForm
inlines = [UrlOverrideInlineAdmin]
list_display = ("internal_name", "get_model_url", "date_modified", )
search_fields = ("manual_url", "internal_name", "relative_path", "mailto", "phone")
list_filter = ("site__name",)
ordering = ("internal_name", "date_modified", )
def get_urls(self):
return urlpatterns + super().get_urls()
def get_model_url(self, obj):
return obj.get_url(obj.site)
get_model_url.short_description = "URL"
|
py | b403f24f7c4419f552b986f036dbe55e126c6989 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
__version__ = '0.10.1'
__version_info__ = (0, 10, 1)
|
py | b403f32befef7b569e6ea5a3373b45963ccb73e9 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import collections
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import List
from typing import Mapping
from typing import Tuple
from typing import TypeVar
from typing import Union
import pandas as pd
import apache_beam as beam
from apache_beam import transforms
from apache_beam.dataframe import expressions
from apache_beam.dataframe import frames # pylint: disable=unused-import
from apache_beam.dataframe import partitionings
from apache_beam.utils import windowed_value
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports
from apache_beam.pvalue import PCollection
T = TypeVar('T')
TARGET_PARTITION_SIZE = 1 << 23 # 8M
MAX_PARTITIONS = 1000
DEFAULT_PARTITIONS = 100
MIN_PARTITIONS = 10
PER_COL_OVERHEAD = 1000
class DataframeTransform(transforms.PTransform):
"""A PTransform for applying function that takes and returns dataframes
to one or more PCollections.
DataframeTransform will accept a PCollection with a schema and batch it
into dataframes if necessary. In this case the proxy can be omitted:
(pcoll | beam.Row(key=..., foo=..., bar=...)
| DataframeTransform(lambda df: df.group_by('key').sum()))
It is also possible to process a PCollection of dataframes directly, in this
case a proxy must be provided. For example, if pcoll is a PCollection of
dataframes, one could write::
pcoll | DataframeTransform(lambda df: df.group_by('key').sum(), proxy=...)
  To pass multiple PCollections, pass a tuple of PCollections which will be
passed to the callable as positional arguments, or a dictionary of
PCollections, in which case they will be passed as keyword arguments.
Args:
yield_elements: (optional, default: "schemas") If set to "pandas", return
PCollections containing the raw Pandas objects (DataFrames or Series),
if set to "schemas", return an element-wise PCollection, where DataFrame
and Series instances are expanded to one element per row. DataFrames are
converted to schema-aware PCollections, where column values can be
accessed by attribute.
include_indexes: (optional, default: False) When yield_elements="schemas",
if include_indexes=True, attempt to include index columns in the output
schema for expanded DataFrames. Raises an error if any of the index
levels are unnamed (name=None), or if any of the names are not unique
among all column and index names.
"""
def __init__(
self, func, proxy=None, yield_elements="schemas", include_indexes=False):
self._func = func
self._proxy = proxy
self._yield_elements = yield_elements
self._include_indexes = include_indexes
def expand(self, input_pcolls):
# Avoid circular import.
from apache_beam.dataframe import convert
# Convert inputs to a flat dict.
input_dict = _flatten(input_pcolls) # type: Dict[Any, PCollection]
proxies = _flatten(self._proxy) if self._proxy is not None else {
tag: None
for tag in input_dict.keys()
}
input_frames = {
k: convert.to_dataframe(pc, proxies[k])
for k, pc in input_dict.items()
} # type: Dict[Any, DeferredFrame]
# Apply the function.
frames_input = _substitute(input_pcolls, input_frames)
if isinstance(frames_input, dict):
result_frames = self._func(**frames_input)
elif isinstance(frames_input, tuple):
result_frames = self._func(*frames_input)
else:
result_frames = self._func(frames_input)
# Compute results as a tuple.
result_frames_dict = _flatten(result_frames)
keys = list(result_frames_dict.keys())
result_frames_tuple = tuple(result_frames_dict[key] for key in keys)
result_pcolls_tuple = convert.to_pcollection(
*result_frames_tuple,
label='Eval',
always_return_tuple=True,
yield_elements=self._yield_elements,
include_indexes=self._include_indexes)
# Convert back to the structure returned by self._func.
result_pcolls_dict = dict(zip(keys, result_pcolls_tuple))
return _substitute(result_frames, result_pcolls_dict)
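# A minimal usage sketch (never invoked here): build a schema'd PCollection from beam.Row
# elements and apply a pandas-style aggregation through DataframeTransform.
def _example_dataframe_transform():
  with beam.Pipeline() as p:
    _ = (
        p
        | beam.Create([
            beam.Row(key='a', value=1),
            beam.Row(key='a', value=2),
            beam.Row(key='b', value=3),
        ])
        | DataframeTransform(lambda df: df.groupby('key').sum()))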
class _DataframeExpressionsTransform(transforms.PTransform):
def __init__(self, outputs):
self._outputs = outputs
def expand(self, inputs):
return self._apply_deferred_ops(inputs, self._outputs)
def _apply_deferred_ops(
self,
inputs, # type: Dict[expressions.Expression, PCollection]
outputs, # type: Dict[Any, expressions.Expression]
): # -> Dict[Any, PCollection]
"""Construct a Beam graph that evaluates a set of expressions on a set of
input PCollections.
:param inputs: A mapping of placeholder expressions to PCollections.
:param outputs: A mapping of keys to expressions defined in terms of the
placeholders of inputs.
Returns a dictionary whose keys are those of outputs, and whose values are
PCollections corresponding to the values of outputs evaluated at the
values of inputs.
Logically, `_apply_deferred_ops({x: a, y: b}, {f: F(x, y), g: G(x, y)})`
returns `{f: F(a, b), g: G(a, b)}`.
"""
class ComputeStage(beam.PTransform):
"""A helper transform that computes a single stage of operations.
"""
def __init__(self, stage):
self.stage = stage
def default_label(self):
return '%s:%s' % (self.stage.ops, id(self))
def expand(self, pcolls):
scalar_inputs = [expr for expr in self.stage.inputs if is_scalar(expr)]
tabular_inputs = [
expr for expr in self.stage.inputs if not is_scalar(expr)
]
if len(tabular_inputs) == 0:
          partitioned_pcoll = next(iter(pcolls.values())).pipeline | beam.Create([{}])
elif self.stage.partitioning != partitionings.Nothing():
# Partitioning required for these operations.
# Compute the number of partitions to use for the inputs based on
# the estimated size of the inputs.
if self.stage.partitioning == partitionings.Singleton():
# Always a single partition, don't waste time computing sizes.
num_partitions = 1
else:
# Estimate the sizes from the outputs of a *previous* stage such
# that using these estimates will not cause a fusion break.
input_sizes = [
estimate_size(input, same_stage_ok=False)
for input in tabular_inputs
]
if None in input_sizes:
# We were unable to (cheaply) compute the size of one or more
# inputs.
num_partitions = DEFAULT_PARTITIONS
else:
num_partitions = beam.pvalue.AsSingleton(
input_sizes
| 'FlattenSizes' >> beam.Flatten()
| 'SumSizes' >> beam.CombineGlobally(sum)
| 'NumPartitions' >> beam.Map(
lambda size: max(
MIN_PARTITIONS,
min(MAX_PARTITIONS, size // TARGET_PARTITION_SIZE))))
partition_fn = self.stage.partitioning.partition_fn
class Partition(beam.PTransform):
def expand(self, pcoll):
return (
pcoll
# Attempt to create batches of reasonable size.
| beam.ParDo(_PreBatch())
# Actually partition.
| beam.FlatMap(partition_fn, num_partitions)
# Don't bother shuffling empty partitions.
| beam.Filter(lambda k_df: len(k_df[1])))
# Arrange such that partitioned_pcoll is properly partitioned.
main_pcolls = {
expr._id: pcolls[expr._id] | 'Partition_%s_%s' %
(self.stage.partitioning, expr._id) >> Partition()
for expr in tabular_inputs
} | beam.CoGroupByKey()
partitioned_pcoll = main_pcolls | beam.ParDo(_ReBatch())
else:
# Already partitioned, or no partitioning needed.
assert len(tabular_inputs) == 1
tag = tabular_inputs[0]._id
partitioned_pcoll = pcolls[tag] | beam.Map(lambda df: {tag: df})
side_pcolls = {
expr._id: beam.pvalue.AsSingleton(pcolls[expr._id])
for expr in scalar_inputs
}
# Actually evaluate the expressions.
def evaluate(partition, stage=self.stage, **side_inputs):
def lookup(expr):
# Use proxy if there's no data in this partition
return expr.proxy(
).iloc[:0] if partition[expr._id] is None else partition[expr._id]
session = expressions.Session(
dict([(expr, lookup(expr)) for expr in tabular_inputs] +
[(expr, side_inputs[expr._id]) for expr in scalar_inputs]))
for expr in stage.outputs:
yield beam.pvalue.TaggedOutput(expr._id, expr.evaluate_at(session))
return partitioned_pcoll | beam.FlatMap(evaluate, **
side_pcolls).with_outputs()
class Stage(object):
"""Used to build up a set of operations that can be fused together.
Note that these Dataframe "stages" contain a CoGBK and hence are often
split across multiple "executable" stages.
"""
def __init__(self, inputs, partitioning):
self.inputs = set(inputs)
if len(self.inputs) > 1 and partitioning == partitionings.Nothing():
# We have to shuffle to co-locate, might as well partition.
self.partitioning = partitionings.Index()
else:
self.partitioning = partitioning
self.ops = []
self.outputs = set()
def __repr__(self, indent=0):
if indent:
sep = '\n' + ' ' * indent
else:
sep = ''
return (
"Stage[%sinputs=%s, %spartitioning=%s, %sops=%s, %soutputs=%s]" % (
sep,
self.inputs,
sep,
self.partitioning,
sep,
self.ops,
sep,
self.outputs))
# First define some helper functions.
def output_is_partitioned_by(expr, stage, partitioning):
if partitioning == partitionings.Nothing():
# Always satisfied.
return True
elif stage.partitioning == partitionings.Singleton():
# Within a stage, the singleton partitioning is trivially preserved.
return True
elif expr in stage.inputs:
# Inputs are all partitioned by stage.partitioning.
return stage.partitioning.is_subpartitioning_of(partitioning)
elif expr.preserves_partition_by().is_subpartitioning_of(partitioning):
# Here expr preserves at least the requested partitioning; its outputs
# will also have this partitioning iff its inputs do.
if expr.requires_partition_by().is_subpartitioning_of(partitioning):
# If expr requires at least this partitioning, we will arrange such
# that its inputs satisfy this.
return True
else:
# Otherwise, recursively check all the inputs.
return all(
output_is_partitioned_by(arg, stage, partitioning)
for arg in expr.args())
else:
return False
def common_stages(stage_lists):
# Set intersection, with a preference for earlier items in the list.
if stage_lists:
for stage in stage_lists[0]:
if all(stage in other for other in stage_lists[1:]):
yield stage
@memoize
def is_scalar(expr):
return not isinstance(expr.proxy(), pd.core.generic.NDFrame)
@memoize
def expr_to_stages(expr):
assert expr not in inputs
# First attempt to compute this expression as part of an existing stage,
# if possible.
#
# If expr does not require partitioning, just grab any stage, else grab
# the first stage where all of expr's inputs are partitioned as required.
# In either case, use the first such stage because earlier stages are
# closer to the inputs (have fewer intermediate stages).
required_partitioning = expr.requires_partition_by()
for stage in common_stages([expr_to_stages(arg) for arg in expr.args()
if arg not in inputs]):
if all(output_is_partitioned_by(arg, stage, required_partitioning)
for arg in expr.args() if not is_scalar(arg)):
break
else:
# Otherwise, compute this expression as part of a new stage.
stage = Stage(expr.args(), required_partitioning)
for arg in expr.args():
if arg not in inputs:
# For each non-input argument, declare that it is also available in
# this new stage.
expr_to_stages(arg).append(stage)
# It also must be declared as an output of the producing stage.
expr_to_stage(arg).outputs.add(arg)
stage.ops.append(expr)
# Ensure that any inputs for the overall transform are added
# in downstream stages.
for arg in expr.args():
if arg in inputs:
stage.inputs.add(arg)
# This is a list as given expression may be available in many stages.
return [stage]
def expr_to_stage(expr):
# Any will do; the first requires the fewest intermediate stages.
return expr_to_stages(expr)[0]
# Ensure each output is computed.
for expr in outputs.values():
if expr not in inputs:
expr_to_stage(expr).outputs.add(expr)
@memoize
def stage_to_result(stage):
return {expr._id: expr_to_pcoll(expr)
for expr in stage.inputs} | ComputeStage(stage)
@memoize
def expr_to_pcoll(expr):
if expr in inputs:
return inputs[expr]
else:
return stage_to_result(expr_to_stage(expr))[expr._id]
@memoize
def estimate_size(expr, same_stage_ok):
# Returns a pcollection of ints whose sum is the estimated size of the
# given expression.
pipeline = next(iter(inputs.values())).pipeline
label = 'Size[%s, %s]' % (expr._id, same_stage_ok)
if is_scalar(expr):
return pipeline | label >> beam.Create([0])
elif same_stage_ok:
return expr_to_pcoll(expr) | label >> beam.Map(_total_memory_usage)
elif expr in inputs:
return None
else:
# This is the stage to avoid.
expr_stage = expr_to_stage(expr)
# If the stage doesn't start with a shuffle, it's not safe to fuse
# the computation into its parent either.
has_shuffle = expr_stage.partitioning != partitionings.Nothing()
# We assume the size of an expression is the sum of the size of its
# inputs, which may be off by quite a bit, but the goal is to get
# within an order of magnitude or two.
arg_sizes = []
for arg in expr.args():
if is_scalar(arg):
continue
elif arg in inputs:
return None
arg_size = estimate_size(
arg,
same_stage_ok=has_shuffle and expr_to_stage(arg) != expr_stage)
if arg_size is None:
return None
arg_sizes.append(arg_size)
return arg_sizes | label >> beam.Flatten(pipeline=pipeline)
# Now we can compute and return the result.
return {k: expr_to_pcoll(expr) for k, expr in outputs.items()}
def _total_memory_usage(frame):
assert isinstance(frame, (pd.core.generic.NDFrame, pd.Index))
try:
size = frame.memory_usage()
if not isinstance(size, int):
size = size.sum() + PER_COL_OVERHEAD * len(size)
else:
size += PER_COL_OVERHEAD
return size
except AttributeError:
# Don't know, assume it's really big.
    return float('inf')
class _PreBatch(beam.DoFn):
def __init__(self, target_size=TARGET_PARTITION_SIZE):
self._target_size = target_size
def start_bundle(self):
self._parts = collections.defaultdict(list)
self._running_size = 0
def process(
self,
part,
window=beam.DoFn.WindowParam,
timestamp=beam.DoFn.TimestampParam):
part_size = _total_memory_usage(part)
if part_size >= self._target_size:
yield part
else:
self._running_size += part_size
self._parts[window, timestamp].append(part)
if self._running_size >= self._target_size:
yield from self.finish_bundle()
def finish_bundle(self):
for (window, timestamp), parts in self._parts.items():
yield windowed_value.WindowedValue(
pd.concat(parts), timestamp, (window, ))
self.start_bundle()
class _ReBatch(beam.DoFn):
"""Groups all the parts from various workers into the same dataframe.
Also groups across partitions, up to a given data size, to recover some
efficiency in the face of over-partitioning.
"""
def __init__(self, target_size=TARGET_PARTITION_SIZE):
self._target_size = target_size
def start_bundle(self):
self._parts = collections.defaultdict(lambda: collections.defaultdict(list))
self._running_size = 0
def process(
self,
element,
window=beam.DoFn.WindowParam,
timestamp=beam.DoFn.TimestampParam):
_, tagged_parts = element
for tag, parts in tagged_parts.items():
for part in parts:
self._running_size += _total_memory_usage(part)
self._parts[window, timestamp][tag].extend(parts)
if self._running_size >= self._target_size:
yield from self.finish_bundle()
def finish_bundle(self):
for (window, timestamp), tagged_parts in self._parts.items():
yield windowed_value.WindowedValue( # yapf break
{
tag: pd.concat(parts) if parts else None
for (tag, parts) in tagged_parts.items()
},
timestamp, (window, ))
self.start_bundle()
def memoize(f):
cache = {}
def wrapper(*args, **kwargs):
key = args, tuple(sorted(kwargs.items()))
if key not in cache:
cache[key] = f(*args, **kwargs)
return cache[key]
return wrapper
def _dict_union(dicts):
result = {}
for d in dicts:
result.update(d)
return result
def _flatten(
valueish, # type: Union[T, List[T], Tuple[T], Dict[Any, T]]
root=(), # type: Tuple[Any, ...]
):
# type: (...) -> Mapping[Tuple[Any, ...], T]
"""Given a nested structure of dicts, tuples, and lists, return a flat
dictionary where the values are the leafs and the keys are the "paths" to
these leaves.
  For example `{a: x, b: (y, z)}` becomes `{(a,): x, (b, 0): y, (b, 1): z}`.
"""
if isinstance(valueish, dict):
return _dict_union(_flatten(v, root + (k, )) for k, v in valueish.items())
elif isinstance(valueish, (tuple, list)):
return _dict_union(
_flatten(v, root + (ix, )) for ix, v in enumerate(valueish))
else:
return {root: valueish}
def _substitute(valueish, replacements, root=()):
"""Substitutes the values in valueish with those in replacements where the
keys are as in _flatten.
For example,
```
_substitute(
{a: x, b: (y, z)},
{(a,): X, (b, 0): Y, (b, 1): Z})
```
returns `{a: X, b: (Y, Z)}`.
"""
if isinstance(valueish, dict):
return type(valueish)({
k: _substitute(v, replacements, root + (k, ))
for (k, v) in valueish.items()
})
elif isinstance(valueish, (tuple, list)):
return type(valueish)((
_substitute(v, replacements, root + (ix, ))
for (ix, v) in enumerate(valueish)))
else:
return replacements[root]
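# A small sketch matching the docstring examples above: _flatten and _substitute are
# inverses over the same nesting structure.
def _example_flatten_roundtrip():
  nested = {'a': 'x', 'b': ('y', 'z')}
  flat = _flatten(nested)  # {('a',): 'x', ('b', 0): 'y', ('b', 1): 'z'}
  return _substitute(nested, {k: v.upper() for k, v in flat.items()})  # {'a': 'X', 'b': ('Y', 'Z')}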
|
py | b403f38945e4a1b6cf26570d81cea34320ffaeb0 | import fence.resources.admin as adm
from fence.models import Project, Bucket, ProjectToBucket, CloudProvider, StorageAccess
def test_get_project(db_session, awg_users):
info = adm.get_project_info(db_session, "test_project_1")
assert info["name"] == "test_project_1"
def test_get_all_projects(db_session, awg_users):
projects = adm.get_all_projects(db_session)["projects"]
info = {
project["name"]: {
"auth_id": project["auth_id"],
"name": project["name"],
"associated buckets": project["associated buckets"],
"description": project["description"],
}
for project in projects
}
expected = {
"test_project_1": {
"auth_id": u"phs_project_1",
"associated buckets": [],
"description": None,
"name": u"test_project_1",
},
"test_project_2": {
"auth_id": u"phs_project_2",
"associated buckets": [],
"description": None,
"name": u"test_project_2",
},
}
assert info == expected
def test_create_project(db_session, awg_users, providers):
project = (
db_session.query(Project).filter_by(name="test_project_for_creation").first()
)
assert project == None
adm.create_project(
db_session,
"test_project_for_creation",
"test_project_for_creation_auth_id",
["test-cleversafe"],
)
project = (
db_session.query(Project).filter_by(name="test_project_for_creation").first()
)
assert project.name == "test_project_for_creation"
assert project.auth_id == "test_project_for_creation_auth_id"
provider = db_session.query(CloudProvider).filter_by(name="test-cleversafe").first()
access = (
db_session.query(StorageAccess)
.filter_by(project_id=project.id, provider_id=provider.id)
.first()
)
assert access != None
def test_delete_project(db_session, awg_users):
project = db_session.query(Project).filter_by(name="test_project_1").first()
assert project.name == "test_project_1"
adm.delete_project(db_session, "test_project_1")
project = db_session.query(Project).filter_by(name="test_project_1").first()
assert project == None
def test_create_bucket_in_project(db_session, providers):
adm.create_bucket_on_project(
db_session, "project_with_bucket", "new_bucket", "test-cleversafe"
)
project = db_session.query(Project).filter_by(name="project_with_bucket").first()
bucket = db_session.query(Bucket).filter_by(name="new_bucket").first()
provider = db_session.query(CloudProvider).filter_by(name="test-cleversafe").first()
bucket_in_project = (
db_session.query(ProjectToBucket)
.filter_by(bucket_id=bucket.id, project_id=project.id)
.first()
)
assert bucket_in_project != None
assert bucket.provider_id == provider.id
def test_delete_bucket_from_project(db_session, providers):
bucket = db_session.query(Bucket).filter_by(name="first_bucket").first()
assert bucket != None
project_to_bucket = (
db_session.query(ProjectToBucket).filter_by(bucket_id=bucket.id).first()
)
assert project_to_bucket != None
adm.delete_bucket_on_project(db_session, "project_with_bucket", "first_bucket")
removed_bucket = db_session.query(Bucket).filter_by(name="first_bucket").first()
assert removed_bucket == None
project_to_bucket = (
db_session.query(ProjectToBucket).filter_by(id=bucket.id).first()
)
assert project_to_bucket == None
|
py | b403f4b284c33cdb58020797656e46adb0c9ebe0 | from __future__ import print_function
import argparse
import io
import json
import sys
def check_json(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='JSON filenames to check.')
args = parser.parse_args(argv)
retval = 0
for filename in args.filenames:
try:
json.load(io.open(filename, encoding='UTF-8'))
except (ValueError, UnicodeDecodeError) as exc:
print('{}: Failed to json decode ({})'.format(filename, exc))
retval = 1
return retval
if __name__ == '__main__':
sys.exit(check_json())
|
py | b403f4c9c143f86e29cb592c9d645975534e1097 | """ multiple lines of comments
print('apple\t$1.99 per lb')
# Single comment
multiple lines of comments
hello
# Example 1: Add two numbers
number1 = 20
number2 = 30
addition = number1 + number2
print('The sum is: ', addition)
# Example 2: Using input() function to collect a message from the keyboard
msa = input('Enter a message: ')
print(msa)
# Example 3: Casting - convert from one data type into another
number3 = input('Enter a number: ')
number3 = int(number3)
doubleNum3 = number3*2
print('The number is double to: ', doubleNum3)
# Example 4: Ask the user to enter the height and width of a right triangle as a float value. Calculate the hyp using python math operators and display the result.
# hyp = (x^2+y^2)^0.5
h = float(input('Enter the height: '))
w = float(input('Enter the width: '))
hyp = (h**2+w**2)**0.5 # double asterisks are for raising to an exponent
hyp = int(hyp)
print('A triangle with sides: ', h , ' and ', w, 'has a hypotenuse of ', hyp)
# Example 5: Concatenate strings using %s
lastName = "Roy"
firstName = "Puja"
PhoneNumber = '347-712-7519'
num = 2
message = 'The first name is: %s \n The last name is: %s \n The number is: %s'
print(message %(firstName, lastName,num))
print('The first name is: ' + firstName + ' ' +str(num))
# Example 6: split() method for strings
a = "Hello World! Welcome! to! Python"
print(a)
print(a.split("!", 2))
# Example 7: find() method
b = "Hello World"
index = b.find('o')
print('The index for letter o is: ', index)
index1 = b.find('o', 5)
print('The index for letter o after index 5 is: ', index1)
# Example 8: in or not in operator
msg = "Hello World"
answer = 'e' not in msg
print('Is character e in the string?', answer)
# Example 9: Concatenate strings using + operator
a = "apple"
b = " , "
c = "grapes"
fruits = a + b + c
print(fruits)
# Example 10: format() method strings
age = 36
txt = "My name is John, and I am {}" # In Python, u have to put curly brackets for printing a variable's output
nameAge = txt.format(age)
print(nameAge)
"""
print('ACTIVITY 1')
name = input('Enter a name: ')
number = input('Enter number 1: ')
number = int(number)
number2 = input('Enter number 2: ')
number2 = int(number2)
print('Welcome to Python programming ', name)
sum = number + number2
difference = number - number2
product = number * number2
quotient = number / number2
remainder = number % number2
print('The sum of', number, 'and' , number2 , 'is' , sum)
print('The difference of', number, 'and', number2 , 'is' , difference)
print('The product of', number, 'and' , number2 , 'is' , product)
print('The quotient of', number, 'and' , number2 , 'is' , quotient)
print('The remainder of', number, 'and' , number2 , 'is' , remainder)
|
py | b403f524caa49e606dd0cc303f8bd33f504d7ca2 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class TriggerOperations(object):
"""TriggerOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.artifacts.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get_triggers_by_workspace(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.TriggerListResponse"]
"""Lists triggers.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TriggerListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.synapse.artifacts.models.TriggerListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.TriggerListResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.get_triggers_by_workspace.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TriggerListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.CloudError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_triggers_by_workspace.metadata = {'url': '/triggers'} # type: ignore
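    # Usage sketch (added; illustrative, not generated code): this operations
    # group is reached through a workspace client rather than instantiated
    # directly; the `client` name below is an assumption.
    #
    #     for trigger in client.trigger.get_triggers_by_workspace():
    #         print(trigger.name)
    #
    # The returned ItemPaged follows next_link automatically, so the loop
    # walks every page of results.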
def create_or_update_trigger(
self,
trigger_name, # type: str
properties, # type: "models.Trigger"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.TriggerResource"
"""Creates or updates a trigger.
:param trigger_name: The trigger name.
:type trigger_name: str
:param properties: Properties of the trigger.
:type properties: ~azure.synapse.artifacts.models.Trigger
:param if_match: ETag of the trigger entity. Should only be specified for update, for which it
should match existing entity or can be * for unconditional update.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TriggerResource, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.TriggerResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.TriggerResource"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_trigger = models.TriggerResource(properties=properties)
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_or_update_trigger.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_trigger, 'TriggerResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('TriggerResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_trigger.metadata = {'url': '/triggers/{triggerName}'} # type: ignore
def get_trigger(
self,
trigger_name, # type: str
if_none_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Optional["models.TriggerResource"]
"""Gets a trigger.
:param trigger_name: The trigger name.
:type trigger_name: str
:param if_none_match: ETag of the trigger entity. Should only be specified for get. If the ETag
matches the existing entity tag, or if * was provided, then no content will be returned.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TriggerResource, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.TriggerResource or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.TriggerResource"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
# Construct URL
url = self.get_trigger.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 304]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TriggerResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_trigger.metadata = {'url': '/triggers/{triggerName}'} # type: ignore
def delete_trigger(
self,
trigger_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes a trigger.
:param trigger_name: The trigger name.
:type trigger_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
# Construct URL
url = self.delete_trigger.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete_trigger.metadata = {'url': '/triggers/{triggerName}'} # type: ignore
def _subscribe_trigger_to_events_initial(
self,
trigger_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.TriggerSubscriptionOperationStatus"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.TriggerSubscriptionOperationStatus"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
# Construct URL
url = self._subscribe_trigger_to_events_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TriggerSubscriptionOperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_subscribe_trigger_to_events_initial.metadata = {'url': '/triggers/{triggerName}/subscribeToEvents'} # type: ignore
def begin_subscribe_trigger_to_events(
self,
trigger_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.TriggerSubscriptionOperationStatus"]
"""Subscribe event trigger to events.
:param trigger_name: The trigger name.
:type trigger_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either TriggerSubscriptionOperationStatus or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.synapse.artifacts.models.TriggerSubscriptionOperationStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.TriggerSubscriptionOperationStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._subscribe_trigger_to_events_initial(
trigger_name=trigger_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TriggerSubscriptionOperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = LROBasePolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_subscribe_trigger_to_events.metadata = {'url': '/triggers/{triggerName}/subscribeToEvents'} # type: ignore
def get_event_subscription_status(
self,
trigger_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.TriggerSubscriptionOperationStatus"
"""Get a trigger's event subscription status.
:param trigger_name: The trigger name.
:type trigger_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TriggerSubscriptionOperationStatus, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.TriggerSubscriptionOperationStatus
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.TriggerSubscriptionOperationStatus"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
# Construct URL
url = self.get_event_subscription_status.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('TriggerSubscriptionOperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_event_subscription_status.metadata = {'url': '/triggers/{triggerName}/getEventSubscriptionStatus'} # type: ignore
def _unsubscribe_trigger_from_events_initial(
self,
trigger_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.TriggerSubscriptionOperationStatus"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.TriggerSubscriptionOperationStatus"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
# Construct URL
url = self._unsubscribe_trigger_from_events_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TriggerSubscriptionOperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_unsubscribe_trigger_from_events_initial.metadata = {'url': '/triggers/{triggerName}/unsubscribeFromEvents'} # type: ignore
def begin_unsubscribe_trigger_from_events(
self,
trigger_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.TriggerSubscriptionOperationStatus"]
"""Unsubscribe event trigger from events.
:param trigger_name: The trigger name.
:type trigger_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either TriggerSubscriptionOperationStatus or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.synapse.artifacts.models.TriggerSubscriptionOperationStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.TriggerSubscriptionOperationStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._unsubscribe_trigger_from_events_initial(
trigger_name=trigger_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TriggerSubscriptionOperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = LROBasePolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_unsubscribe_trigger_from_events.metadata = {'url': '/triggers/{triggerName}/unsubscribeFromEvents'} # type: ignore
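    # Usage sketch for event-trigger subscription (added; illustrative only;
    # the trigger name and the `status` attribute are assumptions):
    #
    #     client.trigger.begin_subscribe_trigger_to_events('MyEventTrigger').result()
    #     status = client.trigger.get_event_subscription_status('MyEventTrigger')
    #     print(status.status)
    #     client.trigger.begin_unsubscribe_trigger_from_events('MyEventTrigger').result()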
def _start_trigger_initial(
self,
trigger_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
# Construct URL
url = self._start_trigger_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
_start_trigger_initial.metadata = {'url': '/triggers/{triggerName}/start'} # type: ignore
def begin_start_trigger(
self,
trigger_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Starts a trigger.
:param trigger_name: The trigger name.
:type trigger_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_trigger_initial(
trigger_name=trigger_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = LROBasePolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_trigger.metadata = {'url': '/triggers/{triggerName}/start'} # type: ignore
def _stop_trigger_initial(
self,
trigger_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
# Construct URL
url = self._stop_trigger_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
_stop_trigger_initial.metadata = {'url': '/triggers/{triggerName}/stop'} # type: ignore
def begin_stop_trigger(
self,
trigger_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stops a trigger.
:param trigger_name: The trigger name.
:type trigger_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_trigger_initial(
trigger_name=trigger_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = LROBasePolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop_trigger.metadata = {'url': '/triggers/{triggerName}/stop'} # type: ignore
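    # Usage sketch for the long-running trigger operations above (added;
    # illustrative only; the trigger name and `properties` object are assumptions):
    #
    #     client.trigger.create_or_update_trigger('MyTrigger', properties)
    #     client.trigger.begin_start_trigger('MyTrigger').result()
    #     client.trigger.begin_stop_trigger('MyTrigger').result()
    #     client.trigger.delete_trigger('MyTrigger')
    #
    # Each begin_* call returns an LROPoller; .result() blocks until the
    # service reports completion or raises HttpResponseError on failure.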
|
py | b403f5579efba0385f2f887dbadc49f66892661a | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))))
from veriloggen import *
import veriloggen.types.axi as axi
import veriloggen.types.ipxact as ipxact
def mkMain():
m = Module('main')
clk = m.Input('CLK')
rst = m.Input('RST')
led = m.Output('LED', 32)
myaxi = axi.AxiMaster(m, 'myaxi', clk, rst)
myaxi.disable_write()
fsm = FSM(m, 'fsm', clk, rst)
sum = m.Reg('sum', 32, initval=0)
led.assign(sum)
# read address (1)
araddr = 1024
arlen = 64
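    # Note (added): assuming the AXI memory model initializes each 32-bit word
    # with its own word index, a burst of arlen words starting at araddr reads
    # the arithmetic series araddr//4, araddr//4 + 1, ..., araddr//4 + arlen - 1,
    # whose sum is (first + last) * count // 2, i.e. the formula used below.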
expected_sum = (araddr // 4 + araddr // 4 + arlen - 1) * arlen // 2
ack, counter = myaxi.read_request_counter(araddr, arlen, cond=fsm)
fsm.If(ack).goto_next()
# read data (1)
data, valid, last = myaxi.read_data(counter, cond=fsm)
fsm.If(valid)(
sum(sum + data)
)
fsm.Then().If(last).goto_next()
# read address (2)
araddr = 1024 + 1024
arlen = 64
expected_sum += (araddr // 4 + araddr // 4 + arlen - 1) * arlen // 2
ack, counter = myaxi.read_request_counter(araddr, arlen, cond=fsm)
fsm.If(ack).goto_next()
# read data (2)
data, valid, last = myaxi.read_data(counter, cond=fsm)
fsm.If(valid)(
sum(sum + data)
)
fsm.Then().If(last).goto_next()
fsm(
Systask('display', 'sum=%d expected_sum=%d', sum, expected_sum),
If(NotEql(sum, expected_sum))(Display('# verify: FAILED')).Else(Display('# verify: PASSED'))
)
fsm.goto_next()
fsm.make_always()
return m
def mkTest(memimg_name=None):
m = Module('test')
# target instance
main = mkMain()
    # copy params and ports
params = m.copy_params(main)
ports = m.copy_sim_ports(main)
clk = ports['CLK']
rst = ports['RST']
    memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name)
memory.connect(ports, 'myaxi')
uut = m.Instance(main, 'uut',
params=m.connect_params(main),
ports=m.connect_ports(main))
# simulation.setup_waveform(m, uut, m.get_vars())
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000 * 100),
Systask('finish'),
)
return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
if outputfile is None:
outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
memimg_name = 'memimg_' + outputfile
test = mkTest(memimg_name=memimg_name)
if filename is not None:
test.to_verilog(filename)
sim = simulation.Simulator(test, sim=simtype)
rslt = sim.run(outputfile=outputfile, sim_time=1000*100*2)
lines = rslt.splitlines()
if simtype == 'iverilog' or (simtype == 'verilator' and lines[-1].startswith('-')):
rslt = '\n'.join(lines[:-1])
return rslt
if __name__ == '__main__':
rslt = run(filename='tmp.v')
print(rslt)
# IP-XACT
m = mkMain()
ipxact.to_ipxact(m,
clk_ports=[('CLK', ('RST',))],
rst_ports=[('RST', 'ACTIVE_HIGH')])
|
py | b403f8e1f833404fca39ef0917f79e63036e2966 | from django.contrib import auth, messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.tokens import default_token_generator
from django.shortcuts import render, redirect
from django.views import View
from django.views.generic import CreateView, DetailView
from accounts.forms import RegistrationForm, UserForm, UserProfileForm
from accounts.mixins import TokenMixin
from accounts.models import Account, UserProfile
from accounts.utils import _profile, _redirect_to_next_page
from carts.utils import _move_cart_when_authenticate
from orders.models import Order
from shop.emails import Emails
class RegisterView(CreateView):
    """View for registration on the site."""
form_class = RegistrationForm
model = Account
def get(self, request, *args, **kwargs):
"""Render the register template."""
form = RegistrationForm()
context = {'form': form}
return render(request, 'accounts/register.html', context)
def post(self, request, *args, **kwargs):
"""
        Register a new user and then create a profile
        for them. Try to move their cart items to the new cart.
"""
form = RegistrationForm(request.POST)
if form.is_valid():
# create new user
first_name = form.cleaned_data['first_name']
last_name = form.cleaned_data['last_name']
phone_number = form.cleaned_data['phone_number']
email = form.cleaned_data['email']
password = form.cleaned_data['password']
username = email
user = Account.objects.create_user(first_name=first_name,
last_name=last_name,
email=email,
username=username,
password=password)
user.phone_number = phone_number
user.save()
# Create profile for user
_profile(user)
user = auth.authenticate(request=request, email=email, password=password)
# Login user and move his cart
if user is not None:
_move_cart_when_authenticate(request, user)
auth.login(request, user)
Emails(user=user, pk=user.pk, email=user.email, command='register')
return redirect('category_main')
context = {'form': form}
return render(request, 'accounts/register.html', context)
class LoginView(View):
    """View for logging in to the site."""
def get(self, request, *args, **kwargs):
"""Render the login template."""
return render(request, 'accounts/login.html')
def post(self, request, *args, **kwargs):
"""
        Authentication and authorization code:
        gets the email and password; if authentication
        succeeds, tries to redirect to the 'next' page
        and moves the cart to the authorized user.
"""
email = request.POST['email']
password = request.POST['password']
user = auth.authenticate(request=request, username=email, password=password)
# login user and move his cart
if user is not None:
_move_cart_when_authenticate(request, user)
auth.login(request, user)
            try:
                return _redirect_to_next_page(request)
            except Exception:
                return redirect('category_main')
else:
messages.error(request, 'Неправильно введена почта или пароль')
return redirect('login')
class LogoutView(LoginRequiredMixin, View):
"""Logout view, only for has already logged in users."""
def get(self, request, *args, **kwargs):
auth.logout(request)
messages.success(request, 'Вы успешно вышли из системы')
return redirect('login')
class ConfirmEmailView(View):
    """View which renders the email confirmation page."""
def dispatch(self, request, *args, **kwargs):
self.request_user = request.user
return super().dispatch(request, *args, **kwargs)
    def get(self, request, *args, **kwargs):
        """Render the page both for users who already have an email and for those who don't."""
if self.request_user.email:
Emails(user=self.request_user, pk=self.request_user.pk, email=self.request_user.email, command='confirm')
return render(request, 'accounts/confirm_email.html')
else:
return render(request, 'accounts/confirm_email.html', context={'user': self.request_user})
    def post(self, request, *args, **kwargs):
        """Handle users who registered via a social account and don't have an email."""
email = request.POST['email']
Emails(user=self.request_user, pk=self.request_user.pk, email=email, command='confirm')
return redirect('category_main')
class ForgotPasswordView(View):
    """View for a registered user who forgot their password."""
def get(self, request, *args, **kwargs):
return render(request, 'accounts/forgot_password.html')
def post(self, request, *args, **kwargs):
email = request.POST['email']
if Account.objects.filter(email=email).exists():
user = Account.objects.get(email__exact=email)
Emails(user=user, email=email, pk=user.pk, command='forgot').forgot_password()
messages.success(request, 'Письмо с инструкцией отправлено на вашу почту')
return redirect('login')
else:
messages.error(request, 'Пользователь с такой почтой не зарегистрирован!')
return redirect('forgot_password')
class ResetPasswordView(View):
    """View for resetting the password after the user successfully validates the information from the email."""
def get(self, request, *args, **kwargs):
return render(request, 'accounts/reset_password.html')
def post(self, request, *args, **kwargs):
password = request.POST['password']
confirm_password = request.POST['confirm_password']
        if password == confirm_password:
            uid = request.session.get('uid')
            user = Account.objects.get(pk=uid)
            user.set_password(password)
            user.save()
            messages.success(request, 'Пароль успешно сброшен!')
            return redirect('login')
        else:
            messages.error(request, 'Введенные пароли не совпадают.')
            return redirect('reset_password')
class ChangePasswordView(LoginRequiredMixin, View):
    """View for when a user wants to change their password from the user profile form."""
def post(self, request, *args, **kwargs):
current_password = request.POST['current_password']
new_password = request.POST['new_password']
confirm_password = request.POST['confirm_password']
user = Account.objects.get(pk__exact=request.user.pk)
if new_password == confirm_password:
success = user.check_password(current_password)
if success:
user.set_password(new_password)
user.save()
messages.success(request, 'Ваш пароль успешно обновлён!')
return redirect('login')
else:
messages.error(request, 'Текущий пароль введен не правильно.')
return redirect('dashboard')
else:
messages.error(request, 'Введенные пароли не совпадают.')
return redirect('dashboard')
class ResetPasswordValidateView(TokenMixin, View):
    """View which validates the information from the email and then allows resetting the password."""
def get(self, request, *args, **kwargs):
if self.user is not None and default_token_generator.check_token(self.user, kwargs['token']):
request.session['uid'] = self.uid
messages.success(request, 'Пожалуйста сбросьте Ваш пароль')
return redirect('reset_password')
else:
messages.error(request, 'Ссылка устарела')
return redirect('login')
class ConfirmAccountView(TokenMixin, View):
    """View which validates the information from the email and then confirms the email."""
def get(self, request, *args, **kwargs):
if self.user is not None and default_token_generator.check_token(self.user, kwargs['token']):
self.user.email = kwargs['email']
self.user.confirm_email = True
self.user.save()
messages.success(request, 'Поздравляем, Вы успешно подтвердили свою почту!')
if kwargs['command'] == 'register':
return redirect('category_main')
else:
return redirect('checkout')
else:
messages.error(request, 'Ошибка активации!')
return redirect('register')
class DashboardView(DetailView):
    """View for the user dashboard where users can see their previous orders and change their information."""
model = UserProfile
template_name = 'accounts/dashboard.html'
    def dispatch(self, request, *args, **kwargs):
        """Create a new user profile if the user logged in via a social account."""
self.request_user = request.user
try:
self.user_profile = UserProfile.objects.get(user=self.request_user)
except Exception:
_profile(self.request_user)
self.user_profile = UserProfile.objects.get(user=self.request_user)
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
orders = Order.objects.order_by('-created_at').filter(user_id=self.request_user.id, is_ordered=True)
orders_count = orders.count()
user_form = UserForm(instance=self.request_user)
profile_form = UserProfileForm(instance=self.user_profile)
context = {
'orders': orders,
'orders_count': orders_count,
'user_form': user_form,
'profile_form': profile_form,
'user_profile': self.user_profile,
}
return render(request, 'accounts/dashboard.html', context)
def post(self, request, *args, **kwargs):
user_form = UserForm(request.POST, instance=self.request_user)
profile_form = UserProfileForm(request.POST, request.FILES, instance=self.user_profile)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, 'Ваши данные успешно обновлены!')
        return redirect('dashboard')
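# URL wiring sketch (added; illustrative, not part of this module): the views
# above might be hooked up in an accounts/urls.py roughly as below; the route
# names follow the redirect() targets used in this file, while the exact paths
# and the token/uid patterns for the email views are assumptions.
#
#     from django.urls import path
#     from accounts import views
#
#     urlpatterns = [
#         path('register/', views.RegisterView.as_view(), name='register'),
#         path('login/', views.LoginView.as_view(), name='login'),
#         path('logout/', views.LogoutView.as_view(), name='logout'),
#         path('dashboard/', views.DashboardView.as_view(), name='dashboard'),
#         path('forgot_password/', views.ForgotPasswordView.as_view(), name='forgot_password'),
#         path('reset_password/', views.ResetPasswordView.as_view(), name='reset_password'),
#         path('change_password/', views.ChangePasswordView.as_view(), name='change_password'),
#     ]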
|