<filename>src/framework/hls_tools.py
# ////////////////////////////////////////////////////////////////////////////////////////
# Copyright (c) by
# Company: IDEA LAB, The University of Texas at Dallas
# Author : <NAME>
#
# Original Creation Date : Mar-5, 2020
# Project Name : DNN_Framework
# Tool Versions : Python 3+
#
# Description : Functions to run HLS and logic synthesis, create .tcl files, load design reports,
#               parse pragmas, and create a .tcl file for directives
# Dependencies : Vivado 2018 or newer, subprocess, pandas
# Additional Comments:
#
# ///////////////////////////////////////////////////////////////////////////////////////
import os, shutil
import csv, time
from datetime import datetime
from shutil import copyfile
import sys, glob, json, random, pickle
import numpy as np
import yaml
import subprocess, psutil
from dnn_tools import *
from utils import beep
import pandas as pd
import pprint
import xml.etree.ElementTree as ET
lyr_map2syn = {
'IN':'read_input3D',
'CONV':'conv_3DT1',
'POOL':'ds_3DT1',
'FC':'fc_T1'}
lyr_syn2map = {
'read_input3D':'IN',
'conv_3DT1':'CONV',
'ds_3DT1':'POOL',
'fc_T1':'FC'}
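# Added note: lyr_syn2map mirrors lyr_map2syn entry for entry; assuming the two
# maps stay exact inverses, the reverse map could equivalently be derived as:
#   lyr_syn2map = {syn_name: lyr_name for lyr_name, syn_name in lyr_map2syn.items()}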
def copy_solution_files(cfg, sol, specifier=None):
if specifier is None: temp = ''
else: temp = '_' + str(specifier)
sol_copy_name = os.path.join(cfg.paths.dse_json, 'design' + str(sol) + temp + '.json')
json_file = os.path.join(cfg.paths.design_model, 'hls' + str(sol), cfg.design_setting.solution_name,
'{}_data.json'.format(cfg.design_setting.solution_name))
if os.path.exists(json_file):
copyfile(json_file, sol_copy_name)
else:
print("PYTHON : Copy Solution Files : Solution file is not generated")
def collect_design_notes(cfg, additional_info, save_path=''):
notes = []
notes.append('Time completed : {}'.format(datetime.now().strftime("%d-%b-%Y (%H:%M)")))
notes.append('DSE Path : {}'.format(os.getcwd()))
notes.append('\n')
notes.append('Design : {}'.format(cfg.design_setting.topmodule))
notes.append('FPGA : {}'.format(cfg.FPGA.chip))
notes.append('Clock : {}'.format(cfg.FPGA.clock_freq))
notes.append('Vivado Version : {}'.format(cfg.design_setting.vivado_version))
notes.append('Logic Synthesize : {}'.format(cfg.design_setting.run_vivado_synthesize))
notes.append('\n')
notes.append('Directive_type : {}'.format(cfg.design_setting.syn_directive_type))
notes.append('Dataflow : {}'.format(cfg.design.dataflow))
notes.append('Shared Memory : {}'.format(cfg.design.fc_shared_memory))
notes.append('data_interface : {}'.format(cfg.design.data_interface))
notes.append('Module Interface : {}'.format(cfg.design.module_controller))
notes.append('\n')
for i in additional_info.keys():
notes.append('{} : {}\n'.format(i, additional_info[i]))
if 'dse' in cfg.run_options.mode:
for i in cfg.design_setting.DSE_setting.keys():
notes.append('{:<30s} : {}'.format(i, cfg.design_setting.DSE_setting[i]))
file = os.path.join(save_path, 'design note.txt')
with open(file, 'w') as f:
for line in notes:
f.write("%s\n" % line)
return notes
def extract_layer_info_from_jsons(cfg, json_path):
path = cfg.paths.dse_report
dse_results = utils.load_a_variable(os.path.join(path,'dse_config'))
cfg_results = utils.load_a_variable(os.path.join(path, 'lyr_configs'))
module_labels = ['clock period', 'latency', 'BRAM', 'LUT', 'FF', 'DSP']
sol_labels = ['syn_time','P_slice', 'P_BRAM', 'P_DSP', 'P_static', 'P_total','LUT_PS','FF_PS','DSP_PS','BRAM_PS']
lyr_label = ['w_in','w_out','lyr_in','lyr_out','w_ker','stride']
target_layer_name = 'fc_T1'
target_layer = 3
rpt_data = []
for sol in range(len(dse_results)):
temp = []
for label in sol_labels:
temp.append(dse_results[sol][label])
for label in lyr_label:
temp.append(cfg_results[sol][target_layer][label])
for label in module_labels:
temp.append(dse_results[sol][target_layer_name][label])
rpt_data.append(temp)
header_labels = sol_labels + lyr_label + module_labels
csvfile = os.path.join(path,'dse_cfg_'+target_layer_name+'.csv')
df = pd.DataFrame(rpt_data)
df.to_csv(csvfile, index=False, header=header_labels)
class hls_tools():
def __init__(self, cfg):
self.cfg = cfg
self.utils = utils(cfg)
self.pragmas_dict = {
'unroll': 'set_directive_unroll ',
'pipeline': 'set_directive_pipeline ',
'dataflow': 'set_directive_dataflow ',
'inline': 'set_directive_inline ',
'partition': 'set_directive_array_partition',
'reshape': 'set_directive_array_reshape ',
'interface': 'set_directive_interface ',
'mul': 'set_directive_allocation '
}
def create_syn_tcl_file(self, clk_period):
tcl_lines = []
tcl_lines.append("############################################################")
tcl_lines.append("## This file is generated automatically by python tool for {} Version".format(
self.cfg.design_setting.vivado_version))
tcl_lines.append("############################################################")
tcl_lines.append('puts \"CMD : run_hls_syn.tcl is running!\"')
tcl_lines.append('set sol_name [lindex $argv 2 ]')
tcl_lines.append('open_project hls$sol_name')
tcl_lines.append('set_top {}'.format(self.cfg.design_setting.topmodule))
for file in self.cfg.design.source_files:
tcl_lines.append('add_files {}'.format(file))
for file in self.cfg.design.tb_files:
tcl_lines.append('add_files -tb {}'.format(file))
if self.cfg.design_setting.vivado_version == 2020:
tcl_lines.append('open_solution -reset \"{}\" -flow_target vivado'.format(self.cfg.design_setting.solution_name))
tcl_lines.append('set_part {' + self.cfg.FPGA.part + '}')
else:
tcl_lines.append('open_solution -reset \"{}\"'.format(self.cfg.design_setting.solution_name))
tcl_lines.append('set_part {' + self.cfg.FPGA.part + '} -tool vivado')
tcl_lines.append('create_clock -period {} -name default'.format(clk_period))
tcl_lines.append('set_clock_uncertainty 12.5%')
tcl_lines.append("set json_file \"{}_{}.json\"".format(self.cfg.design_setting.solution_name, clk_period))
if 'dse_pragma' in self.cfg.run_options.mode:
tcl_lines.append('source \"{}_sol_list/solution_$sol_name.tcl\"'.format(self.cfg.design_setting.solution_name))
else:
tcl_lines.append('source \"./hls/{}/directives.tcl\"'.format(self.cfg.design_setting.solution_name))
if self.cfg.design_setting.Sim_setting['run_csim']:
tcl_lines.append('csim_design')
if self.cfg.run_options.mode not in ['sim'] or self.cfg.design_setting.Sim_setting['run_rtl_sim']:
tcl_lines.append('csynth_design')
if self.cfg.design_setting.Sim_setting['run_rtl_sim']:
tcl_lines.append('cosim_design')
if self.cfg.design_setting.run_vivado_synthesize:
tcl_lines.append('export_design -flow syn -rtl verilog -format ip_catalog')
elif self.cfg.design_setting.create_ip:
tcl_lines.append('export_design -format ip_catalog')
tcl_lines.append('quit')
filename = os.path.join(self.cfg.paths.design_model, "run_hls_syn.tcl")
self.utils.save_list_to_file(filename, tcl_lines)
def run_vivado_implementation(self, sol_counter, mode, print_out='silent', clean=False):
if not self.cfg.design_setting.run_vivado_power_analyzer:
PR_results = {'LUT_PS': 'NR', 'FF_PS': 'NR', 'DSP_PS': 'NR', 'BRAM_PS': 'NR', 'Timing_PS': 'NR'}
power = {'P_Clocks': 'NR', 'P_Signals': 'NR', 'P_Slice': 'NR', 'P_Block': 'NR', 'P_DSPs': 'NR', 'P_Static': 'NR', 'P_Total': 'NR'}
return PR_results, power
if self.cfg.run_options.mode in ['dse_pragma', 'dse_pragma_clock', 'dse_clk_pragma_cfg']:
dest_path = os.path.join(self.cfg.paths.design_model, 'hls{}'.format(sol_counter),
self.cfg.design_setting.solution_name)
else:
dest_path = self.cfg.paths.solution
start_time = self.utils.record_time()
impl_file = ['power_analyzer.tcl', 'run_power_analyzer.bat']
for fname in impl_file:
srcfile = os.path.join(self.cfg.paths.src, fname)
destfile = os.path.join(dest_path, 'impl', fname)
shutil.copyfile(srcfile, destfile)
os.chdir(os.path.join(dest_path, 'impl'))
version = self.cfg.design_setting.vivado_version
vivado_cmd = self.cfg.vivado_path[sys.platform][version]['VIVADO']
print("PYTHON : Running Power Analyzer ... ")
if sys.platform == 'win32':
sr = os.system('run_power_analyzer.bat')
elif sys.platform == 'linux':
cmd = '{} -notrace -mode batch -source power_analyzer.tcl >report_power.log || exit $?'.format(vivado_cmd)
sr = os.system(cmd)
else:
print("PYTHON : Wrong operating system selection")
sr = 1
if (sr != 0):
print("PYTHON : run_power_analyzer file not found, or a problem in bash file!")
return 'Er'
[mm, ss] = self.utils.end_and_print_time(start_time)
os.chdir(self.cfg.paths.design_top)
print("PYTHON : Power measured in Vivado within {:3d} Minutes and {:2d} Seconds".format(int(mm), int(ss)))
return self.read_impl_results(dest_path)
def cleanSolution(self):
filesToBeRemoved = [self.cfg.files.synLogFile, self.cfg.files.SolutionFile]
dir_list = [self.cfg.paths.rtl_out, self.cfg.paths.hls_out]
for fname in filesToBeRemoved:
if os.path.exists(fname):
os.remove(fname)
os.mkdir(fname)
else:
os.mkdir(fname)
for dir in dir_list:
if os.path.exists(dir):
shutil.rmtree(dir)
os.mkdir(dir)
else:
os.mkdir(dir)
def run_hls_synth(self, mode, time_out_min=10, print_out='silent', clean=False, sol=''):
if mode in ['', 'skip', 'syn_report', 'dse_report']:
print("PYTHON : Synthesize Skipped\n")
return True
version = self.cfg.design_setting.vivado_version
hls_cmd = self.cfg.vivado_path[sys.platform][version]['HLS']
os.chdir(self.cfg.paths.design_model)
print("PYTHON : Synthesis begins from python on {} Version {}".format(sys.platform, version))
if print_out == 'silent':
cmd = "{} -f run_hls_syn.tcl {} > synthesis_report{}.log".format(hls_cmd, sol, sol)
else:
cmd = "{} -f run_hls_syn.tcl {}".format(hls_cmd, sol)
if clean:
self.cleanSolution()
start_time = self.utils.record_time()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
try:
if not print_out == 'silent': print("The task PID is = {}".format(p.pid))
p.wait(time_out_min*60)
sr = 0
except subprocess.TimeoutExpired:
print("WARNING Timeout : process {} is killed by reaching {} Min!".format(p.pid, time_out_min))
sr = 1
for child in psutil.Process(p.pid).children(recursive=True):
if not print_out == 'silent': print("PID={:<4}, name={} is Killed".format(child.pid, child.name()))
child.kill()
p.kill()
time.sleep(1)
os.chdir(self.cfg.paths.design_top)
[mm, ss] = self.utils.end_and_print_time(start_time)
if (sr != 0):
print("PYTHON : Synthesis file not found, or a problem in bash file!")
return False
self.cfg.files.synLogFile = os.path.join(self.cfg.paths.design_model,'synthesis_report{}.log'.format(sol))
errors = self.utils.find_Aword_in_file(self.cfg.files.synLogFile, 'error', save_results=False)
# warning = self.utils.find_Aword_in_file(self.cfg.files.synLogFile, 'warning', save_results=True)
if errors != 0:
print("PYTHON : *** Synthesize Failed *** - Total synthesis time : {:3d} Minutes and {:2d} Seconds".format(
int(mm), int(ss)))
copyfile(self.cfg.files.synLogFile,
os.path.join(self.cfg.paths.solution, "failed_syn_log_sol{}.log".format(sol)))
return False
else:
print("PYTHON : *** Synthesize Passed *** - Total synthesis time : {:3d} Minutes and {:2d} Seconds".format(
int(mm), int(ss)))
return True
def read_single_syn_results_unused(self, print_out=False):
solutions_list = []
file = self.cfg.paths.solution + "/ip_test_data.json"
with open(file) as json_data:
syn_rslt = json.load(json_data)
json_data.close()
# except IOError:
# print("I/O Error," + " solution"+ str(solution) + "_Syn.json "+ " is not exist")
temp = syn_rslt["ModuleInfo"]["Metrics"][self.design.top_module]
syn_rslt_summary = {}
syn_rslt_summary['latencyBest'] = int(temp["Latency"]["LatencyBest"])
syn_rslt_summary['latencyWrst'] = int(temp["Latency"]["LatencyWorst"])
syn_rslt_summary['timing'] = float(temp["Timing"]["Estimate"])
syn_rslt_summary['Target'] = float(temp["Timing"]["Target"])
syn_rslt_summary['BRAM'] = int(temp["Area"]["BRAM_18K"])
syn_rslt_summary['LUT'] = int(temp["Area"]["LUT"])
syn_rslt_summary['FF'] = int(temp["Area"]["FF"])
syn_rslt_summary['DSP'] = int(temp["Area"]["DSP48E"])
if print_out:
print(json.dumps(syn_rslt_summary, indent=6))
return syn_rslt_summary
def single_pragma_gen(self, target_module_label, configs):
options = ''
if configs['options'] is not None:
for i in configs['options']:
options = options + "-{} ".format(i)
if configs['pragma'] in ['unroll', 'pipeline', 'inline']:
directive = "{} {} \"{}/{}\"".format(self.pragmas_dict[configs['pragma']], options, target_module_label,
configs['label'])
elif configs['pragma'] in ['partition', 'reshape', 'interface']:
directive = "{} {} \"{}\" {}".format(self.pragmas_dict[configs['pragma']], options, target_module_label,
configs['label'])
elif configs['pragma'] in ['mul']:
directive = "{} {} \"{}/{}\" {}".format(self.pragmas_dict[configs['pragma']], options, target_module_label,
configs['label'], configs['pragma'])
# print(directive)
return directive
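# Illustration (hypothetical config, not taken from the framework's defaults):
#   single_pragma_gen('conv_3DT1', {'pragma': 'pipeline', 'options': ['II=2'], 'label': 'conv_loop'})
# would return a directive string of roughly the form
#   set_directive_pipeline  -II=2  "conv_3DT1/conv_loop"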
def combinational_pragma_gen(self, target_module_label, configs):
options_list = []
if configs['options'] is None: # if there is no option, create an empty one
options_list.append('')
else: # if there are options, create a directive for each one
# options_list.append('')
for option in configs['options']: # create directives for all options
options_list.append('-' + option)
directives_list = []
directives_list.append('') # create an empty directive
for option in options_list:
if configs['pragma'] in ['unroll', 'pipeline', 'dataflow']:
directive = "{} {} \"{}/{}\"".format(self.pragmas_dict[configs['pragma']], option,
target_module_label, configs['label'])
elif configs['pragma'] in ['partition', 'reshape', 'interface']:
directive = "{} {} \"{}\" {}".format(self.pragmas_dict[configs['pragma']], option,
target_module_label, configs['label'])
elif configs['pragma'] in ['mul']:
directive = "{} {} \"{}/{}\" {}".format(self.pragmas_dict[configs['pragma']], option,
target_module_label, configs['label'], configs['pragma'])
elif configs['pragma'] in ['inline']:
directive = "{} {} \"{}\"".format(self.pragmas_dict[configs['pragma']], option,
target_module_label)
directives_list.append(directive)
return directives_list
def find_total_pragma_combinations(self, variable_pragmas):
total_comb = 1
for pragma in variable_pragmas:
total_comb = total_comb * len(pragma)
return total_comb
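# Example: two variable pragmas offering 3 and 4 alternative directives each
# yield 3 * 4 = 12 pragma combinations for the DSE loop to evaluate.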
def pars_DA_design_pragmas(self, datamap):
fixed_pragmas = []
variable_pragmas = []
# fixed_pragmas.append('\n# -------------------------- {} ----------------------------'.format(key))
# fixed_pragmas.append('# ----------------------------------------------------------------')
for module in datamap.keys():
fixed_pragmas.append('')
modules = datamap[module]
if (datamap[module]) is not None:
for zone in modules.keys():
if datamap[module][zone] is not None:
for pragma in datamap[module][zone]:
if module == 'top_module':
target_module_label = self.cfg.design_setting.topmodule
else:
target_module_label = module
temp = self.combinational_pragma_gen(target_module_label, pragma)
if pragma['type'] == 'fix':
fixed_pragmas.append(temp[1])
elif pragma['type'] == 'var':
variable_pragmas.append(temp)
total_comb = self.find_total_pragma_combinations(variable_pragmas)
return fixed_pragmas, variable_pragmas, total_comb
def pars_design_pragmas(self, datamap):
fixed_pragmas = []
variable_pragmas = []
# fixed_pragmas.append('\n# -------------------------- {} ----------------------------'.format(key))
# fixed_pragmas.append('# ----------------------------------------------------------------')
for module in datamap.keys():
fixed_pragmas.append('')
modules = datamap[module]
# Copyright 2019 UCLA Networked & Embedded Systems Laboratory (Author: <NAME>)
# 2020 Sogang University Auditory Intelligence Laboratory (Author: <NAME>)
#
# MIT License
import os
import sys
import argparse
import librosa
import data_utils
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn
from tensorboardX import SummaryWriter
from models import SiameseNetwork
from tools.pytorchtools import EarlyStopping
from torchsummary import summary
def train(train_loader, model, device, criterion, optim):
batch_idx = 0
num_total = 0
running_loss = 0
running_correct = 0
epoch_loss = 0
epoch_acc = 0
# +) train mode (parallel)
if device == 'cuda':
model = nn.DataParallel(model).train()
else:
model.train()
for batch_x, batch_x_copy, batch_y, batch_meta in train_loader:
batch_idx += 1
num_total += batch_x.size(0)
# +) wrapping
batch_x = batch_x.to(device)
batch_x_copy = batch_x_copy.to(device)
batch_y = batch_y.view(-1).type(torch.int64).to(device)
# +) forward
batch_out = model(batch_x, batch_x_copy)
_, batch_pred = batch_out.max(dim=1)
batch_loss = criterion(batch_out, batch_y)
# +) accumulate loss stats
running_loss += (batch_loss.item()*batch_x.size(0))
# +) accumulate accuracy stats
running_correct += (batch_pred == batch_y).sum(dim=0).item()
# +) print
if batch_idx % 10 == 0:
sys.stdout.write('\r \t {:.5f} {:.5f}'.format(running_correct/num_total, running_loss/num_total))
# +) zero gradient
optim.zero_grad()
# +) backward
batch_loss.backward()
# +) update
optim.step()
epoch_loss = running_loss/num_total
epoch_acc = running_correct/num_total
return epoch_loss, epoch_acc
def dev(dev_loader, model, device, criterion):
num_total = 0
running_correct = 0
running_loss = 0
epoch_loss = 0
epoch_acc = 0
# +) dev mode
model.eval()
for batch_x, batch_x_copy, batch_y, batch_meta in dev_loader:
num_total += batch_x.size(0)
batch_x = batch_x.to(device)
batch_x_copy = batch_x_copy.to(device)
batch_y = batch_y.view(-1).type(torch.int64).to(device)
batch_out = model(batch_x, batch_x_copy)
_, batch_pred = batch_out.max(dim=1)
batch_loss = criterion(batch_out, batch_y)
running_correct += (batch_pred == batch_y).sum(dim=0).item()
running_loss += (batch_loss.item()*batch_x.size(0))
epoch_loss = running_loss/num_total
epoch_acc = running_correct/num_total
return epoch_loss, epoch_acc
def train_fit(train_loader, model, device, criterion, optim):
batch_idx = 0
num_total = 0
running_loss = 0
epoch_loss = 0
# +) train mode (parallel)
if device == 'cuda':
model = nn.DataParallel(model).train()
else:
model.train()
for batch_x, batch_x_pair, batch_y, batch_meta in train_loader:
batch_idx += 1
num_total += batch_x.size(0)
# +) wrapping
batch_x = batch_x.to(device)
batch_x_pair = batch_x_pair.to(device)
batch_y = batch_y.view(-1).type(torch.int64).to(device)
# +) forward
batch_out = model(batch_x, batch_x_pair)
batch_loss = criterion(batch_out, batch_y)
# +) accumulate loss stats
running_loss += (batch_loss.item()*batch_x.size(0))
# +) print
if batch_idx % 10 == 0:
sys.stdout.write('\r \t {:.5f}'.format(running_loss/num_total))
# +) zero gradient
optim.zero_grad()
# +) backward
batch_loss.backward()
# +) update
optim.step()
epoch_loss = running_loss/num_total
return epoch_loss
def dev_fit(dev_loader, model, device, criterion):
num_total = 0
running_loss = 0
epoch_loss = 0
# +) dev mode
model.eval()
for batch_x, batch_x_pair, batch_y, batch_meta in dev_loader:
num_total += batch_x.size(0)
batch_x = batch_x.to(device)
batch_x_pair = batch_x_pair.to(device)
batch_y = batch_y.view(-1).type(torch.int64).to(device)
batch_out = model(batch_x, batch_x_pair)
batch_loss = criterion(batch_out, batch_y)
running_loss += (batch_loss.item()*batch_x.size(0))
epoch_loss = running_loss/num_total
return epoch_loss
def evaluate(eval_dataset, eval_loader, model, device, eval_output_path):
num_total = 0
file_name_list = []
key_list = []
attack_id_list = []
score_list = []
# +) eval mode
model.eval()
for batch_x, batch_x_pair, batch_y, batch_meta in eval_loader:
num_total += batch_x.size(0)
batch_x = batch_x.to(device)
batch_x_pair = batch_x_pair.to(device)
batch_out = model(batch_x, batch_x_pair)
# +) compute score
batch_score = (batch_out[:,1] - batch_out[:,0]).data.cpu().numpy().ravel()
# +) add outputs
file_name_list.extend(list(batch_meta[1]))
key_list.extend(['bonafide' if key == 1 else 'spoof' for key in list(batch_meta[4])])
attack_id_list.extend([eval_dataset.attack_id_dict_inv[attack_id.item()] for attack_id in list(batch_meta[3])])
score_list.extend(batch_score.tolist())
# +) save result
with open(eval_output_path, 'w') as f:
for file_name, attack_id, key, score in zip(file_name_list, attack_id_list, key_list, score_list):
f.write('{} {} {} {}\n'.format(file_name, attack_id, key, score))
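# Interpretation (not stated elsewhere in this script): with class index 0 = spoof
# and index 1 = bonafide, the scalar written to the result file is
#   score = out[:, 1] - out[:, 0]
# i.e. a log-likelihood-ratio-style score where larger values favor bonafide.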
class ContrastiveLoss(nn.Module):
def __init__(self, margin=2.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-9
def forward(self, result, target, size_average=True):
distances = (-result).pow(2).sum(1) # squared distances
losses = 0.5 * (target.float() * distances +
(1 + -1 * target).float() * F.relu(self.margin - (distances + self.eps).sqrt()).pow(2))
return losses.mean() if size_average else losses.sum()
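# Sketch of the intended formula (assuming `result` is the element-wise difference
# of the two embeddings and target y = 1 marks a matching pair):
#   L = 0.5 * [ y * d^2 + (1 - y) * max(0, margin - d)^2 ],   d = ||e1 - e2||
# which matches the computation above; the (-result) sign is removed by squaring.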
class TripletLoss(nn.Module):
def __init__(self, margin=2.0):
super(TripletLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative, size_average=True):
distance_positive = (anchor - positive).pow(2).sum(1) # .pow(.5)
distance_negative = (anchor - negative).pow(2).sum(1) # .pow(.5)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean() if size_average else losses.sum()
if __name__ == '__main__':
# 1) parser
parser = argparse.ArgumentParser()
# +) For data preparation
parser.add_argument('--track', type=str, default='LA') # LA, PA
parser.add_argument('--input_size', type=int, default=64000) # input size (ex. 64000)
parser.add_argument('--feature', type=str, default='mfcc') # spect, mfcc
parser.add_argument('--data_tag', type=str, default=0) # feature tag (ex. 0)
# +) For training
parser.add_argument('--train_batch_size', type=int, default=32)
parser.add_argument('--dev_batch_size', type=int, default=32)
parser.add_argument('--num_epochs', type=int, default=200)
parser.add_argument('--es_patience', type=int, default=7)
parser.add_argument('--embedding_size', type=int, default=None)
parser.add_argument('--model_comment', type=str, default=None)
# +) For optimizer
parser.add_argument('--loss', type=str, default='nll')
parser.add_argument('--optim', type=str, default='adam')
parser.add_argument('--lr', type=float, default=0.00005)
parser.add_argument('--wd', type=float, default=0)
parser.add_argument('--sched_factor', type=float, default=0.1)
parser.add_argument('--sched_patience', type=int, default=10)
parser.add_argument('--sched_min_lr', type=float, default=0)
# +) For evaluation
parser.add_argument('--eval_mode', action='store_true', default=False)
parser.add_argument('--eval_batch_size', type=int, default=None)
parser.add_argument('--eval_num_checkpoint', type=int, default=None)
# +) For Resume
parser.add_argument('--resume_mode', action='store_true', default=False)
parser.add_argument('--resume_num_checkpoint', type=int, default=None)
parser.add_argument('--fit_mode', action='store_true', default=False)
parser.add_argument('--fit_num_checkpoint', type=int, default=None)
parser.add_argument('--cp_fit_mode', action='store_true', default=False)
parser.add_argument('--cp_fit_num_checkpoint', type=int, default=None)
args = parser.parse_args()
# 2) model tag
model_tag = 'model_{}_{}_{}_{}_{}_{}_{}'.format(
args.track, args.input_size, args.feature, args.data_tag,
args.train_batch_size, args.num_epochs, args.embedding_size)
if args.model_comment:
model_tag = model_tag + '_{}'.format(args.model_comment)
print('model tag is ', model_tag)
# 3) model save path
if args.fit_mode:
if not os.path.exists('models/tune'):
os.mkdir('models/tune')
model_save_path = os.path.join('models/tune', model_tag)
if not os.path.exists(model_save_path):
os.mkdir(model_save_path)
print('model save path is ', model_save_path)
elif args.cp_fit_mode:
if not os.path.exists('models/cp-tune'):
os.mkdir('models/cp-tune')
model_save_path = os.path.join('models/cp-tune', model_tag)
if not os.path.exists(model_save_path):
os.mkdir(model_save_path)
print('model save path is ', model_save_path)
else:
if not os.path.exists('models/pre'):
os.mkdir('models/pre')
model_save_path = os.path.join('models/pre', model_tag)
if not os.path.exists(model_save_path):
os.mkdir(model_save_path)
print('model save path is ', model_save_path)
# 4) use cuda
if torch.cuda.is_available():
device = 'cuda'
print('device is ', device)
else:
device = 'cpu'
print('device is ', device)
# 5) eval
if args.eval_mode:
# +) eval dataset
print('========== eval dataset ==========')
eval_dataset = data_utils.Dataset(
track=args.track, data='eval', size=args.input_size, feature=args.feature, tag=args.data_tag)
eval_loader = DataLoader(eval_dataset, batch_size=args.eval_batch_size, shuffle=False, num_workers=8)
# +) load model
print('========== eval process ==========')
model = SiameseNetwork(args.embedding_size).to(device)
eval_checkpoint_path = '{}/epoch_{}.pth'.format(model_save_path, str(args.eval_num_checkpoint))
model.load_state_dict(torch.load(eval_checkpoint_path))
print('model loaded from ', eval_checkpoint_path)
# +) eval
eval_output_path = '{}/{}.result'.format(model_save_path, str(args.eval_num_checkpoint))
evaluate(eval_dataset, eval_loader, model, device, eval_output_path)
print('eval output saved to ', eval_output_path)
# 6) train & dev
else:
# +) dev dataset
print('========== dev dataset ==========')
dev_dataset = data_utils.Dataset(
track=args.track, data='dev', size=args.input_size, feature=args.feature, tag=args.data_tag)
dev_loader = DataLoader(dev_dataset, batch_size=args.dev_batch_size, shuffle=True, num_workers=8)
# +) train dataset
print('========== train dataset ==========')
train_dataset = data_utils.Dataset(
track=args.track, data='train', size=args.input_size, feature=args.feature, tag=args.data_tag)
train_loader = DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=8)
print('========== train process ==========')
# +) model init (check resume mode)
if args.resume_mode:
model = SiameseNetwork(args.embedding_size).to(device)
resume_checkpoint_path = '{}/epoch_{}.pth'.format(model_save_path, str(args.resume_num_checkpoint))
model.load_state_dict(torch.load(resume_checkpoint_path))
print('model for resume loaded from ', resume_checkpoint_path)
summary(model, input_size=[(1025,126), (1025,126)])
start = args.resume_num_checkpoint+1
# +) model init (check fine-tuning mode)
elif args.fit_mode:
model = SiameseNetwork(args.embedding_size).to(device)
fit_checkpoint_path = 'models/pre/{}/epoch_{}.pth'.format(model_tag, str(args.fit_num_checkpoint))
model.load_state_dict(torch.load(fit_checkpoint_path))
print('model for fit loaded from ', fit_checkpoint_path)
summary(model, input_size=[(1025,126), (1025,126)])
start = 1
# +) model init (check cp-fine-tuning mode)
elif args.cp_fit_mode:
model = SiameseNetwork(args.embedding_size).to(device)
fit_checkpoint_path = 'models/tune/{}/epoch_{}.pth'.format(model_tag, str(args.cp_fit_num_checkpoint))
model.load_state_dict(torch.load(fit_checkpoint_path))
print('model for fit loaded from ', fit_checkpoint_path)
summary(model, input_size=[(1025,126), (1025,126)])
start = 1
# +) model init
else:
model = SiameseNetwork(args.embedding_size).to(device)
summary(model, input_size=[(1025,126), (1025,126)])
start = 1
# +) loss
if args.loss == 'nll':
weight = torch.FloatTensor([1.0, 9.0]).to(device) # weight for loss (spoof:1, genuine:9)
criterion = nn.NLLLoss(weight=weight)
elif args.loss == 'cs':
criterion = ContrastiveLoss()
elif args.loss == 'tri':
criterion = TripletLoss()
# +) optimizer
if args.optim == 'adam':
optim = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
# +) scheduler
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optim, 'min', factor=args.sched_factor, patience=args.sched_patience, min_lr=args.sched_min_lr, verbose=False)
# +) early stopping
#early_stopping = EarlyStopping(patience=args.es_patience, verbose=False)
# +) fine-tuning mode
if args.fit_mode:
# +) tensorboardX, log
if not os.path.exists('logs/tune'):
os.mkdir('logs/tune')
writer = SummaryWriter('logs/tune/{}'.format(model_tag))
dev_losses = []
for epoch in range(start, args.num_epochs+1):
train_loss = train_fit(train_loader, model, device, criterion, optim)
dev_loss = dev_fit(dev_loader, model, device, criterion)
writer.add_scalar('train_loss', train_loss, epoch)
writer.add_scalar('dev_loss', dev_loss, epoch)
print('\n{} - train loss: {:.5f} - dev loss: {:.5f}'.format(epoch, train_loss, dev_loss))
torch.save(model.state_dict(), os.path.join(model_save_path, 'epoch_{}.pth'.format(epoch)))
dev_losses.append(dev_loss)
#early_stopping(dev_loss, model)
#if early_stopping.early_stop:
# print('early stopping !')
# break
scheduler.step(dev_loss)
minposs = dev_losses.index(min(dev_losses))+1
print('lowest dev loss is at epoch {}'.format(minposs))
# +) cp-fine-tuning mode
elif args.cp_fit_mode:
# +) tensorboardX, log
if not os.path.exists('logs/cp-tune'):
os.mkdir('logs/cp-tune')
writer = SummaryWriter('logs/cp-tune/{}'.format(model_tag))
dev_losses = []
for epoch in range(start, args.num_epochs+1):
train_loss = train_fit(train_loader, model, device, criterion, optim)
dev_loss = dev_fit(dev_loader, model, device, criterion)
writer.add_scalar('train_loss', train_loss, epoch)
writer.add_scalar('dev_loss', dev_loss, epoch)
<filename>line/f_CallService.py
#
# Autogenerated by Frugal Compiler (3.4.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from threading import Lock
from frugal.middleware import Method
from frugal.exceptions import TApplicationExceptionType
from frugal.exceptions import TTransportExceptionType
from frugal.processor import FBaseProcessor
from frugal.processor import FProcessorFunction
from frugal.util.deprecate import deprecated
from frugal.util import make_hashable
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.transport.TTransport import TTransportException
from .ttypes import *
class Iface(object):
def getUserStatus(self, ctx, mid):
"""
Args:
ctx: FContext
mid: string
"""
pass
def updateProfileAttributeForChannel(self, ctx, profileAttribute, value):
"""
Args:
ctx: FContext
profileAttribute: ProfileAttribute
value: string
"""
pass
def updateExtendedProfileAttribute(self, ctx, attr, extendedProfile):
"""
Args:
ctx: FContext
attr: ExtendedProfileAttribute
extendedProfile: ExtendedProfile
"""
pass
def getAllSimpleChannelContacts(self, ctx, statusSticonFallbackDisabled):
"""
Args:
ctx: FContext
statusSticonFallbackDisabled: boolean
"""
pass
def getUserIdentities(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def markPaidCallAd(self, ctx, dialedNumber, language, disableCallerId):
"""
Args:
ctx: FContext
dialedNumber: string
language: string
disableCallerId: boolean
"""
pass
def isGroupMember(self, ctx, groupId):
"""
Args:
ctx: FContext
groupId: string
"""
pass
def getPhoneInfoFromPhoneNumber(self, ctx, region, phoneNumber):
"""
Args:
ctx: FContext
region: string
phoneNumber: string
"""
pass
def redeemPaidCallVoucher(self, ctx, serial, language):
"""
Args:
ctx: FContext
serial: string
language: string
"""
pass
def getPreferredDisplayName(self, ctx, mids):
"""
Args:
ctx: FContext
mids: list of string
"""
pass
def getContactsForChannel(self, ctx, ids):
"""
Args:
ctx: FContext
ids: list of string
"""
pass
def getCallCreditProducts(self, ctx, appStoreCode, pgCode, country, language):
"""
Args:
ctx: FContext
appStoreCode: PaymentType
pgCode: PaymentPgType
country: string
language: string
"""
pass
def getCompactContacts(self, ctx, lastModifiedTimestamp):
"""
Args:
ctx: FContext
lastModifiedTimestamp: int (signed 64 bits)
"""
pass
def notifyNotiCenterEvent(self, ctx, event):
"""
Args:
ctx: FContext
event: NotiCenterEventData
"""
pass
def isInContact(self, ctx, mid):
"""
Args:
ctx: FContext
mid: string
"""
pass
def lookupGroupMembers(self, ctx, groupId, mids):
"""
Args:
ctx: FContext
groupId: string
mids: list of string
"""
pass
def getRoomInformation(self, ctx, roomMid):
"""
Args:
ctx: FContext
roomMid: string
"""
pass
def getGroupCall(self, ctx, chatMid):
"""
Args:
ctx: FContext
chatMid: string
"""
pass
def isAllowSecondaryDeviceLogin(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def getPrimaryClientForChannel(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def createRoomWithBuddy(self, ctx, reqSeq, buddyMid, contactIds):
"""
Args:
ctx: FContext
reqSeq: int (signed 32 bits)
buddyMid: string
contactIds: list of string
"""
pass
def getDisplayName(self, ctx, mid):
"""
Args:
ctx: FContext
mid: string
"""
pass
def getPaidCallMetadata(self, ctx, language):
"""
Args:
ctx: FContext
language: string
"""
pass
def getMid(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def getUserCountryForBilling(self, ctx, country, remoteIp):
"""
Args:
ctx: FContext
country: string
remoteIp: string
"""
pass
def getFavoriteGroupIdsForChannel(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def getPaidCallHistory(self, ctx, start, size, language):
"""
Args:
ctx: FContext
start: int (signed 64 bits)
size: int (signed 32 bits)
language: string
"""
pass
def sendPinCodeOperation(self, ctx, verifier):
"""
Args:
ctx: FContext
verifier: string
"""
pass
def inviteIntoGroupCall(self, ctx, chatMid, memberMids, mediaType):
"""
Args:
ctx: FContext
chatMid: string
memberMids: list of string
mediaType: GroupCallMediaType
"""
pass
def getFriendMids(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def getMetaProfile(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def sendMessageForChannel(self, ctx, message):
"""
Args:
ctx: FContext
message: Message
"""
pass
def activeBuddySubscriberCount(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def getCallCreditPurchaseHistory(self, ctx, request):
"""
Args:
ctx: FContext
request: CoinHistoryCondition
"""
pass
def isRoomMember(self, ctx, roomId):
"""
Args:
ctx: FContext
roomId: string
"""
pass
def sendSystemOAMessage(self, ctx, message):
"""
Args:
ctx: FContext
message: Message
"""
pass
def acquirePaidCallRoute(self, ctx, paidCallType, dialedNumber, language, networkCode, disableCallerId, referer, adSessionId):
"""
Args:
ctx: FContext
paidCallType: PaidCallType
dialedNumber: string
language: string
networkCode: string
disableCallerId: boolean
referer: string
adSessionId: string
"""
pass
def getGroupsForChannel(self, ctx, groupIds):
"""
Args:
ctx: FContext
groupIds: list of string
"""
pass
def getUserCreateTime(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def registerChannelCP(self, ctx, cpId, registerPassword):
"""
Args:
ctx: FContext
cpId: string
registerPassword: string
"""
pass
def reserveCallCreditPurchase(self, ctx, request):
"""
Args:
ctx: FContext
request: CoinPurchaseReservation
"""
pass
def acquirePaidCallCurrencyExchangeRate(self, ctx, language):
"""
Args:
ctx: FContext
language: string
"""
pass
def getRoomMemberMidsForAppPlatform(self, ctx, roomId):
"""
Args:
ctx: FContext
roomId: string
"""
pass
def getPaidCallBalanceList(self, ctx, language):
"""
Args:
ctx: FContext
language: string
"""
pass
def getPersonalInfos(self, ctx, requiredPersonalInfos):
"""
Args:
ctx: FContext
requiredPersonalInfos: set of PersonalInfo
"""
pass
def getPrimaryClientsForChannel(self, ctx, userMids):
"""
Args:
ctx: FContext
userMids: list of string
"""
pass
def addBuddyToContact(self, ctx, buddyMid):
"""
Args:
ctx: FContext
buddyMid: string
"""
pass
def getGroupMemberMidsForAppPlatform(self, ctx, groupId):
"""
Args:
ctx: FContext
groupId: string
"""
pass
def getUserLanguage(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def lookupPaidCall(self, ctx, dialedNumber, language, referer):
"""
Args:
ctx: FContext
dialedNumber: string
language: string
referer: string
"""
pass
def getExtendedProfile(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def getReverseCompactContacts(self, ctx, ids):
"""
Args:
ctx: FContext
ids: list of string
"""
pass
def getPaidCallAdStatus(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def findContactByUseridWithoutAbuseBlockForChannel(self, ctx, userid):
"""
Args:
ctx: FContext
userid: string
"""
pass
def getGroupMemberMids(self, ctx, groupId):
"""
Args:
ctx: FContext
groupId: string
"""
pass
def sendMessageWithoutRelationship(self, ctx, message):
"""
Args:
ctx: FContext
message: Message
"""
pass
def displayBuddySubscriberCountInBulk(self, ctx, mids):
"""
Args:
ctx: FContext
mids: list of string
"""
pass
def lookupRoomMembers(self, ctx, roomId, mids):
"""
Args:
ctx: FContext
roomId: string
mids: list of string
"""
pass
def getFavoriteMidsForChannel(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def getAllContactIdsForChannel(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def displayBuddySubscriberCount(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def getProfileForChannel(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def getUserTickets(self, ctx, userMids):
"""
Args:
ctx: FContext
userMids: list of string
"""
pass
def getOAFriendMids(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def searchPaidCallUserRate(self, ctx, countryCode, language):
"""
Args:
ctx: FContext
countryCode: string
language: string
"""
pass
def getJoinedGroupIdsForChannel(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def acquireGroupCallRoute(self, ctx, chatMid, mediaType):
"""
Args:
ctx: FContext
chatMid: string
mediaType: GroupCallMediaType
"""
pass
def getUserMidsWhoAddedMe(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def getIdentityCredential(self, ctx):
"""
Args:
ctx: FContext
"""
pass
def addOperationForChannel(self, ctx, opType, param1, param2, param3):
"""
Args:
ctx: FContext
opType: OpType
param1: string
param2: string
param3: string
"""
pass
def getSimpleChannelContacts(self, ctx, ids, statusSticonFallbackDisabled):
"""
Args:
ctx: FContext
ids: list of string
statusSticonFallbackDisabled: boolean
"""
pass
def getUserLastSentMessageTimeStamp(self, ctx, mid):
"""
Args:
ctx: FContext
mid: string
"""
pass
class Client(Iface):
def __init__(self, provider, middleware=None):
"""
Create a new Client with an FServiceProvider containing a transport
and protocol factory.
Args:
provider: FServiceProvider with TSynchronousTransport
middleware: ServiceMiddleware or list of ServiceMiddleware
"""
middleware = middleware or []
if middleware and not isinstance(middleware, list):
middleware = [middleware]
self._transport = provider.get_transport()
self._protocol_factory = provider.get_protocol_factory()
self._oprot = self._protocol_factory.get_protocol(self._transport)
self._iprot = self._protocol_factory.get_protocol(self._transport)
self._write_lock = Lock()
middleware += provider.get_middleware()
self._methods = {
'getUserStatus': Method(self._getUserStatus, middleware),
'updateProfileAttributeForChannel': Method(self._updateProfileAttributeForChannel, middleware),
'updateExtendedProfileAttribute': Method(self._updateExtendedProfileAttribute, middleware),
'getAllSimpleChannelContacts': Method(self._getAllSimpleChannelContacts, middleware),
'getUserIdentities': Method(self._getUserIdentities, middleware),
'markPaidCallAd': Method(self._markPaidCallAd, middleware),
'isGroupMember': Method(self._isGroupMember, middleware),
'getPhoneInfoFromPhoneNumber': Method(self._getPhoneInfoFromPhoneNumber, middleware),
'redeemPaidCallVoucher': Method(self._redeemPaidCallVoucher, middleware),
'getPreferredDisplayName': Method(self._getPreferredDisplayName, middleware),
'getContactsForChannel': Method(self._getContactsForChannel, middleware),
'getCallCreditProducts': Method(self._getCallCreditProducts, middleware),
'getCompactContacts': Method(self._getCompactContacts, middleware),
'notifyNotiCenterEvent': Method(self._notifyNotiCenterEvent, middleware),
'isInContact': Method(self._isInContact, middleware),
'lookupGroupMembers': Method(self._lookupGroupMembers, middleware),
'getRoomInformation': Method(self._getRoomInformation, middleware),
'getGroupCall': Method(self._getGroupCall, middleware),
'isAllowSecondaryDeviceLogin': Method(self._isAllowSecondaryDeviceLogin, middleware),
'getPrimaryClientForChannel': Method(self._getPrimaryClientForChannel, middleware),
'createRoomWithBuddy': Method(self._createRoomWithBuddy, middleware),
'getDisplayName': Method(self._getDisplayName, middleware),
'getPaidCallMetadata': Method(self._getPaidCallMetadata, middleware),
'getMid': Method(self._getMid, middleware),
'getUserCountryForBilling': Method(self._getUserCountryForBilling, middleware),
'getFavoriteGroupIdsForChannel': Method(self._getFavoriteGroupIdsForChannel, middleware),
'getPaidCallHistory': Method(self._getPaidCallHistory, middleware),
'sendPinCodeOperation': Method(self._sendPinCodeOperation, middleware),
'inviteIntoGroupCall': Method(self._inviteIntoGroupCall, middleware),
'getFriendMids': Method(self._getFriendMids, middleware),
'getMetaProfile': Method(self._getMetaProfile, middleware),
'sendMessageForChannel': Method(self._sendMessageForChannel, middleware),
'activeBuddySubscriberCount': Method(self._activeBuddySubscriberCount, middleware),
'getCallCreditPurchaseHistory': Method(self._getCallCreditPurchaseHistory, middleware),
'isRoomMember': Method(self._isRoomMember, middleware),
'sendSystemOAMessage': Method(self._sendSystemOAMessage, middleware),
'acquirePaidCallRoute': Method(self._acquirePaidCallRoute, middleware),
'getGroupsForChannel': Method(self._getGroupsForChannel, middleware),
'getUserCreateTime': Method(self._getUserCreateTime, middleware),
'registerChannelCP': Method(self._registerChannelCP, middleware),
'reserveCallCreditPurchase': Method(self._reserveCallCreditPurchase, middleware),
'acquirePaidCallCurrencyExchangeRate': Method(self._acquirePaidCallCurrencyExchangeRate, middleware),
'getRoomMemberMidsForAppPlatform': Method(self._getRoomMemberMidsForAppPlatform, middleware),
'getPaidCallBalanceList': Method(self._getPaidCallBalanceList, middleware),
'getPersonalInfos': Method(self._getPersonalInfos, middleware),
'getPrimaryClientsForChannel': Method(self._getPrimaryClientsForChannel, middleware),
'addBuddyToContact': Method(self._addBuddyToContact, middleware),
'getGroupMemberMidsForAppPlatform': Method(self._getGroupMemberMidsForAppPlatform, middleware),
'getUserLanguage': Method(self._getUserLanguage, middleware),
'lookupPaidCall': Method(self._lookupPaidCall, middleware),
'getExtendedProfile': Method(self._getExtendedProfile, middleware),
'getReverseCompactContacts': Method(self._getReverseCompactContacts, middleware),
'getPaidCallAdStatus': Method(self._getPaidCallAdStatus, middleware),
'findContactByUseridWithoutAbuseBlockForChannel': Method(self._findContactByUseridWithoutAbuseBlockForChannel, middleware),
'getGroupMemberMids': Method(self._getGroupMemberMids, middleware),
'sendMessageWithoutRelationship': Method(self._sendMessageWithoutRelationship, middleware),
'displayBuddySubscriberCountInBulk': Method(self._displayBuddySubscriberCountInBulk, middleware),
'lookupRoomMembers': Method(self._lookupRoomMembers, middleware),
'getFavoriteMidsForChannel': Method(self._getFavoriteMidsForChannel, middleware),
'getAllContactIdsForChannel': Method(self._getAllContactIdsForChannel, middleware),
'displayBuddySubscriberCount': Method(self._displayBuddySubscriberCount, middleware),
'getProfileForChannel': Method(self._getProfileForChannel, middleware),
'getUserTickets': Method(self._getUserTickets, middleware),
'getOAFriendMids': Method(self._getOAFriendMids, middleware),
'searchPaidCallUserRate': Method(self._searchPaidCallUserRate, middleware),
'getJoinedGroupIdsForChannel': Method(self._getJoinedGroupIdsForChannel, middleware),
'acquireGroupCallRoute': Method(self._acquireGroupCallRoute, middleware),
'getUserMidsWhoAddedMe': Method(self._getUserMidsWhoAddedMe, middleware),
'getIdentityCredential': Method(self._getIdentityCredential, middleware),
'addOperationForChannel': Method(self._addOperationForChannel, middleware),
'getSimpleChannelContacts': Method(self._getSimpleChannelContacts, middleware),
'getUserLastSentMessageTimeStamp': Method(self._getUserLastSentMessageTimeStamp, middleware),
| |
DataBreakpointInfoResponse:
dataId: Optional[str]
"""An identifier for the data on which a data breakpoint can be registered with the setDataBreakpoints request or null if no data breakpoint is available."""
description: str
"""UI string that describes on what data the breakpoint is set on or why a data breakpoint is not available."""
accessTypes: Optional[List[Literal["read", "write", "readWrite"]]]
"""Optional attribute listing the available access types for a potential data breakpoint. A UI frontend could surface this information."""
canPersist: Optional[bool]
"""Optional attribute indicating that a potential data breakpoint could be persisted across sessions."""
@dataclass
class SetDataBreakpointsArguments:
"""
Arguments for 'setDataBreakpoints' request.
"""
breakpoints: List[DataBreakpoint]
"""The contents of this array replaces all existing data breakpoints. An empty array clears all data breakpoints."""
@dataclass
class SetInstructionBreakpointsArguments:
"""
Arguments for 'setInstructionBreakpoints' request
"""
breakpoints: List[InstructionBreakpoint]
"""The instruction references of the breakpoints"""
@dataclass
class ContinueRequest:
command: Literal["continue"]
arguments: ContinueArguments
@dataclass
class NextArguments:
"""
Arguments for 'next' request.
"""
threadId: int
"""Execute 'next' for this thread."""
granularity: Optional[SteppingGranularity]
"""Optional granularity to step. If no granularity is specified, a granularity of 'statement' is assumed."""
@dataclass
class StepInArguments:
"""
Arguments for 'stepIn' request.
"""
threadId: int
"""Execute 'stepIn' for this thread."""
targetId: Optional[int]
"""Optional id of the target to step into."""
granularity: Optional[SteppingGranularity]
"""Optional granularity to step. If no granularity is specified, a granularity of 'statement' is assumed."""
@dataclass
class StepOutArguments:
"""
Arguments for 'stepOut' request.
"""
threadId: int
"""Execute 'stepOut' for this thread."""
granularity: Optional[SteppingGranularity]
"""Optional granularity to step. If no granularity is specified, a granularity of 'statement' is assumed."""
@dataclass
class StepBackArguments:
"""
Arguments for 'stepBack' request.
"""
threadId: int
"""Execute 'stepBack' for this thread."""
granularity: Optional[SteppingGranularity]
"""Optional granularity to step. If no granularity is specified, a granularity of 'statement' is assumed."""
@dataclass
class ReverseContinueRequest:
command: Literal["reverseContinue"]
arguments: ReverseContinueArguments
@dataclass
class RestartFrameRequest:
command: Literal["restartFrame"]
arguments: RestartFrameArguments
@dataclass
class GotoRequest:
command: Literal["goto"]
arguments: GotoArguments
@dataclass
class PauseRequest:
command: Literal["pause"]
arguments: PauseArguments
@dataclass
class StackTraceArguments:
"""
Arguments for 'stackTrace' request.
"""
threadId: int
"""Retrieve the stacktrace for this thread."""
startFrame: Optional[int]
"""The index of the first frame to return; if omitted frames start at 0."""
levels: Optional[int]
"""The maximum number of frames to return. If levels is not specified or 0, all frames are returned."""
format: Optional[StackFrameFormat]
"""Specifies details on how to format the stack frames.\nThe attribute is only honored by a debug adapter if the capability 'supportsValueFormattingOptions' is true."""
@dataclass
class ScopesRequest:
command: Literal["scopes"]
arguments: ScopesArguments
@dataclass
class VariablesArguments:
"""
Arguments for 'variables' request.
"""
variablesReference: int
"""The Variable reference."""
filter: Optional[Literal["indexed", "named"]]
"""Optional filter to limit the child variables to either named or indexed. If omitted, both types are fetched."""
start: Optional[int]
"""The index of the first variable to return; if omitted children start at 0."""
count: Optional[int]
"""The number of variables to return. If count is missing or 0, all variables are returned."""
format: Optional[ValueFormat]
"""Specifies details on how to format the Variable values.\nThe attribute is only honored by a debug adapter if the capability 'supportsValueFormattingOptions' is true."""
@dataclass
class SetVariableArguments:
"""
Arguments for 'setVariable' request.
"""
variablesReference: int
"""The reference of the variable container."""
name: str
"""The name of the variable in the container."""
value: str
"""The value of the variable."""
format: Optional[ValueFormat]
"""Specifies details on how to format the response value."""
@dataclass
class ThreadsResponse:
threads: List[Thread]
"""All threads."""
@dataclass
class TerminateThreadsRequest:
command: Literal["terminateThreads"]
arguments: TerminateThreadsArguments
@dataclass
class ModulesRequest:
command: Literal["modules"]
arguments: ModulesArguments
@dataclass
class ModulesResponse:
modules: List[Module]
"""All modules or range of modules."""
totalModules: Optional[int]
"""The total number of modules available."""
@dataclass
class LoadedSourcesRequest:
command: Literal["loadedSources"]
arguments: Optional[LoadedSourcesArguments]
@dataclass
class EvaluateArguments:
"""
Arguments for 'evaluate' request.
"""
expression: str
"""The expression to evaluate."""
frameId: Optional[int]
"""Evaluate the expression in the scope of this stack frame. If not specified, the expression is evaluated in the global scope."""
context: Optional[str]
"""The context in which the evaluate request is run."""
format: Optional[ValueFormat]
"""Specifies details on how to format the Evaluate result.\nThe attribute is only honored by a debug adapter if the capability 'supportsValueFormattingOptions' is true."""
@dataclass
class EvaluateResponse:
result: str
"""The result of the evaluate request."""
type: Optional[str]
"""The optional type of the evaluate result.\nThis attribute should only be returned by a debug adapter if the client has passed the value true for the 'supportsVariableType' capability of the 'initialize' request."""
presentationHint: Optional[VariablePresentationHint]
"""Properties of a evaluate result that can be used to determine how to render the result in the UI."""
variablesReference: int
"""If variablesReference is > 0, the evaluate result is structured and its children can be retrieved by passing variablesReference to the VariablesRequest.\nThe value should be less than or equal to 2147483647 (2^31-1)."""
namedVariables: Optional[int]
"""The number of named child variables.\nThe client can use this optional information to present the variables in a paged UI and fetch them in chunks.\nThe value should be less than or equal to 2147483647 (2^31-1)."""
indexedVariables: Optional[int]
"""The number of indexed child variables.\nThe client can use this optional information to present the variables in a paged UI and fetch them in chunks.\nThe value should be less than or equal to 2147483647 (2^31-1)."""
memoryReference: Optional[str]
"""Optional memory reference to a location appropriate for this result.\nFor pointer type eval results, this is generally a reference to the memory address contained in the pointer.\nThis attribute should be returned by a debug adapter if the client has passed the value true for the 'supportsMemoryReferences' capability of the 'initialize' request."""
@dataclass
class SetExpressionArguments:
"""
Arguments for 'setExpression' request.
"""
expression: str
"""The l-value expression to assign to."""
value: str
"""The value expression to assign to the l-value expression."""
frameId: Optional[int]
"""Evaluate the expressions in the scope of this stack frame. If not specified, the expressions are evaluated in the global scope."""
format: Optional[ValueFormat]
"""Specifies how the resulting value should be formatted."""
@dataclass
class SetExpressionResponse:
value: str
"""The new value of the expression."""
type: Optional[str]
"""The optional type of the value.\nThis attribute should only be returned by a debug adapter if the client has passed the value true for the 'supportsVariableType' capability of the 'initialize' request."""
presentationHint: Optional[VariablePresentationHint]
"""Properties of a value that can be used to determine how to render the result in the UI."""
variablesReference: Optional[int]
"""If variablesReference is > 0, the value is structured and its children can be retrieved by passing variablesReference to the VariablesRequest.\nThe value should be less than or equal to 2147483647 (2^31-1)."""
namedVariables: Optional[int]
"""The number of named child variables.\nThe client can use this optional information to present the variables in a paged UI and fetch them in chunks.\nThe value should be less than or equal to 2147483647 (2^31-1)."""
indexedVariables: Optional[int]
"""The number of indexed child variables.\nThe client can use this optional information to present the variables in a paged UI and fetch them in chunks.\nThe value should be less than or equal to 2147483647 (2^31-1)."""
@dataclass
class StepInTargetsRequest:
command: Literal["stepInTargets"]
arguments: StepInTargetsArguments
@dataclass
class StepInTargetsResponse:
targets: List[StepInTarget]
"""The possible stepIn targets of the specified source location."""
@dataclass
class GotoTargetsResponse:
targets: List[GotoTarget]
"""The possible goto targets of the specified location."""
@dataclass
class CompletionsRequest:
command: Literal["completions"]
arguments: CompletionsArguments
@dataclass
class ExceptionInfoRequest:
command: Literal["exceptionInfo"]
arguments: ExceptionInfoArguments
@dataclass
class ExceptionInfoResponse:
body: ExceptionInfoResponseBody
@dataclass
class ExceptionInfoResponseBody:
exceptionId: str
"""ID of the exception that was thrown."""
description: Optional[str]
"""Descriptive text for the exception provided by the debug adapter."""
breakMode: ExceptionBreakMode
"""Mode that caused the exception notification to be raised."""
details: Optional[ExceptionDetails]
"""Detailed information about the exception."""
@dataclass
class ReadMemoryRequest:
command: Literal["readMemory"]
arguments: ReadMemoryArguments
@dataclass
class WriteMemoryRequest:
command: Literal["writeMemory"]
arguments: WriteMemoryArguments
@dataclass
class DisassembleRequest:
command: Literal["disassemble"]
arguments: DisassembleArguments
@dataclass
class Capabilities:
"""
Information about the capabilities of a debug adapter.
"""
supportsConfigurationDoneRequest: Optional[bool] = None
"""The debug adapter supports the 'configurationDone' request."""
supportsFunctionBreakpoints: Optional[bool] = None
"""The debug adapter supports function breakpoints."""
supportsConditionalBreakpoints: Optional[bool] = None
"""The debug adapter supports conditional breakpoints."""
supportsHitConditionalBreakpoints: Optional[bool] = None
"""The debug adapter supports breakpoints that break execution after a specified number of hits."""
supportsEvaluateForHovers: Optional[bool] = None
"""The debug adapter supports a (side effect free) evaluate request for data hovers."""
exceptionBreakpointFilters: Optional[List[ExceptionBreakpointsFilter]] = None
"""Available exception filter options for the 'setExceptionBreakpoints' request."""
supportsStepBack: Optional[bool] = None
"""The debug adapter supports stepping back via the 'stepBack' and 'reverseContinue' requests."""
supportsSetVariable: Optional[bool] = None
"""The debug adapter supports setting a variable to a value."""
supportsRestartFrame: Optional[bool] = None
"""The debug adapter supports restarting a frame."""
supportsGotoTargetsRequest: Optional[bool] = None
"""The debug adapter supports the 'gotoTargets' request."""
supportsStepInTargetsRequest: Optional[bool] = None
"""The debug adapter supports the 'stepInTargets' request."""
supportsCompletionsRequest: Optional[bool] = None
"""The debug adapter supports the 'completions' request."""
completionTriggerCharacters: Optional[List[str]] = None
"""The set of characters that should trigger completion in a REPL. If not specified, the UI should assume the '.' character."""
supportsModulesRequest: Optional[bool] = None
"""The debug adapter supports the 'modules' request."""
additionalModuleColumns: Optional[List[ColumnDescriptor]] = None
"""The set of additional module information exposed by the debug adapter."""
supportedChecksumAlgorithms: Optional[List[ChecksumAlgorithm]] = None
"""Checksum algorithms supported by the debug adapter."""
supportsRestartRequest: Optional[bool] = None
"""The debug adapter supports the 'restart' request. In this case a client should not implement 'restart' by terminating and relaunching the adapter but by calling the RestartRequest."""
supportsExceptionOptions: Optional[bool] = None
"""The debug adapter supports 'exceptionOptions' on the setExceptionBreakpoints request."""
supportsValueFormattingOptions: Optional[bool] = None
"""The debug adapter supports a 'format' attribute on the stackTraceRequest, variablesRequest, and evaluateRequest."""
supportsExceptionInfoRequest: Optional[bool] = None
"""The debug adapter supports the 'exceptionInfo' request."""
supportTerminateDebuggee: Optional[bool] = None
"""The debug adapter supports the 'terminateDebuggee' attribute on the 'disconnect' request."""
supportSuspendDebuggee: Optional[bool] = None
"""The debug adapter supports the 'suspendDebuggee' attribute on the 'disconnect' request."""
supportsDelayedStackTraceLoading: Optional[bool] = None
"""The debug adapter supports the delayed loading of parts of the stack, which requires that both | |
the enabled status"""
enabled = {}
targets = {}
for target, filepath in self.each_target_file():
logg.info("target %s", filepath)
targets[target] = filepath
enabled[target] = "static"
for unit in _all_common_targets:
targets[unit] = None
enabled[unit] = "static"
if unit in _all_common_enabled:
enabled[unit] = "enabled"
if unit in _all_common_disabled:
enabled[unit] = "disabled"
return [ (unit, enabled[unit]) for unit in sorted(targets) ]
def show_list_unit_files(self, *modules): # -> [ (unit,enabled) ]
"""[PATTERN]... -- List installed unit files
List installed unit files and their enablement state (as reported
by is-enabled). If one or more PATTERNs are specified, only units
whose filename (just the last component of the path) matches one of
        them are shown. This command honors --type when it is --type=service
        or --type=target (and --now for some basics)."""
result = []
if self._now:
basics = self.list_service_unit_basics()
result = [ (name, sysv + " " + filename) for name, sysv, filename in basics ]
elif self._unit_type == "target":
result = self.list_target_unit_files()
elif self._unit_type == "service":
result = self.list_service_unit_files()
elif self._unit_type:
logg.warning("unsupported unit --type=%s", self._unit_type)
else:
result = self.list_target_unit_files()
result += self.list_service_unit_files(*modules)
if self._no_legend:
return result
found = "%s unit files listed." % len(result)
return [ ("UNIT FILE", "STATE") ] + result + [ ("", ""), (found, "") ]
##
##
def get_description(self, unit, default = None):
return self.get_description_from(self.load_unit_conf(unit))
def get_description_from(self, conf, default = None): # -> text
""" Unit.Description could be empty sometimes """
if not conf: return default or ""
description = conf.get("Unit", "Description", default or "")
return self.expand_special(description, conf)
def read_pid_file(self, pid_file, default = None):
pid = default
if not pid_file:
return default
if not os.path.isfile(pid_file):
return default
if self.truncate_old(pid_file):
return default
try:
# some pid-files from applications contain multiple lines
for line in open(pid_file):
if line.strip():
pid = to_intN(line.strip())
break
except Exception as e:
logg.warning("bad read of pid file '%s': %s", pid_file, e)
return pid
def wait_pid_file(self, pid_file, timeout = None): # -> pid?
""" wait some seconds for the pid file to appear and return the pid """
timeout = int(timeout or (DefaultTimeoutStartSec/2))
timeout = max(timeout, (MinimumTimeoutStartSec))
dirpath = os.path.dirname(os.path.abspath(pid_file))
for x in xrange(timeout):
if not os.path.isdir(dirpath):
time.sleep(1) # until TimeoutStartSec/2
continue
pid = self.read_pid_file(pid_file)
if not pid:
time.sleep(1) # until TimeoutStartSec/2
continue
if not pid_exists(pid):
time.sleep(1) # until TimeoutStartSec/2
continue
return pid
return None
def test_pid_file(self, unit): # -> text
""" support for the testsuite.py """
conf = self.get_unit_conf(unit)
return self.pid_file_from(conf) or self.get_status_file_from(conf)
def pid_file_from(self, conf, default = ""):
""" get the specified pid file path (not a computed default) """
pid_file = self.get_pid_file(conf) or default
return os_path(self._root, self.expand_special(pid_file, conf))
def get_pid_file(self, conf, default = None):
return conf.get("Service", "PIDFile", default)
def read_mainpid_from(self, conf, default = None):
""" MAINPID is either the PIDFile content written from the application
or it is the value in the status file written by this systemctl.py code """
pid_file = self.pid_file_from(conf)
if pid_file:
return self.read_pid_file(pid_file, default)
status = self.read_status_from(conf)
if "MainPID" in status:
return to_intN(status["MainPID"], default)
return default
def clean_pid_file_from(self, conf):
pid_file = self.pid_file_from(conf)
if pid_file and os.path.isfile(pid_file):
try:
os.remove(pid_file)
except OSError as e:
logg.warning("while rm %s: %s", pid_file, e)
self.write_status_from(conf, MainPID=None)
def get_status_file(self, unit): # for testing
conf = self.get_unit_conf(unit)
return self.get_status_file_from(conf)
def get_status_file_from(self, conf, default = None):
status_file = self.get_StatusFile(conf)
        # this is not a real setting, but do the expand_special anyway
return os_path(self._root, self.expand_special(status_file, conf))
def get_StatusFile(self, conf, default = None): # -> text
""" file where to store a status mark """
status_file = conf.get("Service", "StatusFile", default)
if status_file:
return status_file
root = conf.root_mode()
folder = get_PID_DIR(root)
name = "%s.status" % conf.name()
return os.path.join(folder, name)
def clean_status_from(self, conf):
status_file = self.get_status_file_from(conf)
if os.path.exists(status_file):
os.remove(status_file)
conf.status = {}
def write_status_from(self, conf, **status): # -> bool(written)
""" if a status_file is known then path is created and the
give status is written as the only content. """
status_file = self.get_status_file_from(conf)
# if not status_file: return False
dirpath = os.path.dirname(os.path.abspath(status_file))
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
if conf.status is None:
conf.status = self.read_status_from(conf)
if True:
for key in sorted(status.keys()):
value = status[key]
if key.upper() == "AS": key = "ActiveState"
if key.upper() == "EXIT": key = "ExecMainCode"
if value is None:
try: del conf.status[key]
except KeyError: pass
else:
conf.status[key] = strE(value)
try:
with open(status_file, "w") as f:
for key in sorted(conf.status):
value = conf.status[key]
if key == "MainPID" and str(value) == "0":
logg.warning("ignore writing MainPID=0")
continue
content = "{}={}\n".format(key, str(value))
logg.debug("writing to %s\n\t%s", status_file, content.strip())
f.write(content)
except IOError as e:
logg.error("writing STATUS %s: %s\n\t to status file %s", status, e, status_file)
return True
def read_status_from(self, conf):
status_file = self.get_status_file_from(conf)
status = {}
# if not status_file: return status
if not os.path.isfile(status_file):
if DEBUG_STATUS: logg.debug("no status file: %s\n returning %s", status_file, status)
return status
if self.truncate_old(status_file):
if DEBUG_STATUS: logg.debug("old status file: %s\n returning %s", status_file, status)
return status
try:
if DEBUG_STATUS: logg.debug("reading %s", status_file)
for line in open(status_file):
if line.strip():
m = re.match(r"(\w+)[:=](.*)", line)
if m:
key, value = m.group(1), m.group(2)
if key.strip():
status[key.strip()] = value.strip()
else: #pragma: no cover
logg.warning("ignored %s", line.strip())
except:
logg.warning("bad read of status file '%s'", status_file)
return status
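    # A minimal sketch of the status-file format read and written above; each
    # line is a simple KEY=VALUE pair (the values below are hypothetical):
    #   MainPID=2211
    #   ActiveState=active
    #   ExecMainCode=0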
def get_status_from(self, conf, name, default = None):
if conf.status is None:
conf.status = self.read_status_from(conf)
return conf.status.get(name, default)
def set_status_from(self, conf, name, value):
if conf.status is None:
conf.status = self.read_status_from(conf)
if value is None:
try: del conf.status[name]
except KeyError: pass
else:
conf.status[name] = value
#
def get_boottime(self):
""" detects the boot time of the container - in general the start time of PID 1 """
if self._boottime is None:
self._boottime = self.get_boottime_from_proc()
assert self._boottime is not None
return self._boottime
def get_boottime_from_proc(self):
""" detects the latest boot time by looking at the start time of available process"""
pid1 = BOOT_PID_MIN or 0
pid_max = BOOT_PID_MAX
if pid_max < 0:
pid_max = pid1 - pid_max
for pid in xrange(pid1, pid_max):
proc = _proc_pid_stat.format(**locals())
try:
if os.path.exists(proc):
# return os.path.getmtime(proc) # did sometimes change
return self.path_proc_started(proc)
except Exception as e: # pragma: no cover
logg.warning("boottime - could not access %s: %s", proc, e)
if DEBUG_BOOTTIME:
logg.debug(" boottime from the oldest entry in /proc [nothing in %s..%s]", pid1, pid_max)
return self.get_boottime_from_old_proc()
def get_boottime_from_old_proc(self):
booted = time.time()
for pid in os.listdir(_proc_pid_dir):
proc = _proc_pid_stat.format(**locals())
try:
if os.path.exists(proc):
# ctime = os.path.getmtime(proc)
ctime = self.path_proc_started(proc)
if ctime < booted:
booted = ctime
except Exception as e: # pragma: no cover
logg.warning("could not access %s: %s", proc, e)
return booted
    # Use the system uptime, the process run time in clock ticks, and the current time
    # to determine when a process started. The modified timestamp of the /proc stat file
    # cannot be used because it is not static; clock ticks, on the other hand, advance
    # linearly on Linux.
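    # A small worked example of the arithmetic below, assuming the usual Linux
    # clock tick rate of 100 Hz (_SC_CLK_TCK == 100): if /proc/<pid>/stat says
    # the process started 5000 ticks after boot (50.0 s) and /proc/uptime
    # reports 120.0 s, then with now = 1_000_000.0 the start time is
    #   started_time = now - (uptime_secs - started_secs) = 1_000_000.0 - 70.0 = 999_930.0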
def path_proc_started(self, proc):
#get time process started after boot in clock ticks
with open(proc) as file_stat:
data_stat = file_stat.readline()
file_stat.close()
stat_data = data_stat.split()
started_ticks = stat_data[21]
# man proc(5): "(22) starttime = The time the process started after system boot."
# ".. the value is expressed in clock ticks (divide by sysconf(_SC_CLK_TCK))."
# NOTE: for containers the start time is related to the boot time of host system.
clkTickInt = os.sysconf_names['SC_CLK_TCK']
clockTicksPerSec = os.sysconf(clkTickInt)
started_secs = float(started_ticks) / clockTicksPerSec
if DEBUG_BOOTTIME:
logg.debug(" BOOT .. Proc started time: %.3f (%s)", started_secs, proc)
# this value is the start time from the host system
# Variant 1:
system_uptime = _proc_sys_uptime
with open(system_uptime,"rb") as file_uptime:
data_uptime = file_uptime.readline()
file_uptime.close()
uptime_data = data_uptime.decode().split()
uptime_secs = float(uptime_data[0])
if DEBUG_BOOTTIME:
logg.debug(" BOOT 1. System uptime secs: %.3f (%s)", uptime_secs, system_uptime)
#get time now
now = time.time()
started_time = now - (uptime_secs - started_secs)
if DEBUG_BOOTTIME:
logg.debug(" BOOT 1. Proc has been running since: %s" % (datetime.datetime.fromtimestamp(started_time)))
# Variant 2:
system_stat = _proc_sys_stat
system_btime = 0.
with open(system_stat,"rb") as f:
for line in f:
assert isinstance(line, bytes)
if line.startswith(b"btime"):
system_btime = float(line.decode().split()[1])
if DEBUG_BOOTTIME:
logg.debug(" BOOT 2. System btime secs: %.3f (%s)", system_btime, system_stat)
| |
<filename>tethysapp/app_store/original_code.py
import shutil
import yaml
import os
import git
from subprocess import (call, Popen, PIPE, STDOUT)
from argparse import Namespace
from conda.cli.python_api import run_command as conda_run, Commands
from django.core.exceptions import ObjectDoesNotExist
from tethys_apps.cli.cli_colors import pretty_output, FG_RED, FG_BLUE, FG_YELLOW
from tethys_apps.cli.services_commands import (services_create_persistent_command, services_create_spatial_command,
services_create_dataset_command, services_create_wps_command,
services_list_command)
from tethys_apps.cli.syncstores_command import syncstores_command
from tethys_apps.utilities import link_service_to_app_setting
FNULL = open(os.devnull, 'w')
serviceLinkParam = {
'spatial': 'ds_spatial',
"dataset": 'ds_dataset',
"persistent": 'ps_database',
'wps': 'wps'
}
tethysapp_dir = os.path.dirname(os.path.dirname(__file__))
root_app_path = os.path.join(tethysapp_dir, 'tethysapp')
thredds_dir = os.path.join(os.path.dirname(tethysapp_dir), 'thredds', 'public')
def write_error(msg):
with pretty_output(FG_RED) as p:
p.write(msg)
exit(1)
def write_msg(msg):
with pretty_output(FG_YELLOW) as p:
p.write(msg)
def create_services(service, create_service, config):
newService = None
try:
newService = service.objects.get(name=config['name'])
write_msg('Service with name "{0}" already exists. Skipping add.'.format(config['name']))
except ObjectDoesNotExist:
if not service:
            write_error('Invalid Service Type : {0}.'.format(service))
serviceMethod = create_service
tempNS = Namespace()
for conf in config.keys():
setattr(tempNS, conf, config[conf])
newService = serviceMethod(tempNS)
def get_service_from_id(id):
from tethys_services.models import (SpatialDatasetService, PersistentStoreService,
DatasetService, WebProcessingService)
try:
persistent_entries = PersistentStoreService.objects.get(id=id) # noqa: F841
return {"service_type": "persistent",
"linkParam": serviceLinkParam['persistent']}
except ObjectDoesNotExist:
pass
try:
entries = SpatialDatasetService.objects.get(id=id) # noqa: F841
return {"service_type": "spatial",
"linkParam": serviceLinkParam['spatial']}
except ObjectDoesNotExist:
pass
try:
entries = DatasetService.objects.get(id=id) # noqa: F841
return {"service_type": "dataset",
"linkParam": serviceLinkParam['dataset']}
except ObjectDoesNotExist:
pass
try:
entries = WebProcessingService.objects.get(id=id) # noqa: F841
return {"service_type": "wps",
"linkParam": serviceLinkParam['persistent']}
except ObjectDoesNotExist:
pass
return False
def get_service_from_name(name):
from tethys_services.models import (SpatialDatasetService, PersistentStoreService,
DatasetService, WebProcessingService)
try:
persistent_entries = PersistentStoreService.objects.get(name=name) # noqa: F841
return {"service_type": "persistent",
"linkParam": serviceLinkParam['persistent']}
except ObjectDoesNotExist:
pass
try:
entries = SpatialDatasetService.objects.get(name=name) # noqa: F841
return {"service_type": "spatial",
"linkParam": serviceLinkParam['spatial']}
except ObjectDoesNotExist:
pass
try:
entries = DatasetService.objects.get(name=name) # noqa: F841
return {"service_type": "dataset",
"linkParam": serviceLinkParam['dataset']}
except ObjectDoesNotExist:
pass
try:
entries = WebProcessingService.objects.get(name=name) # noqa: F841
return {"service_type": "wps",
"linkParam": serviceLinkParam['wps']}
except ObjectDoesNotExist:
pass
return False
# Pulling this function out so I can mock this for inputs to the interactive mode
def get_interactive_input():
return input("")
def get_service_name_input():
return input("")
def parse_id_input(inputResponse):
id_search = False
try:
ids = inputResponse.split(',')
ids = list(map(lambda x: int(x), ids))
id_search = True
except ValueError:
ids = [inputResponse]
pass
return id_search, ids
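# A small usage sketch of parse_id_input (the function is pure, so the calls
# below are safe); the service name used here is hypothetical:
def _parse_id_input_examples():
    # numeric, comma separated input is treated as a list of service IDs
    assert parse_id_input("1,2") == (True, [1, 2])
    # anything non-numeric falls back to a single-name lookup
    assert parse_id_input("primary_db") == (False, ["primary_db"])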
def run_interactive_services(app_name):
write_msg('Running Interactive Service Mode. '
'Any configuration options in install.yml for services will be ignored...')
# List existing services
tempNS = Namespace()
for conf in ['spatial', 'persistent', 'wps', 'dataset']:
setattr(tempNS, conf, False)
services_list_command(tempNS)
    write_msg('Please enter the service ID/Name to link one of the above listed services.')
    write_msg('You may also enter a comma separated list of service ids : (1,2).')
    write_msg('Just hit return if you wish to skip this step and move on to creating your own services.')
valid = False
while not valid:
try:
response = get_interactive_input()
if response != "":
# Parse Response
id_search, ids = parse_id_input(response)
for service_id in ids:
if id_search:
service = get_service_from_id(service_id)
else:
service = get_service_from_name(service_id)
if service:
# Ask for app setting name:
                            write_msg(
                                'Please enter the name of the service from your app.py (e.g. "catalog_db")')
setting_name = get_service_name_input()
link_service_to_app_setting(service['service_type'],
service_id,
app_name,
service['linkParam'],
setting_name)
valid = True
else:
write_msg(
"Please run 'tethys services create -h' to create services via the command line.")
valid = True
except (KeyboardInterrupt, SystemExit):
with pretty_output(FG_YELLOW) as p:
p.write('\nInstall Command cancelled.')
exit(0)
def find_and_link(service_type, setting_name, service_id, app_name):
service = get_service_from_name(service_id)
if service:
link_service_to_app_setting(service['service_type'],
service_id,
app_name,
service['linkParam'],
setting_name)
else:
with pretty_output(FG_RED) as p:
p.write(
'Warning: Could not find service of type: {} with the name/id: {}'.format(service_type, service_id))
def run_portal_init(service_models, file_path, app_name):
if file_path is None:
file_path = './portal.yml'
if not os.path.exists(file_path):
write_msg("No Portal Services file found. Moving to look for local app level services.yml...")
return False
try:
write_msg("Portal init file found...Processing...")
with open(file_path) as f:
portal_options = yaml.safe_load(f)
except Exception as e:
with pretty_output(FG_RED) as p:
p.write(e)
p.write('An unexpected error occurred reading the file. Please try again.')
return False
if "apps" in portal_options and app_name in portal_options['apps'] and 'services' in portal_options['apps'][app_name]:
services = portal_options['apps'][app_name]['services']
if services and len(services) > 0:
for service_type in services:
if services[service_type] is not None:
current_services = services[service_type]
for service_setting_name in current_services:
find_and_link(service_type, service_setting_name,
current_services[service_setting_name], app_name)
else:
write_msg("No app configuration found for app: {} in portal config file. ".format(app_name))
else:
write_msg("No apps configuration found in portal config file. ".format(app_name))
return True
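# A minimal sketch of the portal.yml layout run_portal_init expects; the app
# name, setting names, and service names below are hypothetical:
#
#   apps:
#     my_first_app:
#       services:
#         persistent:
#           catalog_db: my_postgres_service
#         spatial:
#           main_geoserver: my_geoserver_service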
def install_dependencies(conda_config, pip_config):
# Add all channels listed in the file.
if "channels" in conda_config and conda_config['channels'] and len(conda_config['channels']) > 0:
channels = conda_config['channels']
for channel in channels:
[resp, err, code] = conda_run(
Commands.CONFIG, "--prepend", "channels", channel, use_exception_handler=False)
# Install all Dependencies
if "dependencies" in conda_config and conda_config['dependencies'] and len(conda_config['dependencies']) > 0:
dependencies = conda_config['dependencies']
with pretty_output(FG_BLUE) as p:
p.write('Installing Dependencies.....')
[resp, err, code] = conda_run(
Commands.INSTALL, *dependencies, use_exception_handler=False, stdout=None, stderr=None)
if code != 0:
with pretty_output(FG_RED) as p:
p.write('Warning: Dependencies installation ran into an error. Please try again or a manual install')
if pip_config and len(pip_config) > 0:
for pip_req in pip_config:
from pip._internal import main as pip
pip(['install', '--user', pip_req])
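# The shapes install_dependencies expects for its two arguments, mirroring the
# requirements section of an install.yml (package names are hypothetical):
#
#   conda_config = {"channels": ["conda-forge"],
#                   "dependencies": ["numpy", "requests"]}
#   pip_config = ["some-pip-only-package"]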
def run_services(services_config, file_path, app_name, serviceFileInput):
if serviceFileInput is None:
file_path = './services.yml'
else:
file_path = serviceFileInput
if not os.path.exists(file_path):
write_msg("No Services init file found. Skipping app service installation")
return
try:
with open(file_path) as f:
init_options = yaml.safe_load(f)
except Exception as e:
with pretty_output(FG_RED) as p:
p.write(e)
p.write('An unexpected error occurred reading the file. Please try again.')
exit(1)
# Setup any services that need to be setup
services = init_options
interactive_mode = False
skip = False
if "skip" in services:
skip = services['skip']
del services['skip']
if "interactive" in services:
interactive_mode = services['interactive']
del services['interactive']
if not skip:
if interactive_mode:
run_interactive_services(app_name)
else:
if services and len(services) > 0:
if 'version' in services:
del services['version']
for service_type in services:
if services[service_type] is not None:
current_services = services[service_type]
for service_setting_name in current_services:
find_and_link(service_type, service_setting_name,
current_services[service_setting_name], app_name)
write_msg("Services Configuration Completed.")
else:
write_msg("Skipping services configuration, Skip option found.")
def process_production_services(service_options, service_models):
creators = {
'spatial': services_create_spatial_command,
"dataset": services_create_dataset_command,
"persistent": services_create_persistent_command,
'wps': services_create_wps_command
}
for service_type in service_options:
if service_options[service_type]:
for service in service_options[service_type]:
create_services(service_models[service_type], creators[service_type], service)
def process_production_apps(apps):
for app_name in apps:
if "source" in apps[app_name]:
write_msg("Pulling application from source....")
dir_path = os.path.join(root_app_path, app_name)
service_file_path = os.path.join(dir_path, 'services.yml')
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
os.mkdir(dir_path)
repo = git.Repo.init(dir_path)
origin = repo.create_remote('origin', apps[app_name]["source"]["url"])
origin.fetch()
branch = "master"
if "branch" in apps[app_name]["source"]:
branch = apps[app_name]["source"]["branch"]
repo.git.checkout(branch)
for root, dirs, files in os.walk(dir_path):
for momo in dirs:
os.chmod(os.path.join(root, momo), 0o755)
for momo in files:
os.chmod(os.path.join(root, momo), 0o755)
if "services" in apps[app_name]:
with open(service_file_path, 'w') as outfile:
yaml.dump(apps[app_name]["services"], outfile)
else:
if os.path.isfile(service_file_path):
os.remove(service_file_path)
# Run the app install command with new params
tempNS = Namespace()
setattr(tempNS, 'file', os.path.join(dir_path, 'install.yml'))
setattr(tempNS, 'services_file', service_file_path)
setattr(tempNS, 'portal_file', None)
setattr(tempNS, 'exit', False)
init_command(tempNS)
if "custom_settings" in apps[app_name]:
process_custom_settings(apps[app_name]['custom_settings'], app_name)
else:
write_error("No App source present for App: {}. Aborting.".format(app_name))
def process_custom_settings(settings, app_name):
write_msg("Processing Custom Settings for app {}".format(app_name))
try:
# Try to get the app
from tethys_apps.models import TethysApp
db_app = TethysApp.objects.get(package=app_name)
if db_app is None:
write_msg("Cannot configure custom settings. Check errors...")
custom_settings = db_app.custom_settings
for setting_name in settings:
try:
custom_setting = custom_settings.get(name=setting_name)
value = settings[setting_name]
if value == "thredds_dir":
value = os.path.join(thredds_dir, app_name)
custom_setting.set_value(value)
custom_setting.save()
except ObjectDoesNotExist:
write_msg("Custom setting doesn't exist : {}".format(setting_name))
except Exception as e:
print(e)
def process_portal_settings(settings):
write_msg("Processing Portal Settings")
try:
# Try to get the settings
from tethys_config.models import Setting
for new_setting in settings:
portal_setting_obj = Setting.objects.get(name=new_setting)
setattr(portal_setting_obj, 'content', settings[new_setting])
portal_setting_obj.save()
except Exception as e:
print(e)
def run_production_install(file_path, service_models):
if file_path is None:
return
if not os.path.exists(file_path):
write_error("No Production File found at that path. Aborting. ")
try:
with open(file_path) as f:
production_options = yaml.safe_load(f)
except Exception as e:
with pretty_output(FG_RED) as p:
p.write(e)
p.write(
'An unexpected error occurred reading the file. Please try again.')
exit(1)
if "services" in production_options:
process_production_services(production_options['services'], service_models)
if "apps" in production_options:
process_production_apps(production_options['apps'])
if "portal_settings" in production_options:
process_portal_settings(production_options['portal_settings'])
write_msg("Syncing database for all installed applications...")
# Run the app install command with new params
tempNS = Namespace()
setattr(tempNS, 'app', ['all'])
setattr(tempNS, 'refresh', None)
setattr(tempNS, 'firsttime', None)
setattr(tempNS, 'database', None)
syncstores_command(tempNS)
exit(0)
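# A hedged sketch of the production file consumed by run_production_install;
# only the three recognized top-level keys are shown, every concrete name/URL is
# hypothetical, and each service entry additionally carries whatever fields the
# matching "tethys services create ..." command expects:
#
#   services:
#     persistent:
#       - name: my_postgres_service
#   apps:
#     my_first_app:
#       source:
#         url: https://github.com/example/my_first_app.git
#         branch: master
#       services:
#         persistent:
#           catalog_db: my_postgres_service
#       custom_settings:
#         data_path: thredds_dir   # the literal "thredds_dir" is replaced by a per-app path
#   portal_settings:
#     Site Title: My Portal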
def init_command(args):
"""
Init Command
"""
# Have to import within function or else install partial on a system fails
from tethys_services.models import (
SpatialDatasetService, DatasetService, PersistentStoreService, WebProcessingService)
service_models = {
'spatial': SpatialDatasetService,
"dataset": DatasetService,
"persistent": PersistentStoreService,
'wps': WebProcessingService
}
app_name = None
if 'production_file' | |
regression.
    Quantile regression allows estimating the percentiles
    of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() -
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
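# A small worked example of the pinball loss computed by QuantileLossFunction
# (unweighted branch), using alpha = 0.9:
#   y = [1, 2, 3], pred = [2, 2, 2]  ->  diff = [-1, 0, 1], mask = (y > pred) = [F, F, T]
#   loss = (0.9 * 1 - 0.1 * (-1)) / 3 = 1.0 / 3 ~= 0.333
# Over-predictions are thus penalised far less than under-predictions when a
# high percentile is requested.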
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
the does not support probabilities raises AttributeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes; got {1:d} class(es)"
.format(self.__class__.__name__, n_classes))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
        our node estimate is given by:
            sum(w * (y - prob)) / sum(w * prob * (1 - prob))
        and we take advantage of the fact that y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
# prevents overflow and division by zero
if abs(denominator) < 1e-150:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
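    # Note on the update above: since residual = y - prob for the binomial
    # deviance, (y - residual) * (1 - y + residual) equals prob * (1 - prob), so
    # numerator / denominator is exactly the Newton-Raphson step
    # sum(w * (y - prob)) / sum(w * prob * (1 - prob)) quoted in the docstring.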
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
# prevents overflow and division by zero
if abs(denominator) < 1e-150:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
<NAME>, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes; got {1:d} class(es)"
.format(self.__class__.__name__, n_classes))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
# prevents overflow and division by zero
if abs(denominator) < 1e-150:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
        # print verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, criterion,
min_samples_split, min_samples_leaf, min_weight_fraction_leaf,
max_depth, min_impurity_decrease, min_impurity_split,
init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto',
validation_fraction=0.1, n_iter_no_change=None,
tol=1e-4):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.criterion = criterion
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.presort = presort
self.validation_fraction = validation_fraction
self.n_iter_no_change = n_iter_no_change
self.tol = tol
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
| |
networks.
Outputs:
-True if the network is loaded, false otherwise.
"""
return (username in self.networks) and (network_ID in self.networks[username])
def add_user_tab(self, username):
"""Add a new user entry inside of networks if not already in
Inputs:
-user : A string containing the name of the owner of the network.
"""
if not (username in self.networks):
self.networks[username] = dict()
self.tasks[username] = dict()
self.plots[username] = dict()
def count_neuron(self, username):
"""Count the total number of neurones in all the networks currently loaded by an user.
Inputs:
-user : A string containing the name of the owner of the networks.
Outputs:
The function return the sum of all the size of the networks that are loaded.
"""
n = 0
for i in self.networks[username]:
n += self.networks[username][i].network.N
return n
def add_network_tab(self, username, network_ID, size):
"""Add a new network entry inside of networks if the network can be loaded from the data base.
If the same user is already using to much resources, the function is not successfull.
Inputs:
-user : A string containing the name of the owner of the network.
-network_ID : A string containing the id of the network, with user, allows to identify the network in the data base.
-size : The size of the network you are trying to add.
Outputs:
If the network is added succesfully, the function returns 0, if too many neuronnes where already loaded, 1 and 2 if the network was already loaded.
"""
if ((self.count_neuron(username) + size) > settings.MAX_NEURONE):
return 1
if not (self.exist_entry(username, network_ID)):
self.networks[username][network_ID] = None
self.tasks[username][network_ID] = []
self.plots[username][network_ID] = []
return 0
return 2
def load_network(self, request, user, network_ID, auto_load = False):
"""Load a new network inside of networks if the network can be loaded from the data base.
If the same user is already using to much resources, the function is not successfull.
If it is the first time the user load a network, the user is added as an entry inside of networks.
Inputs:
-request: HTTP request from the front-end.
-user : A string containing the name of the owner of the network.
-network_ID : A string containing the id of the network, with user, allows to identify the network in the data base.
-auto_load : A boolean value, if True, the auto saved files must be loaded, if False, the network gets loaded normally.
"""
self.add_user_tab(user.username)
network = net.NetworkRunner.load_network(user.username, network_ID, auto_load)
err = self.add_network_tab(user.username, network_ID, network.network.N)
if err == 0:
self.networks[user.username][network_ID] = network
return HttpResponse('loaded')
elif err == 1:
messages.error(request, "Too many neurones already loaded")
return HttpResponseRedirect('/accounts/')
else:
messages.error(request, "Network already loaded")
return HttpResponseRedirect('/accounts/')
def add_task(self, request, user, network_ID, corpus_ID, start, stop, task):
"""Add a new (testing or training task) to perform to the network.
Fails if the network isn't loaded.
Inputs:
-request: HTTP request from the front-end.
-user : A string containing the name of the owner of the network.
-network_ID : A string containing the id of the network, with user, allows to identify the network in the dictionnary networks.
-corpus_ID : The ID of the corpus to load that is already uploaded on the server.
-start : An intenger that represent the line of the corpus where to begin the task.
-stop : An intenger that represent the line of the corpus where to stop the task.
-task : The type of task that the network shall perform
"""
if not (self.exist_entry(user.username, network_ID)):
return HttpResponseBadRequest("The network isn't loaded")
else:
CorpusObject = get_object_or_404(Corpus, pk=corpus_ID)
if (CorpusObject.size <= int(stop)):
return HttpResponseBadRequest("The stop point must be lower than the corpus size")
if (int(start) < 0):
return HttpResponseBadRequest("The start point must be greater or equal to 0")
if self.networks[user.username][network_ID].add_task(CorpusObject.path, int(start), int(stop), task):
infos = {
"corpus": CorpusObject.name,
"type": task,
"start": start,
"stop": stop
}
                self.tasks[user.username][network_ID].append(infos)  # every time a task is added, the tab's modals need to be updated
messages.info(request, 'task successfully added')
return HttpResponseRedirect('/accounts/')
messages.error(request, 'error at task creation')
return HttpResponseBadRequest("The task couldn't be added")
def add_observable(self, request, user, network_ID, observable, periodicity):
"""Add a new observable for the network to compute and link it to a free Bokeh instance.
Fails if the network isn't loaded.
Inputs:
-request: HTTP request from the front-end.
-user : A string containing the name of the owner of the network.
        -network_ID : A string containing the id of the network, which, together with user, identifies the network in the dictionary networks.
        -observable : The name of the observable to compute
        -periodicity : This integer tells how many iterations there are between two computations of the observable
"""
if not (self.exist_entry(user.username, network_ID)):
messages.error(request, "The network doesn't exist")
return HttpResponseRedirect('/accounts/')
else:
if (len(self.plots) < settings.MAX_BOKEH):
self.networks[user.username][network_ID].add_observable(observable, int(periodicity))
self.plots[user.username][network_ID].append({'plot': make_ajax_plot(observable, self.networks[user.username][network_ID].size_observable(observable), network_ID), 'name':"bokeh_" + str(len(self.plots[user.username][network_ID])), 'verbose_name': observable})
                return render(request, 'ARNN/bokeh.html', {'plots': self.plots[user.username][network_ID]})  # render the observable plots
else:
messages.error(request, "Unable to add additional Bokeh instance, max limit reached!")
return HttpResponseRedirect('/accounts/')
def remove_network(self, request, user, network_ID):
"""Remove the corresponding network from networks.
Inputs:
-request: HTTP request from the front-end.
-user : A string containing the name of the owner of the network.
        -network_ID : A string containing the id of the network, which, together with user, identifies the network in the dictionary networks.
"""
if self.exist_entry(user.username, network_ID):
del self.networks[user.username][network_ID]
if (user.username in self.plots) and network_ID in self.plots[user.username]:
del self.plots[user.username][network_ID]
return HttpResponse()
messages.error(request, "This network doesn't exist")
return HttpResponseRedirect('/accounts/')
def toggle_run(self, request, user, network_ID):
"""If the corresponding network is not already performing a task, this function launch the iterations of the network from the current step.
If the network is running, this function stops the task at the current step.
Inputs:
-request: HTTP request from the front-end.
-user : A string containing the name of the owner of the network.
-network_ID : A string containing the id of the network, with user, allows to identify the network in the dictionnary networks.
"""
net = self.networks[user.username][network_ID]
a = "run ?"
a = net.play_pause()
print(a)
return HttpResponse(a)
def get_network_saved(request, pk):
""" This function returns a boolean meaning if the network given has some unsaved data.
Inputs :
-request: HTTP request from the front-end.
Outputs :
-A boolean value
"""
return HttpResponse(get_object_or_404(Network, pk=pk).auto_saved, content_type='bool')
def create(request):
"""This function create a new network, by generating all the matrix that are inside, and add the corresponding entry inside of the database.
Inputs:
-request : HTTP request from the front-end.
"""
if request.method == 'POST':
form = NetworkForm(request.user, request.POST)
if form.is_valid():
new_network = form.save(commit=False)
new_network.owner = request.user
new_network.save()# Create the network in the database
net.NetworkRunner.create(request.user.username, new_network.pk)
messages.info(request, 'Network successfully created!')
net_json = serializers.serialize('json', [new_network,])
return HttpResponse(net_json, content_type='json')
else:
forms=get_basic_forms(request)
forms["network_form"]=form
return render(request, 'ARNN/index.html', forms)
return HttpResponseBadRequest('Error: Not a POST request')
def list_net(request):
""" This function return this list of networks owned by the current user as a JSON.
Inputs :
-request: HTTP request from the front-end.
Outputs :
-A json of the list of networks own by a person
"""
networks = Network.objects.filter(owner=request.user)
net_json = serializers.serialize('json', networks)
return HttpResponse(net_json, content_type='json')
def delete(request, pk):
""" This fonction delete a network from the data base, and from the disk.
Inputs:
-request : HTTP request from the front-end
-pk : the ID of the network to delete
"""
network = get_object_or_404(Network, pk=pk)
network.delete()
path = os.path.join(os.path.join(settings.PATH_TO_USERS_FOLDER, request.user.username), settings.PATH_TO_NETWORKS)
file = os.path.join(path, str(pk)+"W.npy")
if os.path.exists(file):
os.remove(file)
file = os.path.join(path, str(pk)+"Win.npy")
if os.path.exists(file):
os.remove(file)
file = os.path.join(path, str(pk)+"Wout.npy")
if os.path.exists(file):
os.remove(file)
file = os.path.join(path, str(pk)+"~W.npy")
if os.path.exists(file):
os.remove(file)
file = os.path.join(path, str(pk)+"~Win.npy")
if os.path.exists(file):
os.remove(file)
file = os.path.join(path, str(pk)+"~Wout.npy")
if os.path.exists(file):
os.remove(file)
messages.info(request, 'Network successfully deleted!')
return HttpResponseRedirect('/accounts/')
def download(request, pk):
""" This function creates a ZIP archive containing the NPY files (weight matrix and state matrix)
of the network whose primary key is passed as parameter.
Inputs:
-request: HTTP request from the front-end
-pk: The ID of the network whose NPY files need to be downloaded.
"""
network = get_object_or_404(Network, pk=pk)
network_path | |
<filename>ndcube/tests/test_sequence_plotting.py
# -*- coding: utf-8 -*-
import pytest
import datetime
import numpy as np
import astropy.units as u
import matplotlib
from ndcube import NDCube, NDCubeSequence
from ndcube.utils.wcs import WCS
import ndcube.mixins.sequence_plotting
# Set matplotlib display for testing
#matplotlib.use('Agg')
# sample data for tests
# TODO: use a fixture reading from a test file. file TBD.
data = np.array([[[1, 2, 3, 4], [2, 4, 5, 3], [0, -1, 2, 3]],
[[2, 4, 5, 1], [10, 5, 2, 2], [10, 3, 3, 0]]])
data2 = np.array([[[11, 22, 33, 44], [22, 44, 55, 33], [0, -1, 22, 33]],
[[22, 44, 55, 11], [10, 55, 22, 22], [10, 33, 33, 0]]])
ht = {'CTYPE3': 'HPLT-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.5, 'CRPIX3': 0, 'CRVAL3': 0, 'NAXIS3': 2,
'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 0,
'NAXIS2': 3,
'CTYPE1': 'TIME ', 'CUNIT1': 'min', 'CDELT1': 0.4, 'CRPIX1': 0, 'CRVAL1': 0, 'NAXIS1': 4}
wt = WCS(header=ht, naxis=3)
cube1 = NDCube(
data, wt, missing_axis=[False, False, False, True],
extra_coords=[
('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
('distance', None, u.Quantity(0, unit=u.cm)),
('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
cube1_with_unit = NDCube(
data, wt, missing_axis=[False, False, False, True],
unit=u.km,
extra_coords=[
('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
('distance', None, u.Quantity(0, unit=u.cm)),
('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
cube1_with_mask = NDCube(
data, wt, missing_axis=[False, False, False, True],
mask=np.zeros_like(data, dtype=bool),
extra_coords=[
('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
('distance', None, u.Quantity(0, unit=u.cm)),
('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
cube1_with_uncertainty = NDCube(
data, wt, missing_axis=[False, False, False, True],
uncertainty=np.sqrt(data),
extra_coords=[
('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
('distance', None, u.Quantity(0, unit=u.cm)),
('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
cube1_with_unit_and_uncertainty = NDCube(
data, wt, missing_axis=[False, False, False, True],
unit=u.km, uncertainty=np.sqrt(data),
extra_coords=[
('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
('distance', None, u.Quantity(0, unit=u.cm)),
('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
cube3 = NDCube(
data2, wt, missing_axis=[False, False, False, True],
extra_coords=[
('pix', 0, u.Quantity(np.arange(1, data2.shape[0]+1), unit=u.pix) +
cube1.extra_coords['pix']['value'][-1]),
('hi', 1, u.Quantity(range(data2.shape[1]), unit=u.s)),
('distance', None, u.Quantity(2, unit=u.cm)),
('time', None, datetime.datetime(2000, 1, 1, 0, 2))])
cube3_with_unit = NDCube(
data2, wt, missing_axis=[False, False, False, True],
unit=u.m,
extra_coords=[
('pix', 0, u.Quantity(np.arange(1, data2.shape[0]+1), unit=u.pix) +
cube1.extra_coords['pix']['value'][-1]),
('hi', 1, u.Quantity(range(data2.shape[1]), unit=u.s)),
('distance', None, u.Quantity(2, unit=u.cm)),
('time', None, datetime.datetime(2000, 1, 1, 0, 2))])
cube3_with_mask = NDCube(
data2, wt, missing_axis=[False, False, False, True],
mask=np.zeros_like(data2, dtype=bool),
extra_coords=[
('pix', 0, u.Quantity(np.arange(1, data2.shape[0]+1), unit=u.pix) +
cube1.extra_coords['pix']['value'][-1]),
('hi', 1, u.Quantity(range(data2.shape[1]), unit=u.s)),
('distance', None, u.Quantity(2, unit=u.cm)),
('time', None, datetime.datetime(2000, 1, 1, 0, 2))])
cube3_with_uncertainty = NDCube(
data2, wt, missing_axis=[False, False, False, True],
uncertainty=np.sqrt(data2),
extra_coords=[
('pix', 0, u.Quantity(np.arange(1, data2.shape[0]+1), unit=u.pix) +
cube1.extra_coords['pix']['value'][-1]),
('hi', 1, u.Quantity(range(data2.shape[1]), unit=u.s)),
('distance', None, u.Quantity(2, unit=u.cm)),
('time', None, datetime.datetime(2000, 1, 1, 0, 2))])
cube3_with_unit_and_uncertainty = NDCube(
data2, wt, missing_axis=[False, False, False, True],
unit=u.m, uncertainty=np.sqrt(data2),
extra_coords=[
('pix', 0, u.Quantity(np.arange(1, data2.shape[0]+1), unit=u.pix) +
cube1.extra_coords['pix']['value'][-1]),
('hi', 1, u.Quantity(range(data2.shape[1]), unit=u.s)),
('distance', None, u.Quantity(2, unit=u.cm)),
('time', None, datetime.datetime(2000, 1, 1, 0, 2))])
# Define some test NDCubeSequences.
common_axis = 0
seq = NDCubeSequence(data_list=[cube1, cube3, cube1, cube3], common_axis=common_axis)
seq_no_common_axis = NDCubeSequence(data_list=[cube1, cube3, cube1, cube3])
seq_with_units = NDCubeSequence(
data_list=[cube1_with_unit, cube3_with_unit, cube1_with_unit, cube3_with_unit],
common_axis=common_axis)
seq_with_masks = NDCubeSequence(
data_list=[cube1_with_mask, cube3_with_mask, cube1_with_mask, cube3_with_mask],
common_axis=common_axis)
seq_with_unit0 = NDCubeSequence(data_list=[cube1_with_unit, cube3,
cube1_with_unit, cube3], common_axis=common_axis)
seq_with_mask0 = NDCubeSequence(data_list=[cube1_with_mask, cube3,
cube1_with_mask, cube3], common_axis=common_axis)
seq_with_uncertainty = NDCubeSequence(data_list=[cube1_with_uncertainty, cube3_with_uncertainty,
cube1_with_uncertainty, cube3_with_uncertainty],
common_axis=common_axis)
seq_with_some_uncertainty = NDCubeSequence(
data_list=[cube1_with_uncertainty, cube3, cube1, cube3_with_uncertainty],
common_axis=common_axis)
seq_with_units_and_uncertainty = NDCubeSequence(
data_list=[cube1_with_unit_and_uncertainty, cube3_with_unit_and_uncertainty,
cube1_with_unit_and_uncertainty, cube3_with_unit_and_uncertainty],
common_axis=common_axis)
seq_with_units_and_some_uncertainty = NDCubeSequence(
data_list=[cube1_with_unit_and_uncertainty, cube3_with_unit,
cube1_with_unit, cube3_with_unit_and_uncertainty],
common_axis=common_axis)
# Derive some expected data arrays in plot objects.
seq_data_stack = np.stack([cube.data for cube in seq_with_masks.data])
seq_mask_stack = np.stack([cube.mask for cube in seq_with_masks.data])
seq_stack = np.ma.masked_array(seq_data_stack, seq_mask_stack)
seq_stack_km = np.ma.masked_array(
np.stack([(cube.data * cube.unit).to(u.km).value for cube in seq_with_units.data]),
seq_mask_stack)
seq_data_concat = np.concatenate([cube.data for cube in seq_with_masks.data], axis=common_axis)
seq_mask_concat = np.concatenate([cube.mask for cube in seq_with_masks.data], axis=common_axis)
seq_concat = np.ma.masked_array(seq_data_concat, seq_mask_concat)
seq_concat_km = np.ma.masked_array(
np.concatenate([(cube.data * cube.unit).to(u.km).value
for cube in seq_with_units.data], axis=common_axis),
seq_mask_concat)
# Derive expected axis_ranges
x_axis_coords = np.array([0.4, 0.8, 1.2, 1.6]).reshape((1, 1, 4))
new_x_axis_coords_shape = u.Quantity(seq.dimensions, unit=u.pix).value.astype(int)
new_x_axis_coords_shape[-1] = 1
none_axis_ranges_axis3 = [np.arange(len(seq.data)), np.array([0., 2.]), np.array([0., 1.5, 3.]),
np.tile(np.array(x_axis_coords), new_x_axis_coords_shape)]
# Derive expected extents
seq_axis1_lim_deg = [0.49998731, 0.99989848]
seq_axis1_lim_arcsec = [(axis1_xlim*u.deg).to(u.arcsec).value for axis1_xlim in seq_axis1_lim_deg]
seq_axis2_lim_m = [seq[:, :, :, 0].data[0].axis_world_coords()[-1][0].value,
seq[:, :, :, 0].data[0].axis_world_coords()[-1][-1].value]
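# Note on the derived expectations above: the np.stack(...) arrays keep one
# slice per cube (the sequence axis stays separate), the np.concatenate(...)
# arrays join the cubes along common_axis for the cube-like cases, and the
# *_km variants hold the same data converted to km to match the sequences
# built with per-cube units.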
@pytest.mark.parametrize("test_input, test_kwargs, expected_values", [
(seq[:, 0, 0, 0], {},
(np.arange(len(seq.data)), np.array([1, 11, 1, 11]),
"meta.obs.sequence [None]", "Data [None]", (0, len(seq[:, 0, 0, 0].data)-1),
(min([cube.data.min() for cube in seq[:, 0, 0, 0].data]),
max([cube.data.max() for cube in seq[:, 0, 0, 0].data])))),
(seq_with_units[:, 0, 0, 0], {},
(np.arange(len(seq_with_units.data)), np.array([1, 0.011, 1, 0.011]),
"meta.obs.sequence [None]", "Data [km]", (0, len(seq_with_units[:, 0, 0, 0].data)-1),
(min([(cube.data * cube.unit).to(seq_with_units[:, 0, 0, 0].data[0].unit).value
for cube in seq_with_units[:, 0, 0, 0].data]),
max([(cube.data * cube.unit).to(seq_with_units[:, 0, 0, 0].data[0].unit).value
for cube in seq_with_units[:, 0, 0, 0].data])))),
(seq_with_uncertainty[:, 0, 0, 0], {},
(np.arange(len(seq_with_uncertainty.data)), np.array([1, 11, 1, 11]),
"meta.obs.sequence [None]", "Data [None]", (0, len(seq_with_uncertainty[:, 0, 0, 0].data)-1),
(min([cube.data for cube in seq_with_uncertainty[:, 0, 0, 0].data]),
max([cube.data for cube in seq_with_uncertainty[:, 0, 0, 0].data])))),
(seq_with_units_and_uncertainty[:, 0, 0, 0], {},
(np.arange(len(seq_with_units_and_uncertainty.data)), np.array([1, 0.011, 1, 0.011]),
"meta.obs.sequence [None]", "Data [km]",
(0, len(seq_with_units_and_uncertainty[:, 0, 0, 0].data)-1),
(min([(cube.data*cube.unit).to(seq_with_units_and_uncertainty[:, 0, 0, 0].data[0].unit).value
for cube in seq_with_units_and_uncertainty[:, 0, 0, 0].data]),
max([(cube.data*cube.unit).to(seq_with_units_and_uncertainty[:, 0, 0, 0].data[0].unit).value
for cube in seq_with_units_and_uncertainty[:, 0, 0, 0].data])))),
(seq_with_units_and_some_uncertainty[:, 0, 0, 0], {},
(np.arange(len(seq_with_units_and_some_uncertainty.data)), np.array([1, 0.011, 1, 0.011]),
"meta.obs.sequence [None]", "Data [km]",
(0, len(seq_with_units_and_some_uncertainty[:, 0, 0, 0].data)-1),
(min([(cube.data*cube.unit).to(
seq_with_units_and_some_uncertainty[:, 0, 0, 0].data[0].unit).value
for cube in seq_with_units_and_some_uncertainty[:, 0, 0, 0].data]),
max([(cube.data*cube.unit).to(
seq_with_units_and_some_uncertainty[:, 0, 0, 0].data[0].unit).value
for cube in seq_with_units_and_some_uncertainty[:, 0, 0, 0].data])))),
(seq[:, 0, 0, 0], {"axes_coordinates": "distance"},
((seq.sequence_axis_extra_coords["distance"]), np.array([1, 11, 1, 11]),
"distance [{0}]".format(seq.sequence_axis_extra_coords["distance"].unit), "Data [None]",
(min(seq.sequence_axis_extra_coords["distance"].value),
max(seq.sequence_axis_extra_coords["distance"].value)),
(min([cube.data.min() for cube in seq[:, 0, 0, 0].data]),
max([cube.data.max() for cube in seq[:, 0, 0, 0].data])))),
(seq[:, 0, 0, 0], {"axes_coordinates": u.Quantity(np.arange(len(seq.data)), unit=u.cm),
"axes_units": u.km},
(u.Quantity(np.arange(len(seq.data)), unit=u.cm).to(u.km), np.array([1, 11, 1, 11]),
"meta.obs.sequence [km]", "Data [None]",
(min((u.Quantity(np.arange(len(seq.data)), unit=u.cm).to(u.km).value)),
max((u.Quantity(np.arange(len(seq.data)), unit=u.cm).to(u.km).value))),
(min([cube.data.min() for cube in seq[:, 0, 0, 0].data]),
max([cube.data.max() for cube in seq[:, 0, 0, 0].data]))))
])
def test_sequence_plot_1D_plot(test_input, test_kwargs, expected_values):
# Unpack expected values
expected_x_data, expected_y_data, expected_xlabel, expected_ylabel, \
expected_xlim, expected_ylim = expected_values
# Run plot method
output = test_input.plot(**test_kwargs)
# Check values are correct
assert isinstance(output, matplotlib.axes.Axes)
np.testing.assert_array_equal(output.lines[0].get_xdata(), expected_x_data)
np.testing.assert_array_equal(output.lines[0].get_ydata(), expected_y_data)
assert output.axes.get_xlabel() == expected_xlabel
assert output.axes.get_ylabel() == expected_ylabel
output_xlim = output.axes.get_xlim()
assert output_xlim[0] <= expected_xlim[0]
assert output_xlim[1] >= expected_xlim[1]
output_ylim = output.axes.get_ylim()
assert output_ylim[0] <= expected_ylim[0]
assert output_ylim[1] >= expected_ylim[1]
@pytest.mark.parametrize("test_input, test_kwargs, expected_values", [
(seq[:, :, 0, 0], {},
(np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
0.49998731, 0.99989848, 0.49998731, 0.99989848]),
np.array([1, 2, 11, 22, 1, 2, 11, 22]),
"{0} [{1}]".format(seq[:, :, 0, 0].cube_like_world_axis_physical_types[common_axis], "deg"),
"Data [None]", tuple(seq_axis1_lim_deg),
(min([cube.data.min() for cube in seq[:, :, 0, 0].data]),
max([cube.data.max() for cube in seq[:, :, 0, 0].data])))),
(seq_with_units[:, :, 0, 0], {},
(np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
0.49998731, 0.99989848, 0.49998731, 0.99989848]),
np.array([1, 2, 0.011, 0.022, 1, 2, 0.011, 0.022]),
"{0} [{1}]".format(seq[:, :, 0, 0].cube_like_world_axis_physical_types[common_axis], "deg"),
"Data [km]", tuple(seq_axis1_lim_deg),
(min([min((cube.data * cube.unit).to(u.km).value)
for cube in seq_with_units[:, :, 0, 0].data]),
max([max((cube.data * cube.unit).to(u.km).value)
for cube in seq_with_units[:, :, 0, 0].data])))),
(seq_with_uncertainty[:, :, 0, 0], {},
(np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
0.49998731, 0.99989848, 0.49998731, 0.99989848]),
np.array([1, 2, 11, 22, 1, 2, 11, 22]),
"{0} [{1}]".format(
seq_with_uncertainty[:, :, 0, 0].cube_like_world_axis_physical_types[
common_axis], "deg"),
"Data [None]", tuple(seq_axis1_lim_deg),
(min([cube.data.min() for cube in seq_with_uncertainty[:, :, 0, 0].data]),
max([cube.data.max() for cube in seq_with_uncertainty[:, :, 0, 0].data])))),
(seq_with_some_uncertainty[:, :, 0, 0], {},
(np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
0.49998731, 0.99989848, 0.49998731, 0.99989848]),
np.array([1, 2, 11, 22, 1, 2, 11, 22]),
"{0} [{1}]".format(
seq_with_some_uncertainty[:, :, 0, 0].cube_like_world_axis_physical_types[
common_axis], "deg"),
"Data [None]", tuple(seq_axis1_lim_deg),
(min([cube.data.min() for cube in seq_with_some_uncertainty[:, :, 0, 0].data]),
max([cube.data.max() for cube in seq_with_some_uncertainty[:, :, 0, 0].data])))),
(seq_with_units_and_uncertainty[:, :, 0, 0], {},
(np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
0.49998731, 0.99989848, 0.49998731, 0.99989848]),
np.array([1, 2, 0.011, 0.022, 1, 2, 0.011, 0.022]),
"{0} [{1}]".format(
seq_with_units_and_uncertainty[:, :, 0, 0].cube_like_world_axis_physical_types[
common_axis], "deg"),
"Data [km]", tuple(seq_axis1_lim_deg),
(min([min((cube.data * cube.unit).to(u.km).value)
for cube in seq_with_units[:, :, 0, 0].data]),
max([max((cube.data * cube.unit).to(u.km).value)
for cube in seq_with_units[:, :, 0, 0].data])))),
(seq_with_units_and_some_uncertainty[:, :, 0, 0], {},
(np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
0.49998731, 0.99989848, 0.49998731, 0.99989848]),
np.array([1, 2, 0.011, 0.022, 1, 2, 0.011, 0.022]),
"{0} [{1}]".format(
seq_with_units_and_some_uncertainty[:, :, 0, 0].cube_like_world_axis_physical_types[
common_axis], "deg"),
"Data [km]", tuple(seq_axis1_lim_deg),
(min([min((cube.data * cube.unit).to(u.km).value)
for cube in seq_with_units[:, :, 0, 0].data]),
max([max((cube.data * cube.unit).to(u.km).value)
for cube in seq_with_units[:, :, 0, 0].data])))),
(seq[:, :, 0, 0], {"axes_coordinates": "pix"},
(seq[:, :, 0, 0].common_axis_extra_coords["pix"].value,
np.array([1, 2, 11, 22, 1, 2, 11, 22]), "pix [pix]", "Data [None]",
(min(seq[:, :, | |
import mmd_scripting.core.nuthouse01_core as core
import mmd_scripting.core.nuthouse01_pmx_parser as pmxlib
import mmd_scripting.core.nuthouse01_pmx_struct as pmxstruct
_SCRIPT_VERSION = "Script version: Nuthouse01 - v0.6.01 - 7/12/2021"
# This code is free to use and re-distribute, but I cannot be held responsible for damages that it may or may not cause.
#####################
# todo: once help text is properly filled out this will be gui-compatible
helptext = '''=================================================
asdfasdf
'''
"""
fragment detection algorithm:
there is a very slight airgap between all of the pieces, but each individual fragment is airtight. So the algorithm can look for vertices with exactly the same xyz as another vert to tell that they belong to the same fragment! That would be glacially slow, though, unless I use the hash trick (a small sketch of it is included right after this note).
BRUTE FORCE ALGORITHM, no assumptions or optimizations
1. pick a vertex A that hasn't been used yet
2. create a new "fragment set" and add A to it
3. note the size of the "fragment set"
4. find all faces that include any vertex in the "fragment set", whenever i find one, add all verts that it includes to the "fragment set" as well
5. find all vertices that have the same exact coordinates as any vertex in the "fragment set", and add them to the "fragment set"
6. if the size of the fragment set is greater now than it was at step 3, repeat steps 3-6. otherwise, go to step 1.
observation: clustering! all verts of a fragment are contiguous, all faces of a fragment are contiguous.
observation: the first faces are built out of the first vertices, the second faces are built out of the second vertices, etc
optimization: when i flood into a face or vert, everything between that location and the start of the fragment is part of the fragment
optimization: when looking thru "all faces and/or verts", only scan forward X locations from the highest-index face/vert i have found so far
"""
MASS_FACTOR = 10
def dist_to_nearest_vertex(point, vert_set, pmx):
distance_per_vert = []
for v_id in vert_set:
v = pmx.verts[v_id]
delta = [a - b for a, b in zip(point, v.pos)] # [x-x, y-y, z-z]
dist = core.my_euclidian_distance(delta)
distance_per_vert.append(dist)
return min(distance_per_vert)
def dist_to_nearest_point_on_mesh_surface(point, vert_set, pmx, face_set):
    # TODO: I don't want to slog through the math and figure out how to do this.
    # How do I calculate the distance from a point to the nearest point on the triangle-based surface of the fragment it is inside?
    # for each triangle:
    # calculate a plane,
    # calculate the closest point on that plane,
    # ask "is this point inside this triangle?",
    # if yes then cool! save the dist to this point,
    # if no then https://stackoverflow.com/questions/10983872/distance-from-a-point-to-a-polygon, save the dist to the nearest point on the perimeter of the triangle,
    # then you have created a list of the distances from the given point to the nearest point on the face or perimeter of every triangle,
    # so return the minimum distance from that list.
    # A sketch of the closest-point-on-triangle calculation is included right after this function.
return 0
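# The sketch below (not part of the original script) fills in the closest-point-on-triangle math
# described in the comments of dist_to_nearest_point_on_mesh_surface: classify the query point
# into a vertex, edge, or face region of the triangle using barycentric-style dot products, then
# return the closest point in that region. Points are plain [x, y, z] lists as used elsewhere here.
def closest_point_on_triangle(p, a, b, c):
    dot = lambda u, w: u[0]*w[0] + u[1]*w[1] + u[2]*w[2]
    sub = lambda u, w: [u[0]-w[0], u[1]-w[1], u[2]-w[2]]
    ab, ac, ap = sub(b, a), sub(c, a), sub(p, a)
    d1, d2 = dot(ab, ap), dot(ac, ap)
    if d1 <= 0 and d2 <= 0:
        return list(a)                                       # closest to vertex A
    bp = sub(p, b)
    d3, d4 = dot(ab, bp), dot(ac, bp)
    if d3 >= 0 and d4 <= d3:
        return list(b)                                       # closest to vertex B
    vc = d1*d4 - d3*d2
    if vc <= 0 and d1 >= 0 and d3 <= 0:
        v = d1 / (d1 - d3)
        return [a[i] + v*ab[i] for i in range(3)]            # closest to edge AB
    cp = sub(p, c)
    d5, d6 = dot(ab, cp), dot(ac, cp)
    if d6 >= 0 and d5 <= d6:
        return list(c)                                       # closest to vertex C
    vb = d5*d2 - d1*d6
    if vb <= 0 and d2 >= 0 and d6 <= 0:
        w = d2 / (d2 - d6)
        return [a[i] + w*ac[i] for i in range(3)]            # closest to edge AC
    va = d3*d6 - d5*d4
    if va <= 0 and (d4 - d3) >= 0 and (d5 - d6) >= 0:
        w = (d4 - d3) / ((d4 - d3) + (d5 - d6))
        return [b[i] + w*(c[i] - b[i]) for i in range(3)]    # closest to edge BC
    denom = 1.0 / (va + vb + vc)
    v, w = vb*denom, vc*denom
    return [a[i] + ab[i]*v + ac[i]*w for i in range(3)]      # projection lands inside the triangle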
def main(moreinfo=True):
# PROBLEM: the assumption of locality was not correct! verts for a chunk are not clustered! (i think?)
# prompt PMX name
core.MY_PRINT_FUNC("Please enter name of PMX input file:")
input_filename_pmx = core.MY_FILEPROMPT_FUNC("PMX file", ".pmx")
pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
# coordinates are stored as list[x, y, z], convert this --> tuple --> hash for much faster comparing
vert_coord_hashes = [hash(tuple(v.pos)) for v in pmx.verts]
list_of_vert_sets = []
list_of_face_sets = []
list_of_bone_indices = []
list_of_rigidbody_indices = []
# it's simpler to start with a full set of everything used, and then remove things as they are used
# and its no less efficient
all_unused_verts = set(list(range(len(pmx.verts))))
start_vert = 0
start_face = 0
# continue looping for as long as there are verts not in a fragment
while all_unused_verts:
# 1. start a new sets for the vertices and faces
vert_set = set()
face_set = set()
# 2. pick a vertex that hasn't been used yet and add it to the set, ez
start_vert = min(all_unused_verts)
print("start@%d:: " % start_vert, end="")
vert_set.add(start_vert)
'''
# 2b. optimization: a fragment is guaranteed to have at least 4 faces (to make a closed 3d solid) and therefore at least 4 verts
# can i safely assume that they are "sharp" corners and therefore there are 12 verts?
for i in range(3):
vert_set.add(start_vert + i)
# also, init the faces set with the minimum of 4 faces, and add any verts included in those faces to the vert set
for i in range(1):
face_set.add(start_face + i)
for v in pmx.faces[start_face + i]: # for each vert in this face,
vert_set.add(v) # add this vert to the vert set
# guarantee that it is contiguous from start_vert to the highest index that was in the faces
vert_set = set(list(range(start_vert, max(vert_set)+1)))
        # now I have initialized the set with everything I know is guaranteed part of the fragment
highest_known_vert = max(vert_set)
highest_known_face = max(face_set)
'''
# begin looping & flooding until i don't detect any more
while True:
# 3. note the number of verts collected so far
set_size_A = len(vert_set)
# 4. find all faces that include any vertex in the "fragment set",
# whenever i find one, add all verts that it includes to the "fragment set" as well
# zero-assumption brute-force method:
for f_id in range(len(pmx.faces)):
face = pmx.faces[f_id]
if face[0] in vert_set or face[1] in vert_set or face[2] in vert_set: # we got a hit!
face_set.add(f_id)
vert_set.add(face[0])
vert_set.add(face[1])
vert_set.add(face[2])
'''
# optimization: scan only faces index 'highest_known_face+1' thru 'highest_known_face'+LOOKAHEAD
# because 0 thru start_face is guaranteed to not be part of the group
# and start_face thru highest_known_face is already guaranteed to be part of the group
# if chunks are bigger than LOOKAHEAD, then it's not guaranteed to succeed or fail, could do either
for f_id in range(highest_known_face+1, min(highest_known_face+LOOKAHEAD, len(pmx.faces))):
face = pmx.faces[f_id]
if face[0] in vert_set or face[1] in vert_set or face[2] in vert_set:
# we got a hit!
face_set.add(f_id)
vert_set.add(face[0])
vert_set.add(face[1])
vert_set.add(face[2])
# optimization: if this is farther than what i thought was the end, then everything before it should be added too
if f_id > highest_known_face:
for x in range(highest_known_face+1, f_id):
face_set.add(x)
vert_set.add(pmx.faces[x][0])
vert_set.add(pmx.faces[x][1])
vert_set.add(pmx.faces[x][2])
highest_known_face = f_id
'''
set_size_B = len(vert_set)
# update the set of vertex coord hashes for easier comparing
vert_set_hashes = set([vert_coord_hashes[i] for i in vert_set])
# 5. find all vertices that have the same exact coordinates as any vertex in the "fragment set",
            # then add them to the "fragment set"
# zero-assumption brute-force method:
for v_id in range(len(vert_coord_hashes)):
vert_hash = vert_coord_hashes[v_id]
if vert_hash in vert_set_hashes: # we got a hit!
vert_set.add(v_id)
'''
# optimization: scan only verts index 'highest_known_vert+1' thru 'highest_known_vert'+LOOKAHEAD
# because 0 thru start_vert is guaranteed to not be part of the group
# and start_vert thru highest_known_vert is already guaranteed to be part of the group
# if chunks are bigger than LOOKAHEAD, then it's not guaranteed to succeed or fail, could do either
for v_id in range(highest_known_vert+1, min(highest_known_vert+LOOKAHEAD, len(pmx.verts))):
vert_hash = vert_coord_hashes[v_id]
if vert_hash in vert_set_hashes:
# we got a hit!
vert_set.add(v_id)
# optimization: if this is farther than what i thought was the end, then everything before it should be added too
if v_id > highest_known_vert:
for x in range(highest_known_vert+1, v_id):
vert_set.add(x)
highest_known_vert = v_id
'''
set_size_C = len(vert_set)
# print("+%d +%d, " % (set_size_B - set_size_A, set_size_C - set_size_B), end="")
print("+%d, " % (set_size_C - set_size_A), end="")
# 6. if the number of verts did not change, we are done
if set_size_C == set_size_A:
break
pass
print("final size: %d verts, %d faces" % (len(vert_set), len(face_set)))
# print("min=%d, max=%d, contiguous=%s" % (min(vert_set), max(vert_set), str(bool(max(vert_set)-min(vert_set)==(len(vert_set)-1)))))
# 7. now i have a complete fragment in vert_set and face_set !! :)
list_of_vert_sets.append(vert_set)
list_of_face_sets.append(face_set)
# remove all "used" verts from the "unused" set
all_unused_verts.difference_update(vert_set)
# loop & populate another fragment
pass
# done with identifying all fragments!
# double-check that all vertices got sorted into one and only one fragment
assert sum([len(vs) for vs in list_of_vert_sets]) == len(pmx.verts)
temp = set()
for vs in list_of_vert_sets:
temp.update(vs)
assert len(temp) == len(pmx.verts)
# double-check that all faces got sorted into one and only one fragment
assert sum([len(fs) for fs in list_of_face_sets]) == len(pmx.faces)
temp = set()
for fs in list_of_face_sets:
temp.update(fs)
assert len(temp) == len(pmx.faces)
print("")
print("Identified %d discrete fragments!" % (len(list_of_vert_sets),))
# BONES AND WEIGHTS
for fragnum in range(len(list_of_vert_sets)):
# name
newbone_name = "fragment%d" % fragnum
# position: average of all vertices in the fragment? sure why not
# TODO is there a "better" way of calculating the average/centroid/center of mass? idk
newbone_pos = [0,0,0]
for v_id in list_of_vert_sets[fragnum]:
# accumulate the XYZ for each vertex in the fragment
newbone_pos[0] += pmx.verts[v_id].pos[0]
newbone_pos[1] += pmx.verts[v_id].pos[1]
newbone_pos[2] += pmx.verts[v_id].pos[2]
# divide by the number of verts in the fragment to get the average
newbone_pos[0] /= len(list_of_vert_sets[fragnum])
newbone_pos[1] /= len(list_of_vert_sets[fragnum])
newbone_pos[2] /= len(list_of_vert_sets[fragnum])
# create the new bone object
newbone_obj = pmxstruct.PmxBone(
name_jp=newbone_name, name_en=newbone_name, pos=newbone_pos, parent_idx=0, deform_layer=0,
deform_after_phys=False, has_rotate=True, has_translate=True, has_visible=True, has_enabled=True,
has_ik=False, tail_usebonelink=False, tail=[0, 0, 0], inherit_rot=False, inherit_trans=False,
has_fixedaxis=False, has_localaxis=False, has_externalparent=False,
)
# note the index | |
    # np.mean automatically converts integer inputs to float, while TensorFlow's
# reduce_mean does not. For integer inputs, we emulate TensorFlow's behavior
# using np.sum and truncating division.
np_sum = np.sum(x, axis=reduction_axes, keepdims=keepdims)
if np.issubdtype(x.dtype, np.integer):
return np_sum // count
return np_sum / count
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.test_session(use_gpu=True) as sess:
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = sess.run(v)
self.assertAllEqual(tf_v, 0)
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
def testGradient(self):
s = [2, 3, 4, 2]
for dtype in [dtypes.float32, dtypes.float64]:
x = self._makeIncremental(s, dtype)
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
def testEmptyGradients(self):
with self.test_session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_mean(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
def testDegenerate(self):
with self.test_session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_mean(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertTrue(np.all(np.isnan(y)))
class ProdReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_prod(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.prod(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.test_session(use_gpu=True) as sess:
v = math_ops.reduce_prod([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = sess.run(v)
self.assertAllEqual(tf_v, 0)
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testInt32(self):
# Numpy automatically upgrades the type of np.prod from int32 to int64, so
# Numpy does not overflow an int32 np.prod while TensorFlow does. To avoid
# overflow, divide the incremental int32 array by 2.
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32) / 2
self._compareAllAxes(np_arr)
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
def testGradientWithZeros(self):
s = [2, 3, 4, 2]
x = self._makeIncremental(s, dtypes.float32) / 20.
# No zeros in input
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
# Zero at beginning
x1 = x.copy()
x1[:, :, 0, :] = 0
self._compareGradientAxes(x1, rtol=1e-3, atol=1e-3)
# Zero at end
x2 = x.copy()
x2[:, :, -1, :] = 0
self._compareGradientAxes(x2, rtol=1e-3, atol=1e-3)
# Zero in middle
x3 = x.copy()
x3[:, :, 2, :] = 0
self._compareGradientAxes(x3, rtol=1e-3, atol=1e-3)
# All zeros
x4 = x.copy()
x4[:, :, :, :] = 0
self._compareGradientAxes(x4, rtol=1e-3, atol=1e-3)
def testEmptyGradients(self):
with self.test_session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_prod(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
def testDegenerate(self):
with self.test_session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_prod(x, [0])
self.assertAllEqual(y.eval(), np.ones(9938))
class MinReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amin(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amin(np_ans, axis=ra, keepdims=keepdims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_min(x, reduction_axes, keepdims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.test_session(use_gpu=True) as sess:
v = math_ops.reduce_min([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = sess.run(v)
self.assertAllEqual(tf_v, 0)
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testEmptyGradients(self):
with self.test_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_min(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class MaxReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amax(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amax(np_ans, axis=ra, keepdims=keepdims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_max(x, reduction_axes, keepdims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.test_session(use_gpu=True) as sess:
v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = sess.run(v)
self.assertAllEqual(tf_v, 0)
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testInt64Reduce3D(self):
# Create a 3D array of int64s and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.int64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
| |
from __future__ import division
import torch
import torch.nn.functional as F
import numpy as np
def xywh2xyxy(x):
# Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2]
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2
y[:, 1] = x[:, 1] - x[:, 3] / 2
y[:, 2] = x[:, 0] + x[:, 2] / 2
y[:, 3] = x[:, 1] + x[:, 3] / 2
return y
def bbox_ious(boxes1, boxes2):
""" Compute IOU between all boxes from ``boxes1`` with all boxes from ``boxes2``.
Args:
boxes1 (torch.Tensor): List of bounding boxes
boxes2 (torch.Tensor): List of bounding boxes
Note:
List format: [[xc, yc, w, h],...]
"""
b1_len = boxes1.size(0)
b2_len = boxes2.size(0)
b1x1, b1y1 = (boxes1[:, :2] - (boxes1[:, 2:4] / 2)).split(1, 1)
b1x2, b1y2 = (boxes1[:, :2] + (boxes1[:, 2:4] / 2)).split(1, 1)
b2x1, b2y1 = (boxes2[:, :2] - (boxes2[:, 2:4] / 2)).split(1, 1)
b2x2, b2y2 = (boxes2[:, :2] + (boxes2[:, 2:4] / 2)).split(1, 1)
dx = (b1x2.min(b2x2.t()) - b1x1.max(b2x1.t())).clamp(min=0)
dy = (b1y2.min(b2y2.t()) - b1y1.max(b2y1.t())).clamp(min=0)
intersections = dx * dy
areas1 = (b1x2 - b1x1) * (b1y2 - b1y1)
areas2 = (b2x2 - b2x1) * (b2y2 - b2y1)
unions = (areas1 + areas2.t()) - intersections
return intersections / unions
def bbox_iou_xyxy_numpy(boxes1, boxes2):
"""
    :param boxes1: boxes1 and boxes2 may have different shapes, but they must be broadcastable
    :param boxes2: the last dimension must hold the coordinates, stored as (xmin, ymin, xmax, ymax)
    :return: the IOU of boxes1 and boxes2, with shape equal to the broadcast shape[:-1] of the inputs
"""
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
    # Compute the top-left and bottom-right corners of the intersection of boxes1 and boxes2
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
    # Compute the width and height of the intersection of boxes1 and boxes2.
    # When two boxes do not intersect, (right_down - left_up) < 0, so taking the maximum with 0
    # guarantees their IOU is 0 in that case.
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
IOU = 1.0 * inter_area / union_area
return IOU
def bbox_iou_xywh_numpy(boxes1, boxes2):
"""
    :param boxes1: boxes1 and boxes2 may have different shapes, but they must be broadcastable
    :param boxes2: the last dimension must hold the coordinates, stored as (x, y, w, h)
    :return: the IOU of boxes1 and boxes2, with shape equal to the broadcast shape[:-1] of the inputs
    """
    # transform the xywh boxes to xyxy
x1 = boxes1[..., 0:1] - boxes1[..., 2:3]/2
y1 = boxes1[..., 1:2] - boxes1[..., 3:4]/2
x1_2 = boxes1[..., 0:1] + boxes1[..., 2:3]/2
y1_2 = boxes1[..., 1:2] + boxes1[..., 3:4]/2
boxes1 = np.concatenate([x1, y1, x1_2, y1_2], axis=-1)
x2 = boxes2[..., 0:1] - boxes2[..., 2:3]/2
y2 = boxes2[..., 1:2] - boxes2[..., 3:4]/2
x2_2 = boxes2[..., 0:1] + boxes2[..., 2:3]/2
y2_2 = boxes2[..., 1:2] + boxes2[..., 3:4]/2
boxes2 = np.concatenate([x2, y2, x2_2, y2_2], axis=-1)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
    # Compute the top-left and bottom-right corners of the intersection of boxes1 and boxes2
left_up = np.maximum(boxes1[..., None, :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., None, 2:], boxes2[..., 2:])
    # Compute the width and height of the intersection of boxes1 and boxes2.
    # When two boxes do not intersect, (right_down - left_up) < 0, so taking the maximum with 0
    # guarantees their IOU is 0 in that case.
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area[:, None] + boxes2_area - inter_area
IOU = 1.0 * inter_area / union_area
return IOU
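# Illustrative usage sketch (not part of the original module): boxes1 of shape (N, 4) and boxes2
# of shape (M, 4). The xywh variant inserts the extra axis itself and returns an (N, M) IOU
# matrix; the xyxy variant broadcasts the input shapes exactly as given. The numbers are made up.
def _example_numpy_iou():
    anchors = np.array([[50., 50., 20., 40.], [30., 30., 10., 10.]])                    # (2, 4) xywh
    gts = np.array([[45., 55., 25., 35.], [100., 100., 10., 10.], [28., 32., 12., 8.]])  # (3, 4) xywh
    pairwise = bbox_iou_xywh_numpy(anchors, gts)                                        # shape (2, 3)
    one_box = np.array([[40., 30., 60., 70.]])                                          # (1, 4) xyxy
    many_boxes = np.array([[50., 40., 80., 90.], [0., 0., 10., 10.]])                   # (2, 4) xyxy
    elementwise = bbox_iou_xyxy_numpy(one_box, many_boxes)                              # shape (2,)
    return pairwise, elementwise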
# from utils.utils_select_device import select_device
# device = select_device(0)
# def __convert_pred(pred_bbox, test_input_size, valid_scale, conf_thresh=0.01):
# """
# Filter the predicted boxes, removing boxes whose scale is unreasonable
# """
# pred_bbox = pred_bbox.cpu().numpy()
# pred_coor = xywh2xyxy(pred_bbox[:, :4])
# pred_conf = pred_bbox[:, 4]
# pred_prob = pred_bbox[:, 5:]
#
# # (1)
# # (xmin_org, xmax_org) = ((xmin, xmax) - dw) / resize_ratio
# # (ymin_org, ymax_org) = ((ymin, ymax) - dh) / resize_ratio
# # Note that whatever data augmentation is used during training does not affect the conversion here.
# # If transform A was applied to the input test image, the bbox conversion here is simply the inverse of A.
# # org_h, org_w = org_img_shape
# # resize_ratio = min(1.0 * test_input_size / org_w, 1.0 * test_input_size / org_h)
# # dw = (test_input_size - resize_ratio * org_w) / 2
# # dh = (test_input_size - resize_ratio * org_h) / 2
# # pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
# # pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio
#
# # (2) Clip the parts of the predicted bboxes that extend beyond the original image
# pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
# np.minimum(pred_coor[:, 2:], [test_input_size - 1, test_input_size - 1])], axis=-1)
# # (3) Set the coordinates of invalid bboxes to 0
# invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
# pred_coor[invalid_mask] = 0
#
# # (4) Remove bboxes that are not within the valid scale range
# bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
# scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
#
# # (5) Remove bboxes whose score is below score_threshold
# classes = np.argmax(pred_prob, axis=-1)
# scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
# score_mask = scores > conf_thresh
#
# mask = np.logical_and(scale_mask, score_mask)
#
# pred_bbox = pred_bbox[mask]
#
# pred_bbox = torch.Tensor(pred_bbox).to(device)
#
# return pred_bbox
#
# def non_max_suppression(prediction, num_classes, conf_thres=0.01, nms_thres=0.5):
# """
# Removes detections with lower object confidence score than 'conf_thres' and performs
# Non-Maximum Suppression to further filter detections.
# Returns detections with shape:
# (x1, y1, x2, y2, object_conf, class_score, class_pred)
# """
#
# # From (center x, center y, width, height) to (x1, y1, x2, y2)
# box_corner = prediction.new(prediction.shape)
# box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
# box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
# box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
# box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
# prediction[:, :, :4] = box_corner[:, :, :4]
#
# output = [None for _ in range(len(prediction))]
# for image_i, image_pred in enumerate(prediction):
# # Filter out confidence scores below threshold
# class_max = torch.max(image_pred[:, 5:], dim=-1)[1]
# score = image_pred[:, 4]*image_pred[:, 5:][torch.Tensor(np.arange(len(image_pred))).long(), class_max]
# conf_mask = (score >= conf_thres).squeeze()
# #conf_mask = (image_pred[:, 4] >= conf_thres).squeeze()#TODO:score_thershold
# image_pred = image_pred[conf_mask]
# #image_pred = __convert_pred(image_pred, config['img_h'], (0, np.inf))
#
# # If none are remaining => process next image
# if not image_pred.size(0):
# continue
# # Get score and class with highest confidence
# max_pred = torch.max(image_pred[:, 5:5+num_classes], 1, keepdim=True)  # returns (values, indices); only the single most likely class is kept, so one box never predicts two classes at once (this behaviour could be changed with a threshold)
# class_conf, class_pred = max_pred
# # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
# detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1)
# # Iterate through all predicted classes
# unique_labels = detections[:, -1].unique()
# if prediction.is_cuda:
# unique_labels = unique_labels.to(device)
# temp_detections = []
# for label in unique_labels:
# class_detections = detections[detections[:, -1] == label]
# temp_detections.append(nms_class_detections(class_detections, nms_thres))
# nms_detections = temp_detections[0]
# for detection in temp_detections[1:]:
# nms_detections = torch.cat((nms_detections, detection), 0)
# output[image_i] = nms_detections.data
#
# return output
#
# def nms_class_detections(class_detections, nms_thresh):
# flag = torch.Tensor([True] * class_detections.shape[0]).bool().to(device)
# _, sort_index = torch.sort(class_detections[:, 4], dim=0, descending=True)
# class_detections = class_detections[sort_index, :]
# for i in range(len(flag)):
# if flag[i] == True:
# indexs = find_indexTrue_Flag(flag, i)
# iou = bbox_ious(class_detections[i, :4].unsqueeze(0), class_detections[indexs, :4])
# mask_iou = iou < nms_thresh
# flag[indexs] = mask_iou.squeeze()
# if i == len(flag) - 2:
# break
# return class_detections[flag==True, :]
#
# def find_indexTrue_Flag(flag, i):
# indexs = []
# for j in range(i+1, len(flag)):
# index = j
# if flag[j] == True:
# indexs.append(index)
# return indexs
class LabelSmooth(object):
def __init__(self, delta=0.01):
self.delta = delta
def __call__(self, onehot, num_classes):
return onehot * (1 - self.delta) + self.delta * 1.0 / num_classes
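# Illustrative-only sketch (not in the original file): label smoothing pulls the one-hot target
# away from hard 0/1 values. With delta=0.01 and 4 classes the "1" entry becomes
# 1*(1 - 0.01) + 0.01/4 = 0.9925 and each "0" entry becomes 0.0025.
def _example_label_smooth():
    onehot = torch.tensor([0., 0., 1., 0.])
    return LabelSmooth(delta=0.01)(onehot, num_classes=4)  # tensor([0.0025, 0.0025, 0.9925, 0.0025])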
class focal_loss(object):
"""
    Loss to balance the positive and negative samples.
    Args:
        conf: a float tensor of arbitrary shape
            holding sigmoid outputs, i.e. values in [0, 1]
        mask: the 0/1 label for each element in conf
    Returns:
        per-element loss tensor (no reduction is applied).
"""
def __init__(self, gamma=2, weight_pos=-1):
self.gamma = gamma
self.weight_pos = weight_pos
def __call__(self, conf, mask):
pt = mask * conf + (1-mask) * (1-conf)
ce_loss = F.binary_cross_entropy(conf, mask, reduction='none')
if self.weight_pos > 0:
weight = mask * self.weight_pos + (1-mask) * (1-self.weight_pos)
#loss = -1 * weight * (1-pt)**self.gamma * torch.log(pt)
loss = weight * (1-pt)**self.gamma * ce_loss
else:
loss = (1-pt)**self.gamma * ce_loss
return loss
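# Illustrative-only usage sketch (not in the original file): conf holds sigmoid outputs and mask
# the 0/1 targets. With weight_pos left at its default of -1, only the (1 - pt)**gamma modulation
# is applied on top of the element-wise binary cross-entropy.
def _example_focal_loss():
    conf = torch.tensor([0.9, 0.2, 0.6])    # predicted probabilities after sigmoid
    mask = torch.tensor([1.0, 0.0, 1.0])    # ground-truth labels
    return focal_loss(gamma=2)(conf, mask)  # per-element loss, same shape as conf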
class focal_loss_gaussian_weight(object):
"""
    Loss to balance the positive and negative samples.
    Args:
        initial_args:
            gaussian_weight: used to compute self.weight_neg, the negative-sample weight
                for the focal loss
        __call__ args:
            conf: a float tensor of arbitrary shape
                holding sigmoid outputs, i.e. values in [0, 1]
            mask: the 0/1 label for each element in conf
        Returns:
            per-element loss tensor (no reduction is applied).
"""
def __init__(self, gaussian_weight, gamma=2, beta=4):
self.gamma = gamma
self.beta = beta
self.weight_pos = 1
self.weight_neg = torch.pow(1 - gaussian_weight, 4)
#self.gaussian_weight = gaussian_weight
def __call__(self, pred_conf_cls, mask):
pt = mask * pred_conf_cls + (1 - mask) * (1 - pred_conf_cls)
if self.weight_pos > 0:
weight = mask * self.weight_pos + (1 - mask) * self.weight_neg
loss = -1 * weight * (1-pt)**self.gamma * torch.log(pt)
else:
            loss = -1 * (1 - pt) ** self.gamma * torch.log(pt)  # negative sign so the loss is positive, matching the weighted branch above
return loss
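# Illustrative-only sketch (not in the original file): gaussian_weight is assumed to be a penalty
# map peaked at positive locations, so negatives close to a positive are down-weighted by
# (1 - gaussian_weight)**4 while true positives keep a weight of 1.
def _example_focal_loss_gaussian():
    heatmap = torch.tensor([0.9, 0.1, 0.0])   # assumed gaussian penalty map
    pred = torch.tensor([0.8, 0.3, 0.1])      # sigmoid outputs
    mask = torch.tensor([1.0, 0.0, 0.0])      # positive locations
    return focal_loss_gaussian_weight(heatmap)(pred, mask)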
from torch import nn
class HeatmapLoss(nn.Module):
def __init__(self, weight=None, alpha=2, beta=4, reduction='mean'):
super(HeatmapLoss, self).__init__()
        self.alpha = alpha
import tkinter as tk  # possibly necessary, unsure
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
import sys
import os
import re
SCALEDOWNFACTOR = 0.2  # This is a magic number, but I don't have a good way of getting the image resolution.
SCALEUPFACTOR = 2.0
GAIN = 13.2
DISTANCE_BETWEEN_TEETH = 10
PREDEFINED_LENGTH = 5.738
ONLYINCREASING = 0
USING_EXCEL_DATA_CHANGE = 1
TEST = 1
SWITCH_POINT = 20
def imclearborders(imgBW, radius):
# Given a black and white image, first find all of its contours
imgBWcopy = imgBW.copy()
contours,hierarchy = cv2.findContours(imgBWcopy.copy(), cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)
# Get dimensions of image
imgRows = imgBW.shape[0]
imgCols = imgBW.shape[1]
contourList = [] # ID list of contours that touch the border
# For each contour...
for idx in np.arange(len(contours)):
# Get the i'th contour
cnt = contours[idx]
# Look at each point in the contour
for pt in cnt:
rowCnt = pt[0][1]
colCnt = pt[0][0]
# If this is within the radius of the border
# this contour goes bye bye!
check1 = (rowCnt >= 0 and rowCnt < radius) or (rowCnt >= imgRows-1-radius and rowCnt < imgRows)
check2 = (colCnt >= 0 and colCnt < radius) or (colCnt >= imgCols-1-radius and colCnt < imgCols)
if check1 or check2:
contourList.append(idx)
break
for idx in contourList:
cv2.drawContours(imgBWcopy, contours, idx, (0,0,0), -1)
return imgBWcopy
def electricFieldToForce(voltageArray, stressArray):
i = 0
x = 0
xPowerTwo = 0
xPowerThree = 0
xPowerFour = 0
if(USING_EXCEL_DATA_CHANGE == 1):
A = 6*(pow(10, -11))
B = 6*(pow(10, -12))
C = 9*(pow(10, -12))
while(i < len(voltageArray)):
x = voltageArray[i]
xPowerTwo = pow(x, 2)
stressArray[i] = (A*xPowerTwo)+(B*x)+C
i = i + 1
elif(USING_EXCEL_DATA_CHANGE != 1):
A = 4.9*(pow(10, -14))
B = 1.2*(pow(10, -12))
C = 6.5*(pow(10, -11))
D = -2.4*(pow(10, -11))
while(i<len(voltageArray)):
x = voltageArray[i]
xPowerTwo = pow(x, 2)
xPowerThree = pow(x, 3)
xPowerFour = pow(x, 4)
stressArray[i] = (A*xPowerFour)+(B*xPowerThree)+(C*xPowerTwo)+(D*x)
i = i + 1
else:
i == i
return 0
def hysteresis (strainArray, stressArray, switchPoint, TEST, BUG_TESTING_TEXT_OUTPUT_FILE):
#const variable initialization
SIZEOFGUASSDATATWENTYFOUR = 24
#initialization of iterators
i = 0
j = 0
    # initialize int variables
indexWithMaximumValueInStrainArray = -1
midpoint = 0
leftbound = 0
rightbound = 0
GLOuter = 0
GLInner = 0
integral = 0
root = 0
weight = 0
#initialize double variables
maxValueInStrainArray = 0.0
    # initialize list variables
splitStrainArrayIncreasing = []
splitStressArrayIncreasing = []
splitStrainArrayDecreasing = []
splitStressArrayDecreasing = []
#test initialization
if(TEST == 1):
strainArray = [0]
stressArray = [0]
x = np.linspace(0.0001, 1, 101)
y = 0.1*np.exp(0.3*x) + 0.1*np.random.random(len(x))
x2 = np.linspace(0, 10, 101)
y2 = np.log(9*x) + 0.1*np.random.random(len(x))
#end of test initialization
x = np.linspace(-2, 2, 101)
data = np.genfromtxt('GAUSS-24.dat',
skip_header=1,
skip_footer=1,
names=True,
dtype=None,
delimiter=' ')
    # maxValueIndexOfStressArray = np.argmax(strainArray)  # not used yet; np.argmax may be the better approach, but it has not been verified here
lengthOfStrainArray = len(strainArray)
lengthOfStressArray = len(stressArray)#Bug checking value
#bug checking value
if(lengthOfStrainArray != lengthOfStressArray):
        print('mismatched strain and stress arrays passed to the hysteresis function')
return 0
else:
#else do nothing
i=i
print('past bug checking')
sys.stdout.flush()
#while loop finds maximum value in the strain array
#unchecked for off by one errors
while(i < lengthOfStrainArray):
if(maxValueInStrainArray < strainArray[i]):
maxValueInStrainArray = strainArray[i]
indexWithMaximumValueInStrainArray = i
else:
#else do nothing
i=i
i = i + 1
print('past strain value check')
sys.stdout.flush()
#bug checkin value
if(indexWithMaximumValueInStrainArray == -1):
print('no value in strain array over -1')
return 0
else:
#else do nothing
i=i
i = 0
#Creates stress/strain array for increasing values
#unchecked for off by one errors
while(i <= switchPoint):
splitStrainArrayIncreasing.append(strainArray[i])
splitStressArrayIncreasing.append(stressArray[i])
if(TEST == 1):
#overwrite with testing
splitStrainArrayIncreasing[i] = x
splitStressArrayIncreasing[i] = y
else:
i=i
#else do nothing
i = i + 1
print('past switch point check')
sys.stdout.flush()
#creates stress/strain array for decreasing values
#unchecked for off by one errors
while(i < lengthOfStrainArray):
splitStrainArrayDecreasing.append(strainArray[i])
splitStressArrayDecreasing.append(stressArray[i])
if(TEST == 1):
#overwrite with testing
splitStrainArrayIncreasing[i] = x2
splitStressArrayIncreasing[i] = y2
else:
i=i
#else do nothing
i = i + 1
j = j + 1
print('past arraySplitCheck')
sys.stdout.flush()
#should obtain a decreasing function of the form y=Ae^(Bx)
i = 0
j = 0
stressArrayArr = np.asarray(stressArray)
strainArrayArr = np.asarray(strainArray)
stressArrayDecreasing = np.asarray(splitStressArrayDecreasing)
strainArrayDecreasing = np.asarray(splitStrainArrayDecreasing)
strainArrayDecreasingAbs = np.absolute(splitStrainArrayDecreasing)
stressArrayIncreasing = np.asarray(splitStressArrayIncreasing)
strainArrayIncreasing = np.asarray(splitStrainArrayIncreasing)
strainArrayIncreasingAbs = np.absolute(splitStrainArrayIncreasing)
if(ONLYINCREASING == 0):
while(i<len(stressArrayIncreasing)):
print(stressArrayIncreasing[i])
i = i + 1
i = 0
while(i<len(stressArrayDecreasing)):
print(stressArrayDecreasing[i])
i = i + 1
i = 0
while(i < len(strainArrayIncreasing)):
print(strainArrayIncreasing[i])
i = i + 1
i = 0
        while(i < len(strainArrayDecreasing)):  # was '<<', a bit-shift typo that made this loop never run
print(strainArrayDecreasing[i])
i = i + 1
print('past exponential equation')
sys.stdout.flush()
else:
i == i
#beta = 9999
#Delta = -999999
stressArrayDecreasingArr = np.asarray(stressArrayDecreasing)
stressArrayIncreasingArr = np.asarray(stressArrayIncreasing)
strainArrayDecreasingArr = np.asarray(strainArrayDecreasing)
strainArrayIncreasingArr = np.asarray(strainArrayIncreasing)
strainArrayDecreasingSquared = np.square(strainArrayDecreasingArr)
strainArrayIncreasingSquared = np.square(strainArrayIncreasingArr)
print('strain decreasing')
sys.stdout.flush()
    a = plt.figure(figsize=(10, 8))
    axes = a.add_axes([0.1, 0.1, 0.8, 0.8])
#plt.plot(strainArrayIncreasingAbs, splitStressArrayIncreasing, 'b.')#ln
#plt.plot(strainArrayDecreasingAbs, splitStressArrayDecreasing, 'b.')#e
X = np.arange(0, 20)
if(ONLYINCREASING == 0):
axes.plot(strainArrayDecreasing, stressArrayDecreasing, 'o')
#axes.plot(stressArrayDecreasing, np.polyval(yEst, X))
axes.plot(strainArrayIncreasing, stressArrayIncreasing, 'o')
#axes.plot(stressArrayIncreasing, np.polyval(yEst2, X))
#plt.plot(x, alpha*np.exp(beta*x), 'r')
#plt.plot(x, (Cappa*np.log(np.absolute(Delta*x))+2), 'r')
else:
i == i
A = np.vstack([strainArrayArr, np.ones(len(strainArrayArr))]).T
stressArrayArr = stressArrayArr[:, np.newaxis]
linearSlope = np.dot((np.dot(np.linalg.inv(np.dot(A.T,A)),A.T)),stressArrayArr)
print(linearSlope)
B = np.vstack([strainArrayDecreasingArr, np.ones(len(strainArrayDecreasingArr))])
B = np.vstack([strainArrayDecreasingSquared, B]).T
stressArrayDecreasingArr = stressArrayDecreasingArr[:, np.newaxis]
polyValuesDecreasing = np.dot((np.dot(np.linalg.inv(np.dot(B.T,B)),B.T)),stressArrayDecreasingArr)
print(polyValuesDecreasing)
C = np.vstack([strainArrayIncreasingSquared, strainArrayIncreasingArr, np.ones(len(strainArrayIncreasingArr))]).T
stressArrayIncreasingArr = stressArrayIncreasingArr[:, np.newaxis]
polyValuesIncreasing = np.dot((np.dot(np.linalg.inv(np.dot(C.T,C)),C.T)),stressArrayIncreasingArr)
print(linearSlope)
axes.plot(x, linearSlope[0]*x+linearSlope[1], 'r')
axes.plot(x, (polyValuesDecreasing[0]*x*x)+polyValuesDecreasing[1]*x+polyValuesDecreasing[2], 'r')
axes.plot(x, (polyValuesIncreasing[0]*x*x)+polyValuesIncreasing[1]*x+polyValuesIncreasing[2], 'r')
plt.ylim([0,0.5])
plt.xlim([-0.2,0.2])
plt.xlabel('strain')
plt.ylabel('stress (Pa)')
plt.title('Stiffness Curve')
plt.show()
plt.savefig('hystersis_curve.png')
GLOuter = (leftbound - rightbound)/2
GLInner = (leftbound + rightbound)/2
if(ONLYINCREASING == 0):
i = 0
while(i < SIZEOFGUASSDATATWENTYFOUR):
combineRootWeightValues = data[i]
root = combineRootWeightValues[0]
weight = combineRootWeightValues[1]
            integral = (GLOuter) * (weight) * (alpha * np.exp(beta * (GLOuter) * root * (GLInner))) + integral  # NOTE: alpha and beta come from the exponential fit that is currently commented out above, so they must be defined before this runs
i = i + 1
print(integral)
return 0
def bwareaopen(img, min_size, connectivity=8):
"""Remove small objects from binary image (approximation of
bwareaopen in Matlab for 2D images).
Args:
img: a binary image (dtype=uint8) to remove small objects from
min_size: minimum size (in pixels) for an object to remain in the image
connectivity: Pixel connectivity; either 4 (connected via edges) or 8 (connected via edges and corners).
Returns:
the binary image with small objects removed
"""
# Find all connected components (called here "labels")
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(
img, connectivity=connectivity)
# check size of all connected components (area in pixels)
for i in range(num_labels):
label_size = stats[i, cv2.CC_STAT_AREA]
# remove connected components smaller than min_size
if label_size < min_size:
img[labels == i] = 0
return img
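# Illustrative-only usage sketch (not part of the original script): threshold a grayscale frame
# and drop connected components smaller than 50 px before any measurement, mirroring how
# Matlab's bwareaopen is typically used. The threshold value here is an assumption.
def _example_bwareaopen(gray_img):
    _, binary = cv2.threshold(gray_img, 127, 255, cv2.THRESH_BINARY)
    return bwareaopen(binary, min_size=50, connectivity=8)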
def fillInBlanks(strainList):
    # I despise this function as it invariably loses data, but it's the only way to get around limitations with pixels and resolution
i = 0
j = 1
k = -1
lengthOfStrainList = len(strainList)
nextInLineForStrainList = 0
while(i < lengthOfStrainList):
        if(strainList[i] == 0 and (i != 0)):  # 'and', not '&': the bitwise operator binds tighter than '==' and silently dropped the i != 0 check
while(strainList[i] == 0):
j = j + 1
nextInLineForStrainList = strainList[j]
if(nextInLineForStrainList == 0):
i = i
#repeat
elif(nextInLineForStrainList != 0):
if(strainList[j] == 0):
i == i
elif(strainList[j] != 0):
strainList[i] = i
else:
exit()
i = i + 1
j = i + 2
k = i
return 0
def ImageAnalysis(voltageList, imageList, Gain, distanceBetweenTeeth, predefiniedLength, switchPoint):
#test is on at 1
TEST = 0
SKIP_MANUAL_IMAGE_CROP = 0
ALLOW_PRINTS_FOR_TESTING = 1
JUST_OVER_TWO_THIRDS_A_PLATELET = 0.65
#iterators
i = 0
j = 0
k = 0
numFrames = len(imageList)
#these all seem like old values
pixelCheckGate = 0
numberOfWhitePixels = 0
lengthOfImageArrayWhitePixels = 0
longestLengthOfImageArrayWhitePixels = -1
lengthOfPixel = -1
    # these should really be called lists, not arrays; numpy's array is a separate class with its own math behaviour
lengthArray = []
strainArray = []
stressArray = []
forceArray = []
stressArrayToPascals = []
#these values exist but will be deleted in final code
amplifiedVoltageArray = []
electricFieldArray = []
forceArray = []
cropImageSidesListTest = [319, 156, 194, 154]
BUG_TESTING_TEXT_OUTPUT_FILE = open("bugReport.txt", "w+")
print("data sent through")
while(i < numFrames):
lengthArray.append(0)
i = i + 1
i = 0
while(i < (len(voltageList))):
strainArray.append(0)
stressArray.append(0)
amplifiedVoltageArray.append(0)
electricFieldArray.append(0)
forceArray.append(0)
stressArrayToPascals.append(0)
i = | |
'''
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
from datetime import datetime
import enum
import json
import logging
from threading import Lock
from typing import List, Dict
import uuid
from isaac import Message as im
from .JsonTcpServer import JsonTcpServer, JsonTcpServerConnection
MASK_64BIT = ((1 << 64) - 1)
class Mission:
status_lock = Lock()
class Status(enum.Enum):
# The mission has not yet been submitted to the mission server
CREATED = 0
# The mission has been submitted but has not started because either:
# - The robot it should run on is not connected
# - There is another mission before it in the robot's mission queue
# - At least one of this mission's "upstream" missions hasn't completed yet
QUEUED = 1
# The mission has been sent to the robot but the robot has not yet acknowledged it
STARTED = 2
# The mission has been sent to and acknowledged by the robot
RUNNING = 3
# The robot indicated that the mission has succeeded
SUCCESS = 4
# The robot indicated that the mission failed
FAILED = 5
# The robot lost connection in the middle of the mission
FAILED_DISCONNECTED = 6
# One of the following things happened:
# - The mission was sent to the robot but the robot did not acknowledge it before the
# start_timeout
# - The robot acknowledged the mission but did not indicate success or failure before
# timeout
FAILED_TIMEDOUT = 7
# The mission will not be scheduled because one of its upstream missions failed
FAILED_UPSTREAM = 8
def __init__(self, robot: str, channel: str, config: Dict, status_channel: str = None,
**kwargs):
'''
Constructs a Mission object
Args:
robot (str): The name of the robot that should run this mission
channel (str): The JsonTcpServer channel to transmit the MissionProto on
config (dict): The ISAAC application configuration to embed in the MissionProto
status_channel (str)(optional): The JsonTcpServer channel to listen for the
MissionStatusProto on.
start_timeout (int)(optional): The number of seconds to wait between sending the
MissionProto and receiving an acknowledgment
MissionStatusProto before giving up
timeout (int)(optional): The number of seconds to allow a mission to run before giving
up
'''
self._robot = robot
self._channel = channel
self._config = config
self._status_channel = status_channel if status_channel is not None else channel + "_status"
self._status = Mission.Status.CREATED
self._uuid = uuid.uuid4()
self._start_timeout = kwargs.get("start_timeout", 5)
self._timeout = kwargs.get("timeout", float("inf"))
# "Downstream missions" that can't run until this one has been run
self._downstream = []
# The number of upstream missions that need to finish before this mission can start
self._outstanding_upstream = 0
# Count all of the upstream missions that are blocking this one from starting.
# Add this mission to the downstream list of each "outstanding" upstream mission
self._upstream = set(kwargs.get("upstream", []))
with self.status_lock:
for upstream_mission in self._upstream:
# If the upstream mission is CREATED/RUNNING/QUEUED, it is "outstanding"
# and therefore blocking this mission from starting
if upstream_mission.status in (Mission.Status.CREATED, Mission.Status.RUNNING,
Mission.Status.QUEUED):
upstream_mission._downstream.append(self)
self._outstanding_upstream += 1
# If the upstream mission has already completed, it is not outstanding
elif upstream_mission.status == Mission.Status.SUCCESS:
pass
# If the upstream mission has failed, then this mission cannot run
else:
self._outstanding_upstream = -1
break
@property
def uuid(self):
return self._uuid
@property
def uuid_dict(self):
return {
"lower": (self.uuid.int >> 0) & MASK_64BIT,
"upper": (self.uuid.int >> 64) & MASK_64BIT,
}
@property
def status(self) -> "Mission.Status":
return self._status
@status.setter
def status(self, new_status: "Mission.Status"):
with self.status_lock:
            # Skip if reassigning the same status so start/end times don't get changed
if new_status == self._status:
return
# If this mission started, mark the start time
if new_status == Mission.Status.STARTED:
self._sent_time = datetime.now()
# If this mission is running, mark the time it started running
elif new_status == Mission.Status.RUNNING:
self._start_time = datetime.now()
# If this mission succeeded, mark the end time and decrement the "outstanding_upstream"
# counter for all downstream missions
elif new_status == Mission.Status.SUCCESS:
self._end_time = datetime.now()
for downstream in self._downstream:
downstream._outstanding_upstream -= 1
# If this mission failed, mark the end time and set the "outstanding_upstream" counter
# to -1 for all downstream missions indicating an upstream mission failed
elif new_status in (Mission.Status.FAILED, Mission.Status.FAILED_DISCONNECTED,
Mission.Status.FAILED_TIMEDOUT, Mission.Status.FAILED_UPSTREAM):
self._end_time = datetime.now()
for downstream in self._downstream:
downstream._outstanding_upstream = -1
self._status = new_status
def check_timeout(self) -> bool:
'''
Checks the state of the mission and updates the status to FAILED_TIMEOUT if necessary
Returns:
True if the mission status has been updated to FAILED_TIMEDOUT, otherwise false
'''
timed_out = False
with self.status_lock:
# If the mission was sent, make sure an acknowledgement was sent in a timely manner
if self._status == Mission.Status.STARTED and (
datetime.now() - self._sent_time).seconds > self._start_timeout:
timed_out = True
# If the mission was sent and acknowledge, make sure it completes within the timeout
elif self._status == Mission.Status.RUNNING and (
datetime.now() - self._start_time).seconds > self._timeout:
timed_out = True
if timed_out:
self.status = Mission.Status.FAILED_TIMEDOUT
return timed_out
@property
def config(self) -> Dict:
return self._config
@property
def channel(self) -> str:
return self._channel
@property
def outstanding_upstream(self):
return self._outstanding_upstream
@property
def all_upstream_submitted(self):
''' Checks if all upstream missions have already been submitted '''
with self.status_lock:
not_submitted = [
mission for mission in self._upstream if mission._status == Mission.Status.CREATED
]
return len(not_submitted) == 0
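# Illustrative-only sketch (not part of the original module) of chaining missions through the
# `upstream` argument: `deliver` stays QUEUED until `pickup` reports SUCCESS, and is marked
# FAILED_UPSTREAM if `pickup` fails. The robot name, channel, and config payloads are made up.
def _example_mission_chain():
    pickup = Mission("carter-01", "mission", {"goal": "shelf_A"}, timeout=120)
    deliver = Mission("carter-01", "mission", {"goal": "dock_3"}, upstream=[pickup], timeout=120)
    return pickup, deliver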
class _RobotConnection:
''' A class to help MissionServer keep track of all of the active robot TCP connections '''
def __init__(self, connection: JsonTcpServerConnection, name_channel: str = "name"):
'''
Constructs a RobotConnection object
Args:
connection (JsonTcpServerConnection): The connection used to communicate with the robot
name_channel (str): The JsonTcpServer channel to listen for TextProtos to get the robot
name
'''
self._connection = connection
self._name = None
self._current_mission = None
self._connection.set_message_callback(self._message_callback)
self._name_channel = name_channel
self._logger = logging.getLogger('MissionServer')
@property
def name(self) -> str:
return self._name
def _message_callback(self, message, channel):
# Update the robot name if applicable
proto = message.proto
if channel == self._name_channel and self._name is None:
self._name = proto.text
self._logger.info("Robot at address {} identified as \"{}\"".format(
self._connection.address, self.name))
return
# If there is a mission running, see if there is a status update
if self._current_mission is not None and channel == self._current_mission._status_channel:
status_uuid = ((proto.uuid.upper & MASK_64BIT) << 64) + (proto.uuid.lower & MASK_64BIT)
# Does this status message have to do with the current mission?
if status_uuid == self._current_mission.uuid.int:
if proto.missionStatus == "running":
self._current_mission.status = Mission.Status.RUNNING
elif proto.missionStatus == "success":
self._current_mission.status = Mission.Status.SUCCESS
self._current_mission = None
elif proto.missionStatus == "failure":
self._current_mission.status = Mission.Status.FAILED
self._current_mission = None
def start_mission(self, mission: Mission):
# Generate the message
builder = im.MessageBuilder()
builder.proto = im.CAPNP_DICT["MissionProto"].from_dict({
"uuid": mission.uuid_dict,
"config": {
"serialized": json.dumps(mission.config)
}
})
# Send the mission
self._connection.send_message(builder, mission.channel)
# Set the mission state to STARTED
mission.status = Mission.Status.STARTED
self._current_mission = mission
@property
def connected(self) -> bool:
return self._connection.connected
def close(self):
self._logger.info("Robot \"{}\" lost connection".format(self.name))
if self._current_mission is not None:
self._current_mission.status = Mission.Status.FAILED_DISCONNECTED
def check_timeout(self):
if self._current_mission is None:
return
if self._current_mission.check_timeout():
self._current_mission = None
class MissionServer:
'''
A class to manage the submission and running of missions to robots. It instantiates a
JsonTcpServer and uses it to listen for connections to robots. Mission submitted to this class
are queued, scheduled, and sent to connected robots.
'''
def __init__(self, port: int = 9998, name_channel: str = "name"):
'''
Constructs a MissionServer object
Args:
port (int): The TCP port to listen on for robot connections
name_channel (str): The JsonTcpServer channel to listen for TextProtos to get the robot
name
'''
self._name_channel = name_channel
# Create dictionary for robots by name and a list of unidentified robots
self._robots = {}
self._unidentified_robots = []
# Create a dictionary of mission queues by robot name
self._mission_queues = {}
self._mission_queue_lock = Lock()
# Initialize logger
FORMAT = '%(asctime)-15s %(levelname)s %(message)s'
logging.basicConfig(format=FORMAT)
self._logger = logging.getLogger('MissionServer')
self._logger.setLevel(logging.DEBUG)
# Create | |
def removeExtraMsg(self):
self.ui.statusbar.removeWidget(self.extraMsgWidget)
self.extraMsgWidget=None
def setMainPageIndex(self,i):
"""open previous page if i == None"""
i = self.prevMainPageindex if i is None else i
self.prevMainPageindex = self.ui.mainPage.currentIndex()
if i == 0:
self.ui.startPageBtn.setChecked(1)
elif i == 2:
self.ui.PpageBtn.setChecked(1)
self.ui.mainPage.setCurrentIndex(i)
def saveWindowSettings(self):
self.settings.beginGroup("MainWindow")
self.settings.setValue("geometry", self.saveGeometry())
self.settings.setValue("windowState", self.saveState())
self.settings.endGroup()
def readWindowSettings(self):
self.settings.beginGroup("MainWindow")
geometry = self.settings.value("geometry", QByteArray())
state = self.settings.value("windowState", QByteArray())
if not geometry.isEmpty():
self.restoreGeometry(geometry)
self.restoreState(state)
self.settings.endGroup()
def closeEvent(self, event):
self.saveWindowSettings()
[thread.terminate() for thread in self.threads if thread.isRunning()]
# delete temp files
for file in os.listdir(tempDir()):
file = os.path.join(tempDir(),file)
if os.path.isfile(file):
os.remove(file)
event.accept()
def eventFilter(self, obj, event):
# print(event.type())
if obj.objectName() == 'header':
if self.ui.logo.underMouse():
if event.type() == QEvent.MouseButtonRelease:
webbrowser.open('https://madponyinteractive.github.io/MadQt/')
return True
else:
if event.type() == QEvent.MouseButtonDblClick:
self.setWindowState(self.windowState() ^ Qt.WindowFullScreen)
return True
if event.type() == QEvent.MouseButtonRelease:
if event.globalPosition().y() < 10 and self.moved:
self.prevGeo = self.geometry()
self.showMaximized()
return True
if event.type() == QEvent.MouseButtonPress:
self.prevMousePos = event.scenePosition()
self.moved = False
if event.type() == QEvent.MouseMove:
if self.windowState() == Qt.WindowFullScreen\
or self.windowState() == Qt.WindowMaximized:
self.showNormal()
self.prevMousePos = QPointF(self.prevGeo.width()*.5,50)
gr=self.geometry()
screenPos = event.globalPosition()
pos = screenPos-self.prevMousePos
x = max(pos.x(),0)
y = max(pos.y(),0)
screen = QGuiApplication.screenAt(QPoint(x,y)).size()
x = min(x,screen.width()-gr.width())
y = min(y,screen.height()-gr.height())
self.move(x,y)
self.moved = True
# print(QGuiApplication.screens())
return QMainWindow.eventFilter(self, obj, event)
'''
# A play around with custom resize handles
def event(self, event):
if event.type() == QEvent.MouseButtonRelease:
self.grabbedHandle = False
if event.type() == QEvent.MouseMove:
if self.handle and event.buttons()==Qt.LeftButton:
self.grabbedHandle=True
gr=self.geometry()
gp=event.globalPosition()
minW=self.minimumSizeHint().width()
minH=self.minimumSizeHint().height()
x = int(gp.x())
y = int(gp.y())
# x = max(x,minW)
if self.handle == 'top':
# y = min(y,minH)
gr.setTop(y)
# self.move(self.x(),y)
elif self.handle == 'bottom':
gr.setBottom(y)
# self.resize(gr.size())
self.setGeometry(gr)
self.updateGeometry()
if event.type() == QEvent.HoverMove and not self.grabbedHandle:
x=int(event.position().x())
y=int(event.position().y())
gr=self.geometry()
w=gr.width()
h=gr.height()
o=8
yr=y in range(o,h-o)
xr=x in range(o,w-o)
if not yr and not xr:
# corners
if y<o and x<o:
self.handle ='top left'
self.setCursor(Qt.SizeFDiagCursor)
elif x>w-o and y<o:
self.handle ='top right'
self.setCursor(Qt.SizeBDiagCursor)
elif x<o and y>h-o:
self.handle ='bottom left'
self.setCursor(Qt.SizeBDiagCursor)
elif x>w-o and y>h-o:
self.handle ='bottom right'
self.setCursor(Qt.SizeFDiagCursor)
else: self.handle = None
else:
# edges
if x<o and yr:
self.handle ='left'
self.setCursor(Qt.SizeHorCursor)
elif x>w-o and yr:
self.handle ='right'
self.setCursor(Qt.SizeHorCursor)
elif y<o and xr:
self.handle ='top'
self.setCursor(Qt.SizeVerCursor)
elif y>h-o and xr:
self.handle ='bottom'
self.setCursor(Qt.SizeVerCursor)
else: self.handle = None
if self.handle:
pass
else:
self.setCursor(Qt.ArrowCursor)
return QMainWindow.event(self, event)
'''
def initUi(self):
self.ui.header.installEventFilter(self)
self.ui.statusbar.insertPermanentWidget(0,QLabel('V.0.0.2'),0)
# Start Page
self.ui.startPageBtn.clicked.connect(lambda: self.setMainPageIndex(0))
self.ui.newP.clicked.connect(lambda: self.setMainPageIndex(1))
self.ui.addP.clicked.connect(self.addProjectFromFile)
self.ui.delSelectedP.clicked.connect(lambda: self.removeSelectedProjects())
self.ui.settBtn.clicked.connect(lambda: self.setMainPageIndex(3))
self.ui.PpageBtn.clicked.connect(lambda: self.setMainPageIndex(2))
self.ui.ProjectsList.itemDoubleClicked.connect(self.openProject)
self.ui.ProjectsList.dropped.connect(self.droppedProject)
# Settings Page
default_qtPath = os.path.join(pySideDir())
# self.settings.setValue("usrInput/designerPath", default_qtPath)
self.ui.QtDesignerPathInput.setText(self.settings.value("usrInput/designerPath", default_qtPath))
self.ui.sublimePathInput.setText(self.settings.value("usrInput/sublimePath", ''))
self.ui.icoSizeCb.setCurrentText(self.settings.value("usrInput/icoSize",str(64)))
def changedIco(v): self.settings.setValue("usrInput/icoSize",v)
self.ui.icoSizeCb.currentTextChanged.connect(changedIco)
self.ui.QtDesignerPathBtn.clicked.connect(lambda: self.openFolder(self.ui.QtDesignerPathInput))
self.ui.sublimePathBtn.clicked.connect(lambda: self.openFolder(self.ui.sublimePathInput))
global MadQt_Designer_Paths
MadQt_Designer_Paths = self.settings.value("usrInput/pluginPaths", [MadQt.get_path('QtDesignerPlugins')])
for item in MadQt_Designer_Paths:
self.ui.designerPathsList.addItem(item)
self.ui.SettDoneBtn.clicked.connect(lambda: self.setMainPageIndex(None))
self.ui.saveSett.clicked.connect(self.saveUserSettings)
self.ui.createMQEXEC.clicked.connect(lambda: webbrowser.open("https://madponyinteractive.github.io/MadQt/ProjectManager/install.html"))
# New custom widget page
self.ui.cwQtClass.addItems(Mt.QDesignerBaseClasses())
self.ui.cwQtClass.setCurrentText('QPushButton')
self.ui.createWidget.clicked.connect(self.createWidget)
self.ui.cancelCreateW.clicked.connect(lambda: self.setMainPageIndex(None))
# New project page
self.ui.newPFolder.clicked.connect(lambda: self.openFolder(self.ui.NewPFolderName))
self.ui.newPIconBrowse.clicked.connect(lambda: self.openImg(self.ui.newPIcon,True))
self.ui.createP.clicked.connect(self.createNewProject)
self.ui.cancelCreateP.clicked.connect(lambda: self.setMainPageIndex(None))
# New Ui Page
self.ui.createNewUi.clicked.connect(self.createUi)
self.ui.cancelCreateUi.clicked.connect(lambda: self.setMainPageIndex(None))
# Project Page menu buttons
self.ui.refreshBtn.clicked.connect(self.refresh)
self.ui.openPFolder.clicked.connect(self.openProjectFolder)
if self.hasSublime():
self.ui.sublimePBtn.setStatusTip('Open sublime project')
else:
self.ui.sublimePBtn.setStatusTip('Setup sublime path in settings to open sublime projects')
self.ui.sublimePBtn.clicked.connect(self.openSublimeProject)
self.ui.uiFilesBtn.clicked.connect(lambda: self.ui.PStackedWidget.setCurrentIndex(0))
self.ui.custWidgetsBtn.clicked.connect(lambda: self.ui.PStackedWidget.setCurrentIndex(1))
self.ui.qrcPBtn.clicked.connect(lambda: self.ui.PStackedWidget.setCurrentIndex(2))
self.ui.execPBtn.clicked.connect(lambda: self.ui.PStackedWidget.setCurrentIndex(3))
self.ui.runBtn.clicked.connect(self.runApp)
self.ui.saveAsBtn.clicked.connect(self.saveToNewFolder)
# Ui page
self.ui.uiList.itemDoubleClicked.connect(self.openUi)
self.ui.uiList.dropped.connect(self.addUiFiles)
self.ui.addUiBtn.clicked.connect(self.addUiFiles)
self.ui.delSelectedUiBtn.clicked.connect(self.removeUi)
self.ui.newUiBtn.clicked.connect(lambda: self.setMainPageIndex(5))
# Custom Widget page
self.ui.moduleTree.header().setSectionResizeMode(QHeaderView.Stretch)
self.ui.moduleTree.droppedModule.connect(self.addModules)
self.ui.moduleTree.itemDoubleClicked.connect(self.openClass)
self.ui.addModuleBtn.clicked.connect(self.addModuleDialogue)
self.ui.delSelectedMods.clicked.connect(self.deleteModules)
self.ui.newModuleBtn.clicked.connect(self.newModule)
self.ui.cwClass.textChanged.connect(self.cwClassChanged)
self.ui.cwMod.textChanged.connect(self.cwModChanged)
self.ui.cwQtClass.currentTextChanged.connect(self.cwQtClassChanged)
# Qrc Image page
self.ui.qrcTree.header().setSectionResizeMode(QHeaderView.Stretch)
self.ui.qrcTree.droppedQrc.connect(self.addQrc)
self.ui.qrcTree.droppedImg.connect(self.addImages)
iSize = self.settings.value("usrInput/iconSize",QSize(64,64))
self.ui.qrcTree.setIconSize(iSize)
self.ui.imgSizeSlider.setValue(iSize.width())
def setIconSize(s):
size=QSize(s,s)
self.ui.qrcTree.setIconSize(size)
self.settings.setValue("usrInput/iconSize",size)
self.ui.statusbar.showMessage(f'Icon size: {s}px')
self.ui.imgSizeSlider.valueChanged.connect(setIconSize)
# Create Executable page
self.ui.createExecBtn.clicked.connect(self.createExec)
self.ui.pyinstallerHelp.clicked.connect(lambda:\
webbrowser.open('https://pyinstaller.readthedocs.io/en/stable/when-things-go-wrong.html'))
# Create Executable
@thread
def runExec(self,arguments):
process = subprocess.Popen(arguments, shell=True, bufsize = 1,
stdout=subprocess.PIPE, stderr = subprocess.STDOUT,
encoding='utf-8', errors = 'replace')
while True:
realtime_output = process.stdout.readline()
if realtime_output == '' and process.poll() is not None:
self.finishExec()
break
if realtime_output:
self.ui.execOutput.append(realtime_output.strip())
def finishExec(self):
for file in os.listdir(self.project.devPath()):
if os.path.splitext(file)[1] == '.spec':
os.remove(file)
break
shutil.rmtree(self.project.devFile('build'))
src = self.project.devFile('dist')
dst = self.project.folder
findAt = os.path.join(dst,'dist')
if os.path.isdir(findAt):shutil.rmtree(findAt)
shutil.move(src, dst, copy_function = shutil.copytree)
self.ui.execOutput.append(F"Successfully created executable for project: {self.project.name}!")
self.ui.execOutput.append(F"Output files located @ {findAt}")
self.ui.execOutput.moveCursor(QTextCursor.End)
self.ui.statusbar.showMessage('Executable created!')
def createExec(self):
self.ui.statusbar.showMessage('Creating executable...')
self.project.updateUis()
os.chdir(self.project.devPath())
arguments = F"pyinstaller "
arguments += self.ui.execArgs.text()
arguments +=F" --icon {os.path.join('gui',self.project.icon)} main.py"
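        # at this point 'arguments' looks like, e.g. (illustrative, project name is hypothetical,
        # flags come from the UI field): pyinstaller --onefile --windowed --name=MyProject --icon gui/logo.ico main.py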
self.ui.execOutput.ensureCursorVisible()
self.ui.execOutput.append(f'Running: {arguments}')
self.runExec(arguments)
# Helper Dialogues
def openFile(self,fType='All Files (*)',prefix=''):
return QFileDialog.getOpenFileName(self,
"MadQt Project Manager",
prefix,
fType, "")
def openFiles(self,fType='All Files (*)',prefix=''):
return QFileDialog.getOpenFileNames(self,
"MadQt Project Manager",
prefix,
fType, "")
def openFolder(self,widget):
""" Opens a file dialogue
Sets text on provided widget
"""
text = widget.text() if len(widget.text()) else ''
options = QFileDialog.DontResolveSymlinks | QFileDialog.ShowDirsOnly
directory = QFileDialog.getExistingDirectory(self,
"MadQt Project Manager - Select folder",
text, options)
if directory:
widget.setText(directory)
self.selectedPath = directory
def openImg(self,widget,ico=False):
""" Opens a file dialogue
Sets pixmap on provided widget
"""
file = self.openFile('Images (*.png *.ico *.jpg *.gif *.svg)',widget.text())[0]
if len(file):
if ico:
if '.ico' not in file:
dest_ico = os.path.join(tempDir(),'logo.ico')
file = Mt.createIco(file,dest_ico,self.defaultIcoSize())
widget.setPixmap(QPixmap(file))
self.selectedFile = file
# self.settings.setValue("usrInput/newProjectIcon", directory)
def questionBox(self,msg):
reply = QMessageBox.question(self, "MadQt Warning!",
msg,
QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)
if reply == QMessageBox.Yes:
return "Yes"
elif reply == QMessageBox.No:
return "No"
else:
return "Cancel"
def labelBox(self,msg,text):
usrInput, ok = QInputDialog.getText(self, "MadQt Project Manager",
msg, QLineEdit.Normal, text)
if ok and usrInput != '':
return usrInput
# Project Methods
def addProject(self,file=None,save=False):
"""
Adds item to the start page project list
Saves project to settings if save=True
"""
if isinstance(file,QListWidgetItem):
file=file.file
save=True
elif not file:
file = self.project.file()
# File was deleted from system?
if not os.path.isfile(file):
self.removeProject(file,True)
return
# File already added?
for item in self.ui.ProjectsList.getItems():
if item.file == file:
# self.removeProject(file,True)
self.extraMsg('Project folder already in list!')
return
# Add project to the start page project list
self.ui.ProjectsList.insertItem(0,ProjectItem(file))
# Save project to settings
if not save:return
allP = self.settings.value("projects",[])
# remove projects with same folder
[self.removeProject(p,True) for p in allP if p == file]
allP.append(file)
self.settings.setValue("projects", allP)
def removeProject(self,file=None,save=False):
"""
Removes item from the start page project list
Removes project from settings if save=True
"""
if isinstance(file,QListWidgetItem):
file=file.file
save=True
elif not file:
file = self.project.file()
self.ui.ProjectsList.removeItem(file)
# Removes project from settings
if not save:return
allP = self.settings.value("projects",[])
[allP.remove(p) for p in allP if p == file]
self.settings.setValue("projects", allP)
if not len(allP):self.ui.PpageBtn.setEnabled(False)
def removeSelectedProjects(self):
"""removes all selected projects in project list"""
[self.removeProject(item) for item in self.ui.ProjectsList.selectedItems()]
def addProjectFromFile(self,file=None):
"""Add and open a project from an existing file"""
file = self.openFile('MadQt Project Files (*.mqpm)')[0]\
if not file else file
if not len(file):return
if not file.endswith('mqpm'):
self.extraMsg('Not a valid project file!')
return
# Update folder path
prevSettings = {}
with open(file, "r") as f: prevSettings = json.loads(f.read())
prevSettings['folder'] = os.path.dirname(file)
with open(file, "w") as f: json.dump(prevSettings, f, indent=4)
self.addProject(file,True)
def createNewProject(self):
name = self.ui.projectNameInput.text()
folder = self.ui.NewPFolderName.text()
if not os.path.isdir(folder):
self.ui.statusbar.showMessage('Provided path not found!',5000)
return
name = 'New MadQt project' if not name else name.strip()
self.settings.setValue("usrInput/newProjectDir", folder)
folder = os.path.join(folder,Mt.cleanString(name))
ico = os.path.join(MadQt.get_path('Templates'),'NewProject','gui','logo.ico')
if self.selectedFile is not None:
ico = ico if '.ico' not in self.selectedFile else self.selectedFile
self.project = Project(name,folder,ico)
if not self.project.valid:
self.project = Project()
            self.ui.statusbar.showMessage('Could not create project! Does the provided folder already contain a project?')
return
# self.settings.remove('projects')#333333333333333333333333333333333
self.addProject(save=True)
self.extraMsg(f'Successfully created "{name}"!')
self.openProject()
def saveToNewFolder(self):
"""open file dialogue"""
options = QFileDialog.DontResolveSymlinks | QFileDialog.ShowDirsOnly
fileName, filtr = QFileDialog.getSaveFileName(self,
"MadQt Project Manager - Save to new folder",
self.project.folder,
"MadQt Project Files (*.mqpm)", "", options)
file_name = os.path.basename(fileName)
file, ext = os.path.splitext(file_name)
directory = os.path.dirname(fileName)
if fileName != '':
self.project.name = file
if self.project.saveToNewFolder(directory):
self.addProject(save=True)
self.extraMsg(f'Successfully saved and opened!')
self.openProject()
else:
                self.extraMsg('Could not save. Are you saving inside an existing project folder?')
def droppedProject(self, urls):
"""when user drops file on projects list"""
[self.addProjectFromFile(file) for file in urls]
def openProject(self,item=None):
"""open existing project"""
if item:
# Avoid re-opening same project
if item.file == self.project.file():
self.setMainPageIndex(2)
return
self.project = Project(item.file)
self.setCursor(Qt.WaitCursor)
self.ui.qrcTree.clear()
self.ui.moduleTree.clear()
self.ui.uiList.clear()
if not self.project.valid:return
self.ui.statusbar.showMessage('Loading project...')
self.ui.PpageBtn.setEnabled(True)
if self.hasSublime(): self.ui.sublimePBtn.setEnabled(True)
self.ui.PpageBtn.setChecked(True)
self.ui.PrjName.setText(self.project.name)
self.project.updateUis()
# Populate Ui list
[self.addUi(ui) for ui in self.project.uiFiles() if os.path.isfile(ui)]
# Populate Qrc list
[self.addQrc(qrc) for qrc in self.project.qrcFiles() if os.path.isfile(qrc)]
# Populate Custom Widget list
[self.addModule(mod) for mod in self.project.customWidgetsFiles() if os.path.isfile(mod)]
self.ui.execArgs.setText(F"--onefile --windowed --name={Mt.cleanString(self.project.name)}")
self.setMainPageIndex(2)
self.ui.statusbar.showMessage(f'{self.project.name} project loaded!')
self.setCursor(Qt.ArrowCursor)
def openProjectFolder(self):
# Open base on project page
Mt.openFileExplorer(self.project.folder)
# Qrc Methods
def createQrc(self):
qrcName = self.labelBox("New Qrc name:",'resources.qrc')
if not qrcName:return
self.setCursor(Qt.WaitCursor)
if '.' | |
#!/usr/bin/env python
# This script is deprecated, please use ./xconfig_to_configs.py
# tdnn or RNN with 'jesus layer'
# inputs to jesus layer:
# - for each spliced version of the previous layer the output (of dim --jesus-forward-output-dim)
# outputs of jesus layer:
# for all layers:
# --jesus-forward-output-dim
# we're using python 3.x style print but want it to work in python 2.x too.
from __future__ import print_function
import re, os, argparse, sys, math, warnings
import imp
nnet3_train_lib = imp.load_source('ntl', 'steps/nnet3/nnet3_train_lib.py')
chain_lib = imp.load_source('ncl', 'steps/nnet3/chain/nnet3_chain_lib.py')
parser = argparse.ArgumentParser(description="Writes config files and variables "
"for TDNNs creation and training",
epilog="See steps/nnet3/train_tdnn.sh for example.");
parser.add_argument("--splice-indexes", type=str, required = True,
help="Splice[:recurrence] indexes at each hidden layer, e.g. '-3,-2,-1,0,1,2,3 -3,0:-3 -3,0:-3 -6,-3,0:-6,-3'. "
"Note: recurrence indexes are optional, may not appear in 1st layer, and must be "
"either all negative or all positive for any given layer.")
# Only one of these arguments can be specified, and one of them has to
# be compulsorily specified
feat_group = parser.add_mutually_exclusive_group(required = True)
feat_group.add_argument("--feat-dim", type=int,
help="Raw feature dimension, e.g. 13")
feat_group.add_argument("--feat-dir", type=str,
help="Feature directory, from which we derive the feat-dim")
# only one of these arguments can be specified
ivector_group = parser.add_mutually_exclusive_group(required = False)
ivector_group.add_argument("--ivector-dim", type=int,
help="iVector dimension, e.g. 100", default=0)
ivector_group.add_argument("--ivector-dir", type=str,
help="iVector dir, which will be used to derive the ivector-dim ", default=None)
num_target_group = parser.add_mutually_exclusive_group(required = True)
num_target_group.add_argument("--num-targets", type=int,
help="number of network targets (e.g. num-pdf-ids/num-leaves)")
num_target_group.add_argument("--ali-dir", type=str,
help="alignment directory, from which we derive the num-targets")
num_target_group.add_argument("--tree-dir", type=str,
help="directory with final.mdl, from which we derive the num-targets")
parser.add_argument("--include-log-softmax", type=str,
help="add the final softmax layer ", default="true", choices = ["false", "true"])
parser.add_argument("--xent-regularize", type=float,
help="For chain models, if nonzero, add a separate output for cross-entropy "
"regularization (with learning-rate-factor equal to the inverse of this)",
default=0.0)
parser.add_argument("--xent-separate-forward-affine", type=str,
help="if using --xent-regularize, gives it separate last-but-one weight matrix",
default="false", choices = ["false", "true"])
parser.add_argument("--use-repeated-affine", type=str,
help="if true use RepeatedAffineComponent, else BlockAffineComponent (i.e. no sharing)",
default="true", choices = ["false", "true"])
parser.add_argument("--final-layer-learning-rate-factor", type=float,
help="Learning-rate factor for final affine component",
default=1.0)
parser.add_argument("--self-repair-scale-nonlinearity", type=float,
help="Small scale involved in fixing derivatives, if supplied (e.g. try 0.00001)",
default=0.0)
parser.add_argument("--jesus-hidden-dim", type=int,
help="hidden dimension of Jesus layer.", default=10000)
parser.add_argument("--jesus-forward-output-dim", type=int,
help="part of output dimension of Jesus layer that goes to next layer",
default=1000)
parser.add_argument("--jesus-forward-input-dim", type=int,
help="Input dimension of Jesus layer that comes from affine projection "
"from the previous layer (same as output dim of forward affine transform)",
default=1000)
parser.add_argument("--final-hidden-dim", type=int,
help="Final hidden layer dimension-- or if <0, the same as "
"--jesus-forward-input-dim", default=-1)
parser.add_argument("--num-jesus-blocks", type=int,
help="number of blocks in Jesus layer. All configs of the form "
"--jesus-*-dim will be rounded up to be a multiple of this.",
default=100);
parser.add_argument("--jesus-stddev-scale", type=float,
help="Scaling factor on parameter stddev of Jesus layer (smaller->jesus layer learns faster)",
default=1.0)
parser.add_argument("--clipping-threshold", type=float,
help="clipping threshold used in ClipGradient components (only relevant if "
"recurrence indexes are specified). If clipping-threshold=0 no clipping is done",
default=15)
parser.add_argument("config_dir",
help="Directory to write config files and variables");
print(' '.join(sys.argv))
args = parser.parse_args()
if not os.path.exists(args.config_dir):
os.makedirs(args.config_dir)
## Check arguments.
if args.feat_dir is not None:
args.feat_dim = nnet3_train_lib.GetFeatDim(args.feat_dir)
if args.ali_dir is not None:
args.num_targets = nnet3_train_lib.GetNumberOfLeaves(args.ali_dir)
elif args.tree_dir is not None:
args.num_targets = chain_lib.GetNumberOfLeaves(args.tree_dir)
if args.ivector_dir is not None:
args.ivector_dim = nnet3_train_lib.GetIvectorDim(args.ivector_dir)
if not args.feat_dim > 0:
raise Exception("feat-dim has to be postive")
if not args.num_targets > 0:
print(args.num_targets)
raise Exception("num_targets has to be positive")
if not args.ivector_dim >= 0:
raise Exception("ivector-dim has to be non-negative")
## Check arguments.
if args.num_jesus_blocks < 1:
sys.exit("invalid --num-jesus-blocks value");
if args.final_hidden_dim < 0:
args.final_hidden_dim = args.jesus_forward_input_dim
for name in [ "jesus_hidden_dim", "jesus_forward_output_dim", "jesus_forward_input_dim",
"final_hidden_dim" ]:
old_val = getattr(args, name)
if old_val % args.num_jesus_blocks != 0:
new_val = old_val + args.num_jesus_blocks - (old_val % args.num_jesus_blocks)
printable_name = '--' + name.replace('_', '-')
print('Rounding up {0} from {1} to {2} to be a multiple of --num-jesus-blocks={3} '.format(
printable_name, old_val, new_val, args.num_jesus_blocks))
setattr(args, name, new_val);
# this is a bit like a struct, initialized from a string, which describes how to
# set up the statistics-pooling and statistics-extraction components.
# An example string is 'mean(-99:3:9:99)', which means, compute the mean of
# data within a window of -99 to +99, with distinct means computed every 9 frames
# (we round to get the appropriate one), and with the input extracted on multiples
# of 3 frames (so this will force the input to this layer to be evaluated
# every 3 frames). Another example string is 'mean+stddev(-99:3:9:99)',
# which will also cause the standard deviation to be computed.
class StatisticsConfig:
# e.g. c = StatisticsConfig('mean+stddev(-99:3:9:99)', 400, 'jesus1-forward-output-affine')
def __init__(self, config_string, input_dim, input_name):
self.input_dim = input_dim
self.input_name = input_name
        m = re.search(r"(mean|mean\+stddev)\((-?\d+):(-?\d+):(-?\d+):(-?\d+)\)",
                      config_string)
if m == None:
sys.exit("Invalid splice-index or statistics-config string: " + config_string)
self.output_stddev = (m.group(1) != 'mean')
self.left_context = -int(m.group(2))
self.input_period = int(m.group(3))
self.stats_period = int(m.group(4))
self.right_context = int(m.group(5))
if not (self.left_context > 0 and self.right_context > 0 and
self.input_period > 0 and self.stats_period > 0 and
self.left_context % self.stats_period == 0 and
self.right_context % self.stats_period == 0 and
self.stats_period % self.input_period == 0):
sys.exit("Invalid configuration of statistics-extraction: " + config_string)
# OutputDim() returns the output dimension of the node that this produces.
def OutputDim(self):
return self.input_dim * (2 if self.output_stddev else 1)
# OutputDims() returns an array of output dimensions, consisting of
# [ input-dim ] if just "mean" was specified, otherwise
# [ input-dim input-dim ]
def OutputDims(self):
return [ self.input_dim, self.input_dim ] if self.output_stddev else [ self.input_dim ]
# Descriptor() returns the textual form of the descriptor by which the
# output of this node is to be accessed.
def Descriptor(self):
return 'Round({0}-pooling-{1}-{2}, {3})'.format(self.input_name, self.left_context, self.right_context,
self.stats_period)
# This function writes the configuration lines need to compute the specified
# statistics, to the file f.
def WriteConfigs(self, f):
print('component name={0}-extraction-{1}-{2} type=StatisticsExtractionComponent input-dim={3} '
'input-period={4} output-period={5} include-variance={6} '.format(
self.input_name, self.left_context, self.right_context,
self.input_dim, self.input_period, self.stats_period,
('true' if self.output_stddev else 'false')), file=f)
print('component-node name={0}-extraction-{1}-{2} component={0}-extraction-{1}-{2} input={0} '.format(
self.input_name, self.left_context, self.right_context), file=f)
stats_dim = 1 + self.input_dim * (2 if self.output_stddev else 1)
print('component name={0}-pooling-{1}-{2} type=StatisticsPoolingComponent input-dim={3} '
'input-period={4} left-context={1} right-context={2} num-log-count-features=0 '
'output-stddevs={5} '.format(self.input_name, self.left_context, self.right_context,
stats_dim, self.stats_period,
('true' if self.output_stddev else 'false')),
file=f)
print('component-node name={0}-pooling-{1}-{2} component={0}-pooling-{1}-{2} input={0}-extraction-{1}-{2} '.format(
self.input_name, self.left_context, self.right_context), file=f)
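# Illustrative parse (a sketch only, not executed here):
#   StatisticsConfig('mean+stddev(-99:3:9:99)', 100, 'jesus1') would give
#   left_context=99, input_period=3, stats_period=9, right_context=99,
#   output_stddev=True and OutputDim()=200 (mean and stddev of a 100-dim input).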
## Work out splice_array
## e.g. for
## args.splice_indexes == '-3,-2,-1,0,1,2,3 -3,0:-3 -3,0:-3 -6,-3,0:-6,-3'
## we would have
## splice_array = [ [-3,-2,...,3], [-3,0], [-3,0], [-6,-3,0] ]
splice_array = []
left_context = 0
right_context = 0
split_on_spaces = args.splice_indexes.split(" "); # we already checked the string is nonempty.
if len(split_on_spaces) < 2:
sys.exit("invalid --splice-indexes argument, too short: "
+ args.splice_indexes)
try:
for string in split_on_spaces:
this_layer = len(splice_array)
this_splices = string.split(",")
splice_array.append(this_splices)
# the rest of this block updates left_context and right_context, and
# does some checking.
leftmost_splice = 10000
rightmost_splice = -10000
for s in this_splices:
try:
n = int(s)
if n < leftmost_splice:
leftmost_splice = n
if n > rightmost_splice:
rightmost_splice = n
except:
if len(splice_array) == 1:
sys.exit("First dimension of splicing array must not have averaging [yet]")
try:
x = StatisticsConfig(s, 100, 'foo')
except:
sys.exit("The following element of the splicing array is not a valid specifier "
"of statistics: " + s)
if leftmost_splice == 10000 or rightmost_splice == -10000:
sys.exit("invalid element of --splice-indexes: " + string)
left_context += -leftmost_splice
right_context += rightmost_splice
except ValueError as e:
sys.exit("invalid --splice-indexes argument " + args.splice_indexes + " " + str(e))
left_context = max(0, left_context)
right_context = max(0, right_context)
num_hidden_layers = len(splice_array)
input_dim = len(splice_array[0]) * args.feat_dim + args.ivector_dim
f = open(args.config_dir + "/vars", "w")
print('left_context=' + str(left_context), file=f)
print('right_context=' + str(right_context), file=f)
print('num_hidden_layers=' + str(num_hidden_layers), file=f)
f.close()
f = open(args.config_dir + "/init.config", "w")
print('# Config file for initializing neural network prior to', file=f)
print('# preconditioning matrix computation', file=f)
print('input-node name=input dim=' + str(args.feat_dim), file=f)
list=[ ('Offset(input, {0})'.format(n) if n != 0 else 'input' ) for n in splice_array[0] ]
if args.ivector_dim > 0:
print('input-node name=ivector dim=' + str(args.ivector_dim), file=f)
list.append('ReplaceIndex(ivector, t, 0)')
# example of next line:
# output-node name=output input="Append(Offset(input, -3), Offset(input, -2), Offset(input, -1), ... , Offset(input, 3), ReplaceIndex(ivector, t, 0))"
print('output-node name=output input=Append({0})'.format(", ".join(list)), file=f)
f.close()
for l in range(1, num_hidden_layers + 1):
# the following summarizes the structure of the layers: Here, the Jesus component includes ReLU at its input and output, and renormalize
# at its output after the ReLU.
# layer1: splice + LDA-transform + affine + ReLU + renormalize
# layerX: splice + Jesus + affine + ReLU
| |
import os
from montepython.likelihood_class import Likelihood
import math as m
import statistics as s
from scipy import stats
import scipy.linalg as la
import numpy as np
from scipy.integrate import quad
import time
def lin_reg(x,y):
#length of data set
n = len(x)
#summing independent variable
x_sum = sum(x)
#summing dependent variable
y_sum = sum(y)
#mean of independent variable
x_mean = s.mean(x)
#mean of dependent variable
y_mean= s.mean(y)
#sum of x squared
x_sqr = []
for i in range(len(x)):
x_temp = x[i]**2
x_sqr.append(x_temp)
x_sqr_sum = sum(x_sqr)
#sum of y squared
y_sqr = []
for i in range(len(y)):
y_temp = y[i]**2
y_sqr.append(y_temp)
y_sqr_sum = sum(y_sqr)
#sum of xy product
xy_prod = []
for i in range(len(y)):
xy_temp = y[i]*x[i]
xy_prod.append(xy_temp)
xy_prod_sum = sum(xy_prod)
#numerator and denominator of slope estimate
S_xx = x_sqr_sum - (x_sum**2/n)
S_xy = xy_prod_sum - (x_sum*y_sum/n)
#slope estimate
B_1 = S_xy/S_xx
#intercept estimate
B_0 = y_mean - B_1*x_mean
return B_0, B_1
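# quick sanity check (illustrative, not executed): lin_reg([1,2,3],[2,4,6]) gives
# B_0 = 0.0 and B_1 = 2.0, i.e. the exact line y = 2x through those points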
#simple linear regression with fixed slope and error on slope
def lin_reg_fixed_slope(x,y,m,dm,dx,dy):
B_0 = np.mean(y-m*x)
B_1 = m
dB0 = np.sqrt((1/len(x)))*np.sqrt(np.sum(np.power(dy,2))+np.sum(np.power(m*x,2)*(np.power(dm/m,2)+np.power(dx/x,2))))
dB1 = dm
return B_0, B_1, dB0, dB1
# linear regression using error as a weighting scheme, fixing slope and slope error
def weighted_fixed_slope(x,y,m,dm,dx,dy):
#wx = 1/dx^2
#wy = 1/dy^2
w = 1/(dx**2+dy**2)
ybar = sum(w*y)/sum(w)
xbar = sum(w*x)/sum(w)
B0 = ybar - m*xbar
r = y - (B0 +m*x)
wr = np.sqrt(w)*r
n = len(x)
SE_wr = np.sqrt(sum(wr**2)/(n-2))
dB0 = np.sqrt((SE_wr**2/sum(w))+dm*xbar**2)
B1 = m
dB1 = dm
return B0, B1, dB0, dB1
#york linear regression
# York correction to linear regression including error in both x and y
# https://aapt.scitation.org/doi/abs/10.1119/1.1632486
def york_fit(x,y,sigma_x,sigma_y,r,tol,n_max):
    # inputs are expected to be numpy arrays
    # if an error value is 0, replace it with something very small
    # to prevent NaNs
sigma_x[sigma_x == 0] = 10**-15
sigma_y[sigma_y == 0] = 10**-15
#define an array which tracks the changes in slope, B_1
b_hist = np.ones(n_max)
#1) choose an approximate initial value for the slope, b
# -> simple linear regression
# B_0 is intercept, B_1 is slope from simple linear regression
[B_0_simple, B_1_simple] = lin_reg(x,y)
b_hist[0] = B_1_simple
B_0 = B_0_simple
B_1 = B_1_simple
#2) determine the weights omega of each point for both x and y
# usually 1/sigma where sigma is the error associated with x and y
# at the i'th point
omega_x = 1/np.square(np.array(sigma_x))
omega_y = 1/np.square(np.array(sigma_y))
#3) use these weights with B_1 and the correlation r (if any) to
# evaluate W_i for each point
alpha = np.sqrt(omega_x*omega_y)
#6) calculate B_1_new until the difference between B_1_new and B_1 is
# less than the tolerance provided
counter = 1
while counter < n_max:
W = (omega_x*omega_y)/(omega_x + (B_1**2)*omega_y - 2*B_1*r*alpha)
#4) use the observed points and W to calculate x_bar and y_bar from
# from which U V and beta can be evaluated for each point
x_bar = sum(W*x)/sum(W)
y_bar = sum(W*y)/sum(W)
U = x - x_bar
V = y - y_bar
beta = W*((U/omega_y)+(B_1*V/omega_x)-(B_1*U+V)*(r/alpha))
#5) use W U V and beta to calculate a new estimate of B_1
B_1_new = sum(W*beta*V)/sum(W*beta*U)
b_hist[counter] = B_1_new
if(abs(B_1_new-B_1)< tol):
B_1 = B_1_new
break
counter += 1
B_1 = B_1_new
#7) using the final value of B_1, x_bar, y_bar, calculate B_0
B_0 = y_bar - B_1*x_bar
#8) for each point x and y, calculate the adjusted values
x_adj = x_bar + beta
y_adj = y_bar + B_1*beta
#9) use x_adj and W to calc x_bar_adj and u abd v
x_bar_adj = sum(W*x_adj)/sum(W)
#y_bar_adj = sum(W*y_adj)/sum(W)
u = x_adj - x_bar_adj
#v = y_adj - y_bar_adj
#10) use W x_bar and u to calculate sigma_a and sigma_b
var_b = 1/(sum(W*u**2))
var_a = 1/sum(W)+(x_bar**2)*var_b
sigma_a_new = np.sqrt(var_a)
sigma_b_new = np.sqrt(var_b)
return B_0, B_1, sigma_a_new, sigma_b_new, b_hist, B_0_simple, B_1_simple
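# Illustrative call (a sketch with made-up arrays; in this likelihood the inputs come
# from the cepheid tables read in below, see Compute_abs_ceph_mag):
#   B_0, B_1, dB_0, dB_1, b_hist, B_0_s, B_1_s = york_fit(x, y, sigma_x, sigma_y, r=0, tol=10**-15, n_max=20)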
#######################################################################################
#CLASS DEFINITIONS OF STEPS OF THE DISTANCE LADDER#
#######################################################################################
# This class defines the structure in which Anchors with known distances
#and contain cepheids are imported
class Anchor:
"This class defines a structure which holds data for objects that have geometric distance measurements, and cepheid variables."
def __init__(self,Name='',Dist=0,dDist=0):
self.Name = Name
self.Dist = Dist
self.dDist = dDist
self.mu = 5*np.log10(self.Dist)-5
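        # error on mu from standard propagation of mu = 5*log10(D) - 5: dmu = (5/ln 10) * dD/D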
self.dmu = 5*np.log10(np.exp(1))*self.dDist/self.Dist
def Compute_abs_ceph_mag(self,period,dperiod,mh,dmh):
r = 0
tol = 10**-15
n = 20
[B_0, B_1, sigma_B_0, sigma_B_1, b_save, B_0_simple, B_1_simple] = york_fit(period,mh,dperiod,dmh,r,tol,n)
self.Mceph = B_0 - self.mu
self.dMceph = np.sqrt(np.power(sigma_B_0,2)+np.power(self.dmu,2))
# This class defines the structure in which the cephied data is
#imported, and the DM calculated to those cephieds
class Ceph_data:
"This class creates a structure which holds cepheid data for a given host"
def __init__(self,Host='',ID='',Period=0,V=0,dV=0,I=0,dI=0,NIR=0,dNIR=0,OH=0):
self.Host = Host
self.ID = ID
self.Period = Period
self.NIR = NIR
self.dNIR = dNIR
self.V = V
self.dV = dV
self.I = I
self.dI = dI
# R value is a correlation coeffecient, forced to be the same
#as what is used in previous analysis
self.R = 0.386
self.mh = NIR - self.R*(V-I)
self.dmh = np.sqrt(np.power(dNIR,2)+self.R*np.power(dV,2)+self.R*np.power(dI,2))
def proto_Compute_mu(self,mh,dmh,period,dperiod,Mceph,dMceph,slope,dslope):
[B0, B_1, dB0, dB1] = weighted_fixed_slope(period,mh,slope,dslope,dperiod,dmh)
self.mu = B0 - Mceph
self.dmu = np.sqrt(np.power(dB0,2)+np.power(dMceph,2))
# This class defines the structure in which the SN which are
#found with cephieds are imported
class Local_SN_data:
"This class creates a structure which holds SN data for a given host."
def __init__(self,Host='',ID='',m=0,dm=0):
self.Host = Host
self.ID = ID
self.m = m
self.dm = dm
def Compute_abs_sn_mag(self,m,dm,mu,dmu):
x = mu
y = m
        xerror = dmu
        yerror = dm
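        # fit m = Msn + 1*mu with the slope fixed to 1 (and zero slope error);
        # the intercept B0 is the weighted mean of (m - mu), i.e. the SN absolute magnitude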
[Msn, B1, dMsn, sigma_B1] = weighted_fixed_slope(x,y,1,0,xerror,yerror)
self.Msn = Msn
self.dMsn = dMsn
# This class defines the structure in which hubble flow SN are imported
class Hubble_SN_data:
"This class creates a structure which holds SN data in the hubble flow."
def __init__(self,ID='',z=0,dz=0,m=0,dm=0):
self.ID = ID
self.m = m
self.dm = dm
self.z = z
self.dz = dz
def Compute_hubble_mu(self,m,dm,Msn,dMsn):
self.mu = m - Msn
self.dmu = np.sqrt(np.power(dm,2)+np.power(dMsn,2))
class distanceladder(Likelihood):
# initialization routine. Here we will establish the subclasses needed to calculate the
#attributes of the distance ladder, up until the distance modulus calculation of
#hubble flow SN
def __init__(self, path, data, command_line):
#initialization of Likelihood class
Likelihood.__init__(self,path,data,command_line)
#simple linear regression used by Yorkfit for initial parameter estimation
##########################################################################################
#TABLE READERS#
##########################################################################################
def anchor_reader(file,index_array,header_length,delim):
base_dir = self.data_directory
#stable method to read in tabular data
file = open(base_dir + file)
table = file.readlines()
file.close()
#defining the length of the actual data of the table
i = len(table)-header_length
#preallocating the arrays for memory
host_list = []
dist_np = np.zeros(i)
ddist_np = np.zeros(i)
lc = 0
for line in table:
lc += 1
i = lc - header_length - 1
data = line.strip('\n').split(delim)
if lc>header_length:
if index_array[0] is not np.NaN:
host_list.append(data[index_array[0]])
else:
host_list.append(np.NaN)
if index_array[1] is not np.NaN:
dist_np[i] = data[index_array[1]]
else:
dist_np[i] = np.NaN
if index_array[2] is not np.NaN:
ddist_np[i] = data[index_array[2]]
else:
ddist_np[i] = np.NaN
name = []
for a in range(len(host_list)):
name.append([host_list[a],dist_np[a],ddist_np[a]])
return name
def ceph_table_reader(file,index_array,header_length,delim):
#file(string): location of table wished to be read in
#index_array(array): array containing the column index of the
#following attributes:
#host,ID,period,V,dV,I,dI,NIR,dNIR,OH
# if attribute does not occur in table, put -1
#host: index of host column in table
# list, strings
#ID: index of ID column in table
# list, strings
#period: index of period column in table
# np array, float
# the remainder of entries are the index of said variable in table
# all are np array's, with float values
#header_length(int): line number of last row of header
#name(string):user desired prefix for data
# name_host, name_ID, etc
#cc(string): character which splits your data
#All data should be located in this parent folder
base_dir = self.data_directory
#stable method to read in tabular data
file = open(base_dir + file)
table = file.readlines()
file.close()
#defining the length of the actual data of the table
i = len(table)-header_length
#preallocating the arrays for memory
host_list = []
ID_list = []
period_np = np.zeros(i)
V_np = np.zeros(i)
dV_np = np.zeros(i)
I_np = np.zeros(i)
dI_np = np.zeros(i)
NIR_np = np.zeros(i)
dNIR_np = np.zeros(i)
OH_np = np.zeros(i)
lc = 0
for line in table:
lc += 1
i = lc - header_length - 1
data = line.strip('\n').split(delim)
if lc>header_length:
if index_array[0] is not np.NaN:
host_list.append(data[index_array[0]])
else:
host_list.append(np.NaN)
if index_array[1] is not np.NaN:
ID_list.append(data[index_array[1]])
else:
ID_list.append(np.NaN)
if index_array[2] is not np.NaN:
period_np[i] = data[index_array[2]]
else:
period_np[i] = np.NaN
if index_array[3] is not np.NaN:
V_np[i] = data[index_array[3]]
else:
V_np[i] = np.NaN
if index_array[4] is not np.NaN:
dV_np[i] = data[index_array[4]]
else:
dV_np[i] = np.NaN
if index_array[5] is not np.NaN:
I_np[i] = data[index_array[5]]
else:
I_np[i] = np.NaN
if index_array[6] is not np.NaN:
dI_np[i] = data[index_array[6]]
else:
dI_np[i] = np.NaN
if index_array[7] is not np.NaN:
NIR_np[i] = data[index_array[7]]
else:
NIR_np[i] = np.NaN
if index_array[8] is not np.NaN:
dNIR_np[i] = data[index_array[8]]
else:
dNIR_np[i] = np.NaN
if index_array[9] is not np.NaN:
OH_np[i] = data[index_array[9]]
else:
OH_np[i] = np.NaN
name = []
for a in range(len(host_list)):
name.append([host_list[a],ID_list[a],period_np[a],V_np[a],dV_np[a],I_np[a],dI_np[a],NIR_np[a],dNIR_np[a],OH_np[a]])
return name
# reads in local SN data
def local_sn_table_reader(file,index_array,header_length,delim):
#All data should be located in this parent folder
base_dir = self.data_directory
#stable method to read in tabular data
file = open(base_dir + file)
table = file.readlines()
file.close()
#defining the length of the actual data of the table
i = len(table)-header_length
#preallocating the arrays for memory
host_list = []
ID_list = []
m_np = np.zeros(i)
dm_np = np.zeros(i)
lc = 0
for line in table:
lc += 1
i = lc - header_length - 1
data = line.strip('\n').split(delim)
if lc>header_length:
if index_array[0] is not np.NaN:
host_list.append(data[index_array[0]])
else:
host_list.append(np.NaN)
if index_array[1] is not np.NaN:
ID_list.append(data[index_array[1]])
else:
ID_list.append(np.NaN)
if index_array[2] is not np.NaN:
m_np[i] = data[index_array[2]]
else:
m_np[i] = np.NaN
if index_array[3] is not np.NaN:
dm_np[i] = data[index_array[3]]
else:
dm_np[i] = np.NaN
name = []
for a in range(len(host_list)):
name.append([host_list[a],ID_list[a],m_np[a],dm_np[a]])
            return name
import copy
import os
import sys
import warnings
from time import gmtime, strftime
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
from pandas.api.types import (
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
is_numeric_dtype,
is_object_dtype,
is_string_dtype,
CategoricalDtype,
)
from sklearn.model_selection import (
KFold,
train_test_split,
GridSearchCV,
StratifiedKFold,
cross_val_score,
RandomizedSearchCV,
)
from sklearn.preprocessing import (
StandardScaler,
RobustScaler,
PolynomialFeatures,
OrdinalEncoder,
LabelEncoder,
OneHotEncoder,
KBinsDiscretizer,
QuantileTransformer,
PowerTransformer,
MinMaxScaler,
)
class Machine:
"""
Documentation:
Description:
machine facilitates rapid machine learning experimentation tasks, including data
            cleaning, feature encoding, exploratory data analysis, data preparation, model building,
model tuning and model evaluation.
"""
# import mlmachine submodules
from .explore.eda_suite import (
df_side_by_side,
eda_cat_target_cat_feat,
eda_cat_target_num_feat,
eda_num_target_cat_feat,
eda_num_target_num_feat,
eda,
)
from .explore.eda_preprocessing import (
eda_missing_summary,
eda_skew_summary,
eda_transform_box_cox,
eda_transform_target,
eda_transform_log1,
)
from .features.preprocessing import (
GroupbyImputer,
DataFrameSelector,
DualTransformer,
KFoldEncoder,
PandasFeatureUnion,
PandasTransformer,
unique_category_levels,
compare_train_valid_levels,
missing_column_compare,
missing_summary,
skew_summary,
)
from .features.outlier import (
ExtendedIsoForest,
OutlierIQR,
outlier_summary,
outlier_IQR,
)
from .features.selection import FeatureSelector
from .model.evaluate.summarize import (
binary_prediction_summary,
regression_prediction_summary,
regression_results,
regression_stats,
top_bayes_optim_models,
)
from .model.evaluate.visualize import (
binary_classification_panel,
regression_panel,
)
from .model.explain.shap_explanations import (
create_shap_explainers
)
from .model.explain.shap_visualizations import (
multi_shap_value_tree,
multi_shap_viz_tree,
shap_dependence_grid,
shap_dependence_plot,
shap_summary_plot,
single_shap_value_tree,
single_shap_viz_tree,
)
from .model.tune.bayesian_optim_search import (
BayesOptimModelBuilder,
BayesOptimClassifierBuilder,
BayesOptimRegressorBuilder,
BasicClassifierBuilder,
BasicRegressorBuilder,
BasicModelBuilder,
exec_bayes_optim_search,
model_loss_plot,
model_param_plot,
objective,
sample_plot,
unpack_bayes_optim_summary,
)
from .model.tune.power_grid_search import (
PowerGridModelBuilder,
PowerGridSearcher,
)
from .model.tune.stack import (
model_stacker,
oof_generator,
)
def __init__(self, experiment_name, training_dataset, validation_dataset, remove_features=[], identify_as_boolean=None, identify_as_continuous=None, identify_as_count=None,
identify_as_date=None, identify_as_nominal=None, identify_as_ordinal=None, ordinal_encodings=None,
identify_as_string=None, target=None, is_classification=None, create_experiment_dir=None):
"""
Documentation:
---
Description:
__init__ handles initial processing of main data set. Creates DataFrame of independent
variables, Pandas Series containing dependent variable and a dictionary that categorizes
features by mlm data type.
---
Parameters:
experiment_name : str
Name of experiment, used to create sub-directory in experiments folder.
training_dataset : Pandas DataFrame
Training data provided as a Pandas DataFrame.
validation_dataset : Pandas DataFrame
Validation data provided as a Pandas DataFrame.
remove_features : list, default=[]
Features to be completely removed from dataset.
identify_as_boolean : list, default=None
Preidentified boolean features. Columns given boolean dtype.
identify_as_continuous : list, default=None
Preidentified continuous features. Columns given float64 dtype.
identify_as_count : list, default=None
Preidentified count features. Columns given int64 dtype.
identify_as_date : list, default=None
Preidentified date features. Columns given datetime64[ns] dtype.
identify_as_nominal : list, default=None
Preidentified nominal category features. Columns given category dtype.
identify_as_ordinal : list, default=None
Preidentified ordinal category features. Columns given category dtype. If
an ordinal_encodings dict is passed, the category column will be given the
specified order.
ordinal_encodings : dict, default=None
Dictionary where the key is the ordinal column name provided as a
string, and the associated value is a list containing the preferred
order of the values.
identify_as_string : list, default=None
Preidentified string features. Columns given string dtype.
target : list, default=None
Name of column containing dependent variable.
is_classification : boolean, default=None
Controls whether Machine is instantiated as a classification object or a
regression object.
create_experiment_dir : boolean, default=None
Controls whether a shell experiment directory gets created for storing
experiment objects.
---
Attributes:
data : Pandas DataFrame
Independent variables returned as a Pandas DataFrame.
target : Pandas Series
Dependent variable returned as a Pandas Series.
"""
self.experiment_name = experiment_name
self.remove_features = remove_features
self.training_target = training_dataset[target].squeeze() if target is not None else None
self.training_features = (
training_dataset.drop(self.remove_features + [self.training_target.name], axis=1)
if target is not None
else training_dataset.drop(self.remove_features, axis=1)
)
self.validation_target = validation_dataset[target].squeeze() if target is not None else None
self.validation_features = (
validation_dataset.drop(self.remove_features + [self.validation_target.name], axis=1)
if target is not None
else validation_dataset.drop(self.remove_features, axis=1)
)
self.identify_as_continuous = identify_as_continuous
self.identify_as_boolean = identify_as_boolean
self.identify_as_count = identify_as_count
self.identify_as_date = identify_as_date
self.identify_as_nominal = identify_as_nominal
self.identify_as_ordinal = identify_as_ordinal
self.ordinal_encodings = ordinal_encodings
self.identify_as_string = identify_as_string
self.is_classification = is_classification
if self.is_classification is None:
raise Exception ("Indicate whether supervised learning problem is classification or not by specifying 'is_classification=True' or 'is_classification=False'")
if self.identify_as_ordinal is not None and self.ordinal_encodings is None:
warnings.warn("Recommendation - Ordinal column names passed to 'identify_as_ordinal' variable but, no ordinal encoding instructions pass to 'ordinal_encodings' variable. It is recommended to pass a dictionary containing ordinal column names as keys and lists containing the preferred order of encoding as values", UserWarning)
# execute method capture_mlm_dtypes on training_features
# self.training_features = PreserveMetaData(self.training_features)
self.capture_mlm_dtypes()
# encode the target column in training and validation datasets if is_classification == True
if self.training_target is not None and self.is_classification:
self.training_target, self.le_ = self.encode_target(self.training_target)
self.validation_target, _ = self.encode_target(self.validation_target)
# create experiment directory tree
self.create_experiment_dir()
def capture_mlm_dtypes(self):
"""
Documentation:
--
Description:
Determine mlm dtype for each feature. Add determination to mlm_dtypes attribute
and set Pandas dtype in DataFrame accordingly.
"""
### populate mlm_dtypes dictionary with feature type label for each feature
self.training_features.mlm_dtypes = {}
### boolean
# mlmachine dtype capture
if isinstance(self.identify_as_boolean, list):
self.training_features.mlm_dtypes["boolean"] = self.identify_as_boolean
elif not isinstance(self.identify_as_boolean, list) and self.identify_as_boolean is not None:
raise AttributeError ("Variable passed to identify_as_boolean is not a list. Provide a list of column names, provide None or allow identify_as_boolean to default to None.")
elif self.identify_as_boolean is None:
self.training_features.mlm_dtypes["boolean"] = []
# Pandas dtype
for column in self.training_features.mlm_dtypes["boolean"]:
self.training_features[column] = self.training_features[column].astype("boolean")
### nominal category
# mlmachine dtype capture
if isinstance(self.identify_as_nominal, list):
self.training_features.mlm_dtypes["nominal"] = self.identify_as_nominal
elif not isinstance(self.identify_as_nominal, list) and self.identify_as_nominal is not None:
raise AttributeError ("Variable passed to identify_as_nominal is not a list. Provide a list of column names, provide None or allow identify_as_nominal to default to None.")
elif self.identify_as_nominal is None:
self.training_features.mlm_dtypes["nominal"] = []
# Pandas dtype
for column in self.training_features.mlm_dtypes["nominal"]:
self.training_features[column] = self.training_features[column].astype("category")
### ordinal category
# mlmachine dtype capture
if isinstance(self.identify_as_ordinal, list):
self.training_features.mlm_dtypes["ordinal"] = self.identify_as_ordinal
elif not isinstance(self.identify_as_ordinal, list) and self.identify_as_ordinal is not None:
raise AttributeError ("Variable passed to identify_as_ordinal is not a list. Provide a list of column names, provide None or allow identify_as_ordinal to default to None.")
elif isinstance(self.ordinal_encodings, dict):
self.training_features.mlm_dtypes["ordinal"] = list(self.ordinal_encodings.keys())
elif self.identify_as_ordinal is None and self.ordinal_encodings is None:
self.training_features.mlm_dtypes["ordinal"] = []
# Pandas dtype
if isinstance(self.ordinal_encodings, dict):
for column, order in self.ordinal_encodings.items():
category_type = CategoricalDtype(categories=order, ordered=True)
self.training_features[column] = self.training_features[column].astype(category_type)
for column in self.training_features.mlm_dtypes["ordinal"]:
self.training_features[column] = self.training_features[column].astype("category")
### continuous
# mlmachine dtype capture
if isinstance(self.identify_as_continuous, list):
self.training_features.mlm_dtypes["continuous"] = self.identify_as_continuous
elif not isinstance(self.identify_as_continuous, list) and self.identify_as_continuous is not None:
raise AttributeError ("Variable passed to identify_as_continuous is not a list. Either provider a list of column names, provide None or allow identify_as_continuous to default to None.")
elif self.identify_as_continuous is None:
self.training_features.mlm_dtypes["continuous"] = []
# Pandas dtype
for column in self.training_features.mlm_dtypes["continuous"]:
self.training_features[column] = self.training_features[column].astype("float64")
### count
# mlmachine dtype capture
if isinstance(self.identify_as_count, list):
self.training_features.mlm_dtypes["count"] = self.identify_as_count
elif not isinstance(self.identify_as_count, list) and self.identify_as_count is not None:
raise AttributeError ("Variable passed to identify_as_count is not a list. Provide a list of column names, provide None or allow identify_as_count to default to None.")
elif self.identify_as_count is None:
self.training_features.mlm_dtypes["count"] = []
# Pandas dtype
for column in self.training_features.mlm_dtypes["count"]:
try:
self.training_features[column] = self.training_features[column].astype("int64")
except ValueError:
self.training_features[column] = self.training_features[column].astype("float64")
### string
# mlmachine dtype capture
if isinstance(self.identify_as_string, list):
self.training_features.mlm_dtypes["string"] = self.identify_as_string
elif not isinstance(self.identify_as_string, list) and self.identify_as_string is not None:
raise AttributeError ("Variable passed to identify_as_string is not a list. Provide a list of column names, provide None or allow identify_as_string to default to None.")
elif self.identify_as_string is None:
self.training_features.mlm_dtypes["string"] = []
# Pandas dtype
for column in self.training_features.mlm_dtypes["string"]:
self.training_features[column] = self.training_features[column].astype("string")
### date
# mlmachine dtype capture
if isinstance(self.identify_as_date, list):
self.training_features.mlm_dtypes["date"] = self.identify_as_date
elif not isinstance(self.identify_as_date, list) and self.identify_as_date is not None:
raise AttributeError ("Variable passed to identify_as_date is not a list. Provide a list of column names, provide None or allow identify_as_date to default to None.")
elif self.identify_as_date is None:
self.training_features.mlm_dtypes["date"] = []
# Pandas dtype
for column in self.training_features.mlm_dtypes["date"]:
self.training_features[column] = self.training_features[column].astype("datetime64[ns]")
### untracked columns
# compile single list of features that have already been categorized
tracked_columns = [i for i in sum(self.training_features.mlm_dtypes.values(), [])]
# iterate through untracked columns and attempt mlmachine dtype identification
for column in [i for i in self.training_features.columns if i not in tracked_columns]:
# capture column statistics and characteristics
try:
value_mean = np.mean(self.training_features[column].dropna())
value_std = np.std(self.training_features[column].dropna())
except TypeError:
pass
# identify how many values in feature are zero or one
zeros_and_ones = (self.training_features[column].eq(0) | | |
{'k1': 'Libya', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Madagascar', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Malawi', 'k2': '?', 'k3': '?', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Mali', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Morocco', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Mauritius', 'k2': '100,000 rupees', 'k3': '5', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Mauritania', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Mozambique', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Namibia', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Niger', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Nigeria', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 1 MW / Any devices', 'k5': 'No', 'k6': 'No'},
{'k1': 'Uganda', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 0,5 MW / Any devices', 'k5': 'No', 'k6': 'No'},
{'k1': 'Rwanda', 'k2': '?', 'k3': '?', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Sao Tome And Principe', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Senegal', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Sierra Leone', 'k2': '?', 'k3': '?', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Somalia', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Sudan', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'South Sudan', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Tanzania', 'k2': '?', 'k3': '?', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Tchad', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Togo', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Tunisia', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Zambia', 'k2': '?', 'k3': '?', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Zimbabwe', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 100 kW / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Seychelles', 'k2': 'SCR 200,000', 'k3': '2', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'}
]
return jsonify(data)
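# A minimal client-side sketch for consuming the endpoint above (assumptions: the app is
# served locally on port 5000, the `requests` package is installed, and the Africa route
# follows the same naming pattern as the America route below):
#
#   import requests
#   rows = requests.get(
#       "http://127.0.0.1:5000/international_electricity_generation_without_license_in_africa"
#   ).json()
#   # keep only countries where free-energy devices may be used without a license (k4)
#   allowed = [row["k1"] for row in rows if row["k4"].startswith("Yes")]
#   print(allowed)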
# Get information about electricity generation without a license in America
# k1 = country
# k2 = amount of the fine for generating electricity for internal needs without a license while not connected to
# the national grid?
# k3 = number of years of imprisonment for generating electricity for internal needs without a license while not
# connected to any grid or network?
# k4 = may free-energy devices be used for internal needs without a license?
# k5 = may any jurisdiction inspect the power plant even though the producer does not hold a license?
# k6 = Flixbus in the capital?
@app.route("/international_electricity_generation_without_license_in_america")
def international_electricity_generation_without_license_in_america():
data = [
{'k1': 'Antigua And Barbuda', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices',
'k5': 'No', 'k6': 'No'},
{'k1': 'Argentina', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Bahamas', 'k2': '2000,00 €', 'k3': '2', 'k4': 'Yes up to 250 kW / any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Barbados', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 5 kW / any devices', 'k5': 'No', 'k6': 'No'},
{'k1': 'Belize', 'k2': '5000,00 €', 'k3': '1', 'k4': 'Yes up to 75 kW / any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Bolivia', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Brazil', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 50 MW / Any devices', 'k5': 'No', 'k6': 'No'},
{'k1': 'Canada', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Chile', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Colombia', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 1 MW / Any devices', 'k5': 'No', 'k6': 'No'},
{'k1': 'Costa Rica', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Cuba', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Dominican Republic', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices',
'k5': 'No', 'k6': 'No'},
{'k1': 'Dominique', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Ecuador', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Usa', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Grenada', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Guatemala', 'k2': '?', 'k3': '?', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Guyana', 'k2': '50000,00 €', 'k3': '5', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Haiti', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Honduras', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Jamaica', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Mexico', 'k2': '?', 'k3': '?', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Panama', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Nicaragua', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Paraguay', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Peru', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Saint Kitts And Nevis', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Saint Lucia', 'k2': '5000,00 €', 'k3': '1', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Saint Vincent And The Grenadines', 'k2': '750,00 €', 'k3': '1', 'k4': 'No', 'k5': 'Yes',
'k6': 'No'},
{'k1': 'El Salvador', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Suriname', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Trinidad And Tobago', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Uruguay', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Venezuela', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Puerto Rico', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices',
'k5': 'No', 'k6': 'No'},
{'k1': 'American Virgin Islands', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Galápagos island', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Archipel of San Andrés, Providencia and Santa Catalina', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?',
'k6': 'No'},
{'k1': 'Navasse island', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Cayman Islands', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices',
'k5': 'No', 'k6': 'No'},
{'k1': 'Greenland', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Turks And Caicos Islands', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'British Virgin Islands', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices',
'k5': 'No', 'k6': 'No'},
{'k1': 'Caribbean Netherlands', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Anguilla', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Aruba', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Montserrat', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
'k6': 'No'},
{'k1': 'Clipperton island', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Bermuda', 'k2': '50000,00 €', 'k3': '5', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Curaçao', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Bonaire', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1':
m.x1236 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1237 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1238 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1239 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1240 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1241 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1242 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1243 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1244 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1245 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1246 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1247 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1248 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1249 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1250 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1251 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1252 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1253 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1254 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1255 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1256 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1257 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1258 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1259 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1260 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1261 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1262 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1263 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1264 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1265 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1266 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1267 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1268 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1269 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1270 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1271 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1272 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1273 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1274 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1275 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1276 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1277 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1278 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1279 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1280 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1281 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1282 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1283 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1284 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1285 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1286 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1287 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1288 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1289 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1290 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1291 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1292 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1293 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1294 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1295 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1296 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1297 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1298 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1299 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1300 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1301 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1302 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1303 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1304 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1305 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1306 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1307 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1308 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1309 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1310 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1311 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1312 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1313 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1314 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1315 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1316 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1317 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1318 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1319 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1320 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1321 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1322 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1323 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1324 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1325 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1326 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1327 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1328 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1329 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1330 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1331 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1332 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1333 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1334 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1335 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1336 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1337 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1338 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1339 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1340 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1341 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1342 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1343 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1344 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1345 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1346 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1347 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1348 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1349 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1350 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1351 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1352 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1353 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1354 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1355 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1356 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1357 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1358 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1359 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1360 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1361 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1362 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1363 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1364 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1365 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1366 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1367 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1368 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1369 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1370 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1371 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1372 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1373 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1374 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1375 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1376 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1377 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1378 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1379 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1380 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1381 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1382 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1383 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1384 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1385 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1386 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1387 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1388 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1389 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1390 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1391 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1392 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1393 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1394 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1395 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1396 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1397 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1398 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1399 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1400 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1401 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1402 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1403 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1404 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1405 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1406 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1407 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1408 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1409 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1410 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1411 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1412 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1413 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1414 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1415 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1416 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1417 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1418 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1419 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1420 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1421 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1422 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1423 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1424 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1425 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1426 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1427 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1428 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1429 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1430 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1431 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1432 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1433 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1434 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1435 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1436 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1437 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1438 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1439 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1440 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1441 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1442 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1443 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1444 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1445 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1446 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1447 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1448 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1449 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1450 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1451 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1452 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1453 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1454 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1455 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1456 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1457 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1458 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1459 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1460 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1461 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1462 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1463 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1464 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1465 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1466 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1467 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1468 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1469 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1470 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1471 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1472 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1473 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1474 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1475 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1476 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1477 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1478 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1479 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1480 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1481 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1482 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1483 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1484 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1485 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1486 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1487 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1488 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1489 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1490 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1491 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1492 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1493 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1494 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1495 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1496 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1497 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1498 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1499 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1500 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1501 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1502 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1503 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1504 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1505 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1506 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1507 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1508 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1509 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1510 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1511 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1512 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1513 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1514 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1515 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1516 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1517 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1518 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1519 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1520 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1521 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1522 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1523 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1524 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1525 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1526 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1527 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1528 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1529 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1530 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1531 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1532 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1533 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1534 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1535 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1536 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1537 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1538 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1539 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1540 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1541 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1542 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1543 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1544 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1545 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1546 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1547 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1548 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1549 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1550 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1551 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1552 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1553 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1554 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1555 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1556 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1557 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1558 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1559 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1560 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1561 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1562 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1563 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1564 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1565 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1566 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1567 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1568 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1569 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1570 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1571 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1572 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1573 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1574 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1575 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1576 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1577 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1578 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1579 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1580 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1581 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1582 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1583 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1584 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1585 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1586 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1587 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1588 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1589 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1590 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1591 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1592 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1593 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1594 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1595 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1596 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1597 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1598 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1599 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1600 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1601 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1602 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1603 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1604 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1605 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1606 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1607 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1608 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1609 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1610 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1611 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1612 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1613 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1614 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1615 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1616 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1617 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1618 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1619 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1620 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1621 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1622 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1623 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1624 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1625 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1626 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1627 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1628 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1629 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1630 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1631 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1632 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1633 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1634 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1635 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1636 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1637 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1638 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1639 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1640 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1641 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1642 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1643 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1644 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1645 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1646 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1647 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1648 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1649 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1650 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1651 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1652 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1653 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1654 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1655 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1656 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1657 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1658 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1659 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1660 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1661 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1662 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1663 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1664 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1665 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1666 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1667 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1668 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1669 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1670 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1671 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1672 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1673 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1674 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1675 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1676 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1677 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1678 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1679 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1680 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1681 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1682 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1683 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1684 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1685 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1686 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1687 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1688 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1689 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1690 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1691 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1692 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1693 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1694 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1695 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1696 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1697 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1698 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1699 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1700 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1701 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1702 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1703 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1704 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1705 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1706 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1707 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1708 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1709 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1710 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1711 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1712 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1713 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1714 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1715 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1716 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1717 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1718 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1719 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1720 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1721 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1722 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1723 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1724 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1725 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1726 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1727 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1728 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1729 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1730 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1731 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1732 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1733 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1734 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1735 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1736 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1737 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1738 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1739 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1740 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1741 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1742 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1743 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1744 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1745 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1746 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1747 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1748 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1749 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1750 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1751 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1752 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1753 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1754 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1755 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1756 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1757 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1758 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1759 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1760 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1761 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1762 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1763 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1764 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1765 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1766 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1767 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1768 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1769 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1770 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1771 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1772 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1773 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1774 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1775 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1776 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1777 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1778 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1779 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1780 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1781 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1782 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1783 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1784 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1785 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1786 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1787 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1788 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1789 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1790 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1791 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1792 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1793 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1794 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1795 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1796 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1797 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1798 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1799 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1800 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1801 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1802 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1803 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1804 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1805 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1806 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1807 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1808 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1809 = Var(within=Reals,bounds=(-1,1),initialize=0.001)
m.x1810 = Var(within=Reals,bounds=(None,None),initialize=0.005)
m.x1812 = Var(within=Reals,bounds=(15.2083333333333,15.2083333333333),initialize=15.2083333333333)
m.x1813 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1814 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1815 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1816 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1817 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1818 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1819 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1820 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1821 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1822 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1823 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1824 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1825 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1826 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1827 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1828 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1829 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1830 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1831 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1832 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1833 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1834 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1835 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1836 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1837 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1838 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1839 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1840 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1841 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1842 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1843 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1844 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1845 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1846 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1847 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1848 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1849 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1850 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1851 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1852 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1853 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1854 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1855 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1856 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1857 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1858 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1859 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1860 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1861 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1862 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1863 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1864 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1865 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1866 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1867 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1868 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1869 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1870 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1871 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1872 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1873 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1874 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1875 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1876 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
m.x1877 = Var(within=Reals,bounds=(0.0001,None),initialize=15.2083333333333)
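# The scalar declarations above follow the one-Var-per-variable pattern typical of
# machine-generated Pyomo models (e.g. GAMS conversions). A hand-written model would
# usually use a single indexed Var instead; a rough, equivalent sketch for the
# (-1, 1) block only (index range taken from the declarations above):
#
#   from pyomo.environ import ConcreteModel, Var, Reals
#   m2 = ConcreteModel()
#   m2.x = Var(range(1236, 1810), within=Reals, bounds=(-1, 1), initialize=0.001)
#   # m2.x[1236] ... m2.x[1809] play the same role as m.x1236 ... m.x1809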
= 'in-use'
db.volume_create(self.context, self.volume_attrs)
# start test
self.assertRaises(exception.ImageNotFound,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_driver_not_initialized(self):
# creating volume testdata
db.volume_create(self.context, self.volume_attrs)
# set initialized to False
self.volume.driver._initialized = False
# start test
self.assertRaises(exception.DriverNotInitialized,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume.status)
def test_copy_volume_to_image_driver_exception(self):
self.image_meta['id'] = self.image_id
image_service = fake_image.FakeImageService()
# create new image in queued state
queued_image_id = 'd5133f15-f753-41bd-920a-06b8c49275d9'
queued_image_meta = image_service.show(self.context, self.image_id)
queued_image_meta['id'] = queued_image_id
queued_image_meta['status'] = 'queued'
image_service.create(self.context, queued_image_meta)
# create new image in saving state
saving_image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
saving_image_meta = image_service.show(self.context, self.image_id)
saving_image_meta['id'] = saving_image_id
saving_image_meta['status'] = 'saving'
image_service.create(self.context, saving_image_meta)
# create volume
self.volume_attrs['status'] = 'available'
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
with mock.patch.object(self.volume.driver,
'copy_volume_to_image') as driver_copy_mock:
driver_copy_mock.side_effect = exception.VolumeDriverException(
"Error")
# test with image not in queued state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# image shouldn't be deleted if it is not in queued state
image_service.show(self.context, self.image_id)
# test with image in queued state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
queued_image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# queued image should be deleted
self.assertRaises(exception.ImageNotFound,
image_service.show,
self.context,
queued_image_id)
# test with image in saving state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
saving_image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# image in saving state should be deleted
self.assertRaises(exception.ImageNotFound,
image_service.show,
self.context,
saving_image_id)
@mock.patch.object(QUOTAS, 'reserve')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(vol_manager.VolumeManager, 'create_volume')
@mock.patch.object(fake_driver.FakeISCSIDriver, 'copy_volume_to_image')
def _test_copy_volume_to_image_with_image_volume(
self, mock_copy, mock_create, mock_quota_commit,
mock_quota_reserve):
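# stacked mock.patch.object decorators are applied bottom-up, so the injected mocks map
# in reverse order: mock_copy <- copy_volume_to_image, mock_create <- create_volume,
# mock_quota_commit <- commit, mock_quota_reserve <- reserve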
self.flags(glance_api_version=2)
self.volume.driver.configuration.image_upload_use_cinder_backend = True
image_service = fake_image.FakeImageService()
image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
self.image_meta['id'] = image_id
self.image_meta['status'] = 'queued'
image_service.create(self.context, self.image_meta)
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
def fake_create(context, volume_id, **kwargs):
db.volume_update(context, volume_id, {'status': 'available'})
mock_create.side_effect = fake_create
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# return create image
image = image_service.show(self.context, image_id)
image_service.delete(self.context, image_id)
return image
def test_copy_volume_to_image_with_image_volume(self):
image = self._test_copy_volume_to_image_with_image_volume()
self.assertTrue(image['locations'][0]['url'].startswith('cinder://'))
def test_copy_volume_to_image_with_image_volume_qcow2(self):
self.image_meta['disk_format'] = 'qcow2'
image = self._test_copy_volume_to_image_with_image_volume()
self.assertIsNone(image.get('locations'))
@mock.patch.object(vol_manager.VolumeManager, 'delete_volume')
@mock.patch.object(fake_image._FakeImageService, 'add_location',
side_effect=exception.Invalid)
def test_copy_volume_to_image_with_image_volume_failure(
self, mock_add_location, mock_delete):
image = self._test_copy_volume_to_image_with_image_volume()
self.assertIsNone(image.get('locations'))
self.assertTrue(mock_delete.called)
class GetActiveByWindowTestCase(BaseVolumeTestCase):
def setUp(self):
super(GetActiveByWindowTestCase, self).setUp()
self.ctx = context.get_admin_context(read_deleted="yes")
self.db_vol_attrs = [
{
'id': fake.VOLUME_ID,
'host': 'devstack',
'project_id': fake.PROJECT_ID,
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1),
},
{
'id': fake.VOLUME2_ID,
'host': 'devstack',
'project_id': fake.PROJECT_ID,
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1),
},
{
'id': fake.VOLUME3_ID,
'host': 'devstack',
'project_id': fake.PROJECT_ID,
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1),
},
{
'id': fake.VOLUME4_ID,
'host': 'devstack',
'project_id': fake.PROJECT_ID,
'created_at': datetime.datetime(1, 3, 10, 1, 1, 1),
},
{
'id': fake.VOLUME5_ID,
'host': 'devstack',
'project_id': fake.PROJECT_ID,
'created_at': datetime.datetime(1, 5, 1, 1, 1, 1),
}
]
self.db_snap_attrs = [
{
'id': fake.SNAPSHOT_ID,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True,
'status': fields.SnapshotStatus.DELETED,
'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT2_ID,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True,
'status': fields.SnapshotStatus.DELETED,
'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT3_ID,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True,
'status': fields.SnapshotStatus.DELETED,
'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT_ID,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 3, 10, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT2_ID,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 5, 1, 1, 1, 1),
'volume_id': fake.VOLUME_ID
}
]
def test_volume_get_active_by_window(self):
# Find all volumes valid within a timeframe window.
# Not in window
db.volume_create(self.ctx, self.db_vol_attrs[0])
# In - deleted in window
db.volume_create(self.ctx, self.db_vol_attrs[1])
# In - deleted after window
db.volume_create(self.ctx, self.db_vol_attrs[2])
# In - created in window
db.volume_create(self.context, self.db_vol_attrs[3])
# Not in window.
db.volume_create(self.context, self.db_vol_attrs[4])
volumes = db.volume_get_active_by_window(
self.context,
datetime.datetime(1, 3, 1, 1, 1, 1),
datetime.datetime(1, 4, 1, 1, 1, 1),
project_id=fake.PROJECT_ID)
self.assertEqual(3, len(volumes))
self.assertEqual(fake.VOLUME2_ID, volumes[0].id)
self.assertEqual(fake.VOLUME3_ID, volumes[1].id)
self.assertEqual(fake.VOLUME4_ID, volumes[2].id)
def test_snapshot_get_active_by_window(self):
# Find all snapshots valid within a timeframe window.
db.volume_create(self.context, {'id': fake.VOLUME_ID})
for i in range(5):
self.db_vol_attrs[i]['volume_id'] = fake.VOLUME_ID
# Not in window
del self.db_snap_attrs[0]['id']
snap1 = objects.Snapshot(self.ctx, **self.db_snap_attrs[0])
snap1.create()
# In - deleted in window
del self.db_snap_attrs[1]['id']
snap2 = objects.Snapshot(self.ctx, **self.db_snap_attrs[1])
snap2.create()
# In - deleted after window
del self.db_snap_attrs[2]['id']
snap3 = objects.Snapshot(self.ctx, **self.db_snap_attrs[2])
snap3.create()
# In - created in window
del self.db_snap_attrs[3]['id']
snap4 = objects.Snapshot(self.ctx, **self.db_snap_attrs[3])
snap4.create()
# Not in window.
del self.db_snap_attrs[4]['id']
snap5 = objects.Snapshot(self.ctx, **self.db_snap_attrs[4])
snap5.create()
snapshots = objects.SnapshotList.get_active_by_window(
self.context,
datetime.datetime(1, 3, 1, 1, 1, 1),
datetime.datetime(1, 4, 1, 1, 1, 1)).objects
self.assertEqual(3, len(snapshots))
self.assertEqual(snap2.id, snapshots[0].id)
self.assertEqual(fake.VOLUME_ID, snapshots[0].volume_id)
self.assertEqual(snap3.id, snapshots[1].id)
self.assertEqual(fake.VOLUME_ID, snapshots[1].volume_id)
self.assertEqual(snap4.id, snapshots[2].id)
self.assertEqual(fake.VOLUME_ID, snapshots[2].volume_id)
class DriverTestCase(test.TestCase):
"""Base Test class for Drivers."""
driver_name = "cinder.volume.driver.FakeBaseDriver"
def setUp(self):
super(DriverTestCase, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(volume_driver=self.driver_name,
volumes_dir=vol_tmpdir)
self.volume = importutils.import_object(CONF.volume_manager)
self.context = context.get_admin_context()
self.output = ""
self.configuration = conf.Configuration(None)
self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
def _fake_execute(_command, *_args, **_kwargs):
"""Fake _execute."""
return self.output, None
exec_patcher = mock.patch.object(self.volume.driver, '_execute',
_fake_execute)
exec_patcher.start()
self.addCleanup(exec_patcher.stop)
self.volume.driver.set_initialized()
self.addCleanup(self._cleanup)
def _cleanup(self):
try:
shutil.rmtree(CONF.volumes_dir)
except OSError:
pass
def _attach_volume(self):
"""Attach volumes to an instance."""
return []
def _detach_volume(self, volume_id_list):
"""Detach volumes from an instance."""
for volume_id in volume_id_list:
db.volume_detached(self.context, volume_id)
self.volume.delete_volume(self.context, volume_id)
class GenericVolumeDriverTestCase(DriverTestCase):
"""Test case for VolumeDriver."""
driver_name = "cinder.tests.unit.fake_driver.LoggingVolumeDriver"
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_available(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup = tests_utils.create_backup(self.context,
vol['id'])
backup_obj = objects.Backup.get_by_id(self.context, backup.id)
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver.create_snapshot = mock.MagicMock()
self.volume.driver.delete_snapshot = mock.MagicMock()
mock_volume_get.return_value = vol
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
def test_create_temp_cloned_volume(self):
with mock.patch.object(
self.volume.driver,
'create_cloned_volume') as mock_create_cloned_volume:
model_update = {'provider_location': 'dummy'}
mock_create_cloned_volume.return_value = model_update
vol = tests_utils.create_volume(self.context,
status='backing-up')
cloned_vol = self.volume.driver._create_temp_cloned_volume(
self.context, vol)
self.assertEqual('dummy', cloned_vol.provider_location)
self.assertEqual('available', cloned_vol.status)
mock_create_cloned_volume.return_value = None
vol = tests_utils.create_volume(self.context,
status='backing-up')
cloned_vol = self.volume.driver._create_temp_cloned_volume(
self.context, vol)
self.assertEqual('available', cloned_vol.status)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_inuse_temp_volume(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context,
status='backing-up',
previous_status='in-use')
temp_vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup = tests_utils.create_backup(self.context,
vol['id'])
backup_obj = objects.Backup.get_by_id(self.context, backup.id)
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver._create_temp_cloned_volume = mock.MagicMock()
self.volume.driver._delete_temp_volume = mock.MagicMock()
mock_volume_get.return_value = vol
self.volume.driver._create_temp_cloned_volume.return_value = temp_vol
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
self.volume.driver._create_temp_cloned_volume.assert_called_once_with(
self.context, vol)
self.volume.driver._delete_temp_volume.assert_called_once_with(
self.context, temp_vol)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch('six.moves.builtins.open')
def test_restore_backup(self,
mock_open,
mock_get_connector_properties,
mock_temporary_chown):
dev_null = '/dev/null'
vol = tests_utils.create_volume(self.context)
backup = {'volume_id': vol['id'], 'id': 'backup-for-%s' % vol['id']}
properties = {}
attach_info = {'device': {'path': dev_null}}
root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf'
volume_file = mock.MagicMock()
mock_open.return_value.__enter__.return_value = volume_file
mock_get_connector_properties.return_value = properties
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver.secure_file_operations_enabled = mock.MagicMock()
self.volume.driver.secure_file_operations_enabled.side_effect = (False,
True)
backup_service = mock.MagicMock()
for i in (1, 2):
self.volume.driver.restore_backup(self.context, backup, vol,
backup_service)
mock_get_connector_properties.assert_called_with(root_helper,
CONF.my_ip,
False, False)
self.volume.driver._attach_volume.assert_called_with(
self.context, vol, properties)
self.assertEqual(i, self.volume.driver._attach_volume.call_count)
self.volume.driver._detach_volume.assert_called_with(
self.context, attach_info, vol, properties)
self.assertEqual(i, self.volume.driver._detach_volume.call_count)
self.volume.driver.secure_file_operations_enabled.\
assert_called_with()
self.assertEqual(
i,
self.volume.driver.secure_file_operations_enabled.call_count
)
mock_temporary_chown.assert_called_once_with(dev_null)
mock_open.assert_called_with(dev_null, 'wb')
self.assertEqual(i, mock_open.call_count)
backup_service.restore.assert_called_with(backup, vol['id'],
volume_file)
self.assertEqual(i, backup_service.restore.call_count)
def test_get_backup_device_available(self):
vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup = tests_utils.create_backup(self.context,
vol['id'])
backup_obj = objects.Backup.get_by_id(self.context, backup.id)
(backup_device, is_snapshot) = self.volume.driver.get_backup_device(
self.context, backup_obj)
volume = objects.Volume.get_by_id(self.context, vol.id)
self.assertEqual(volume, backup_device)
self.assertFalse(is_snapshot)
backup_obj = objects.Backup.get_by_id(self.context, backup.id)
self.assertIsNone(backup.temp_volume_id)
def test_get_backup_device_in_use(self):
vol = tests_utils.create_volume(self.context,
status='backing-up',
previous_status='in-use')
temp_vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup = tests_utils.create_backup(self.context,
vol['id'])
backup_obj = objects.Backup.get_by_id(self.context, backup.id)
with mock.patch.object(
self.volume.driver,
'_create_temp_cloned_volume') as mock_create_temp:
mock_create_temp.return_value = temp_vol
(backup_device, is_snapshot) = (
self.volume.driver.get_backup_device(self.context,
backup_obj))
self.assertEqual(temp_vol, backup_device)
self.assertFalse(is_snapshot)
backup_obj = objects.Backup.get_by_id(self.context, backup.id)
self.assertEqual(temp_vol.id, backup_obj.temp_volume_id)
def test__create_temp_volume_from_snapshot(self):
volume_dict = {'id': fake.SNAPSHOT_ID,
'host': 'fakehost',
'availability_zone': 'fakezone',
'size': 1}
vol = fake_volume.fake_volume_obj(self.context, **volume_dict)
snapshot = fake_snapshot.fake_snapshot_obj(self.context)
with mock.patch.object(
self.volume.driver,
'create_volume_from_snapshot'):
temp_vol = self.volume.driver._create_temp_volume_from_snapshot(
self.context,
vol, snapshot)
import json
import time
import requests
import re
from flask import Flask, render_template, jsonify
from pyecharts.charts import Map, Timeline,Kline,Line,Bar,WordCloud
from pyecharts import options as opts
from pyecharts.globals import SymbolType
app = Flask(__name__)
# Chinese -> English country-name mapping, kept as a static dict to avoid Google Translate API call limits
cn_to_en = {'安哥拉': 'Angola', '阿富汗': 'Afghanistan', '阿尔巴尼亚': 'Albania', '阿尔及利亚': 'Algeria', '安道尔共和国': 'Andorra', '安圭拉岛': 'Anguilla', '安提瓜和巴布达': 'Antigua and Barbuda',
'阿根廷': 'Argentina', '亚美尼亚': 'Armenia', '阿森松': 'Ascension', '澳大利亚': 'Australia', '奥地利': 'Austria', '阿塞拜疆': 'Azerbaijan', '巴哈马': 'Bahamas', '巴林': 'Bahrain',
'孟加拉国': 'Bangladesh', '巴巴多斯': 'Barbados', '白俄罗斯': 'Belarus', '比利时': 'Belgium', '伯利兹': 'Belize', '贝宁': 'Benin', '百慕大群岛': 'Bermuda Is', '玻利维亚': 'Bolivia',
'博茨瓦纳': 'Botswana', '巴西': 'Brazil', '文莱': 'Brunei', '保加利亚': 'Bulgaria', '布基纳法索': 'Burkina Faso', '缅甸': 'Burma', '布隆迪': 'Burundi', '喀麦隆': 'Cameroon',
'加拿大': 'Canada', '开曼群岛': 'Cayman Is', '中非共和国': 'Central African Republic', '乍得': 'Chad', '智利': 'Chile', '中国': 'China', '哥伦比亚': 'Colombia', '刚果': 'Congo',
'库克群岛': 'Cook Is', '哥斯达黎加': 'Costa Rica', '古巴': 'Cuba', '塞浦路斯': 'Cyprus', '捷克': 'Czech Republic', '丹麦': 'Denmark', '吉布提': 'Djibouti', '多米尼加共和国': 'Dominica Rep',
'厄瓜多尔': 'Ecuador', '埃及': 'Egypt', '萨尔瓦多': 'EI Salvador', '爱沙尼亚': 'Estonia', '埃塞俄比亚': 'Ethiopia', '斐济': 'Fiji', '芬兰': 'Finland', '法国': 'France', '法属圭亚那': 'French Guiana',
'法属玻利尼西亚': 'French Polynesia', '加蓬': 'Gabon', '冈比亚': 'Gambia', '格鲁吉亚': 'Georgia', '德国': 'Germany', '加纳': 'Ghana', '直布罗陀': 'Gibraltar', '希腊': 'Greece', '格林纳达': 'Grenada',
'关岛': 'Guam', '危地马拉': 'Guatemala', '几内亚': 'Guinea', '圭亚那': 'Guyana', '海地': 'Haiti', '洪都拉斯': 'Honduras', '香港': 'Hongkong', '匈牙利': 'Hungary', '冰岛': 'Iceland', '印度': 'India',
'印度尼西亚': 'Indonesia', '伊朗': 'Iran', '伊拉克': 'Iraq', '爱尔兰':'Ireland', '以色列': 'Israel', '意大利': 'Italy', '科特迪瓦': 'Ivory Coast', '牙买加': 'Jamaica', '日本': 'Japan', '约旦': 'Jordan',
'柬埔寨': 'Kampuchea (Cambodia )', '哈萨克斯坦': 'Kazakstan', '肯尼亚': 'Kenya', '韩国': 'Korea', '科威特': 'Kuwait', '吉尔吉斯坦': 'Kyrgyzstan', '老挝': 'Laos', '拉脱维亚': 'Latvia', '黎巴嫩': 'Lebanon',
'莱索托': 'Lesotho', '利比里亚': 'Liberia', '利比亚': 'Libya', '列支敦士登': 'Liechtenstein', '立陶宛': 'Lithuania', '卢森堡': 'Luxembourg', '澳门': 'Macao', '马达加斯加': 'Madagascar',
'马拉维': 'Malawi', '马来西亚': 'Malaysia', '马尔代夫': 'Maldives', '马里': 'Mali', '马耳他': 'Malta', '马里亚那群岛': 'Mariana Is', '马提尼克': 'Martinique', '毛里求斯': 'Mauritius', '墨西哥': 'Mexico',
'摩尔多瓦': 'Moldova', '摩纳哥': 'Monaco', '蒙古': 'Mongolia', '蒙特塞拉特岛': 'Montserrat Is', '摩洛哥': 'Morocco', '莫桑比克': 'Mozambique', '纳米比亚': 'Namibia', '瑙鲁': 'Nauru', '尼泊尔': 'Nepal',
'荷属安的列斯': 'Netheriands Antilles', '荷兰': 'Netherlands', '新西兰': 'New Zealand', '尼加拉瓜': 'Nicaragua', '尼日尔': 'Niger', '尼日利亚': 'Nigeria', '朝鲜': 'North Korea', '挪威': 'Norway',
'阿曼': 'Oman', '巴基斯坦': 'Pakistan', '巴拿马':'Panama', '巴布亚新几内亚': 'Papua New Cuinea', '巴拉圭': 'Paraguay', '秘鲁': 'Peru', '菲律宾': 'Philippines', '波兰': 'Poland', '葡萄牙': 'Portugal',
'波多黎各': 'Puerto Rico', '卡塔尔': 'Qatar', '留尼旺': 'Reunion', '罗马尼亚': 'Romania', '俄罗斯': 'Russia', '圣卢西亚': 'St.Lucia', '圣文森特岛': 'Saint Vincent', '东萨摩亚(美)': 'Samoa Eastern',
'西萨摩亚': 'Samoa Western', '圣马力诺': 'San Marino', '圣多美和普林西比': 'Sao Tome and Principe', '沙特阿拉伯': 'Saudi Arabia', '塞内加尔': 'Senegal', '塞舌尔': 'Seychelles', '塞拉利昂': 'Sierra Leone',
'新加坡': 'Singapore', '斯洛伐克': 'Slovakia', '斯洛文尼亚': 'Slovenia', '所罗门群岛': 'Solomon Is', '索马里': 'Somali', '南非': 'South Africa', '西班牙': 'Spain', '斯里兰卡': 'SriLanka',
'圣文森特': 'St.Vincent', '苏丹': 'Sudan', '苏里南': 'Suriname', '斯威士兰': 'Swaziland', '瑞典': 'Sweden', '瑞士': 'Switzerland', '叙利亚': 'Syria', '台湾省': 'Taiwan', '塔吉克斯坦': 'Tajikstan',
'坦桑尼亚': 'Tanzania', '泰国': 'Thailand', '多哥': 'Togo', '汤加': 'Tonga', '特立尼达和多巴哥': 'Trinidad and Tobago', '突尼斯': 'Tunisia', '土耳其': 'Turkey', '土库曼斯坦': 'Turkmenistan',
'乌干达': 'Uganda', '乌克兰': 'Ukraine', '阿联酋': 'United Arab Emirates', '英国': 'United Kiongdom', '美国': 'United States', '乌拉圭': 'Uruguay', '乌兹别克斯坦': 'Uzbekistan',
'委内瑞拉': 'Venezuela', '越南': 'Vietnam', '也门': 'Yemen', '南斯拉夫': 'Yugoslavia', '津巴布韦': 'Zimbabwe', '扎伊尔': 'Zaire', '赞比亚': 'Zambia','克罗地亚':'Croatia','北马其顿':'North Macedonia'}
def update_news():
url = 'https://opendata.baidu.com/data/inner?tn=reserved_all_res_tn&dspName=iphone&from_sf=1&dsp=iphone&resource_id=28565&alr=1&query=%E8%82%BA%E7%82%8E'
r = json.loads(requests.get(url).text)
top10 = r['Result'][0]['items_v2'][0]['aladdin_res']['DisplayData']['result']['items'][:5] #list
news_data = []
for r in top10:
news_data.append({
'title': r['eventDescription'],
'sourceUrl': r['eventUrl'],
'infoSource': time.strftime('%m-%d %H:%M:%S', time.localtime(int(r['eventTime']))) + ' ' + r['siteName'] # timestamp + news source
}) # build the trimmed news list
return news_data
def update_overall():
url = 'http://lab.isaaclin.cn/nCoV/api/overall'
overall_data = json.loads(requests.get(url).text) # parse the JSON response
overall_data['time'] = time.strftime("%m-%d %H:%M", time.localtime(time.time())) # current time
# time.time() --> '1580232854.7124019'
## time.localtime(time.time()) --> 'time.struct_time(tm_year=2020, tm_mon=1, tm_mday=29, tm_hour=1, tm_min=34, tm_sec=36, tm_wday=2, tm_yday=29, tm_isdst=0)'
### time.strftime("%m-%d %H:%M", time.localtime(time.time())) ---> '01-29 01:37' current month, day, hour and minute
return overall_data
#
def update_hotnews():
url = 'https://i-lq.snssdk.com/api/feed/hotboard_online/v1/?is_in_channel=1&count=5&fe_source=news_hot&tab_name=stream&is_web_refresh=1&client_extra_params={%22hot_board_source%22:%22news_hot%22,%22fe_version%22:%22v10%22}&extra={%22CardStyle%22:0,%22JumpToWebList%22:true}&category=hotboard_online&update_version_code=75717'
r = requests.get(url).text # raw response text; titles are extracted with the regex below
data = re.findall(r'title\\":\\"(.*?)\\',r)[:-1]
return data #list
def word_cloud() -> WordCloud:
url = 'https://i-lq.snssdk.com/api/feed/hotboard_online/v1/?is_in_channel=1&count=10&fe_source=news_hot&tab_name=stream&is_web_refresh=1&client_extra_params={%22hot_board_source%22:%22news_hot%22,%22fe_version%22:%22v10%22}&extra={%22CardStyle%22:0,%22JumpToWebList%22:true}&category=hotboard_online&update_version_code=75717'
r = requests.get(url).text # raw response text; titles are extracted with the regex below
data = re.findall(r'title\\":\\"(.*?)\\',r)[:-1]
datanum = [8,7,6,5,5,4,4,2,1,1]
words = [w for w in zip(data,datanum)]
c = (
WordCloud()
.add("", words, word_size_range=[20, 100], shape=SymbolType.DIAMOND)
.set_global_opts(title_opts=opts.TitleOpts(title="WordCloud-shape-diamond"))
)
return c
def update_china_data(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) # parse the nested JSON data into a dict ('chinaTotal', ...)
p_data = {}
#print(data['areaTree'][0]['children'][0])
for i in data['areaTree'][0]['children']: # each province
p_data[i['name']] = i['total']['confirm']
# sort the dict items by value, descending
p_data= sorted(p_data.items(), key=lambda x: x[1], reverse=True)
#print(p_data)
return p_data
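# Note: update_china_data() returns a list of (province_name, confirmed_count) pairs
# sorted by count in descending order, which is the (name, value) pair format that
# china_map() / pyecharts Map.add() expects.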
def update_china_heal_data(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) # parse the nested JSON data into a dict ('chinaTotal', ...)
p_data = {}
#print(data['areaTree'][0]['children'][0])
for i in data['areaTree'][0]['children']: # each province
p_data[i['name']] = i['total']['confirm'] - i['total']['dead'] - i['total']['heal']
# sort the dict items by value, descending
p_data= sorted(p_data.items(), key=lambda x: x[1], reverse=True)
#print(p_data)
return p_data
def china_map(data)-> Map:
opt= [
{"min":1001,"color":'#731919'},
{"min":500,"max":1000,"color":'red'},
{"min":100,"max":499,"color":'#e26061'},
{"min":10,"max":99,"color":'#f08f7f'},
{"min":1,"max":9,"color":'#ffb86a'},
{"value":0,"color":'#ffffff'}
]
c = (
Map()
.add(
"确诊人数", data, "china", is_map_symbol_show=False,
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False,font_size=8))
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(max_=1000,is_piecewise=True,pieces=opt),
legend_opts=opts.LegendOpts(is_show=False),
#title_opts=opts.TitleOpts(title="全国疫情(2019-nCov)")
)
)
return c
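# Usage sketch (assumption: run ad hoc outside the Flask routes below) -- a pyecharts
# chart object can also be written to a standalone HTML file for a quick check, e.g.:
#   china_map(update_china_data()).render("china_map.html")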
# Fetch worldwide confirmed-case data
def update_world_data(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) # parse the nested JSON data into a dict ('chinaTotal', ...)
#print(data['areaTree'][0]['children'][0])
countryEN = []
total_confirm = []
for i in data['areaTree']:
if i['name'] != '钻石号邮轮': # skip the "Diamond Princess" cruise ship entry
if i['name'] == '日本本土': # "Japan mainland" is not a key in cn_to_en, so map it manually
countryEN.append('Japan')
total_confirm.append(i['total']['confirm'])
else:
countryEN.append(cn_to_en[i['name']])
total_confirm.append(i['total']['confirm'])
data = [list(z) for z in zip(countryEN, total_confirm)]
return data
def update_world_data1(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) # parse the nested JSON data into a dict ('chinaTotal', ...)
#print(data['areaTree'][0]['children'][0])
translate = Translator() # NOTE: Translator is never imported in this file; presumably googletrans.Translator (assumption)
country = [] # country names in Chinese
total_confirm = []
for i in data['areaTree']:
country.append(i['name'])
total_confirm.append(i['total']['confirm'])
countryEN = [] # English names via live translation
for i in country:
countryEN.append(translate.translate(i).text)
# today's data
data = [list(z) for z in zip(countryEN, total_confirm)]
return data
def world_map(data)-> Map:
opt= [
{"min":1001,"color":'#731919'},
{"min":51,"max":1000,"color":'red'},
{"min":11,"max":50,"color":'#e26061'},
{"min":6,"max":10,"color":'#f08f7f'},
{"min":1,"max":5,"color":'#ffb86a'},
{"value":0,"color":'#ffffff'}
]
c = (
Map()
.add("确诊人数", data, "world",is_map_symbol_show=False)
#.add("商家A", [list(z) for z in zip(countryEN, total_confirm)], "world")
.set_series_opts(label_opts=opts.LabelOpts(is_show=False,font_size=8),)
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(max_=1000,is_piecewise=True,pieces=opt),
legend_opts=opts.LegendOpts(is_show=False),
#title_opts=opts.TitleOpts(title="全球疫情(2019-nCov)")
)
)
return c
def kline()-> Kline:
data = get_origin_data() # nested JSON payload (includes 'chinaDayList')
# daily increase in confirmed cases
a = []
c = [x['confirm'] for x in data['chinaDayList']]
for i in range(len(c)):
if i == 0:
a.append(0)
else:
a.append(int(c[i]) - int(c[i-1]))
b = []
for i in range(len(a)):
if i == 0:
b.append([0,0,0,a[i]])
elif i == 1:
b.append([0,0,a[i-1],a[i]])
elif i == 2:
b.append([0,a[i-2],a[i-1],a[i]])
else:
b.append([a[i-3],a[i-2],a[i-1],a[i]])
c = (
Kline()
.add_xaxis([x['date'] for x in data['chinaDayList']])
.add_yaxis("kline", b)
.set_global_opts(
yaxis_opts=opts.AxisOpts(
is_scale=True,
splitarea_opts=opts.SplitAreaOpts(
is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1)
),
),
xaxis_opts=opts.AxisOpts(is_scale=True),
#title_opts=opts.TitleOpts(title="2019-nCov K线图"),
datazoom_opts=[opts.DataZoomOpts(pos_bottom="-2%",range_end=100)],
)
)
return c
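# Note on kline(): each "candle" above packs the last four daily-increase values
# [t-3, t-2, t-1, t] into the open/close/lowest/highest slots, so the K-line chart is
# a rolling view of newly confirmed cases rather than true OHLC market data.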
def get_origin_data():
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other'
r = requests.get(url)
data = json.loads(json.loads(r.text)['data'])
return data
def line_connect_null() -> Line:
data = get_origin_data() # nested JSON payload (includes 'chinaDayList')
# daily increase in confirmed cases
Dailyincrease = []
a = [x['confirm'] for x in data['chinaDayList']]
for i in range(len(a)):
if i == 0:
Dailyincrease.append(0)
else:
Dailyincrease.append(int(a[i]) - int(a[i-1]))
c = (
Line()
.add_xaxis([x['date'] for x in data['chinaDayList']]) # plain list of dates
.add_yaxis('确诊',[x['confirm'] for x in data['chinaDayList']],label_opts=opts.LabelOpts(is_show=False)) # series name + list of values
.add_yaxis('疑似',[x['suspect'] for x in data['chinaDayList']],label_opts=opts.LabelOpts(is_show=False))
.add_yaxis('治愈',[x['heal'] for x in data['chinaDayList']],label_opts=opts.LabelOpts(is_show=False))
.add_yaxis('死亡',[x['dead'] for x in data['chinaDayList']],label_opts=opts.LabelOpts(is_show=False))
.add_yaxis('每日确诊增加数',Dailyincrease,areastyle_opts=opts.AreaStyleOpts(opacity=0.5),label_opts=opts.LabelOpts(is_show=False)) # AreaStyleOpts(opacity=0.5) shades the area under the line
.set_global_opts(
#title_opts=opts.TitleOpts(title="2019-nCov"),
datazoom_opts=opts.DataZoomOpts(range_end=100),
)
)
return c
def line_heal() -> Line:
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) # parse the nested JSON data into a dict ('chinaTotal', ...)
# daily increase in confirmed cases
Dailyincrease = []
a = [x['confirm'] for x in data['chinaDayList']]
for i in range(len(a)):
if i == 0:
Dailyincrease.append(0)
else:
Dailyincrease.append(int(a[i]) - int(a[i-1]))
# daily increase in suspected cases
Dailysuspect = []
a = [x['suspect'] for x in data['chinaDayList']]
for i in range(len(a)):
if i == 0:
Dailysuspect.append(0)
else:
Dailysuspect.append(int(a[i]) - int(a[i-1]))
c = (
Line()
.add_xaxis([x['date'] for x in data['chinaDayList']]) # plain list of dates
.add_yaxis('治愈',[x['heal'] for x in data['chinaDayList']])
.add_yaxis('死亡',[x['dead'] for x in data['chinaDayList']])
.set_global_opts(
#title_opts=opts.TitleOpts(title="2019-nCov"),
datazoom_opts=opts.DataZoomOpts(range_end=100),
)
)
return c
# Statistics for countries outside China
def world_bar() -> Bar:
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) # parse the nested JSON data into a dict ('chinaTotal', ...)
country = []
numbers = []
for i in data['areaTree']:
country.append(i['name'])
numbers.append(i['total']['confirm'])
country.reverse()
numbers.reverse()
c = (
Bar()
.add_xaxis(country[:-1])
.add_yaxis("确诊人数", numbers[:-1])
.reversal_axis()
.set_series_opts(label_opts=opts.LabelOpts(position="right",color="black"))
.set_global_opts(
#title_opts=opts.TitleOpts(title="海外国家统计数据"),
yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-45,font_size=11)),
)
)
return c
# Trend for locations outside China
def other_line() -> Line:
url = 'https://services1.arcgis.com/0MSEUqKaxRlEPj5g/arcgis/rest/services/cases_time_v3/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&orderByFields=Report_Date_String%20asc&resultOffset=0&resultRecordCount=2000&cacheHint=true'
r_data = json.loads(requests.get(url).text)
data = r_data['features'] # list of daily records under 'features'
dates = []
numbers = []
for i in data:
date = time.strftime("%m.%d", time.localtime(i['attributes']['Report_Date'] / 1000))
dates.append(date)
numbers.append(i['attributes']['Other_Locations'])
c = (
Line()
.add_xaxis(dates) # plain list of dates
.add_yaxis('确诊',numbers)
.set_global_opts(
#title_opts=opts.TitleOpts(title="海外国家疫情趋势", subtitle=""),
)
)
return c
def china_online():
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) # parse the nested JSON data into a dict ('chinaTotal', ...)
# chinaTotal = data['chinaTotal'] # result is a list
# chinaAdd = data['chinaAdd']
# lastUpdateTime = data['lastUpdateTime']
return data
@app.route("/")
def index():
other_data = get_origin_data()
return render_template("index.html")
# China map data
@app.route("/map")
def get_map():
data = update_china_data()
return china_map(data).dump_options_with_quotes() # dump_options_with_quotes() is required for any chart returned to the front-end
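# Additional chart endpoints would follow the same pattern (sketch only; the route
# name below is an assumption, not part of the original file):
# @app.route("/world_map")
# def get_world_map():
#     return world_map(update_world_data()).dump_options_with_quotes()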
<filename>swagger_client/api/corporation_api.py
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class CorporationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_corporations_corporation_id(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation information # noqa: E501
Public information about a corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation information # noqa: E501
Public information about a corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v4/corporations/{corporation_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCorporationsCorporationIdOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
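# Usage sketch for the wrapper above (a minimal illustration; the corporation ID is a
# made-up placeholder and ApiClient() uses its default configuration):
#   api = CorporationApi(ApiClient())
#   corp = api.get_corporations_corporation_id(98000001)               # synchronous
#   thread = api.get_corporations_corporation_id(98000001, async=True)
#   corp = thread.get()                                                # asynchronous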
def get_corporations_corporation_id_alliancehistory(self, corporation_id, **kwargs): # noqa: E501
"""Get alliance history # noqa: E501
Get a list of all the alliances a corporation has been a member of --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_alliancehistory(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdAlliancehistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_alliancehistory_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_alliancehistory_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_alliancehistory_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get alliance history # noqa: E501
Get a list of all the alliances a corporation has been a member of --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_alliancehistory_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdAlliancehistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_alliancehistory" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_alliancehistory`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_alliancehistory`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/{corporation_id}/alliancehistory/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdAlliancehistory200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_blueprints(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation blueprints # noqa: E501
Returns a list of blueprints the corporation owns --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_blueprints(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdBlueprints200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_blueprints_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_blueprints_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_blueprints_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation blueprints # noqa: E501
Returns a list of blueprints the corporation owns --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_blueprints_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
# Copyright 2015 The Tornado Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Flexible routing implementation.
Tornado routes HTTP requests to appropriate handlers using `Router`
class implementations. The `tornado.web.Application` class is a
`Router` implementation and may be used directly, or the classes in
this module may be used for additional flexibility. The `RuleRouter`
class can match on more criteria than `.Application`, or the `Router`
interface can be subclassed for maximum customization.
`Router` interface extends `~.httputil.HTTPServerConnectionDelegate`
to provide additional routing capabilities. This also means that any
`Router` implementation can be used directly as a ``request_callback``
for `~.httpserver.HTTPServer` constructor.
`Router` subclass must implement a ``find_handler`` method to provide
a suitable `~.httputil.HTTPMessageDelegate` instance to handle the
request:
.. code-block:: python
class CustomRouter(Router):
def find_handler(self, request, **kwargs):
# some routing logic providing a suitable HTTPMessageDelegate instance
return MessageDelegate(request.connection)
class MessageDelegate(HTTPMessageDelegate):
def __init__(self, connection):
self.connection = connection
def finish(self):
self.connection.write_headers(
ResponseStartLine("HTTP/1.1", 200, "OK"),
HTTPHeaders({"Content-Length": "2"}),
b"OK")
self.connection.finish()
router = CustomRouter()
server = HTTPServer(router)
The main responsibility of `Router` implementation is to provide a
mapping from a request to `~.httputil.HTTPMessageDelegate` instance
that will handle this request. In the example above we can see that
routing is possible even without instantiating an `~.web.Application`.
For routing to `~.web.RequestHandler` implementations we need an
`~.web.Application` instance. `~.web.Application.get_handler_delegate`
provides a convenient way to create `~.httputil.HTTPMessageDelegate`
for a given request and `~.web.RequestHandler`.
Here is a simple example of how we can route to
`~.web.RequestHandler` subclasses by HTTP method:
.. code-block:: python
resources = {}
class GetResource(RequestHandler):
def get(self, path):
if path not in resources:
raise HTTPError(404)
self.finish(resources[path])
class PostResource(RequestHandler):
def post(self, path):
resources[path] = self.request.body
class HTTPMethodRouter(Router):
def __init__(self, app):
self.app = app
def find_handler(self, request, **kwargs):
handler = GetResource if request.method == "GET" else PostResource
return self.app.get_handler_delegate(request, handler, path_args=[request.path])
router = HTTPMethodRouter(Application())
server = HTTPServer(router)
`ReversibleRouter` interface adds the ability to distinguish between
the routes and reverse them to the original urls using route's name
and additional arguments. `~.web.Application` is itself an
implementation of `ReversibleRouter` class.
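For example, a named rule registered with a `ReversibleRuleRouter` can be
turned back into a concrete url (a minimal sketch; ``entry_callable`` stands
in for any valid rule target):
.. code-block:: python
    router = ReversibleRuleRouter([
        Rule(PathMatches(r"/entry/([^/]+)"), entry_callable, name="entry"),
    ])
    reversed_url = router.reverse_url("entry", "my-post")  # "/entry/my-post"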
`RuleRouter` and `ReversibleRuleRouter` are implementations of
`Router` and `ReversibleRouter` interfaces and can be used for
creating rule-based routing configurations.
Rules are instances of `Rule` class. They contain a `Matcher`, which
provides the logic for determining whether the rule is a match for a
particular request and a target, which can be one of the following.
1) An instance of `~.httputil.HTTPServerConnectionDelegate`:
.. code-block:: python
router = RuleRouter([
Rule(PathMatches("/handler"), ConnectionDelegate()),
# ... more rules
])
class ConnectionDelegate(HTTPServerConnectionDelegate):
def start_request(self, server_conn, request_conn):
return MessageDelegate(request_conn)
2) A callable accepting a single argument of `~.httputil.HTTPServerRequest` type:
.. code-block:: python
router = RuleRouter([
Rule(PathMatches("/callable"), request_callable)
])
def request_callable(request):
request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK")
request.finish()
3) Another `Router` instance:
.. code-block:: python
router = RuleRouter([
Rule(PathMatches("/router.*"), CustomRouter())
])
Of course a nested `RuleRouter` or a `~.web.Application` is allowed:
.. code-block:: python
router = RuleRouter([
Rule(HostMatches("example.com"), RuleRouter([
Rule(PathMatches("/app1/.*"), Application([(r"/app1/handler", Handler)]))),
]))
])
server = HTTPServer(router)
In the example below `RuleRouter` is used to route between applications:
.. code-block:: python
app1 = Application([
(r"/app1/handler", Handler1),
# other handlers ...
])
app2 = Application([
(r"/app2/handler", Handler2),
# other handlers ...
])
router = RuleRouter([
Rule(PathMatches("/app1.*"), app1),
Rule(PathMatches("/app2.*"), app2)
])
server = HTTPServer(router)
For more information on application-level routing see docs for `~.web.Application`.
.. versionadded:: 4.5
"""
import re
from functools import partial
from tornado import httputil
from tornado.httpserver import _CallableAdapter
from tornado.escape import url_escape, url_unescape, utf8
from tornado.log import app_log
from tornado.util import basestring_type, import_object, re_unescape, unicode_type
try:
import typing # noqa
except ImportError:
pass
class Router(httputil.HTTPServerConnectionDelegate):
"""Abstract router interface."""
def find_handler(self, request, **kwargs):
# type: (httputil.HTTPServerRequest, typing.Any)->httputil.HTTPMessageDelegate
"""Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate`
that can serve the request.
Routing implementations may pass additional kwargs to extend the routing logic.
:arg httputil.HTTPServerRequest request: current HTTP request.
:arg kwargs: additional keyword arguments passed by routing implementation.
:returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to
process the request.
"""
raise NotImplementedError()
def start_request(self, server_conn, request_conn):
return _RoutingDelegate(self, server_conn, request_conn)
class ReversibleRouter(Router):
"""Abstract router interface for routers that can handle named routes
and support reversing them to original urls.
"""
def reverse_url(self, name, *args):
"""Returns url string for a given route name and arguments
or ``None`` if no match is found.
:arg str name: route name.
:arg args: url parameters.
:returns: parametrized url string for a given route name (or ``None``).
"""
raise NotImplementedError()
class _RoutingDelegate(httputil.HTTPMessageDelegate):
def __init__(self, router, server_conn, request_conn):
self.server_conn = server_conn
self.request_conn = request_conn
self.delegate = None
self.router = router # type: Router
def headers_received(self, start_line, headers):
request = httputil.HTTPServerRequest(
connection=self.request_conn,
server_connection=self.server_conn,
start_line=start_line, headers=headers)
self.delegate = self.router.find_handler(request)
if self.delegate is None:
app_log.debug("Delegate for %s %s request not found",
start_line.method, start_line.path)
self.delegate = _DefaultMessageDelegate(self.request_conn)
return self.delegate.headers_received(start_line, headers)
def data_received(self, chunk):
return self.delegate.data_received(chunk)
def finish(self):
self.delegate.finish()
def on_connection_close(self):
self.delegate.on_connection_close()
class _DefaultMessageDelegate(httputil.HTTPMessageDelegate):
def __init__(self, connection):
self.connection = connection
def finish(self):
self.connection.write_headers(
httputil.ResponseStartLine("HTTP/1.1", 404, "Not Found"), httputil.HTTPHeaders())
self.connection.finish()
class RuleRouter(Router):
"""Rule-based router implementation."""
def __init__(self, rules=None):
"""Constructs a router from an ordered list of rules::
RuleRouter([
Rule(PathMatches("/handler"), Target),
# ... more rules
])
You can also omit explicit `Rule` constructor and use tuples of arguments::
RuleRouter([
(PathMatches("/handler"), Target),
])
`PathMatches` is a default matcher, so the example above can be simplified::
RuleRouter([
("/handler", Target),
])
In the examples above, ``Target`` can be a nested `Router` instance, an instance of
`~.httputil.HTTPServerConnectionDelegate` or an old-style callable,
accepting a request argument.
:arg rules: a list of `Rule` instances or tuples of `Rule`
constructor arguments.
"""
self.rules = [] # type: typing.List[Rule]
if rules:
self.add_rules(rules)
def add_rules(self, rules):
"""Appends new rules to the router.
:arg rules: a list of Rule instances (or tuples of arguments, which are
passed to Rule constructor).
"""
for rule in rules:
if isinstance(rule, (tuple, list)):
assert len(rule) in (2, 3, 4)
if isinstance(rule[0], basestring_type):
rule = Rule(PathMatches(rule[0]), *rule[1:])
else:
rule = Rule(*rule)
self.rules.append(self.process_rule(rule))
def process_rule(self, rule):
"""Override this method for additional preprocessing of each rule.
:arg Rule rule: a rule to be processed.
:returns: the same or modified Rule instance.
"""
return rule
def find_handler(self, request, **kwargs):
for rule in self.rules:
target_params = rule.matcher.match(request)
if target_params is not None:
if rule.target_kwargs:
target_params['target_kwargs'] = rule.target_kwargs
delegate = self.get_target_delegate(
rule.target, request, **target_params)
if delegate is not None:
return delegate
return None
def get_target_delegate(self, target, request, **target_params):
"""Returns an instance of `~.httputil.HTTPMessageDelegate` for a
Rule's target. This method is called by `~.find_handler` and can be
extended to provide additional target types.
:arg target: a Rule's target.
:arg httputil.HTTPServerRequest request: current request.
:arg target_params: additional parameters that can be useful
for `~.httputil.HTTPMessageDelegate` creation.
"""
if isinstance(target, Router):
return target.find_handler(request, **target_params)
elif isinstance(target, httputil.HTTPServerConnectionDelegate):
return target.start_request(request.server_connection, request.connection)
elif callable(target):
return _CallableAdapter(
partial(target, **target_params), request.connection
)
return None
class ReversibleRuleRouter(ReversibleRouter, RuleRouter):
"""A rule-based router that implements ``reverse_url`` method.
Each rule added to this router may have a ``name`` attribute that can be
used to reconstruct an original uri. The actual reconstruction takes place
in a rule's matcher (see `Matcher.reverse`).
"""
def __init__(self, rules=None):
self.named_rules = {} # type: typing.Dict[str]
super(ReversibleRuleRouter, self).__init__(rules)
def process_rule(self, rule):
rule = super(ReversibleRuleRouter, self).process_rule(rule)
if rule.name:
if rule.name in self.named_rules:
app_log.warning(
"Multiple handlers named %s; replacing previous value",
rule.name)
self.named_rules[rule.name] = rule
return rule
def reverse_url(self, name, *args):
if name in self.named_rules:
return self.named_rules[name].matcher.reverse(*args)
for rule in self.rules:
if isinstance(rule.target, ReversibleRouter):
reversed_url = rule.target.reverse_url(name, *args)
if reversed_url is not None:
return reversed_url
return None
class Rule(object):
"""A routing rule."""
def __init__(self, matcher, target, target_kwargs=None, name=None):
"""Constructs a Rule instance.
:arg Matcher matcher: a `Matcher` instance used for determining
whether the rule should be considered a match for a specific
request.
:arg target: a Rule's target (typically a ``RequestHandler`` or
`~.httputil.HTTPServerConnectionDelegate` subclass or even a nested `Router`,
depending on routing implementation).
:arg dict target_kwargs: a dict of parameters that can be useful
at the moment of target instantiation (for | |
"""Assortment of handy functions."""
import inspect
import numpy as np
from six.moves import map, range, zip
from functools import partial
from itertools import tee
from inspect import isgenerator
class Bunch(dict):
"""
Container object for datasets.
Dictionary-like object that exposes its keys as attributes.
Examples
--------
>>> b = Bunch(foo=42, bar=10)
>>> b == {'foo': 42, 'bar': 10}
True
>>> b.foo
42
>>> b.bar
10
>>> b['foo']
42
>>> b.baz = 61
>>> b.baz
61
>>> b['baz']
61
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
def issequence(obj):
"""
Test if an object is an iterable generator, list or tuple.
Parameters
----------
obj : object
object to test
Returns
-------
bool :
True if :code:`obj` is a tuple, list or generator only.
Examples
--------
>>> issequence([1, 2])
True
>>> issequence((1,))
True
>>> issequence((i for i in range(8)))
True
>>> issequence(np.array([1, 2, 3]))
False
"""
return inspect.isgenerator(obj) or isinstance(obj, (list, tuple))
def atleast_list(a):
"""
Promote an object to a list if not a list or generator.
Parameters
----------
a: object
any object you want to at least be a list with one element
Returns
-------
list or generator:
untouched if :code:`a` was a generator or list, otherwise :code:`[a]`.
Examples
--------
>>> a = 1.
>>> atleast_list(a)
[1.0]
>>> a = [1.]
>>> atleast_list(a)
[1.0]
"""
return a if isinstance(a, list) or isgenerator(a) else [a]
def atleast_tuple(a):
"""
Promote an object to a tuple if not a tuple or generator.
Parameters
----------
a: object
any object you want to at least be a tuple with one element
Returns
-------
tuple or generator:
untouched if :code:`a` was a generator or tuple, otherwise
:code:`(a,)`.
Examples
--------
>>> a = 1.
>>> atleast_tuple(a)
(1.0,)
>>> a = (1.,)
>>> atleast_tuple(a)
(1.0,)
"""
return a if isinstance(a, tuple) or isgenerator(a) else (a,)
def couple(f, g):
r"""
Compose two functions into a single function that returns a pair.
Given a pair of functions that take the same arguments, return a
single function that returns a pair consisting of the return values
of each function.
Notes
-----
Equivalent to::
lambda f, g: lambda *args, **kwargs: (f(*args, **kwargs),
g(*args, **kwargs))
Examples
--------
>>> f = lambda x: 2*x**3
>>> df = lambda x: 6*x**2
>>> f_new = couple(f, df)
>>> f_new(5)
(250, 150)
"""
def coupled(*args, **kwargs):
return f(*args, **kwargs), g(*args, **kwargs)
return coupled
def decouple(fn):
"""
Inverse operation of couple.
Create two functions of one argument and one return from a function that
takes two arguments and has two returns
Examples
--------
>>> h = lambda x: (2*x**3, 6*x**2)
>>> f, g = decouple(h)
>>> f(5)
250
>>> g(5)
150
"""
def fst(*args, **kwargs):
return fn(*args, **kwargs)[0]
def snd(*args, **kwargs):
return fn(*args, **kwargs)[1]
return fst, snd
def nwise(iterable, n):
r"""
Sliding window iterator.
Iterator that acts like a sliding window of size `n`; slides over
some iterable `n` items at a time. If iterable has `m` elements,
this function will return an iterator over `m-n+1` tuples.
Parameters
----------
iterable : iterable
An iterable object.
n : int
Window size.
Returns
-------
iterator of tuples.
Iterator of size `n` tuples
Notes
-----
First `n` iterators are created::
iters = tee(iterable, n)
Next, iterator `i` is advanced `i` times::
for i, it in enumerate(iters):
for _ in range(i):
next(it, None)
Finally, the iterators are zipped back up again::
return zip(*iters)
Examples
--------
>>> a = [2, 5, 7, 4, 2, 8, 6]
>>> list(nwise(a, n=3))
[(2, 5, 7), (5, 7, 4), (7, 4, 2), (4, 2, 8), (2, 8, 6)]
>>> pairwise = partial(nwise, n=2)
>>> list(pairwise(a))
[(2, 5), (5, 7), (7, 4), (4, 2), (2, 8), (8, 6)]
>>> list(nwise(a, n=1))
[(2,), (5,), (7,), (4,), (2,), (8,), (6,)]
>>> list(nwise(a, n=7))
[(2, 5, 7, 4, 2, 8, 6)]
.. todo::
These should probably raise `ValueError`...
>>> list(nwise(a, 8))
[]
>>> list(nwise(a, 9))
[]
A sliding window of size `n` over a list of `m` elements
gives `m-n+1` windows
>>> len(a) - len(list(nwise(a, 2))) == 1
True
>>> len(a) - len(list(nwise(a, 3))) == 2
True
>>> len(a) - len(list(nwise(a, 7))) == 6
True
"""
iters = tee(iterable, n)
for i, it in enumerate(iters):
for _ in range(i):
next(it, None)
return zip(*iters)
def scalar_reshape(a, newshape, order='C'):
"""
Reshape, but also return scalars or empty lists.
Identical to `numpy.reshape` except in the case where `newshape` is
the empty tuple, in which case we return a scalar instead of a
0-dimensional array.
Examples
--------
>>> a = np.arange(6)
>>> np.array_equal(np.reshape(a, (3, 2)), scalar_reshape(a, (3, 2)))
True
>>> scalar_reshape(np.array([3.14]), newshape=())
3.14
>>> scalar_reshape(np.array([2.71]), newshape=(1,))
array([ 2.71])
>>> scalar_reshape(np.array([]), newshape=(0,))
[]
"""
if newshape == ():
return np.asscalar(a)
if newshape == (0,):
return []
return np.reshape(a, newshape, order)
def flatten(arys, returns_shapes=True, hstack=np.hstack, ravel=np.ravel,
shape=np.shape):
"""
Flatten a potentially recursive list of multidimensional objects.
.. note::
Not to be confused with `np.ndarray.flatten()` (a more befitting name
might be `chain` or `stack`, or maybe something else entirely, since
this function does more than either `concatenate` or `np.flatten`
alone; rather, it is the composition of the former with the latter).
Parameters
----------
arys : list of objects
One or more input arrays of possibly heterogenous shapes and
sizes.
returns_shapes : bool, optional
Default is `True`. If `True`, the tuple `(flattened, shapes)` is
returned, otherwise only `flattened` is returned.
hstack : callable, optional
a function that implements horizontal stacking
ravel : callable, optional
a function that flattens the object
shape : callable, optional
a function that returns the shape of the object
Returns
-------
flattened,[shapes] : {1dobject, list of tuples}
Return the flat (1d) object resulting from the concatenation of
flattened multidimensional objects. When `returns_shapes` is `True`,
return a list of tuples containing also the shapes of each array as the
second element.
See Also
--------
revrand.utils.unflatten : its inverse
Examples
--------
>>> a = 9
>>> b = np.array([4, 7, 4, 5, 2])
>>> c = np.array([[7, 3, 1],
... [2, 6, 6]])
>>> d = np.array([[[6, 5, 5],
... [1, 6, 9]],
... [[3, 9, 1],
... [9, 4, 1]]])
>>> flatten([a, b, c, d]) # doctest: +NORMALIZE_WHITESPACE
(array([9, 4, 7, 4, 5, 2, 7, 3, 1, 2, 6, 6, 6, 5, 5, 1, 6, 9, 3, 9,
1, 9, 4, 1]), [(), (5,), (2, 3), (2, 2, 3)])
Note that scalars and 0-dimensional arrays are treated differently
from 1-dimensional singleton arrays.
>>> flatten([3.14, np.array(2.71), np.array([1.61])])
... # doctest: +NORMALIZE_WHITESPACE
(array([ 3.14, 2.71, 1.61]), [(), (), (1,)])
>>> flatten([a, b, c, d], returns_shapes=False)
... # doctest: +NORMALIZE_WHITESPACE
array([9, 4, 7, 4, 5, 2, 7, 3, 1, 2, 6, 6, 6, 5, 5, 1, 6, 9, 3, 9,
1, 9, 4, 1])
>>> w, x, y, z = unflatten(*flatten([a, b, c, d]))
>>> w == a
True
>>> np.array_equal(x, b)
True
>>> np.array_equal(y, c)
True
>>> np.array_equal(z, d)
True
>>> flatten([3.14, [np.array(2.71), np.array([1.61])]])
... # doctest: +NORMALIZE_WHITESPACE
(array([ 3.14, 2.71, 1.61]), [(), [(), (1,)]])
"""
if issequence(arys) and len(arys) > 0:
flat = partial(flatten,
returns_shapes=True,
hstack=hstack,
ravel=ravel,
shape=shape
)
flat_arys, shapes = zip(*map(flat, arys))
flat_ary = hstack(flat_arys)
shapes = list(shapes)
else:
flat_ary = ravel(arys)
shapes = shape(arys)
return (flat_ary, shapes) if returns_shapes else flat_ary
def unflatten(ary, shapes, reshape=scalar_reshape):
r"""
Inverse operation of flatten.
Given a flat (1d) array, and a list of shapes (represented as tuples),
return a list of ndarrays with the specified shapes.
Parameters
----------
ary : a 1d array
A flat (1d) array.
shapes : list of tuples
A list of ndarray shapes (tuple of array dimensions)
Returns
-------
list of ndarrays
A list of ndarrays with the specified shapes.
See Also
--------
revrand.utils.flatten : its inverse
Notes
-----
Equivalent to::
lambda ary, shapes, order='C': \
map(partial(custom_reshape, order=order),
np.hsplit(ary, np.cumsum(map(partial(np.prod, dtype=int),
shapes))), shapes)
Examples
--------
>>> a = np.array([7, 4, 5, 8, 9, 1, 4, 2, 5, 3, 4, 3])
>>> list(unflatten(a, [(1,), (1,), (4,), (2, 3)]))
... # doctest: +NORMALIZE_WHITESPACE
[array([7]), array([4]), array([5, 8, 9, | |
= name
self._home = home
self._owned = owned
self._transaction = home._transaction
self._newObjectResources = {}
self._cachedObjectResources = {}
self._removedObjectResources = set()
self._index = None # Derived classes need to set this
self._invites = None # Derived classes need to set this
self._renamedName = realName
if self._home._notifiers:
self._notifiers = dict([(factory_name, notifier.clone(self),) for factory_name, notifier in self._home._notifiers.items()])
else:
self._notifiers = None
@classmethod
def objectWithName(cls, home, name, owned):
return cls(name, home, owned) if home._path.child(name).isdir() else None
@property
def _path(self):
return self._home._path.child(self._name)
@property
def _txn(self):
return self._transaction
def directoryService(self):
return self._transaction.store().directoryService()
def retrieveOldIndex(self):
"""
Retrieve the old Index object.
"""
return self._index._oldIndex
def retrieveOldInvites(self):
"""
Retrieve the old Invites DB object.
"""
return self._invites._oldInvites
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._path.path)
def name(self):
if self._renamedName is not None:
return self._renamedName
return self._path.basename()
def shareMode(self):
"""
Stub implementation of L{ICalendar.shareMode}; always returns L{_BIND_MODE_OWN}.
"""
return _BIND_MODE_OWN
def effectiveShareMode(self):
"""
Stub implementation of L{ICalendar.effectiveShareMode}; always returns L{_BIND_MODE_OWN}.
"""
return _BIND_MODE_OWN
def owned(self):
return self._owned
_renamedName = None
@writeOperation
def rename(self, name):
oldName = self.name()
self._renamedName = name
self._home._newChildren[name] = self
self._home._removedChildren.add(oldName)
def doIt():
self._path.moveTo(self._path.sibling(name))
return lambda : None # FIXME: revert
self._transaction.addOperation(doIt, "rename home child %r -> %r" %
(oldName, name))
self.retrieveOldIndex().bumpRevision()
self.notifyChanged()
@writeOperation
def remove(self):
def do(transaction=self._transaction):
childPath = self._path
for i in xrange(1000):
trash = childPath.sibling("._del_%s_%d" % (childPath.basename(), i))
if not trash.exists():
break
else:
raise InternalDataStoreError("Unable to create trash target for child at %s" % (childPath,))
try:
childPath.moveTo(trash)
except (IOError, OSError), e:
if e.errno == ENOENT:
raise NoSuchHomeChildError(self._name)
raise
def cleanup():
try:
trash.remove()
self.properties()._removeResource()
except Exception, e:
self.log.error("Unable to delete trashed child at %s: %s" % (trash.fp, e))
self._transaction.addOperation(cleanup, "remove child backup %r" % (self._name,))
def undo():
trash.moveTo(childPath)
return undo
# FIXME: direct tests
self._transaction.addOperation(
do, "prepare child remove %r" % (self._name,)
)
self.notifyChanged()
def ownerHome(self):
return self._home
def viewerHome(self):
return self._home
def setSharingUID(self, uid):
self.properties()._setPerUserUID(uid)
def objectResources(self):
"""
Return a list of object resource objects.
"""
return [self.objectResourceWithName(name)
for name in self.listObjectResources()]
def objectResourcesWithNames(self, names):
"""
Return a list of the specified object resource objects.
"""
results = []
for name in names:
obj = self.objectResourceWithName(name)
if obj is not None:
results.append(obj)
return results
def listObjectResources(self):
"""
Return a list of object resource names.
"""
return sorted((
name
for name in (
set(self._newObjectResources.iterkeys()) |
set(p.basename() for p in self._path.children()
if not p.basename().startswith(".") and
p.isfile()) -
set(self._removedObjectResources)
))
)
def countObjectResources(self):
return len(self.listObjectResources())
def objectResourceWithName(self, name):
if name in self._removedObjectResources:
return None
if name in self._newObjectResources:
return self._newObjectResources[name]
if name in self._cachedObjectResources:
return self._cachedObjectResources[name]
objectResourcePath = self._path.child(name)
if objectResourcePath.isfile():
obj = self._objectResourceClass(name, self)
self._cachedObjectResources[name] = obj
return obj
else:
return None
def objectResourceWithUID(self, uid):
rname = self.retrieveOldIndex().resourceNameForUID(uid)
if rname and rname not in self._removedObjectResources:
return self.objectResourceWithName(rname)
return None
@writeOperation
def createObjectResourceWithName(self, name, component, metadata=None):
"""
Create a new resource with component data and optional metadata. We create the
python object using the metadata then create the actual store object with setComponent.
"""
if name.startswith("."):
raise ObjectResourceNameNotAllowedError(name)
if len(name) > 255:
raise ObjectResourceNameNotAllowedError(name)
objectResourcePath = self._path.child(name)
if objectResourcePath.exists():
raise ObjectResourceNameAlreadyExistsError(name)
objectResource = self._objectResourceClass(name, self, metadata)
objectResource.setComponent(component, inserting=True)
self._cachedObjectResources[name] = objectResource
# Note: setComponent triggers a notification, so we don't need to
# call notify( ) here like we do for object removal.
return objectResource
def removedObjectResource(self, child):
self.retrieveOldIndex().deleteResource(child.name())
self._removedObjectResources.add(child.name())
self.notifyChanged()
def syncToken(self):
try:
urnuuid = str(self.properties()[PropertyName.fromElement(ResourceID)].children[0])
except KeyError:
urnuuid = uuid.uuid4().urn
self.properties()[PropertyName(*ResourceID.qname())] = ResourceID(HRef.fromString(urnuuid))
return succeed("%s_%s" % (urnuuid[9:], self.retrieveOldIndex().lastRevision()))
def objectResourcesSinceToken(self, token):
raise NotImplementedError()
def resourceNamesSinceToken(self, token):
return succeed(self.retrieveOldIndex().whatchanged(token))
def objectResourcesHaveProperties(self):
"""
So filestore objects do need to support properties.
"""
return True
# FIXME: property writes should be a write operation
@cached
def properties(self):
# FIXME: needs direct tests - only covered by store tests
# FIXME: transactions
propStoreClass = self._home._dataStore._propertyStoreClass
props = propStoreClass(self._home.uid(), lambda: self._path)
self.initPropertyStore(props)
self._transaction.addOperation(props.flush,
"flush object resource properties")
return props
def initPropertyStore(self, props):
"""
A hook for subclasses to override in order to set up their property
store after it's been created.
@param props: the L{PropertyStore} from C{properties()}.
"""
pass
def addNotifier(self, factory_name, notifier):
if self._notifiers is None:
self._notifiers = {}
self._notifiers[factory_name] = notifier
def getNotifier(self, factory_name):
return self._notifiers.get(factory_name)
def notifierID(self):
return (self.ownerHome()._notifierPrefix, "%s/%s" % (self.ownerHome().uid(), self.name(),),)
def parentNotifierID(self):
return self.ownerHome().notifierID()
@inlineCallbacks
def notifyChanged(self):
"""
Trigger a notification of a change
"""
# Only send one set of change notifications per transaction
if self._notifiers and not self._transaction.isNotifiedAlready(self):
# cache notifiers run in post commit
notifier = self._notifiers.get("cache", None)
if notifier:
self._transaction.postCommit(notifier.notify)
# push notifiers add their work items immediately
notifier = self._notifiers.get("push", None)
if notifier:
yield notifier.notify(self._transaction)
self._transaction.notificationAddedForObject(self)
@inlineCallbacks
def sharingInvites(self):
"""
Stub for interface-compliance tests.
"""
yield None
returnValue([])
class CommonObjectResource(FileMetaDataMixin, FancyEqMixin):
"""
@ivar _path: The path of the file on disk
@type _path: L{FilePath}
"""
log = Logger()
compareAttributes = (
"_name",
"_parentCollection",
)
def __init__(self, name, parent, metadata=None):
self._name = name
self._parentCollection = parent
self._transaction = parent._transaction
self._objectText = None
@property
def _path(self):
return self._parentCollection._path.child(self._name)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._path.path)
@property
def _txn(self):
return self._transaction
def transaction(self):
return self._transaction
def directoryService(self):
return self._transaction.store().directoryService()
@writeOperation
def setComponent(self, component, inserting=False):
raise NotImplementedError
def component(self):
raise NotImplementedError
def remove(self):
# FIXME: test for undo
objectResourcePath = self._path
def do():
objectResourcePath.remove()
return lambda: None
self._transaction.addOperation(do, "remove object resource object %r" % (self._name,))
self._parentCollection.removedObjectResource(self)
purge = remove
def _text(self):
raise NotImplementedError
def uid(self):
raise NotImplementedError
@cached
def properties(self):
home = self._parentCollection._home
uid = home.uid()
if self._parentCollection.objectResourcesHaveProperties():
propStoreClass = home._dataStore._propertyStoreClass
props = propStoreClass(uid, lambda : self._path)
else:
props = NonePropertyStore(uid)
self.initPropertyStore(props)
self._transaction.addOperation(props.flush, "object properties flush")
return props
def initPropertyStore(self, props):
"""
A hook for subclasses to override in order to set up their property
store after it's been created.
@param props: the L{PropertyStore} from C{properties()}.
"""
pass
class CommonStubResource(object):
"""
Just enough resource to keep the collection sql DB classes going.
"""
def __init__(self, resource):
self.resource = resource
self.fp = self.resource._path
def bumpSyncToken(self, reset=False):
# FIXME: needs direct tests
return self.resource._updateSyncToken(reset)
def initSyncToken(self):
# FIXME: needs direct tests
self.bumpSyncToken(True)
class NotificationCollection(CommonHomeChild):
"""
File-based implementation of L{INotificationCollection}.
"""
implements(INotificationCollection)
def __init__(self, name, parent, realName=None):
"""
        Initialize a notification collection pointing at a path on disk.
@param name: the subdirectory of parent where this notification collection
resides.
@type name: C{str}
@param parent: the home containing this notification collection.
@type parent: L{CommonHome}
"""
super(NotificationCollection, self).__init__(name, parent, realName)
self._index = NotificationIndex(self)
self._invites = None
self._objectResourceClass = NotificationObject
@classmethod
def notificationsFromHome(cls, txn, home):
notificationCollectionName = "notification"
if not home._path.child(notificationCollectionName).isdir():
notifications = cls._create(txn, home, notificationCollectionName)
else:
notifications = cls(notificationCollectionName, home)
return notifications
@classmethod
def _create(cls, txn, home, collectionName):
# FIXME: this is a near-copy of CommonHome.createChildWithName.
temporary = hidden(home._path.child(collectionName).temporarySibling())
temporary.createDirectory()
temporaryName = temporary.basename()
c = cls(temporary.basename(), home)
def do():
childPath = home._path.child(collectionName)
temporary = childPath.sibling(temporaryName)
try:
props = c.properties()
temporary.moveTo(childPath)
c._name = collectionName
# FIXME: _lots_ of duplication of work here.
props.flush()
except (IOError, OSError), e:
if e.errno == EEXIST and childPath.isdir():
raise HomeChildNameAlreadyExistsError(collectionName)
raise
# FIXME: direct tests, undo for index creation
# Return undo
return lambda: home._path.child(collectionName).remove()
txn.addOperation(do, "create notification child %r" % (collectionName,))
return c
notificationObjects = CommonHomeChild.objectResources
listNotificationObjects = CommonHomeChild.listObjectResources
notificationObjectWithName = CommonHomeChild.objectResourceWithName
def notificationObjectWithUID(self, uid):
name = uid + ".xml"
return self.notificationObjectWithName(name)
def writeNotificationObject(self, uid, notificationtype, notificationdata):
name = uid + ".xml"
if name.startswith("."):
raise ObjectResourceNameNotAllowedError(name)
objectResource = NotificationObject(name, self)
objectResource.setData(uid, notificationtype, notificationdata)
self._cachedObjectResources[name] = objectResource
# Update database
self.retrieveOldIndex().addOrUpdateRecord(NotificationRecord(uid, name, notificationtype))
self.notifyChanged()
@writeOperation
def removeNotificationObjectWithName(self, name):
if name.startswith("."):
raise NoSuchObjectResourceError(name)
self.retrieveOldIndex().removeRecordForName(name)
objectResourcePath = self._path.child(name)
if objectResourcePath.isfile():
self._removedObjectResources.add(name)
# FIXME: test for undo
def do():
objectResourcePath.remove()
return lambda: None
self._transaction.addOperation(do, "remove object resource object %r" %
(name,))
self.notifyChanged()
else:
raise NoSuchObjectResourceError(name)
@writeOperation
def removeNotificationObjectWithUID(self, uid):
name = uid + ".xml"
self.removeNotificationObjectWithName(name)
class NotificationObject(CommonObjectResource):
"""
"""
implements(INotificationObject)
def __init__(self, name, notifications):
super(NotificationObject, self).__init__(name, notifications)
self._uid = name[:-4]
def notificationCollection(self):
return self._parentCollection
def created(self):
if not self._path.exists():
from twisted.internet import reactor
return int(reactor.seconds())
return super(NotificationObject, self).created()
def modified(self):
if not self._path.exists():
from twisted.internet import reactor
return int(reactor.seconds())
        return super(NotificationObject, self).modified()
'''
Helper functions to read through raw data for each corpus.
Each is a generator that yields a single dataset and metadata in the form:
{
'df'
'locator',
'dataset_id'
}
'''
import os
from os import listdir
from os.path import join
from collections import OrderedDict
import argparse
import gzip
import json
import chardet
import traceback
import itertools
import numpy as np
import pandas as pd
from .general_helpers import clean_chunk
from .type_detection import detect_field_type, data_type_to_general_type, data_types, general_types
raw_data_dir = os.environ['RAW_DIR']
CHUNK_SIZE = 500
all_corpus = ['plotly','manyeyes','test','opendata'] + ['webtables{}'.format(i) for i in range(10)]
data_dirs = { x: join(raw_data_dir, 'webtables', x) if x.startswith('webtables') else join(raw_data_dir, x) for x in all_corpus}
def listdir_not_hidden(path):
dirlist = listdir(path)
return filter(lambda x: not x.startswith('.'), dirlist) if dirlist else []
def plotly_file_iter(file_path):
# large file, read in chunks
raw_df_chunks = pd.read_csv(
file_path,
sep='\t',
usecols=['fid', 'table_data', 'layout', 'chart_data'],
error_bad_lines=False,
warn_bad_lines=False,
chunksize=CHUNK_SIZE,
encoding='utf-8'
)
for chunk_num, chunk in enumerate(raw_df_chunks):
chunk = clean_chunk(chunk)
for row in chunk.iterrows():
yield row
def extract_plotly(table_data, locator, dataset_id, exact_num_fields=None, min_fields=None, max_fields=None, valid_fields=None):
#table_data = chart_obj.table_data
fields = table_data[list(table_data.keys())[0]]['cols']
sorted_fields = sorted(fields.items(), key=lambda x: x[1]['order'])
num_fields = len(sorted_fields)
if exact_num_fields:
if num_fields != exact_num_fields: return
if min_fields:
if num_fields < min_fields: return
if max_fields:
if num_fields > max_fields: return
data_as_dict = OrderedDict()
for k, v in sorted_fields:
data_as_dict[k] = pd.Series(v['data'])
df = pd.DataFrame(data_as_dict)
# If specified, only return the valid fields
if valid_fields is not None:
df = df.iloc[:,valid_fields]
result = {
'df': df,
'dataset_id': dataset_id,
'locator': locator
}
return result
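# Shape sketch (illustrative, not from the original corpus) of the table_data
# blob consumed by extract_plotly above: a dict keyed by an arbitrary table id,
# whose 'cols' entry maps field names to their data and column order, e.g.
#
#   {'table_1': {'cols': {
#       'x': {'data': [1, 2, 3], 'order': 0},
#       'y': {'data': [4, 5, 6], 'order': 1}}}}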
def get_plotly_dfs(limit=None, exact_num_fields=None, min_fields=None, max_fields=None):
corpus = 'plotly'
base_dir = data_dirs[corpus]
files = [ f for f in listdir_not_hidden(base_dir) if f.endswith('.tsv') ]
for f in files[:limit]:
file_path = join(data_dirs[corpus], f)
plotly_row_iter = plotly_file_iter(file_path)
for chart_num, chart_obj in plotly_row_iter:
df = extract_plotly(chart_obj.table_data, f, chart_obj.fid, exact_num_fields, min_fields, max_fields)
if df is not None:
yield df
def load_manyeyes(full_file_path, locator, dataset_id, exact_num_fields=None, min_fields=None, max_fields=None, valid_fields=None):
try:
df = pd.read_csv(
full_file_path,
error_bad_lines=False,
warn_bad_lines=False,
sep='\t',
encoding='utf-8'
)
num_fields = len(df.columns)
# If specified, only return the valid fields
if valid_fields is not None:
df = df.iloc[:,valid_fields]
if exact_num_fields:
if num_fields != exact_num_fields: return
if min_fields:
if num_fields < min_fields: return
if max_fields:
if num_fields > max_fields: return
result = {
'df': df,
'dataset_id': dataset_id,
'locator': locator
}
return result
except Exception as e:
#print("Exception loading manyeyes data", e)
return
def get_manyeyes_dfs(exact_num_fields=None, min_fields=None, max_fields=None):
corpus='manyeyes'
base_dir = data_dirs[corpus]
files = []
for year_dir in listdir_not_hidden(base_dir):
for month_dir in listdir_not_hidden(join(base_dir, year_dir)):
month_files = listdir_not_hidden(join(base_dir, year_dir, month_dir))
files.append([ year_dir, month_dir, month_files ])
for (year_dir, month_dir, month_files) in files:
for i, file_name in enumerate(month_files):
locator = join(year_dir, month_dir, file_name)
full_file_path = join(base_dir, year_dir, month_dir, file_name)
dataset_id = file_name
df = load_manyeyes(full_file_path, locator, dataset_id, exact_num_fields, min_fields, max_fields)
if df:
yield df
else:
continue
def webtables_iter(path):
# generate the next line of json(table)
with gzip.open(path, 'rb') as f_in:
        iter_count = 0  # only count the number of successfully yielded dataframes
for line_count, dataset in enumerate(f_in):
try:
data = json.loads(dataset.decode('utf-8'))
yield (iter_count, data)
iter_count+=1
except UnicodeDecodeError:
encoding = chardet.detect(dataset)['encoding']
try:
data = json.loads(dataset.decode(encoding))
yield (iter_count, data)
iter_count+=1
except Exception as e:
#print('Cannot parse:', e)
continue
continue
def extract_webtables(data, locator, dataset_id=None, exact_num_fields=None, min_fields=None, max_fields=None, valid_fields=None, line_no=0):
# if dataset_id is set, only extract if there's a match
try:
        # webtables are not uniquely identified by pageTitle + tableNum.
        # To distinguish between tables, add an index based on the table's
        # location in the containing file.
if data['hasHeader'] and (data['headerPosition'] == 'FIRST_ROW'):
d_id = '{}-{}-{}'.format(line_no, data['pageTitle'], data['tableNum'])
# table name not matching
if dataset_id is not None and d_id != dataset_id:
return
header_row_index = data.get('headerRowIndex', 0)
data_as_dict = OrderedDict()
for raw_cols in data['relation']:
header_row = raw_cols[header_row_index]
raw_cols.pop(header_row_index)
parsed_values = pd.Series([ None if (v == '-') else v for v in raw_cols ])
try:
parsed_values = pd.to_numeric(parsed_values, errors='raise')
except:
#print('CAN"T PARASE')
pass
#parsed_values = parsed_values.replace(value='-', None)
data_as_dict[header_row] = parsed_values
df = pd.DataFrame(data_as_dict)
num_fields = len(df.columns)
if exact_num_fields:
if num_fields != exact_num_fields: return
if min_fields:
if num_fields < min_fields: return
if max_fields:
if num_fields > max_fields: return
# If specified, only return the valid fields
if valid_fields is not None:
df = df.iloc[:,valid_fields]
result = {
'df': df,
'dataset_id': d_id,
'locator': locator
}
return result
else:
return
except Exception as e:
print("Exception in table extraction: ",e)
return
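# Shape sketch (illustrative) of one webtables JSON line consumed by
# extract_webtables above; 'relation' is column-major, with the header cell of
# each column sitting at headerRowIndex:
#
#   {"hasHeader": true, "headerPosition": "FIRST_ROW", "headerRowIndex": 0,
#    "pageTitle": "Example page", "tableNum": 0,
#    "relation": [["year", "2001", "2002"], ["sales", "10", "12"]]}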
def get_webtables_dfs(detailed_name, exact_num_fields=None, min_fields=None, max_fields=None):
corpus = detailed_name
base_dir = data_dirs[corpus]
files = []
for sub_dir in listdir_not_hidden(base_dir):
if sub_dir.endswith(tuple(['.gz', '.lst', '.html'])): continue
json_files = listdir_not_hidden(join(base_dir, sub_dir, 'warc'))
files.append([ base_dir, sub_dir, json_files ])
for (base_dir, sub_dir, json_files) in files:
for i, file_name in enumerate(json_files):
full_file_path = join(base_dir, sub_dir, 'warc', file_name)
locator = join(sub_dir, 'warc', file_name)
w_iter = webtables_iter(full_file_path)
for idx, data in w_iter:
df = extract_webtables(data, locator, None, exact_num_fields, min_fields, max_fields, line_no = idx)
if df is not None:
yield df
def load_opendata(full_dataset_path, locator, dataset_id, exact_num_fields=None, min_fields=None, max_fields=None, valid_fields=None):
engine = 'c'
encoding = 'utf-8'
sep=','
attempts = 2
while attempts > 0: # don't try forever...
try:
df = pd.read_csv(
full_dataset_path,
engine=engine, # https://github.com/pandas-dev/pandas/issues/11166
error_bad_lines=False,
warn_bad_lines=False,
encoding=encoding,
sep=sep
)
num_fields = len(df.columns)
if num_fields == 1 and sep != ':':
if sep == ',': sep=';'
elif sep == ';': sep='\t'
elif sep == '\t': sep=':'
attempts -=1
elif num_fields == 1 and sep == ':':
with open(full_dataset_path, 'r') as f:
head = [next(f) for x in range(100)]
head = ''.join(head)
for t in [ '<body>', 'html', 'DOCTYPE' ]:
if t in head:
print('is html')
return
                # Single-column, non-HTML file: return it as a one-field dataset
                return {'df': df, 'dataset_id': dataset_id, 'locator': locator}
else:
if exact_num_fields:
if num_fields != exact_num_fields: return #continue
if max_fields:
if num_fields > max_fields: return #continue
# If specified, only return the valid fields
if valid_fields is not None:
df = df.iloc[:,valid_fields]
result = {
'df': df,
'dataset_id': dataset_id,
'locator': locator
}
return result
#yield result
#break
except UnicodeDecodeError as ude:
#print("Endcoding error:", encoding, ude)
encoding = 'latin-1'
attempts -= 1
except pd.errors.ParserError as cpe:
#print('Engeine error:', cpe)
engine = 'python'
attempts -= 1
except Exception as e:
#print('Exception:', e)
return
def get_opendata_dfs(exact_num_fields=None, min_fields=None, max_fields=None, valid_fields=None):
corpus = 'opendata'
base_dir = data_dirs[corpus]
files = []
for portal_dir in listdir_not_hidden(base_dir):
full_portal_dir = join(base_dir, portal_dir)
for dataset_id_dir in listdir_not_hidden(full_portal_dir):
full_dataset_id_dir = join(full_portal_dir, dataset_id_dir)
for dataset_name in listdir_not_hidden(full_dataset_id_dir):
full_dataset_path = join(full_dataset_id_dir, dataset_name)
locator = join(portal_dir, dataset_id_dir)
dataset_id = dataset_name
df = load_opendata(full_dataset_path, locator, dataset_id, exact_num_fields, min_fields, max_fields)
if df:
yield df
##################################################################
# Filtered iterators (based on the header_iter passed in)
##################################################################
def get_opendata_filtered_dfs(header_iter, exact_num_fields=None, min_fields=None, max_fields=None):
corpus = 'opendata'
base_dir = data_dirs[corpus]
for next_line in header_iter:
if next_line == 'EOF':
return
idx, row = next_line
locator = row['locator']
dataset_id = row['dataset_id']
fields = eval(row['field_list']) #convert string to list
full_dataset_path = join(base_dir, locator, dataset_id)
df = load_opendata(full_dataset_path, locator, dataset_id, exact_num_fields, min_fields, max_fields, fields)
if df:
yield df
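# The header_iter rows consumed by the filtered iterators are (index, row)
# pairs where row behaves like a dict / pandas Series with at least these keys
# (values below are made up for illustration):
#
#   {'locator': 'portal_x/dataset_123', 'dataset_id': 'table.csv',
#    'field_list': '[0, 2, 5]'}   # field_list arrives as a string, hence eval()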
def get_webtables_filterd_dfs(corpus, header_iter, exact_num_fields=None, min_fields=None, max_fields=None):
base_dir = data_dirs[corpus] # webtables0/ webtables1 ...
row_idx, row = next(header_iter)
prev_locator = row['locator']
prev_dataset_id = row['dataset_id']
prev_idx = int(prev_dataset_id.split('-')[0])
w_iter = webtables_iter(join(base_dir, prev_locator))
_, data = next(itertools.islice(w_iter, prev_idx, None))
df = extract_webtables(data, prev_locator, prev_dataset_id, exact_num_fields, min_fields, max_fields, list(eval(row['field_list'])), line_no=prev_idx)
if df is not None:
yield df
for header_line in header_iter:
if header_line =='EOF':
return
_, row = header_line
locator, dataset_id = row['locator'], row['dataset_id']
new_idx = int(dataset_id.split('-')[0])
#print("new_idx", new_idx)
if locator == prev_locator:
offset = new_idx-prev_idx-1
else:
w_iter = webtables_iter(join(base_dir, locator))
offset = new_idx
#print('offset', offset)
_, data = next(itertools.islice(w_iter, offset, None))
df = extract_webtables(data, locator, dataset_id, exact_num_fields, min_fields, max_fields, list(eval(row['field_list'])), line_no=new_idx)
if df is not None:
yield df
# update prev
prev_locator = locator
prev_idx = new_idx
def get_manyeyes_filtered_dfs(header_iter, exact_num_fields=None, min_fields=None, max_fields=None):
corpus='manyeyes'
base_dir = data_dirs[corpus]
for next_line in header_iter:
if next_line == 'EOF':
return
idx, row = next_line
locator = row['locator']
fields = eval(row['field_list']) #convert string to list
full_file_path = join(base_dir, locator)
dataset_id = locator.split('/')[-1]
df = load_manyeyes(full_file_path, locator, dataset_id, exact_num_fields, min_fields, max_fields, fields)
if df:
yield df
else:
continue
def get_plotly_filtered_dfs(header_iter, limit=None, exact_num_fields=None, min_fields=None, max_fields=None):
corpus = 'plotly'
base_dir = data_dirs[corpus]
idx, row = next(header_iter)
locator_buff = [row['locator']] # pending locators
current_dataset_id = row['dataset_id']
current_fields = list(eval(row['field_list']))
#for f in locator_buff:
while(len(locator_buff)>0):
f = locator_buff.pop(0)
        file_path = join(base_dir, f)
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import pymel.core as pm
import mgear.rigbits.sdk_io as sdk_io
import mgear.core.pickWalk as pickWalk
SDK_ANIMCURVES_TYPE = ("animCurveUA", "animCurveUL", "animCurveUU")
# reload(sdk_io)
# ================================================= #
# MATH
# ================================================= #
def next_biggest(target, in_list):
"""
Returns the next highest number in the in_list.
    If target is greater than the last number in in_list, will return the last
    item in the list.
"""
next_highest = None
for item in in_list:
if item > target:
next_highest = item
break
if next_highest is None:
next_highest = in_list[-1]
return next_highest
def next_smallest(target, in_list):
"""
    Returns the next lowest number in the in_list.
    If target is smaller than the first number in in_list, will return the first
    item in the list.
"""
next_lowest = None
for item in in_list[::-1]:
if item < target:
next_lowest = item
break
if next_lowest is None:
next_lowest = in_list[0]
return next_lowest
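# Illustrative sketch (not part of the original module): given a sorted list of
# driver key values, the two helpers above pick the neighbouring keys around a
# current driver value.
#
#   keys = [0.0, 5.0, 10.0]
#   next_biggest(3.0, keys)     # -> 5.0
#   next_biggest(12.0, keys)    # -> 10.0 (falls back to the last item)
#   next_smallest(7.0, keys)    # -> 5.0
#   next_smallest(-1.0, keys)   # -> 0.0  (falls back to the first item)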
# ================================================= #
# SELECTION
# ================================================= #
def select_all(mode):
"""
Select all the Driver Ctls, Anim Ctls
Joints or SDK Nodes in the scene.
Arguments:
mode (str) - drv : Driver Ctls
- anim : Anim Ctls
- jnts : Joints
- nodes : SDK Nodes
Returns:
None
"""
pm.select(clear=True)
# Driver Ctl and Anim Ctl mode
if mode == "drv" or mode == "anim":
# Setting the Attr to look for on nodes.
attr = "is_SDK" if mode == "drv" else "is_tweak"
for item in pm.ls('*.' + attr):
if "controlBuffer" not in item.name():
pm.select(item.split(".")[0], add=True)
# Joints Mode
elif mode == "jnts":
all_joints = []
for item in pm.ls('*.is_tweak'):
if "controlBuffer" not in item.name():
jnt = joint_from_driver_ctl(item.node())
if jnt not in all_joints:
all_joints.append(jnt)
pm.select(all_joints, replace=True)
# Node Mode
elif mode == "nodes":
str_sdk_nodes = []
for item in pm.ls('*.is_SDK'):
if "controlBuffer" not in item.name():
sdk_info = sdk_io.getAllSDKInfoFromNode(item.node())
str_sdk_nodes.extend(sdk_info.keys())
sdk_nodes = sdk_io.getPynodes(str_sdk_nodes)
pm.select(sdk_nodes, replace=True)
def reset_to_default(mode, clear_sel=False):
"""
Reset All the Rig Driver Ctls or Anim Ctls to Default
Arguments:
mode (str) - all : All Ctl Curves
- drv : Driver Ctls
- anim : Anim Ctls
Returns:
None
"""
attrs_dict = {"tx": 0,
"ty": 0,
"tz": 0,
"rx": 0,
"ry": 0,
"rz": 0,
"sx": 1,
"sy": 1,
"sz": 1}
# All Ctls Mode
if mode == "all":
for attrib in pm.ls("*.invTx"):
node = attrib.node()
if "controlBuffer" not in node.name():
for attr, value in attrs_dict.items():
# If the Attr is settable, set it
if pm.getAttr(node.attr(attr), settable=True):
pm.setAttr(node.attr(attr), value)
# Driver Ctls and Anim Ctls
    elif mode == "drv" or mode == "anim":
select_all(mode)
for item in pm.ls(sl=True):
for attr, value in attrs_dict.items():
pm.setAttr(item.attr(attr), value)
if clear_sel:
pm.select(clear=True)
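# Usage sketch (illustrative calls): select every SDK driver control in the
# scene, or zero out all anim controls before authoring new poses.
#
#   select_all('drv')
#   reset_to_default('anim', clear_sel=True)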
# ================================================= #
# NAVIGATION
# ================================================= #
def driver_ctl_from_joint(joint):
"""
Will try find the Driver control given the joint by searching through the
mGear nodes.
Arguments:
joint (PyNode): joint to search connections on
Returns:
PyNode : Control
"""
driver_control = None
if pm.nodeType(joint) == "joint":
for connected in pm.listConnections(joint.translateX, source=True):
if pm.nodeType(connected) == "decomposeMatrix":
for conB in pm.listConnections(connected.inputMatrix,
source=True):
if pm.nodeType(conB) == "mgear_mulMatrix":
for drvCtl in pm.listConnections(conB.matrixA,
source=True):
if pm.nodeType(drvCtl) == "transform":
driver_control = drvCtl
return driver_control
def joint_from_driver_ctl(node):
"""
Will try find the joint given the Driver control by searching through the
mGear nodes.
TO DO:
Expand this to be more robust. Check all channels / not rely on
translate connections only.
Arguments:
node (PyNode): node to search connections on
Returns:
PyNode : joint
"""
joint = None
for connected in pm.listConnections(node.worldMatrix[0], destination=True):
if pm.nodeType(connected) == "mgear_mulMatrix":
for conA in pm.listConnections(connected.output, destination=True):
if pm.nodeType(conA) == "decomposeMatrix":
for conB in pm.listConnections(conA.outputTranslateX,
destination=True):
if pm.nodeType(conB) == "joint":
joint = conB
return joint
def get_info(node):
"""
Given either the SDK box, or Anim ctl, will find other and return it
Arguments:
node (PyNode): either the SDK box or Anim ctl
Returns:
list [PyNode(SDK box), PyNode(anim ctl)]
"""
SDK_node = None
tweak_node = None
if pm.attributeQuery("is_SDK", node=node, ex=True):
SDK_node = node
for connected in pm.listConnections(node.attr("ctl")):
if pm.attributeQuery("is_tweak", node=connected, ex=True):
tweak_node = connected
if pm.attributeQuery("is_tweak", node=node, ex=True):
tweak_node = node
for connected in pm.listConnections(node.attr("sdk")):
if pm.attributeQuery("is_SDK", node=connected, ex=True):
SDK_node = connected
return [SDK_node, tweak_node]
def ctl_from_list(in_list, SDK=False, animTweak=False):
"""
Returns either the SDK's or animTweaks from the in_list.
    If given an SDK, it will find the animTweak pair and vice versa.
To qualify as SDK ctl must have "is_SDK" attr, or "is_tweak" attr for
animTweak
Arguments:
in_list (list[PyNode]): List of PyNodes to sort through
SDK (bool): If you want SDK ctls
animTweak (bool): If you want animTweak ctls
Returns:
list [List of either SDK ctls or animTweaks]
"""
SDK_ctls = []
for item in in_list:
# If its a joint, find the connected control
if pm.nodeType(item) == "joint":
item = driver_ctl_from_joint(item)
SDK_ctl = get_info(item)[0]
if SDK_ctl:
if SDK_ctl not in SDK_ctls:
SDK_ctls.append(SDK_ctl)
return SDK_ctls
# ================================================= #
# SDK
# ================================================= #
def set_driven_key(driverAttr,
drivenAttr,
driverVal,
drivenVal,
preInfinity=0,
postInfinity=0,
inTanType="linear",
outTanType="linear"):
"""
    Convenience function to aid in setting driven keys.
Arguments:
driverAttr (PyNode.attribute): Driver.attr to drive the SDK
drivenAttr (PyNode.attribute): Driven.attr to be driven by the SDK
driverVal (float): Value to use for driver
drivenVal (float): Value to use for driven
preInfinity (int): IndexKey - constant[0], linear[1], cycle[2],
cycleOffset[3], Oscillate[4]
postInfinity (int): IndexKey - constant[0], linear[1], cycle[2],
cycleOffset[3], Oscillate[4]
inTanType (str): spline, linear, fast, slow, flat, stepped, step next,
        fixed, clamped and plateau
outTanType (str): spline, linear, fast, slow, flat, stepped, step next,
        fixed, clamped and plateau
Returns:
new Anim UU node or the Edited one.
TO DO:
fix the return.
"""
animUU = None
# Grabbing the Driver connections for comparison later
driver_con_A = pm.listConnections(driverAttr)
# setting the Driven key frame
pm.setDrivenKeyframe(drivenAttr,
cd=driverAttr,
driverValue=driverVal,
value=drivenVal,
inTangentType=inTanType,
outTangentType=outTanType,
)
    # Comparing the connections to driverAttr to find the new Anim UU node.
DriverConB = pm.listConnections(driverAttr)
for conB in DriverConB:
if conB not in driver_con_A:
animUU = conB
# Setting Attrs
if animUU:
animUU.preInfinity.set(preInfinity)
animUU.postInfinity.set(postInfinity)
# renaming
if animUU:
newName = "{}_{}".format(driverAttr.split(".")[0],
drivenAttr.split(".")[1])
pm.rename(animUU, newName)
return animUU
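# Usage sketch (illustrative; node and attribute names are hypothetical and
# assume a Maya scene with an mGear rig): drive a corrective SDK control's
# translateY from a driver control's custom attribute with two keys.
#
#   driver = pm.PyNode('armUI_L0_ctl.elbow_bend')
#   driven = pm.PyNode('elbow_L0_sdk.ty')
#   set_driven_key(driver, driven, driverVal=0.0, drivenVal=0.0)
#   set_driven_key(driver, driven, driverVal=90.0, drivenVal=1.5,
#                  inTanType='flat', outTanType='flat')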
def get_driven_from_attr(driverAttr, is_SDK=False):
"""
Returns a list of driven controls given the driver attr
Arguments:
driverAttr (PyNode): the driver attr to search
is_SDK (bool): if True, will check if the is_SDK attr is present before
adding to driven_ctls list.
Returns:
list [List of unicode names]
"""
driven_ctls = []
for connected_node in pm.listConnections(driverAttr):
if pm.nodeType(connected_node) in SDK_ANIMCURVES_TYPE:
drvn_ctl = sdk_io.getSDKDestination(connected_node)[0]
if is_SDK:
if not pm.attributeQuery("is_SDK", node=drvn_ctl, ex=True):
break
if drvn_ctl not in driven_ctls:
driven_ctls.append(drvn_ctl)
return driven_ctls
def get_driver_from_driven(drivenCtl):
"""
Finds the Driver controls for a given driven ctl
Arguments:
drivenCtl (PyNode): A Driven Node to query.
Returns:
list [All found Driver Nodes]
"""
driver_ctls = []
retrieved_SDK_nodes = sdk_io.getConnectedSDKs(drivenCtl)
retrieved_SDK_nodes.extend(sdk_io.getMultiDriverSDKs(drivenCtl))
for rtv_attrs in retrieved_SDK_nodes:
for rtv_attr in rtv_attrs:
if pm.nodeType(rtv_attr) in SDK_ANIMCURVES_TYPE:
try:
SDK_info = sdk_io.getSDKInfo(rtv_attr.node())
if SDK_info['driverNode'] not in driver_ctls:
driver_ctls.append(SDK_info['driverNode'])
except: # noqa: E722
pass
return driver_ctls
def get_driver_keys(driverAttr,
firstKey=None,
prevKey=None,
nextKey=None,
lastKey=None):
"""
Returns a list of Driver key values for the given driverAttr.
If all optional arguments are None, will return list of all values
Arguments:
driverAttr (PyNode.Attribute): Driver Ctl.attr
firstKey (bool):
prevKey (bool):
nextKey (bool):
lastKey (bool):
Returns:
List (If all optional None) - List of driver key values
float (If one specified) - The float value for the driver on that key.
"""
    driver_con = pm.listConnections(driverAttr)
    keys_list = []
    if len(driver_con) > 0:
        for con in driver_con:
if pm.nodeType(con) in SDK_ANIMCURVES_TYPE:
SDK_dict = sdk_io.getSDKInfo(con)
for key in SDK_dict["keys"]:
if key[0] not in keys_list:
keys_list.append(key[0])
if firstKey:
return keys_list[0]
if prevKey:
return next_smallest(driverAttr.get(), keys_list)
if nextKey:
return next_biggest(driverAttr.get(), keys_list)
if lastKey:
return keys_list[-1]
else:
return keys_list
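# Usage sketch (hypothetical attribute name): snap a driver attribute to its
# next recorded SDK key, e.g. from a picker UI button.
#
#   drv_attr = pm.PyNode('eyebrow_L0_sdk.ty')
#   drv_attr.set(get_driver_keys(drv_attr, nextKey=True))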
def mirror_SDK(driverCtl):
"""
Takes in a driver control and extrapolates out all the other
information needed to mirror it's connected SDK's.
Arguments:
driverCtl (PyNode):
Returns:
None
"""
# Getting The Opposite Driver
t_driver = pickWalk.getMirror(driverCtl)[0]
# Getting all the SDK Ctls + RHS Counterparts from Driver Ctl Name.
driven_ctls_dict = {}
for sdk_attrs in sdk_io.getConnectedSDKs(driverCtl):
for sdk_attr in sdk_attrs:
if pm.nodeType(sdk_attr.node()) in SDK_ANIMCURVES_TYPE:
destination_ctl = sdk_io.getSDKDestination(sdk_attr.node())[0]
driven_ctls_dict[destination_ctl] = pickWalk.getMirror(
pm.PyNode(destination_ctl))[0]
# Removing any Already Existing SDK's from the target driver.
for s_driven, t_driven in driven_ctls_dict.items():
sdk_io.removeSDKs(t_driven, sourceDriverFilter=[t_driver])
    # Looping over the driven controls and recreating their SDKs on the mirrored targets.
# -*- coding: utf-8 -*-
"""
checkout
~~~~~~~~~~~~
Python wrapper for the Checkout Finland API.
Copyright (c) 2014 by <NAME>.
Copyright (c) 2013 by <NAME>.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* The names of the contributors may not be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import hashlib
import hmac
import xml.etree.ElementTree as ET
import requests
__version__ = '0.2.0'
try:
text_type = unicode # This is Py2
except NameError: # Must be Py3
text_type = str
def join_as_bytes(joiner, bits, encoding="ascii"):
joined_unicode = text_type(joiner).join(text_type(bit) for bit in bits)
return joined_unicode.encode(encoding)
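# Quick sketch of the helper above: values are stringified, joined with the
# separator, then encoded, e.g.
#
#   join_as_bytes("+", ["0001", 1000, "EUR"]) == b"0001+1000+EUR"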
class CheckoutException(Exception):
"""This exception is raised when the request made to the Checkout API
is invalid, or some other error occurs in the usage of the API."""
def __init__(self, message):
#: An error description of the error in chosen localization. This error
#: description is not meant to be displayed to the end-user.
self.message = message
class Contact(object):
"""This class represents the payer of a payment.
Contact details are optional """
def __init__(self, **options):
#: Payer's first name.
self.first_name = options.get('first_name', '')[0:40]
#: Payer's surname.
self.last_name = options.get('last_name', '')[0:40]
#: Payer's email address.
self.email = options.get('email', '')[0:200]
#: Payer's telephone number.
self.phone = options.get('phone', '')[0:30]
#: Payer's street address.
self.address = options.get('address', '')[0:40]
#: Payer's postal code.
self.postcode = options.get('postcode', '')[0:14]
#: Payer's post office.
self.postoffice = options.get('postoffice', '')[0:18]
#: Payer's country. 3-letter ISO code.
self.country = options.get('country', '')[0:3]
@property
def dict(self):
"""Dict of this contact in fields specified by Checkout API and clipped accordingly."""
return {
'PHONE': self.phone,
'EMAIL': self.email,
'FIRSTNAME': self.first_name,
'FAMILYNAME': self.last_name,
'ADDRESS': self.address,
'POSTCODE': self.postcode,
'POSTOFFICE': self.postoffice,
'COUNTRY': self.country
}
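# A minimal usage sketch (not part of the original library): building a Contact
# and inspecting the clipped field dict that is merged into the payment POST.
# All personal details below are made up for illustration.
def _example_contact():
    contact = Contact(
        first_name='Maija',
        last_name='Meikalainen',
        email='maija@example.com',
        phone='+358401234567',
        address='Esimerkkikatu 1',
        postcode='00100',
        postoffice='Helsinki',
        country='FIN',
    )
    return contact.dict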
class Payment(object):
def __init__(self, order_number, reference_number, amount, delivery_date, return_url, cancel_url, **options):
#: Order number is a string of characters identifying the customer's
#: purchase and the used webshop software creates it. Mandatory.
if len(order_number) > 20:
raise CheckoutException("order_number over maximum allowed 20 characters")
else:
self.order_number = order_number
#: Reference number is sent to bank by default and is automatically
#: created. In those payment methods that are used as an interface,
        #: this field can contain a custom reference number, which is sent to the
#: bank service instead of the automatically generated reference
#: number. Mandatory.
if len(reference_number) > 20:
raise CheckoutException("reference_number over maximum allowed 20 characters")
else:
self.reference_number = reference_number
#: Order amount in cents. Mandatory.
if len(amount) > 8:
raise CheckoutException("amount over maximum allowed 8 characters")
else:
self.amount = amount
#: Delivery date of order in format YYYYMMDD. Mandatory
if len(delivery_date) > 8:
raise CheckoutException("delivery_date over maximum allowed 8 characters")
else:
self.delivery_date = delivery_date
#: Any data about the order in text format can be sent to the payment
#: system. They are shown in the Merchant's Panel in payment details. Optional.
self.message = options.get('message', '')[0:1000]
#: Payment currency. Value must EUR for the Finnish banks, otherwise
#: the payment will not be accepted. Mandatory, defaults to 'EUR'.
self.currency = options.get('currency', 'EUR')
#: Language defines default language for the payment method
#: selection page. Optional, 2-letter ISO code.
self.language = options.get('language', 'FI')
#: Contact object for the Payment. Optional, if supplied with None blank contact is used.
self.contact = options.get('contact', Contact())
#: Payment content. "1" for normal content and "10" for adult content. Mandatory, default 1.
self.content = options.get('content', '1')[0:2]
#: URL to which user is redirected after a successful payment. Mandatory.
if len(return_url) > 300:
raise CheckoutException("return_url over maximum allowed 300 characters")
else:
self.return_url = return_url
#: URL to which user is redirected after a cancelled or failed payment. Mandatory.
if len(cancel_url) > 300:
raise CheckoutException("cancel_url over maximum allowed 300 characters")
else:
self.cancel_url = cancel_url
#: URL to which user is directed, if the payment is pending.
        #: After the actual payment, the payment is acknowledged as received by
        #: Checkout by fetching this URL with the same parameters as the normal return_url.
#: Optional.
self.delayed_url = options.get('delayed_url', '')
if len(self.delayed_url) > 300:
raise CheckoutException("delayed_url over maximum allowed 300 characters")
#: URL requested when the payment is marked as rejected. The URL is
#: requested with the same GET parameters as return address when the
#: payment is made. Optional.
self.reject_url = options.get('reject_url', '')
if len(self.reject_url) > 300:
raise CheckoutException("reject_url over maximum allowed 300 characters")
@property
def currency(self):
return self._currency
@currency.setter
def currency(self, value):
if value != 'EUR':
raise CheckoutException("Currently EUR is the only supported currency.")
self._currency = value
@property
def language(self):
return self._language
@language.setter
def language(self, value):
if value not in ('FI', 'SE', 'EN'):
raise CheckoutException("Given language is not supported: %r" % value)
self._language = value
@property
def dict(self):
returndict = {
'VERSION': "0001", #: Version of the API.
'STAMP': self.order_number,
'AMOUNT': self.amount,
'REFERENCE': self.reference_number,
'MESSAGE': self.message,
'LANGUAGE': self.language,
'RETURN': self.return_url,
'CANCEL': self.cancel_url,
'REJECT': self.reject_url,
'DELAYED': self.delayed_url,
'CURRENCY': self.currency,
'CONTENT': self.content,
'TYPE': "0", #: Static field.
'ALGORITHM': "3", #: Return AUTHCODE algorithm, "3" for HMAC-SHA256.
'DELIVERY_DATE': self.delivery_date
}
#: Merge with Contact values
returndict.update(self.contact.dict)
return returndict
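# Usage sketch (illustrative values, not part of the original library): a
# minimal Payment for a 10.00 EUR order. Note that amount is passed in cents as
# a string, because the constructor validates field lengths with len().
def _example_payment():
    payment = Payment(
        order_number='1001',
        reference_number='1009',
        amount='1000',                      # 10.00 EUR in cents
        delivery_date='20140101',
        return_url='https://shop.example.com/return',
        cancel_url='https://shop.example.com/cancel',
        message='Test order',
        contact=Contact(first_name='Maija', last_name='Meikalainen'),
    )
    return payment.dict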
class Checkout(object):
SERVICE_URL = "https://payment.checkout.fi/"
def __init__(self, merchant_id='375917',
merchant_secret='SAIPPUAKAUPPIAS'):
"""
Initialize Checkout with your own merchant id and merchant secret.
        :param merchant_id: Merchant ID is given to you by Checkout
when you make the contract. Default is the test merchant_id.
:param merchant_secret: Merchant secret is given to you by Checkout.
Default is the test merchant_secret.
"""
self.merchant_id = merchant_id
self.merchant_secret = merchant_secret
self.session = requests.Session() # noqa
def get_onsite_button_data(self, payment):
"""
Creates a new payment and returns a `list` with the following data for each payment method:
{ 'bank': bankname, 'url': posturl, 'icon': iconurl, formfields: {} }
:param payment: a `Payment` object
"""
postdict = payment.dict
postdict['MERCHANT'] = self.merchant_id
postdict['DEVICE'] = "10" #: "10" to get XML data for payment methods back
postdict['MAC'] = self._calculate_payment_md5(postdict, self.merchant_secret)
response = self.session.post(self.SERVICE_URL, data=postdict)
return self.parse_xml_response(response.content)
def get_offsite_button_data(self, payment):
"""
Returns form fields for off-page payment where user is sent to checkout.fi and shown
all the payment options there instead of showing them onsite.
:param payment: a `Payment` object
"""
paymentdict = payment.dict
paymentdict['MERCHANT'] = self.merchant_id
paymentdict['DEVICE'] = "1" #: "1" to get payment method selection form from Checkout.fi
paymentdict['MAC'] = self._calculate_payment_md5(paymentdict, self.merchant_secret)
return paymentdict
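    # Usage sketch (illustrative): the returned dict can be rendered as hidden
    # form fields that POST to SERVICE_URL, e.g.
    #
    #   checkout = Checkout()   # defaults to the Checkout test credentials
    #   fields = checkout.get_offsite_button_data(payment)
    #   for name, value in fields.items():
    #       print('<input type="hidden" name="%s" value="%s">' % (name, value))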
def parse_xml_response(self, xmlraw):
"""
Parses XML-response for onsite payment method
:param xmlraw: Raw XML data returned by checkout.fi
"""
payment_list = []
XML = ET.fromstring(xmlraw) # noqa
banks = XML.findall(".//payment/banks/*")
for bank in banks:
bankdict = dict(bank.items())
fielddict = {}
for fieldname in bank:
fielddict[fieldname.tag] = fieldname.text
bankdict["fields"] = fielddict
payment_list.append(bankdict)
return payment_list
def _calculate_payment_md5(self, params, merchant_secret):
fields = [params["VERSION"], params["STAMP"], params["AMOUNT"], params["REFERENCE"],
params["MESSAGE"], params["LANGUAGE"], params["MERCHANT"], params["RETURN"],
params["CANCEL"], params["REJECT"], params["DELAYED"], params["COUNTRY"],
params["CURRENCY"], params["DEVICE"], params["CONTENT"], params["TYPE"],
params["ALGORITHM"], params["DELIVERY_DATE"], params["FIRSTNAME"], params["FAMILYNAME"],
params["ADDRESS"], params["POSTCODE"], params["POSTOFFICE"], merchant_secret]
base = join_as_bytes("+", fields, encoding="utf-8")
return hashlib.md5(base).hexdigest().upper()
def validate_payment_return(self, mac, version, order_number, order_reference, payment, status, algorithm):
"""
        Validates parameters sent by Checkout Finland to the merchant's return URL.
        """
# This file is part of parallel-ssh.
# Copyright (C) 2014-2018 <NAME> and contributors.
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, version 2.1.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import sys
if 'threading' in sys.modules:
del sys.modules['threading']
from gevent import monkey # noqa: E402
monkey.patch_all()
import os # noqa: E402
import logging # noqa: E402
from socket import gaierror as sock_gaierror, error as sock_error # noqa: E402
from gevent import sleep # noqa: E402
import paramiko # noqa: E402
from paramiko.ssh_exception import ChannelException # noqa: E402
from ...exceptions import UnknownHostException, AuthenticationException, \
ConnectionErrorException, SSHException # noqa: E402
from ...constants import DEFAULT_RETRIES # noqa: E402
from ...utils import read_openssh_config # noqa: E402
host_logger = logging.getLogger('pssh.host_logger')
logger = logging.getLogger(__name__)
class SSHClient(object):
"""SSH client based on Paramiko with sane defaults.
Honours ``~/.ssh/config`` and ``/etc/ssh/ssh_config`` host entries
for host, user name, port and key overrides.
"""
def __init__(self, host,
                 user=None, password=None, port=None,
pkey=None, forward_ssh_agent=True,
num_retries=DEFAULT_RETRIES, agent=None,
allow_agent=True, timeout=10, proxy_host=None,
proxy_port=22, proxy_user=None, proxy_password=None,
proxy_pkey=None, channel_timeout=None,
_openssh_config_file=None,
**paramiko_kwargs):
"""
:param host: Hostname to connect to
:type host: str
:param user: (Optional) User to login as. Defaults to logged in user or
user from ~/.ssh/config if set
:type user: str
:param password: (Optional) Password to use for login. Defaults to
no password
:type password: str
:param port: (Optional) Port number to use for SSH connection. Defaults
to ``None`` which uses SSH default
:type port: int
:param pkey: (Optional) Client's private key to be used to connect with
:type pkey: :py:class:`paramiko.pkey.PKey`
:param num_retries: (Optional) Number of retries for connection attempts
before the client gives up. Defaults to 3.
:type num_retries: int
:param timeout: (Optional) Number of seconds to timeout connection
attempts before the client gives up
:type timeout: int
:param forward_ssh_agent: (Optional) Turn on SSH agent forwarding -
equivalent to `ssh -A` from the `ssh` command line utility.
Defaults to True if not set.
:type forward_ssh_agent: bool
:param agent: (Optional) Override SSH agent object with the provided.
This allows for overriding of the default paramiko behaviour of
connecting to local SSH agent to lookup keys with our own SSH agent
object.
:type agent: :py:class:`paramiko.agent.Agent`
:param proxy_host: (Optional) SSH host to tunnel connection through
so that SSH clients connects to self.host via
client -> proxy_host -> host
:type proxy_host: str
:param proxy_port: (Optional) SSH port to use to login to proxy host if
set. Defaults to 22.
:type proxy_port: int
:param channel_timeout: (Optional) Time in seconds before an SSH
operation times out.
:type channel_timeout: int
:param allow_agent: (Optional) set to False to disable connecting to
the SSH agent
:type allow_agent: bool
:param paramiko_kwargs: (Optional) Extra keyword arguments to be
passed on to :py:func:`paramiko.client.SSHClient.connect`
:type paramiko_kwargs: dict
"""
try:
_host, _user, _port, _pkey = read_openssh_config(
host, config_file=_openssh_config_file)
except TypeError:
_host, _user, _port, _pkey = None, None, 22, None
user = user if user else _user
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
self.forward_ssh_agent = forward_ssh_agent
self.client = client
self.user = user
self.password = password
self.pkey = pkey if pkey else _pkey
self.port = port if port else _port
self.host = host
self._host = _host
self.allow_agent = allow_agent
if agent:
self.client._agent = agent
self.num_retries = num_retries
self.timeout = timeout
self.channel_timeout = channel_timeout
self.proxy_host, self.proxy_port, self.proxy_user, \
self.proxy_password, self.proxy_pkey = proxy_host, proxy_port, \
proxy_user, proxy_password, proxy_pkey
self.proxy_client = None
real_host = _host if _host is not None else host
if self.proxy_host and self.proxy_port:
logger.debug(
"Proxy configured for destination host %s - Proxy host: %s:%s",
real_host, self.proxy_host, self.proxy_port,)
self._connect_tunnel(real_host, **paramiko_kwargs)
else:
self._connect(self.client, real_host, self.port, **paramiko_kwargs)
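    # Usage sketch (illustrative; host name and credentials are made up). The
    # exec_command return shape follows its docstring further below.
    #
    #   client = SSHClient('server.example.com', user='deploy',
    #                      password='secret', num_retries=3)
    #   channel, host, stdout, stderr, stdin = client.exec_command('uname -a')
    #   for line in stdout:
    #       print(line)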
def __del__(self):
try:
self.client.close()
except Exception:
pass
def _connect_tunnel(self, host, **paramiko_kwargs):
"""Connects to SSH server via an intermediate SSH tunnel server.
client (me) -> tunnel (ssh server to proxy through) ->
``self.host`` (ssh server to run command)
:rtype: :py:class:`paramiko.SSHClient` Client to remote SSH destination
via intermediate SSH tunnel server.
"""
self.proxy_client = paramiko.SSHClient()
self.proxy_client.set_missing_host_key_policy(
paramiko.MissingHostKeyPolicy())
self._connect(self.proxy_client, self.proxy_host, self.proxy_port,
                      user=self.proxy_user, password=self.proxy_password,
pkey=self.proxy_pkey, **paramiko_kwargs)
logger.info("Connecting via SSH proxy %s:%s -> %s:%s", self.proxy_host,
self.proxy_port, host, self.port,)
try:
proxy_channel = self.proxy_client.get_transport().open_channel(
'direct-tcpip', (host, self.port,), ('127.0.0.1', 0),
timeout=self.timeout)
sleep(0)
return self._connect(self.client, host, self.port,
sock=proxy_channel,
**paramiko_kwargs)
except (ChannelException, paramiko.SSHException) as ex:
error_type = ex.args[1] if len(ex.args) > 1 else ex.args[0]
raise ConnectionErrorException(
"Error connecting to host '%s:%s' - %s",
host, self.port, str(error_type))
def _connect(self, client, host, port, sock=None, retries=1,
                 user=None, password=None, pkey=None,
**paramiko_kwargs):
"""Connect to host
:raises: :py:class:`pssh.exceptions.AuthenticationException`
on authentication error
:raises: :py:class:`pssh.exceptions.UnknownHostException`
on DNS resolution error
:raises: :py:class:`pssh.exceptions.ConnectionErrorException`
on error connecting
:raises: :py:class:`pssh.exceptions.SSHException` on other undefined
SSH errors
"""
logger.debug("Connecting to %s..", host)
try:
client.connect(host,
username=user if user else self.user,
password=password if password else self.password,
port=port, pkey=pkey if pkey else self.pkey,
sock=sock, timeout=self.timeout,
allow_agent=self.allow_agent,
**paramiko_kwargs)
except sock_gaierror as ex:
logger.error("Could not resolve host '%s' - retry %s/%s",
host, retries, self.num_retries)
while retries < self.num_retries:
sleep(5)
return self._connect(client, host, port,
sock=sock,
retries=retries+1,
**paramiko_kwargs)
raise UnknownHostException("Unknown host %s - %s - retry %s/%s",
host, str(ex.args[1]), retries,
self.num_retries)
except sock_error as ex:
logger.error("Error connecting to host '%s:%s' - retry %s/%s",
host, self.port, retries, self.num_retries)
while retries < self.num_retries:
sleep(5)
return self._connect(client, host, port,
sock=sock,
retries=retries+1,
**paramiko_kwargs)
error_type = ex.args[1] if len(ex.args) > 1 else ex.args[0]
raise ConnectionErrorException(
"Error connecting to host '%s:%s' - %s - retry %s/%s",
host, self.port, str(error_type), retries,
self.num_retries,)
except paramiko.AuthenticationException:
msg = "Authentication error while connecting to %s:%s."
raise AuthenticationException(msg, host, port)
# SSHException is more general so should be below other types
# of SSH failure
except paramiko.SSHException as ex:
msg = "General SSH error - %s" % (ex,)
logger.error(msg)
raise SSHException(msg, host, port)
def exec_command(self, command, sudo=False, user=None,
shell=None,
use_shell=True, use_pty=True):
"""Wrapper to :py:func:`paramiko.SSHClient.exec_command`
Opens a new SSH session with a new pty and runs command before yielding
the main gevent loop to allow other greenlets to execute.
:param command: Command to execute
:type command: str
:param sudo: (Optional) Run with sudo. Defaults to False
:type sudo: bool
:param user: (Optional) User to switch to via sudo to run command as.
Defaults to user running the python process
:type user: str
:param shell: (Optional) Shell override to use instead of user login
configured shell. For example ``shell='bash -c'``
:param use_shell: (Optional) Force use of shell on/off.
Defaults to `True` for on
:type use_shell: bool
:param use_pty: (Optional) Enable/Disable use of pseudo terminal
            emulation. This is required in the vast majority of cases, the
            exception being where a shell is not used and/or stdout/stderr/stdin buffers
are not required. Defaults to ``True``
:type use_pty: bool
:rtype: Tuple of `(channel, hostname, stdout, stderr, stdin)`.
Channel is the remote SSH channel, needed to ensure all of stdout has
been got, hostname is remote hostname the copy is to, stdout and
stderr are buffers containing command output and stdin is standard
input channel
"""
channel = self.client.get_transport().open_session()
if self.forward_ssh_agent:
agent_handler = paramiko.agent.AgentRequestHandler( # noqa: F841
channel)
if use_pty:
channel.get_pty()
if self.channel_timeout:
channel.settimeout(self.channel_timeout)
stdout, stderr, stdin = channel.makefile('rb'), \
channel.makefile_stderr('rb'), channel.makefile('wb')
for _char in ['\\', '"', '$', '`']:
command = command.replace(_char, r'\%s' % (_char,))
shell = '$SHELL -c' if not shell else shell
_command = ''
if sudo and not user:
_command = 'sudo -S '
elif user:
_command = 'sudo -u %s -S ' % (user,)
if use_shell:
_command += '%s "%s"' % (shell, command,)
else:
_command += '"%s"' % (command,)
logger.debug("Running parsed command %s on %s", _command, self.host)
# are loaded into the last page of this cache
# area.
class overlay_data:
_instance = None
# Holds information about all the groups and multi-groups.
class _overlay_group_data:
def __init__ (self, groups, multi_groups, multi_group_table):
self._groups = groups
self._multi_groups = multi_groups
self._multi_group_table = multi_group_table
def get_group (self, index):
return self._groups[index]
def get_group_count (self):
return len (self._groups)
def get_multi_group (self, index):
return self._multi_groups[index]
def get_multi_group_count (self):
return len (self._multi_groups)
def get_token_from_multi_group_table (self, index):
return self._multi_group_table[index]
# Holds information about a single group.
class _overlay_group:
def __init__ (self, base_address, size_in_bytes):
self._base_address = base_address
self._size_in_bytes = size_in_bytes
def base_address (self):
return self._base_address
def size_in_bytes (self):
return self._size_in_bytes
# Holds information about a single multi-group.
class _overlay_multi_group:
def __init__ (self, number, index, tokens):
self._number = number
self._index = index
self._tokens = tokens
def tokens (self):
return self._tokens
def index (self):
return self._index
def number (self):
return self._number
# A class to describe an area of memory. This serves as a base
# class for the cache region descriptor, and the storage region
# descriptor classes.
class _memory_region:
# The START is the first address within the region, while END
# is the first address just beyond the region.
def __init__ (self, start, end):
self._start_address = start
self._end_address = end
# Returns the first address within the region.
def start_address (self):
return self._start_address
# Return the first address past the end of the region.
def end_address (self):
return self._end_address
# A static description of the overlay cache area. This is the
# area of memory into which overlays are loaded so they can be
# used.
class _cache_descriptor (_memory_region):
def __init__ (self, start, end):
super (overlay_data._cache_descriptor, self).__init__ (start, end)
# Return the address for the start of the cache region.
def base_address (self):
return self.start_address ()
# Return the total size of the cache in bytes, including the tables
# region.
def size_in_bytes (self):
return self.end_address () - self.start_address ()
# Return the number of entries that are available for holding
# overlays. This excludes the area that is given up to hold the
# overlay tables. Currently the tables are copied into the last entry
# in the cache.
def number_of_working_entries (self):
entry_size = self.entry_size_in_bytes ()
return ((self.size_in_bytes () / entry_size)
- (self.tables_size_in_bytes () / entry_size))
# Return the total number of entries that are in the cache, this
# includes any entries being used to hold the overlay tables.
def total_number_of_entries (self):
entry_size = self.entry_size_in_bytes ()
            return (self.size_in_bytes () / entry_size)
# The address of the overlay tables within the cache. Currently these
# are always in the last entry of the cache, and are one entry in size.
def tables_base_address (self):
entry_size = self.entry_size_in_bytes ()
return self.end_address () - self.tables_size_in_bytes ()
# Return the size of the overlay tables region in bytes. This is
# currently always a single page of the cache.
def tables_size_in_bytes (self):
return self.entry_size_in_bytes ()
# Return the size in bytes of a single entry (or page) within the
# cache.
def entry_size_in_bytes (self):
return get_comrv_min_entry_size ()
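    # Worked example (illustrative numbers, not taken from the source): for a
    # hypothetical cache region of 0x2000 bytes with a minimum entry size of
    # 0x200 bytes, size_in_bytes() divided by the entry size gives 16 entries in
    # total; the overlay tables occupy exactly one entry, so
    # number_of_working_entries() would be 16 - 1 = 15 and tables_base_address()
    # would be end_address() - 0x200.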
    # A class that describes the overlay system's storage area. This
    # is the area of memory from which the overlays are loaded. The
    # debug information will refer to this area.
class _storage_descriptor (_memory_region):
def __init__ (self, start, end):
super (overlay_data._storage_descriptor, self).__init__ (start, end)
class _comrv_labels ():
def __init__ (self):
self.comrv_invoke_callee \
= get_symbol_address (COMRV_INVOKE_CALLEE_LABEL)
self.ret_from_callee \
= get_symbol_address (COMRV_RETURN_FROM_CALLEE_LABEL)
self.comrv_ret_from_callee_context_switch \
= get_symbol_address (COMRV_RETURN_FROM_CALLEE_CONTEXT_SWITCH_LABEL)
self.comrv_entry \
= get_symbol_address (COMRV_ENTRY_LABEL)
self.comrv_entry_context_switch \
= get_symbol_address (COMRV_ENTRY_CONTEXT_SWITCH_LABEL)
self.comrv_exit \
= get_symbol_address (COMRV_EXIT_LABEL)
self.enabled = (self.comrv_invoke_callee
and self.ret_from_callee
and self.comrv_entry and self.comrv_exit)
# A wrapper class to hold all the different information we loaded from
# target memory. An instance of this is what we return from the fetch
# method.
class _overlay_data_inner:
def __init__ (self, cache_descriptor, storage_descriptor, groups_data,
mg_index_offset, info_sym):
self._cache_descriptor = cache_descriptor
self._groups_data = groups_data
self._storage_descriptor = storage_descriptor
self._multi_group_index_offset = mg_index_offset
self._info_sym = info_sym
def cache (self):
return self._cache_descriptor
def storage (self):
return self._storage_descriptor
def group (self, index):
return self._groups_data.get_group (index)
def group_count (self):
return self._groups_data.get_group_count ()
def multi_group (self, index):
return self._groups_data.get_multi_group (index)
def multi_group_count (self):
return self._groups_data.get_multi_group_count ()
def is_multi_group_enabled (self):
return self._multi_group_index_offset > 0
def multi_group_index_offset (self):
return self._multi_group_index_offset
def get_token_from_multi_group_table (self, index):
return self._groups_data.get_token_from_multi_group_table (index)
def comrv_initialised (self):
return (not self._groups_data == None)
def labels (self):
# TODO: Maybe we could do some caching here?
return overlay_data._comrv_labels ()
def comrv_info (self):
return self._info_sym
# Read the group offset for overlay group GROUP_NUMBER. The
# overlay data starts at address BASE_ADDRESS in memory.
#
# Return the offset in bytes for the specified overlay group.
@staticmethod
def _read_overlay_offset (base_address, end_address, group_number):
base_address = base_address + (2 * group_number)
if ((base_address + 1) >= end_address):
raise RuntimeError ("out of bounds access while reading offset "
+ "table for group %d" % (group_number))
scaled_offset = mem_reader.read_16_bit (base_address)
offset = get_comrv_min_entry_size () * scaled_offset
return offset
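    # Worked example (illustrative values, not taken from the source): with a
    # minimum entry size of 0x200 bytes, a 16-bit table value of 3 for group N
    # means that group N starts 3 * 0x200 = 0x600 bytes into the storage area.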
# Read a 32-bit overlay token from the multi-group table. ADDRESS
# is the exact address from which the token should be loaded.
@staticmethod
def _read_overlay_token (address):
token = mem_reader.read_32_bit (address)
return token
# Load information about all of the groups and multi-groups from the
# overlay cache tables, and return an instance of an object holding all of
# this data.
@staticmethod
def _load_group_data (table_start, table_size, storage_desc,
multi_group_offset):
def _load_overlay_groups (table_start, table_end, storage_start):
groups = list ()
# Read all of the overlay group offsets from memory, adding
# entries to the overlay group list as we go.
grp = 0
# Read the offset of the very first overlay group. This
# should always be 0, but lets check it anyway.
prev_offset \
= overlay_data._read_overlay_offset (table_start,
table_end,
grp)
if (prev_offset != 0):
raise RuntimeError ("offset of first overlay group is 0x%x not 0"
% (prev_offset))
while (True):
# Read the offset for the start of the next overlay group.
next_offset \
= overlay_data._read_overlay_offset (table_start,
table_end,
(grp + 1))
# An offset of 0 indicates the end of the group table.
if (next_offset == 0):
break
# Calculate the size of this overlay group, and create an
# object to represent it.
size = next_offset - prev_offset
groups.append (overlay_data.
_overlay_group (storage_start + prev_offset, size))
grp += 1
prev_offset = next_offset
return groups
def _load_overlay_multi_groups (table_start, table_end):
multi_groups = list ()
all_tokens = list ()
# The start and end of the region containing the
# multi-group table.
mg_start = table_start
mg_end = table_end
# A number assigned to each multi-group. Starts at 0, and
# increments by one for each multi-group.
mg_num = 0
# An index assigned to each multi-group. This is the
# index of the first member of the multi-group.
mg_idx = 0
# Used to track the index into the multi-group table.
idx = 0
# The tokens within the current multi-group.
mg_tokens = list ()
while (mg_start < mg_end):
# Read a 32-bit overlay token from the multi-group table.
ovly_token = overlay_data._read_overlay_token (mg_start)
all_tokens.append (ovly_token)
idx += 1
# A token of 0 indicates the end of a multi-group.
if (ovly_token == 0):
# If this is the first entry in a multi-group then we
# have reached the end of all multi-group data, and
# we're done.
if (len (mg_tokens) == 0):
break
# Otherwise, we've reached the end of this
# multi-group, but there might be more after this.
# Finalise this multi-group, and prepare to parse the
# next.
else:
multi_groups.append (overlay_data.
_overlay_multi_group (mg_num,
mg_idx,
mg_tokens))
# Now reset ready to read the next multi-group.
mg_num += 1
mg_idx = idx
mg_tokens = list ()
# Otherwise a non-zero token is a member of the multi-group.
else:
mg_tokens.append (ovly_token)
mg_start += 4 # The size of one overlay token.
return multi_groups, all_tokens
storage_start = storage_desc.start_address ()
if (multi_group_offset >= 0):
            table_end
: {0}_ .'.format(self.codex.de_codex))
# paginae.extend(self.imprimere_in_datapackage())
# return self.imprimere_in_datapackage()
return paginae
def imprimere_res_methodi_ex_dictionariorum_corde(self, item):
if item and 'meta' in item and \
'methodi_ex_dictionariorum_corde' in item['meta'] and \
'mul-Zyyy' in item['meta']['methodi_ex_dictionariorum_corde']:
return item['meta']['methodi_ex_dictionariorum_corde']['mul-Zyyy']
return None
class CodexInTabulamJson:
"""Codex Sarcinarum Adnexīs
//Packages of attachments from Codex//
Trivia:
- cōdex, m, s, (Nominative) https://en.wiktionary.org/wiki/codex#Latin
- adnexīs, m/f/n, pl (Dative) https://en.wiktionary.org/wiki/adnexus#Latin
- annexīs, m/f/n, pl (Dative) https://en.wiktionary.org/wiki/annexus#Latin
- sarcinārum, f, pl, (Gengitive) https://en.wiktionary.org/wiki/sarcina
/print book cover (SVG format)/@eng-Latn
Trivia:
- cōdex, m, s, (Nominative), https://en.wiktionary.org/wiki/codex#Latin
- imprimere, v, s, (), https://en.wiktionary.org/wiki/imprimo#Latin
- in (+ ablative), in (+ accusative)
https://en.wiktionary.org/wiki/in#Latin
- (+ accusative) into, to
- tabulam, f, s, /accusative/,
https://en.wiktionary.org/wiki/tabula#Latin
- json, ---,
- https://www.json.org/
- https://www.mediawiki.org/wiki/Help:Tabular_Data
"""
# sarcinae = ['todo']
# completum = []
# sarcina_index = []
linguae = {}
def __init__(
self,
codex: Type['Codex']
):
self.codex = codex
self.initiari()
def initiari(self):
"""initiarī
Trivia:
- initiārī, https://en.wiktionary.org/wiki/initio#Latin
"""
# self.linguae['#item+rem+i_lat+is_latn'] = 'la'
# self.linguae['#item+rem+i_eng+is_latn'] = 'en'
# self.linguae['#item+rem+i_por+is_latn'] = 'pt'
for _clavem, item in self.codex.dictionaria_linguarum.dictionaria_codex.items():
# raise ValueError(str(item))
if '#item+rem+i_qcc+is_zxxx+ix_wikilngm' in item and \
item['#item+rem+i_qcc+is_zxxx+ix_wikilngm']:
hashtag = '#item+rem' + item['#item+rem+i_qcc+is_zxxx+ix_hxla']
self.linguae[hashtag] = \
item['#item+rem+i_qcc+is_zxxx+ix_wikilngm']
# raise ValueError(str(self.linguae))
def _columnae(self) -> list:
"""_columnae /Column fields of the tabular format/@eng-Latn
Trivia:
- columnae, f, pl, /Nominative/, https://en.wiktionary.org/wiki/columna
Returns:
            list: column descriptors for the tabular-data schema
"""
res = []
res.append({'name': 'codicem', 'type': 'string',
'title': {
'la': 'Codicem',
'en': 'Numerordinatio local Code',
}})
res.append(
{'name': 'ix_wikiq', 'type': 'string',
'title': {
'la': 'Vicidata QID',
'en': 'Wikidata QID'
}})
res.append(
{'name': 'rem__i_mul__is_zyyy', 'type': 'string',
'title': {
'la': 'Linguae multiplīs (Scrīptum incognitō)',
'en': 'Multiple languages (unknown writing system)'
}})
clavem = self.codex.codex[0].keys()
# for item in clavem:
# pass
# res.append(
# {'name': 'item__rem__terminum', 'type': 'localized',
# 'title': {
# 'la': 'Lingua Latina (Abecedarium Latinum)',
# 'en': 'Lingua Anglica (Abecedarium Latinum)',
# 'pt': 'Lingua Lusitana (Abecedarium Latinum)',
# }})
res.append(
{'name': 'rem__terminum', 'type': 'localized',
'title': {
'la': 'Rēs linguālibus',
'en': 'Lingual thing',
# 'en': 'Lingua Anglica (Abecedarium Latinum)',
# 'pt': 'Lingua Lusitana (Abecedarium Latinum)',
}})
return res
def _linguae_ex_re(self, res) -> list:
"""linguae ex rē /Languages of the thing/@eng-Latn
Trivia:
- rēs, f, s, /Nominative/, https://en.wiktionary.org/wiki/res#Latin
- linguīs, f, pl, /Nominative/, https://en.wiktionary.org/wiki/columna
- linguae, f, pl, /Nominative/,
- ex (+ ablative), https://en.wiktionary.org/wiki/ex#Latin
- rē, f, s, /Ablative)
Returns:
            dict: mapping of Wikimedia language codes to lingual terms, or None if empty
"""
resultatum = {}
for clavem, item in res.items():
if clavem in self.linguae and item:
resultatum[self.linguae[clavem]] = item
return resultatum if resultatum else None
def dicitionaria_rebus(self) -> list:
"""_columnae /Column fields of the tabular format/@eng-Latn
Trivia:
- rēbus, f, pl, /Dative/, https://en.wiktionary.org/wiki/res#Latin
- dictiōnāria, n, pl, /Nominative/
https://en.wiktionary.org/wiki/dictionarium#Latin
Returns:
            list: data rows for the tabular format
"""
res = []
# res.append([
# '1',
# 'Q1',
# '/salvi mundi!/@lat-Latn',
# {
# 'la': 'testum est',
# 'en': 'testing testing',
# 'pt': 'teste teste',
# }
# ])
# res.append([
# '2',
# 'Q2',
# '/test/@lat-Latn',
# None
# ])
# res.append([
# '2_3',
# 'Q345',
# '/test test test/@lat-Latn',
# {
# 'pt': 'teste teste',
# }
# ])
# res.append([
# '33',
# 'Q33',
# '/teste em espanhol/@por-Latn',
# {
# 'es': 'teste en espanol',
# }
# ])
for item in self.codex.codex:
codicem_loci = item['#item+conceptum+codicem']
if codicem_loci.find('0_999') == 0:
continue
if codicem_loci.find('0_1603') == 0:
continue
if '#item+rem+i_mul+is_zyyy' in item \
and item['#item+rem+i_mul+is_zyyy']:
item_rem_mul = item['#item+rem+i_mul+is_zyyy']
elif '#item+rem+i_lat+is_latn' in item \
and item['#item+rem+i_lat+is_latn']:
item_rem_mul = item['#item+rem+i_lat+is_latn']
else:
item_rem_mul = None
if '#item+rem+i_qcc+is_zxxx+ix_wikiq' in item \
and item['#item+rem+i_qcc+is_zxxx+ix_wikiq']:
qcodicem = item['#item+rem+i_qcc+is_zxxx+ix_wikiq']
else:
qcodicem = None
# item_data =
res.append([
item['#item+conceptum+codicem'],
qcodicem,
item_rem_mul,
self._linguae_ex_re(item)
])
# item_data.append(item['#item+conceptum+codicem'])
return res
def imprimere_textum(self) -> list:
"""imprimere /print/@eng-Latn
Trivia:
- cōdex, m, s, (Nominative), https://en.wiktionary.org/wiki/codex#Latin
- imprimere, v, s, (), https://en.wiktionary.org/wiki/imprimo#Latin
Returns:
[list]:
"""
# numerum = self.codex.m1603_1_1__de_codex['#item+rem+i_qcc+is_zxxx+ix_n1603']
# nomen = self.codex.m1603_1_1__de_codex['#item+rem+i_mul+is_zyyy']
descriptionem = '[{0}] {1}'.format(
self.codex.m1603_1_1__de_codex['#item+rem+i_qcc+is_zxxx+ix_n1603'],
self.codex.m1603_1_1__de_codex['#item+rem+i_mul+is_zyyy']
)
scope_and_content = self.codex.quod_res('0_1603_1_7_2616_7535')
if scope_and_content:
codexfacto = qhxl(scope_and_content, [
'#item+rem+i_qcc+is_zxxx+ix_codexfacto'])
if codexfacto:
import textwrap
codexfacto = codexfacto.replace(
'\\n', '').replace('\\t', '').strip()
# Wikimedia Data Preview wants very short descriptions
codexfacto = textwrap.shorten(codexfacto, 350)
# if len(codexfacto) > 400:
# codexfacto = codexfacto[:400] + '...'
descriptionem = '{0}. {1}'.format(
descriptionem, codexfacto)
# 0_1603_1_7_2616_7535
resultatum = {
'license': "CC0-1.0",
'sources': "https://github.com/EticaAI/multilingual-lexicography "
"+ https://www.wikidata.org/wiki/Help:Multilingual",
'description': {
'la': descriptionem
},
'schema': {
'fields': self._columnae()
},
'data': self.dicitionaria_rebus()
}
# paginae = ['{"TODO": 11}']
import json
return json.dumps(
resultatum, indent=4, ensure_ascii=False, sort_keys=False)
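# Minimal sketch (not part of the original source) of the tabular-data JSON
# shape that imprimere_textum() emits: top-level license/sources/description
# keys, a schema whose fields come from _columnae(), and per-concept rows from
# dicitionaria_rebus(). The helper name and all values below are invented for
# illustration only.
def _exemplum_tabulam_json() -> str:
    import json
    exemplum = {
        'license': 'CC0-1.0',
        'sources': 'https://github.com/EticaAI/multilingual-lexicography',
        'description': {'la': '[1603:1:1] exemplum'},
        'schema': {
            'fields': [
                {'name': 'codicem', 'type': 'string'},
                {'name': 'ix_wikiq', 'type': 'string'},
                {'name': 'rem__i_mul__is_zyyy', 'type': 'string'},
                {'name': 'rem__terminum', 'type': 'localized'},
            ]
        },
        # one row: local code, Wikidata QID, multilingual label, localized terms
        'data': [
            ['1', 'Q1', '/salvi mundi!/@lat-Latn',
             {'la': 'salvi mundi', 'en': 'hello world'}],
        ],
    }
    return json.dumps(exemplum, indent=4, ensure_ascii=False, sort_keys=False)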
class CodexSarcinarumAdnexis:
"""Codex Sarcinarum Adnexīs
//Packages of attachments from Codex//
Trivia:
- cōdex, m, s, (Nominative) https://en.wiktionary.org/wiki/codex#Latin
- adnexīs, m/f/n, pl (Dative) https://en.wiktionary.org/wiki/adnexus#Latin
- annexīs, m/f/n, pl (Dative) https://en.wiktionary.org/wiki/annexus#Latin
    - sarcinārum, f, pl, (Genitive) https://en.wiktionary.org/wiki/sarcina
# >>> ca1603_25_1.quod_picturae()
"""
# sarcinae = ['todo']
completum = []
sarcina_index = []
sarcina = []
def __init__(
self,
de_codex: str,
):
self.de_codex = de_codex
self.initiari()
# print('completum', self.completum)
def initiari(self):
"""initiarī
Trivia:
- initiārī, https://en.wiktionary.org/wiki/initio#Latin
"""
basepath = numerordinatio_neo_separatum(self.de_codex, '/')
for root, dirnames, filenames in os.walk(basepath):
self.completum.extend(dirnames)
for item in dirnames:
sarcina_index = item.split('~').pop()
self.sarcina.append({
'index': sarcina_index,
'sarcina': item,
# @TODO: make this not as hardcoded as it is now
'meta': self._quod_meta(
root + '/' + item + '/0.nnx.tm.hxl.csv'),
'_meta': self._quod_meta_rem(
root + '/' + item + '/0.nnx.tm.hxl.csv')
})
# self.sarcina_index.append(index)
def _quod_meta(self, trivum):
meta = {
'ix_wikip2479': None, # license
'ix_wikiq': None,
'ix_wikip577': None, # /publication date/
'ix_wikip1476': None, # /title of published work.../
'ix_wikip110': None, # /illustrator/
'ix_wikip50': None, # /author/
'ix_wikip854': None, # /reference URL/
# '__': [],
}
        # @TODO: allow having more detailed metadata per individual item
# for now we're just using global values
if not os.path.exists(trivum):
return meta
with open(trivum) as csvfile:
reader = csv.DictReader(csvfile)
for lineam in reader:
for clavem in meta.keys():
ix_item = qhxl(lineam, clavem)
if ix_item:
meta[clavem] = ix_item
meta['titulum'] = self._quod_meta_titulum(meta)
return meta
def _quod_meta_rem(self, trivum):
resultatum = {}
meta = {
'ix_wikip2479': None, # license
'ix_wikiq': None,
'ix_wikip577': None, # /publication date/
'ix_wikip1476': None, # /title of published work.../
'ix_wikip110': None, # /illustrator/
'ix_wikip50': None, # /author/
'ix_wikip854': None, # /reference URL/
# '__': [],
}
        # @TODO: allow having more detailed metadata per individual item
# for now we're just using global values
if not os.path.exists(trivum):
return meta
with open(trivum) as csvfile:
reader = csv.DictReader(csvfile)
for lineam in reader:
est_meta = copy(meta)
for clavem in meta.keys():
ix_item = qhxl(lineam, clavem)
if ix_item:
est_meta[clavem] = ix_item
est_meta['titulum'] = self._quod_meta_titulum(est_meta)
if '#item+conceptum+codicem' in lineam:
resultatum[str(
lineam['#item+conceptum+codicem'])] = est_meta
return resultatum
def _quod_meta_titulum(self, meta):
nomen = ''
if meta['ix_wikip110']:
nomen += meta['ix_wikip110'] + ' '
if meta['ix_wikip577']:
nomen += meta['ix_wikip577'] + ' '
if meta['ix_wikip1476']:
nomen += meta['ix_wikip1476'] + ' '
if meta['ix_wikip2479']:
nomen += ' [' + meta['ix_wikip2479'] + ']'
return nomen
def quod_sarcinarum(self, index: str = None):
resultatum = []
for item in self.sarcina:
if index is not None:
if item['index'] == index or ('~' + item['index']) == index:
return item
else:
resultatum.append(item)
if index is not None:
raise ValueError('index [{0}] [{1}]'.format(index, self.sarcina))
return resultatum
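# Worked example (invented values, not from the source) of the attachment title
# that _quod_meta_titulum() assembles from the Wikidata-style fields read out of
# 0.nnx.tm.hxl.csv: illustrator (ix_wikip110), publication date (ix_wikip577),
# title of the published work (ix_wikip1476) and license (ix_wikip2479).
_EXEMPLUM_META = {
    'ix_wikip2479': 'CC0-1.0',             # license
    'ix_wikiq': None,
    'ix_wikip577': '1900',                 # publication date
    'ix_wikip1476': 'Tabulae Anatomicae',  # title of published work
    'ix_wikip110': 'Alice Artist',         # illustrator
    'ix_wikip50': None,                    # author
    'ix_wikip854': None,                   # reference URL
}
# Given this dict, _quod_meta_titulum() would return
# 'Alice Artist 1900 Tabulae Anatomicae  [CC0-1.0]' (the double space comes from
# the trailing space appended after the title).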
class DataApothecae:
"""Data apothēcae
Trivia:
- data, n, pl, nominativus, https://en.wiktionary.org/wiki/datum#Latin
- apothēcae, f, s, dativus, https://en.wiktionary.org/wiki/apotheca#Latin
"""
# No 1603 prefix
data_apothecae_ex: list = []
data_apothecae_ad: str = ''
data_apothecae_formato: str = None
resultatum: list = []
def __init__(
self,
data_apothecae_ex: list,
data_apothecae_ad: str = 'apothecae.datapackage.json',
data_apothecae_formato: str = None
):
self.data_apothecae_ex = data_apothecae_ex
self.data_apothecae_ad = data_apothecae_ad
if data_apothecae_formato:
self.data_apothecae_formato = data_apothecae_formato
else:
if data_apothecae_ad.endswith('.db') or \
data_apothecae_ad.endswith('.sqlite'):
self.data_apothecae_formato = 'sqlite'
elif data_apothecae_ad.endswith('.json'):
self.data_apothecae_formato = 'datapackage'
else:
raise ValueError('--data-apothecae-formato ?')
self.initiari()
def initiari(self):
"""initiarī
Trivia:
- initiārī, https://en.wiktionary.org/wiki/initio#Latin
"""
pass
def imprimere(self) -> list:
return self.resultatum
def praeparatio(self):
"""praeparātiō
Trivia:
- praeparātiō, s, f, Nom., https://en.wiktionary.org/wiki/praeparatio
"""
# codex = Codex('1603_1_1')
# libraria = LibrariaStatusQuo(
# codex,
# 'locale')
# libraria.imprimere_in_datapackage_sqlite()
codex = Codex('1603_1_1')
libraria = LibrariaStatusQuo(
codex,
'locale')
if self.data_apothecae_formato == 'datapackage':
# return self.praeparatio_datapackage(libraria)
return self.praeparatio_datapackage()
if self.data_apothecae_formato == 'sqlite':
# return self.praeparatio_sqlite(libraria)
return self.praeparatio_sqlite()
return True
def praeparatio_datapackage(
self,
temporarium: str = None):
"""praeparatio_datapackage
Args:
libraria (LibrariaStatusQuo):
"""
paginae = []
sarcina = {
'name': '1603',
'profile': 'data-package-catalog',
'resources': []
}
| |
if nodes[j]["n1"] >= p1.numnodes: # ring node
w = nodes[j]["n1"] - p1.numnodes
s = nodes[j]["n2"] - p2.numnodes
idxj1 = members1[w]
idxj2 = members2[s]
else:
w = nodes[j]["n1"]
s = nodes[j]["n2"]
idxj1 = [w]
idxj2 = [s]
if len(set(idxi1) & set(idxj1)) > 0 or \
len(set(idxi2) & set(idxj2)) > 0:
# do not connect node with itself
continue
is_connected = False
# compute distances in graphs
# for ring nodes find shortest distances
# note: loop is faster than numpy (a lot of singletons to check)
d1 = float("inf")
for p in idxi1:
for q in idxj1:
if p1.edges[p, q] > 0:
is_connected = True
if dist1[p, q] < d1:
d1 = dist1[p, q]
d2 = float("inf")
for p in idxi2:
for q in idxj2:
if p2.edges[p, q] > 0:
is_connected = True
if dist2[p, q] < d2:
d2 = dist2[p, q]
if math.fabs(d1 - d2) <= dist_tol:
if is_connected:
costs[i, j] = costs[j, i] = math.fabs(d1 - d2)
edges[i, j] = edges[j, i] = 1.0
return nodes, scores, edges, costs
def __BronKerbosch(edges, P=None, X=None, R=None, degrees=None, neigh=None):
"""Bron-Kerbosch algorithm for finding all maximal cliques in a graph
Args:
edges (numpy array): array representing edges in the graph
P (set of ints, optional): nodes to check
X (set of ints, optional): excluded nodes
R (set of ints, optional): current clique
degrees (numpy array, optional): nodes degrees
neigh (dict, optional): dictionary of sets of neighbours for all nodes
Returns:
list of sets: list of all maximal cliques
see:
<NAME>, <NAME>. "Algorithm 457: finding all cliques of an undirected
graph." Commun ACM. 1973;16(9):575–577.
<NAME>, <NAME>. "A note on the problem of reporting maximal
cliques." Theor Comput Sci. 2008;407(1–3):564–568.
"""
if P is None:
P = set(range(len(edges)))
if X is None:
X = set()
if R is None:
R = set()
if degrees is None:
degrees = np.sum(edges > 0, axis=1)
if neigh is None:
neigh = {}
for i in range(len(edges)):
neigh[i] = set(np.where(edges[i] > 0)[0])
if len(P) == 0 and len(X) == 0:
yield R
else:
candidates = np.array(list(P | X))
# try to select pivot which minimizes number of recursive calls
pivot = candidates[np.argmax(degrees[candidates])]
for v in (P - neigh[pivot]):
for clique in __BronKerbosch(edges, degrees=degrees,
R=(R | set([v])),
P=(P & neigh[v]),
X=(X & neigh[v]),
neigh=neigh):
yield clique
P = P - set([v])
X = X | set([v])
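# Minimal usage sketch (helper name and graph are invented, not part of the
# original module): run the generator on a tiny hand-made adjacency matrix to
# see which maximal cliques it reports. The 4-node graph below is a triangle
# {0, 1, 2} plus node 3 attached only to node 2, so the maximal cliques are
# {0, 1, 2} and {2, 3}.
def _example_bron_kerbosch():
    import numpy as np
    edges = np.zeros((4, 4))
    for a, b in [(0, 1), (0, 2), (1, 2), (2, 3)]:
        edges[a, b] = edges[b, a] = 1.0
    # sort for a deterministic result: [[0, 1, 2], [2, 3]]
    return sorted(sorted(clique) for clique in __BronKerbosch(edges))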
def __align_rings(p1, p2, n1, n2, idx1, idx2, mapping=None, dist1=None,
dist2=None, dist_tol=0):
"""Align rings from coarse-grained alignment.
Args:
p1, p2 (Pharmacophore): models to align
n1, n2 (list of numpy arrays): nodes to align. i-th array of both
lists contains parts of coarse-grained alignment (i.e. ring systems)
that should be mapped to each other.
idx1, idx2 (list of ints): lists of aligned nodes
mapping (numpy array, optional): array describing nodes compatibility
dist1, dist2 (numpy array, optional): arrays with distances between all
nodes in models
dist_tol (float, optional): accept distance differences below this
threshold
Returns:
float: unnormalized similarity score
float: edge length differences cost
2D list: list of two lists representing matched nodes
"""
assert len(n1) == len(n2), "wrong n1 or n2"
if mapping is None:
mapping = np.zeros((p1.numnodes, p2.numnodes))
for i in range(p1.numnodes):
for j in range(p2.numnodes):
weighted_freq, _ = compare_nodes(p1.nodes[i], p2.nodes[j])
if weighted_freq > 0.0:
mapping[i][j] = weighted_freq
if dist1 is None:
dist1 = distances(p1)
dist1[p1.edges > 0] = p1.edges[p1.edges > 0]
if dist2 is None:
dist2 = distances(p2)
dist2[p2.edges > 0] = p2.edges[p2.edges > 0]
# create modular product of graph using alignment as constraints
nodes = []
scores = []
assert len(idx1) == len(idx2), "unequal subgraphs sizes"
old_len = len(idx1)
for i in range(old_len):
weighted_freq = mapping[idx1[i], idx2[i]]
assert weighted_freq > 0, "wrong alignment given"
nodes.append({"n1": idx1[i], "n2": idx2[i]})
scores.append(weighted_freq)
for i in range(len(n1)):
possible_matches = np.where(mapping[n1[i], :][:, n2[i]] > 0)
pairs1 = n1[i][possible_matches[0]]
pairs2 = n2[i][possible_matches[1]]
if len(idx1) > 0:
# find pairs compatible with given alignment
d1 = dist1[pairs1, :][:, idx1]
d2 = dist2[pairs2, :][:, idx2]
compatible = np.where(np.all(np.abs(d1 - d2) <= dist_tol,
axis=1))[0]
else:
# empty alignment given, accept everything
compatible = np.array(list(range(len(pairs1))))
for i in compatible:
weighted_freq = mapping[pairs1[i], pairs2[i]]
assert weighted_freq > 0, "wrong possible matches"
nodes.append({"n1": pairs1[i], "n2": pairs2[i]})
scores.append(weighted_freq)
scores = np.array(scores)
n = len(nodes)
edges = np.zeros((n, n))
costs = np.zeros((n, n))
for i in range(n):
for j in range(i):
if nodes[i]["n1"] == nodes[j]["n1"] or \
nodes[i]["n2"] == nodes[j]["n2"]:
continue
is_connected = False
if p1.edges[nodes[i]["n1"], nodes[j]["n1"]] > 0:
is_connected = True
d1 = dist1[nodes[i]["n1"], nodes[j]["n1"]]
if p2.edges[nodes[i]["n2"], nodes[j]["n2"]] > 0:
is_connected = True
d2 = dist2[nodes[i]["n2"], nodes[j]["n2"]]
if math.fabs(d1 - d2) <= dist_tol:
if is_connected:
costs[i, j] = costs[j, i] = math.fabs(d1 - d2)
edges[i, j] = edges[j, i] = 1.0
alignment = list(range(old_len))
score = np.sum(scores[alignment])
cost = np.sum(costs[alignment, :][:, alignment]) / 2
scorecost = score-cost
for clique in __BronKerbosch(edges):
clique = list(clique)
s = np.sum(scores[clique])
c = np.sum(costs[clique, :][:, clique]) / 2
if (s - c > scorecost) or (s - c == scorecost and s > score):
idx1 = []
idx2 = []
n1 = []
n2 = []
score = s
cost = c
scorecost = s - c
for pair in clique:
idx1.append(nodes[pair]["n1"])
idx2.append(nodes[pair]["n2"])
return score, cost, [idx1, idx2]
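# Illustrative sketch (invented distances, not from the original source) of the
# "modular product" / association graph built above: every compatible node
# pairing (i from model 1, j from model 2) becomes one node, and two pairings
# are connected when they do not reuse a node and the distances they imply in
# both models agree within dist_tol. Maximal cliques of this graph are then
# mutually consistent common mappings.
def _example_association_graph(dist_tol=1.0):
    import itertools
    import numpy as np
    dist1 = np.array([[0.0, 2.0], [2.0, 0.0]])           # model 1: nodes 2.0 apart
    dist2 = np.array([[0.0, 2.5], [2.5, 0.0]])           # model 2: nodes 2.5 apart
    pairs = list(itertools.product(range(2), range(2)))  # candidate pairings (i, j)
    edges = np.zeros((len(pairs), len(pairs)))
    for a, b in itertools.combinations(range(len(pairs)), 2):
        (i1, j1), (i2, j2) = pairs[a], pairs[b]
        if i1 == i2 or j1 == j2:
            continue  # a node may only be matched once
        if abs(dist1[i1, i2] - dist2[j1, j2]) <= dist_tol:
            edges[a, b] = edges[b, a] = 1.0
    return pairs, edges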
def __add_neighbours(p1, p2, n1, n2, idx1, idx2, mapping=None, dist1=None,
dist2=None, dist_tol=0, pairs=None):
"""Try to extend alignment by adding nodes connected to already aligned
parts of pharmacophores. In most cases there is nothing to add, but
    sometimes differences between scaffolds of the molecules (rings vs
    linear fragments) result in incomplete alignment after ring decompression.
    Also, pairs of neighbours incompatible with global constraints used
    in coarse-grained alignment (to speed up the procedure) will be added here.
Args:
p1, p2 (Pharmacophore): models to align
n1, n2 (list of ints): nodes to align
idx1, idx2 (list of ints): lists of aligned nodes
mapping (numpy array, optional): array describing nodes compatibility
Returns:
float: unnormalized similarity score
float: edge length differences cost
2D list: list of two lists representing matched nodes
"""
if mapping is None:
mapping = np.zeros((p1.numnodes, p2.numnodes))
for i in range(p1.numnodes):
for j in range(p2.numnodes):
weighted_freq, _ = compare_nodes(p1.nodes[i], p2.nodes[j])
if weighted_freq > 0.0:
mapping[i][j] = weighted_freq
if dist1 is None:
dist1 = distances(p1)
dist1[p1.edges > 0] = p1.edges[p1.edges > 0]
if dist2 is None:
dist2 = distances(p2)
dist2[p2.edges > 0] = p2.edges[p2.edges > 0]
if pairs is None:
pairs = []
def is_compatible(pair1, pair2):
        if (pair1[0] == pair2[0]) or (pair1[1] == pair2[1]):
return False
if (p1.edges[pair1[0], pair2[0]] != 0) and \
(p2.edges[pair1[1], pair2[1]] != 0):
dist_diff = math.fabs((p1.edges[pair1[0], pair2[0]] -
p2.edges[pair1[1], pair2[1]]))
if dist_diff <= dist_tol:
return True
else:
return False
else:
return True
for i1, i2 in zip(idx1, idx2):
neighbours1 = []
for node in reversed(np.where(p1.edges[i1, n1] > 0)[0]):
neighbours1.append(n1[node])
n1.remove(n1[node])
neighbours2 = []
for node in reversed(np.where(p2.edges[i2, n2] > 0)[0]):
neighbours2.append(n2[node])
n2.remove(n2[node])
# find compatible neighbours
for neigh1 in neighbours1:
for neigh2 in neighbours2:
if (mapping[neigh1, neigh2] > 0):
dist_diff = np.abs(dist1[idx1, neigh1] -
dist2[idx2, neigh2])
# there is edge between nodes in at least one of phars
connected = np.where(p1.edges[idx1, neigh1] +
p2.edges[idx2, neigh2])
max_cost = np.max(dist_diff[connected])
if (max_cost <= dist_tol):
pairs.append((neigh1, neigh2))
score = np.sum(mapping[idx1, idx2])
dist_diff = np.abs(dist1[idx1][:, idx1] - dist2[idx2][:, idx2])
connected = np.where(p1.edges[idx1][:, idx1] + p2.edges[idx2][:, idx2])
if len(connected[0]) > 0:
assert np.max(dist_diff[connected]) <= dist_tol, "cost too high"
cost = np.sum(dist_diff[connected]) / 2.0
aln = [idx1[:], idx2[:]]
for i, pair in enumerate(pairs):
compatible = [p for p in pairs[i+1:] if is_compatible(pair, p)]
s, c, (aln1, aln2) = __add_neighbours(p1, p2, n1[:], n2[:],
idx1+[pair[0]], idx2+[pair[1]],
mapping, dist1, dist2, dist_tol,
compatible)
if (s - c > score - cost) or (s - c == score - cost and s > score):
score = s
cost = c
aln = [aln1[:], aln2[:]]
return score, cost, aln
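# Standalone restatement (a sketch, not the original implementation) of the
# pairwise compatibility rule that is_compatible() applies while extending an
# alignment: two candidate pairs may coexist only if they do not reuse a node,
# and, when the two matched nodes are bonded in both pharmacophores, the bond
# lengths differ by at most dist_tol. Parameter names are invented.
def _example_pair_compatibility(edge_len1, edge_len2, shares_node, dist_tol=0.5):
    # edge_len1 / edge_len2: bond length between the two matched nodes in each
    # model, or None when the nodes are not connected in that model.
    if shares_node:
        return False
    if edge_len1 is not None and edge_len2 is not None:
        return abs(edge_len1 - edge_len2) <= dist_tol
    return True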
def map_pharmacophores(p1, p2, dist_tol=0.0, coarse_grained=True,
add_neighbours=False):
"""Find best common substructure match for two Pharmacophores.
Args:
        p1, p2 (Pharmacophore): models
#standard
import argparse
import datetime
import os
os.environ['TMPDIR'] = '/var/tmp'
from operator import itemgetter, attrgetter
activate_this = '/gluster/gluster1/bsm_data_emulator/bsmde_python/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
import pandas as pd
#local
from GTFileReader2 import Trajectories
from readlinks import read_link_file,read_endpoints
from TCACore import Timer
SHOCKWAVE_START_ACCELERATION = -.25 * 9.8
SHOCKWAVE_HEADWAY_DISTANCE = 50.0 #ft
QUEUE_SPEED = 10
FREE_FLOW = 109.361
link_positions = {}
t = Timer(enabled=True)
def run_gt_shockwave(trj, bnlinks, df_onRoute, unknown_queue_output_name):
unknown_queue_output = []
# Shockwave Step 1: For each instantaneous time period, t, in the simulation duration do steps 2-6
shockwave_list = []
for t, veh_list in trj.read():
remove_list = []
for start in shockwave_list:
if start[1]["tp"] + 2 < t:
if start[2] is not None:
shockwave_length = (start[2]['x'] - start[0]['x']) + start[2]['Length']
unknown_queue_output.append([str(start[0]['tp']), start[4], '1', str(start[0]['x']), str(shockwave_length), str(start[2]['tp']), str(start[2]['x']), str(start[5])])
remove_list.append(shockwave_list.index(start))
else:
remove_list.append(shockwave_list.index(start))
remove_list = sorted(remove_list, reverse=True)
for index in remove_list:
shockwave_list.pop(index)
# Shockwave Step 2: For each roadway segment, L, in the set of all roadway segments do steps 3-6
for roadway in bnlinks:
# Shockwave Step 3: For each lane, l, on the roadway segment, L, do steps 4-6
for lane_group in bnlinks[roadway].keys():
# Shockwave #4: Identify IDs of all vehicles that are on link L at time t in lane l. Let I be the array of identified vehicle IDs.
I = get_roadway_vehicles(roadway, lane_group, bnlinks, veh_list)
I = sorted(I, key=lambda k: k['x'])
for i in I:
start_flag = True
for start in shockwave_list:
if roadway == start[4]:
if (i['x'] - start[1]['x']) <= SHOCKWAVE_HEADWAY_DISTANCE and (i['x'] - start[1]['x']) > 0:
if start[5] == '4' and i['v'] <= QUEUE_SPEED:
start[1] = i
start[2] = i
start_flag = False
                                elif start[5] == '3' and ((QUEUE_SPEED < i['v'] <= (1/3.0) * FREE_FLOW and (i['a'] < 0))):
start[1] = i
start[2] = i
start_flag = False
                                elif (start[5] == '2' or start[5] == '1') and((i['v'] > (1/3.0) * FREE_FLOW and (i['a'] < 0))):
start[1] = i
start[2] = i
start_flag = False
if start_flag:
if i['v'] <= QUEUE_SPEED:
i_s = i # ID of first vehicle in shockwave
i_e = None # ID of last vehicle in shockwave
i_f = i # ID of follower vehicle
i_n = None
shockwave_list.append([i_s, i_f, i_e, i_n, roadway, '4'])
                        elif (QUEUE_SPEED < i['v'] <= (1/3.0) * FREE_FLOW and (i['a'] <= SHOCKWAVE_START_ACCELERATION)):
i_s = i # ID of first vehicle in shockwave
i_e = None # ID of last vehicle in shockwave
i_f = i # ID of follower vehicle
i_n = None
shockwave_list.append([i_s, i_f, i_e, i_n, roadway, '3'])
                        elif (i['v'] > (1/3.0) * FREE_FLOW and (i['a'] <= SHOCKWAVE_START_ACCELERATION)):
i_s = i # ID of first vehicle in shockwave
i_e = None # ID of last vehicle in shockwave
i_f = i # ID of follower vehicle
i_n = None
shockwave_list.append([i_s, i_f, i_e, i_n, roadway, '2'])
                        elif (i['v'] > (1/3.0) * FREE_FLOW and (i['a'] < 0)):
i_s = i # ID of first vehicle in shockwave
i_e = None # ID of last vehicle in shockwave
i_f = i # ID of follower vehicle
i_n = None
shockwave_list.append([i_s, i_f, i_e, i_n, roadway, '1'])
for start in shockwave_list:
if start[2] is not None:
shockwave_length = (start[2]['x'] - start[0]['x']) + start[2]['Length']
unknown_queue_output.append([str(start[0]['tp']), start[4], '1', str(start[0]['x']), str(shockwave_length), str(start[2]['tp']), str(start[2]['x']), str(start[5])])
unknown_queue_output = sorted(unknown_queue_output, key=lambda k: float(k[0]))
consolidated_output = []
for shockwave in unknown_queue_output:
unconsolidated_flag = True
for i in xrange(len(consolidated_output)):
if shockwave[1] == consolidated_output[i][1] \
and shockwave[7] == consolidated_output[i][7] \
and float(shockwave[0]) - 30 <= float(consolidated_output[i][0]) \
and (float(shockwave[3]) >= float(consolidated_output[i][3]) and float(shockwave[3]) <= float(consolidated_output[i][6]) \
and float(shockwave[6]) >= float(consolidated_output[i][3]) and float(shockwave[6]) <= float(consolidated_output[i][6])):
unconsolidated_flag = False
break
if unconsolidated_flag:
consolidated_output.append(shockwave)
Q = 2200.0
J = 0.1
T = 1.0
    FREE_FLOW_KM = 120.0
for shockwave in consolidated_output:
downstream_speed = get_downstream_speed(df_onRoute,shockwave[3], shockwave[0], shockwave[1])
upstream_speed = get_upstream_speed(df_onRoute,shockwave[6], shockwave[5], shockwave[1])
if upstream_speed != None and downstream_speed != None:
downstream_speed = downstream_speed * 1.60934
upstream_speed = upstream_speed * 1.60934
if downstream_speed == 0:
downstream_speed = .001
if upstream_speed == 0:
upstream_speed = .001
downstream_q = Q * (1 + ((((1/downstream_speed) - (1/FREE_FLOW_KM))**2 - ((.25**2 * 8 * J * T)/Q))/(.25 * 2 * T * ((1/downstream_speed) - (1/FREE_FLOW_KM)))))
upstream_q = Q * (1 + ((((1/upstream_speed) - (1/FREE_FLOW_KM))**2 - ((.25**2 * 8 * J * T)/Q))/(.25 * 2 * T * ((1/upstream_speed) - (1/FREE_FLOW_KM)))))
try:
propogation = (downstream_q - upstream_q)/((downstream_q/downstream_speed) - (upstream_q/upstream_speed))
except ZeroDivisionError:
propogation = 0
else:
propogation = 'NA'
if propogation != 'NA':
propogation = propogation * 0.621371
shockwave.append(str(propogation))
shockwave_count = float(shockwave[4])/20
shockwave.append(str(int(shockwave_count)))
if shockwave_count >= 5 and shockwave[7] == '4':
shockwave.append('Y')
else:
shockwave.append('N')
consolidated_output = sorted(consolidated_output, key=lambda k: float(k[0]))
with open(unknown_queue_output_name + "_consolidated_output.csv", "wb") as out_f:
out_f.write('time,link,lane_group,start_location_x, shockwave_length, end_time, end_x, shockwave_type, shockwave_propogation_speed, shockwave_count, significant_shockwave\n')
write_output(out_f, consolidated_output)
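# Standalone sketch (not part of the original script) of the propagation-speed
# estimate used at the end of run_gt_shockwave(): flow q is derived from speed v
# with the same Van Aerde-style relation (capacity Q, calibration terms J and T,
# free-flow speed in km/h), and the shockwave speed is the flow difference over
# the density difference, w = (q_d - q_u) / (q_d/v_d - q_u/v_u). Speeds are
# assumed to be km/h here; the caller above converts the result back to mph.
def _example_shockwave_speed(downstream_speed, upstream_speed,
                             Q=2200.0, J=0.1, T=1.0, free_flow=120.0):
    def flow(v):
        return Q * (1 + ((((1/v) - (1/free_flow))**2 - ((.25**2 * 8 * J * T)/Q))
                         / (.25 * 2 * T * ((1/v) - (1/free_flow)))))
    q_down = flow(downstream_speed)
    q_up = flow(upstream_speed)
    try:
        return (q_down - q_up) / ((q_down/downstream_speed) - (q_up/upstream_speed))
    except ZeroDivisionError:
        return 0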
def write_output(out_f, output):
if len(output) > 0:
print 'Writing to file'
for line in output:
out_f.write(','.join(line) + '\n')
def distance_between(origin_x, origin_y, destination_x, destination_y):
distance = ((origin_x - destination_x)**2 + (origin_y - destination_y)**2)**.5
return distance
def getLinkPosition(link):
global link_positions
for l in link_positions.keys():
if link == l:
x = link_positions[link]["x"]
y = link_positions[link]["y"]
return x,y
def get_roadway_vehicles(roadway, lane_group, bnlinks, veh_list):
I = []
for link_position, link_data in enumerate(bnlinks[roadway][lane_group]):
link_num, link_lane, link_len = link_data[0], link_data[1], link_data[2]
dis_stop = 0
# If not the first link in the roadway, find the summed distance of all prior links
if link_position != 0:
for i in reversed(range(link_position)):
dis_stop += bnlinks[roadway][lane_group][i][2]
for veh in veh_list:
if (veh['Link'] == link_num):
#change distance from end of link to distance to stop point
link_x, link_y = getLinkPosition(link_num)
veh['x'] = distance_between(link_x, link_y,veh['World_x'],veh['World_y']) + dis_stop
I.append(veh)
return I
def get_downstream_speed(df_onRoute, start_x, start_time, roadway, distance = 50, time = 0):
messages = []
df_time = df_bsms[abs(df_bsms['localtime'] - float(start_time)) <= time]
df_dist = df_time[(float(start_x) - df_time[roadway+'x'] > 0) & (float(start_x) - df_time[roadway+'x'] <= distance)]
messages = list(df_dist.itertuples())
total_speed = 0
n = 0
for message in messages:
total_speed += message[1]
n += 1
if n > 0:
average_speed = total_speed/n
return average_speed
if distance > 1000:
return None
if time == 0:
df_time = df_bsms[abs(df_bsms['localtime'] - float(start_time)) <= time]
df_dist = df_time[(float(start_x) - df_time[roadway+'x'] > 0) & (float(start_x) - df_time[roadway+'x'] <= distance)]
messages = list(df_dist.itertuples())
total_speed = 0
n = 0
for message in messages:
total_speed += message[1]
n += 1
if n > 0:
average_speed = total_speed/n
return average_speed
return get_downstream_speed(df_onRoute, start_x, start_time, roadway, distance + 50, time + 2)
def get_upstream_speed(df_onRoute, end_x, end_time, roadway, distance = 50, time = 0):
messages = []
df_time = df_bsms[abs(df_bsms['localtime'] - float(end_time)) <= time]
df_dist = df_time[(df_time[roadway+'x'] - float(end_x) > 0) & (df_time[roadway+'x'] - float(end_x) <= distance)]
messages = list(df_dist.itertuples())
total_speed = 0
n = 0
for message in messages:
total_speed += message[1]
n += 1
if n > 0:
average_speed = total_speed/n
return average_speed
if distance > 1000:
return None
if time == 0:
df_time = df_bsms[abs(df_bsms['localtime'] - float(end_time)) <= time]
df_dist = df_time[(df_time[roadway+'x'] - float(end_x) > 0) & (df_time[roadway+'x'] - float(end_x) <= distance)]
messages = list(df_dist.itertuples())
total_speed = 0
n = 0
for message in messages:
total_speed += message[1]
n += 1
if n > 0:
average_speed = total_speed/n
return average_speed
return get_upstream_speed(df_onRoute, end_x, end_time, roadway, distance + 50, time + 2)
def dis_stop(bnlinks, roadway, lane_group):
link_distance = {}
for link_position, link_data in enumerate(bnlinks[roadway][lane_group]):
link_num, link_lane, link_len = link_data[0], link_data[1], link_data[2]
dis_stop = 0
# If not the first link in the roadway, find the summed distance of all prior links
if link_position != 0:
for i in reversed(range(link_position)):
dis_stop += bnlinks[roadway][lane_group][i][2]
link_distance[link_num] = dis_stop
return link_distance
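# Worked example (invented link data, not from the source) of the cumulative
# distance-to-stop that dis_stop() and get_roadway_vehicles() compute: each link
# entry is (link_number, lane, length_ft) and a link's offset is the summed
# length of the links that precede it on the roadway.
_EXAMPLE_BNLINKS = {'EB': {'1': [(101, 1, 500.0), (102, 1, 300.0), (103, 1, 200.0)]}}
# dis_stop(_EXAMPLE_BNLINKS, 'EB', '1') -> {101: 0, 102: 500.0, 103: 800.0}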
def file_fzp_start(filename):
"""
finds the start of the fzp data
:param filename: string of the fzp file name
:return: number of lines to skip at top of file
"""
with open(filename) as in_f:
c= 0
cols = []
#find start of VISSIM data
line = in_f.readline()
while 'VehNr;' not in line:
line = in_f.readline()
cols = [x.strip() for x in line.split(';')][:-1]
c +=1
return c
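# Usage sketch (hypothetical file name; the script's actual loading code is not
# shown in this excerpt): the line count returned by file_fzp_start() is meant
# to be passed to pandas as skiprows so that the VISSIM preamble above the
# 'VehNr;' header row is skipped when the .fzp trajectory file is read.
def _example_load_fzp(fzp_filename):
    skip = file_fzp_start(fzp_filename)
    return pd.read_csv(fzp_filename, sep=';', skiprows=skip)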
t.start('main')
parser = argparse.ArgumentParser(description='GT program for reading in fzp files and producing Shockwave values')
parser.add_argument('trj_filename')
parser.add_argument('link_filename', help = 'CSV
    # in my coordinate system
return pfnnPosInModelSpace
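# Illustrative helper (not part of the original script): the 2x2 rotation built
# below from the agent's first velocity has rows (y/d, z/d) and (-z/d, y/d) for
# d = sqrt(y^2 + z^2); applied to the direction (y, z) it yields (d, 0), which
# is how the PFNN pose frame gets aligned with the initial heading.
def _example_rotation_from_direction(y, z):
    import math
    d = math.sqrt(y * y + z * z)
    if d == 0:
        return [[1.0, 0.0], [0.0, 1.0]]
    return [[y / d, z / d], [-z / d, y / d]]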
################################################################### Main starts here!!!!!
P = np.array([[0, -1, 0], [0, 0, -1], [1, 0, 0]])
init_names= {-1: "training", 0: "training", 1: "On pedestrian", 3: "On pavement", 2: "By car", 4: "Random",5: "car_env", 6: "Near pedestrian",7:"pedestrian environment", 9:"average"}
pos_y=-128 / 2
labels_to_use, name_movie, target_dir, num_measures, semantic_channels_separated, in_2D=read_settings_file(path)
# Find CARLA dataset
filename_list = {}
# Get files to run on.
ending_local = "test_*"
for filepath in glob.glob(env_path + ending_local):
parts = os.path.basename(filepath).split('_')
pos = int(parts[-1])
filename_list[pos] = filepath
# find statistics files.
find_path=stat_path+'*'+timestamp+"*"
match=find_path+"*.npy"
files=glob.glob(match)
test_files, reconstructions_test, poses_test = sort_files_eval(files)
#filenames_itr_cars, test_files_cars, filenames_itr_people,test_files_people, filenames_itr, test_files, reconstructions_test, iterations, iterations_cars, special_cases
#
# test_files=[(0, os.path.join(path, 'visualize_2D_goal_agent_2019-11-13-22-28-15.914025_test_0_-64_0.npy'), 0),
# (0, os.path.join(path,'visualize_2D_goal_agent_2019-11-13-22-28-15.914025_test_0_-64_1.npy'), 1),
# (0, os.path.join(path,'visualize_2D_goal_agent_2019-11-13-22-28-15.914025_test_0_-64_2.npy'), 2),
# (0, os.path.join(path,'visualize_2D_goal_agent_2019-11-13-22-28-15.914025_test_0_-64_3.npy'), 3)]
#test_files=[ (0, os.path.join(path,'visualize_2D_goal_agent_2019-11-13-22-28-15.914025_test_0_-64_2.npy'), 2)]
print(("Test files: "+str(test_files)))
test_points = {0: 0}
epoch = 0
saved_files_counter = 0
csv_rows = []
if len(test_files)>0:
scenes =get_scenes(test_files,test_points)
for scene in scenes:
filepath_npy = scene[1]
#filepath_npy = os.path.join(path, "visualize_2D_goal_agent_2019-11-13-22-28-15.914025_test_0_-64_2.npy")
epoch = scene[2]
cur_stat = scene[0]
#file_nbrs = [0, 8, 24, 36] # Which Carla initialization point to look at. Depends on the above file.
#if test_data:
nbr = filepath_npy.split('_')[-1]
nbr = nbr[:-len('.npy')]
nbr = convertFileNbrToCarlaNbr(nbr)
# epoch = 13
# cur_stat = scene[0]
agent_pos = cur_stat[:, 0:3] # Agent's trajectory
agent_vel = cur_stat[:, 3:6] # Actions taken by agent
agent_action = cur_stat[ :, 6] # Actions taken by agent
agent_probabilities = cur_stat[:, 7:34] # Probabilities of different actions (output of RL agent)
agent_reward = cur_stat[:, 34] # Agent's reward
agent_measures = cur_stat[:, 38:] # Different measures of agent
agent_goals = cur_stat[3:5, -1] # Goal position of agent
reached_goal = agent_measures[:,13] # Goal reached by agent
yaw_mapping=[-0.75, -0.5,-0.25,-1,0,0.25,0.75,0.5,0] # yaw for different actions.
poses_stats=scene[3]
agent_poses=poses_stats[ :, :93]
agent_pfnn_pos=poses_stats[ :, 93:95]
agent_pfnn_itr=poses_stats[ :, 95]
agent_pfnn_avg_speed=poses_stats[ :, 96]
# print filepath_npy
print (str(nbr) +" "+str(epoch))
print (filename_list[int(nbr)] + " " + str(nbr))
if int(nbr)==TARGET_NBR and int(epoch) == TARGET_EPOCH_ID:
filepath = filename_list[int(nbr)]
basename = os.path.basename(filepath)
print(("Filepath: "+str(filepath_npy)))
print (epoch)
# Load car and pedestrian positions
cars_path = os.path.join(filepath, 'cars.p')
cars_dict = pickle.load(open(cars_path, "rb"), encoding='latin1')
print (cars_path)
people_path = os.path.join(filepath, 'people.p')
people_dict = pickle.load(open(people_path, "rb"), encoding='latin1')
print (people_path)
# Load how my coordinate system is related to CARLA coordinate system.
centeringPath = os.path.join(filepath, "centering.p")
centering = pickle.load(open(centeringPath, "rb"), encoding='latin1')
print(centeringPath)
startPositionsPath = os.path.join(path, "start_positions.p")
start_poses = pickle.load(open(startPositionsPath, "rb"), encoding='latin1')
print (startPositionsPath)
R, middle, frames, middle_2,C, middle_w = get_camera_matrix(filepath)
#grid_axis_x=[]
# for y in range(128):
# p=[]
####################################################################
# Get pose coordinate system rotation for PFNN network. The first action is where the z axis is rotated.
rotation_matrix = np.zeros((2, 2), np.float)
inverse_rotation_matrix = np.zeros((2, 2), np.float)
init_pos=agent_pos[0,:].copy()
y = (agent_vel[0,1])
z = (agent_vel[0,1])
d = np.sqrt(y ** 2 + z ** 2)
if d > 0:
# Rotation matrix from my position to PFNN
rotation_matrix[0, 0] = y / d
rotation_matrix[0, 1] = z / d
rotation_matrix[1, 1] = y / d
rotation_matrix[1, 0] = -z / d
# Rotation matrix from PFNN to my coordinate system
'''
inverse_rotation_matrix[0, 0] = y / d
inverse_rotation_matrix[0, 1] = -z / d
inverse_rotation_matrix[1, 1] = y / d
inverse_rotation_matrix[1, 0] = z / d
'''
'''
inverse_rotation_matrix[0, 0] = 1.0
inverse_rotation_matrix[0, 1] = 0.0
inverse_rotation_matrix[1, 1] = 1.0
inverse_rotation_matrix[1, 0] = 0.0
'''
inverse_rotation_matrix = rotation_matrix
print ("Velocity "+str(agent_vel[0,:]))
print (rotation_matrix)
print ("Agent init positions: "+str(init_pos)+" "+str(agent_pos[1,:]))
################################################################
agent_positions=[]
agent_pose_perFrame=[]
agent_pose_perFrame_inPFNNLocal = []
agent_yaw=[]
previous_pos=[0,0]
agent_goal = agentPosFromModelToCARLA(np.array([0, agent_goals[0], agent_goals[1]]), pos_y, centering, R, P, middle, 0) # The goal point in CARLA coordinate system
directions=[]
directions_angle=[]
for frame in range(agent_pos.shape[0]):
if reached_goal[max(frame-15,0)]==0:
agentPosThisFrame_modelSpace = agent_pos[frame, :]
####################################
# Convert PFNN position into CARLA position
pfnn_itr = int(agent_pfnn_itr[frame])
#print pfnn_itr
# Get agent pose in PFNN coordinate system
pfnn_pos = agent_pfnn_pos[pfnn_itr, :].copy()
pfnn_pose = agent_poses[pfnn_itr, :].copy()
# HACK TO SAVE POSE - NOT NEEDED
pfnn_pose_copy = pfnn_pose.copy()
pfnn_offset_X = pfnn_pos[0]
pfnn_offset_Z = pfnn_pos[1]
for i in range(31):
pfnn_pose_copy[i*3 + 0] -= pfnn_offset_X
pfnn_pose_copy[i*3 + 2] -= pfnn_offset_Z
agent_pose_perFrame_inPFNNLocal.append(pfnn_pose_copy)
#END HACK
# Convert PFNN pos to model space
pfnnPosInModelSpace = agentPosFromPFNNToModel(np.array([pfnn_pos[0], 0.0, pfnn_pos[1]]), inverse_rotation_matrix, init_pos)
# Convert PFNN pos from model space to CARLA
pfnnPosInCARLASpace = agentPosFromModelToCARLA(pfnnPosInModelSpace, pos_y, centering, R, P, middle, frame)
# Because there is a gap on Z axis between the spaces
pfnn_Z_offsetToCarla = PEDESTRIAN_OFFSET_Z - pfnnPosInCARLASpace.item(2)
# Convert PFNN bones positions to CARLA space
for boneId in range(31):
bonePos_pfnn = pfnn_pose[boneId*3 + 0 : boneId*3 + 3]
originalBoneZ = pfnn_pose[boneId*3 + 1] # 1 because source has Y as UP axis
bonePos_model = agentPosFromPFNNToModel(bonePos_pfnn, inverse_rotation_matrix, init_pos)
bonePos_carla = agentPosFromModelToCARLA(bonePos_model, pos_y, centering, R, P, middle, frame)
for i in range(3):
pfnn_pose[boneId*3 + i] = bonePos_carla.item(i)
pfnn_pose[boneId*3 + 2] += pfnn_Z_offsetToCarla + (originalBoneZ * 0.01) # Because in centimeters
##################################################################################################
p = agentPosFromModelToCARLA(agentPosThisFrame_modelSpace, pos_y, centering, R, P, middle, frame)
# Find agent's next position in CARLA coordinate system
agent_pose_perFrame.append(pfnn_pose)
agent_positions.append(p.copy())
# What direction should the agent be pointing in?
if frame==0:
agent_yaw.append(0)
agent_yaw.append(0)
else:
directions.append([p.item(0)-previous_pos[0],p.item(1)-previous_pos[1],0] )
directions_angle.append(math.atan2(p.item(1)-previous_pos[1],p.item(0)-previous_pos[0] )*180/math.pi)
if False:
#len(agent_yaw)>4 and np.sign(directions[-1])==np.sign(directions[-2]) and np.sign(directions[-2])==np.sign(directions[-3]) and np.sign(directions[-3])==np.sign(directions[-4]):
# dir=math.atan2(p[1]-previous_pos[1],p[0]-previous_pos[0] )*180/math.pi
# if len(agent_yaw)>2 and np.sign(dir)==np.sign(agent_yaw[-2]) and np.sign(-dir)==np.sign(agent_yaw[-1]):
# agent_yaw[-1]=np.mean([dir,agent_yaw[-2]]) # -90)
agent_yaw.append(math.atan2(p[1]-previous_pos[1],p[0]-previous_pos[0] )*180/math.pi)#math.atan2(p[1]-previous_pos[1],p[0]-previous_pos[0] )*180/math.pi)#-90)
else:
dir = math.atan2(agent_goal[1]-p[1], agent_goal[0]-p[0]) * 180 / math.pi
# if len(agent_yaw) > 2 and np.sign(dir) == np.sign(agent_yaw[-2]) and np.sign(
# -dir) == np.sign(agent_yaw[-1]):
# agent_yaw[-1] = np.mean([dir, agent_yaw[-2]]) # -90)
agent_yaw.append(math.atan2(agent_goal[1]-p[1], agent_goal[0]-p[0]) * 180 / math.pi) # -90)
#print str(p[0]-previous_pos[0])+" "+str(p[1]-previous_pos[1])+" "+str(agent_yaw[-1])
previous_pos=[p.item(0),p.item(1)]
#agent_yaw.append(yaw_mapping[int(agent_action[frame])]*180)#math.pi)
# HACK TO SAVE POSES - REMOVE
posesToSave = [0, 12, 16, 19, 24, 41, 35, 94, 123, 141]
for poseId in posesToSave:
agentPoseToSave = agent_pose_perFrame_inPFNNLocal[poseId].copy()
np.savetxt('pose'+ str(poseId) + '.txt', agentPoseToSave, delimiter=',') # X is an array
#copyPose = np.loadtxt('pose0.txt', delimiter=',')
# END HACK
camera_yaw=math.atan2(R[1,0],R[0,0])
camera_pitch = math.atan2(-R[2,0] ,np.sqrt( R[2,1]**2+(R[2,2]**2)))
camera_roll = math.atan2(R[2,1] , R[2,2])
actor_list = []
car_list={}
pygame.init()
client = carla.Client('localhost', 2000)
client.set_timeout(100000.0)
world = client.get_world()
print(("initial speed: "+str(np.linalg.norm(directions)*10)))
control=carla.WalkerControl()
control.speed=np.linalg.norm(directions)*10
print('enabling synchronous mode.')
settings = world.get_settings()
settings.synchronous_mode = True
world.apply_settings(settings)
cur_yaw=0
name_movie=""
name_movie_main=""
try:
m = world.get_map()
print ("starting ")
poses= m.get_spawn_points()
start_pose=poses[int(nbr)]
print ("Location")
print((start_pose.location))
print (middle_2)
print (middle_w)
print ("Old Location")
print((start_poses[int(nbr)]))
print ("Centering")
print((centering["middle"]))
init_pos = carla.Location(x=middle_w[0,0], y=middle_w[1,0], z=PEDESTRIAN_OFFSET_Z)
init_rot = carla.Rotation(yaw=0, pitch=0, roll=0)
blueprint_library = world.get_blueprint_library()
vehicles=blueprint_library.filter('vehicle.*')
car_bp = [x for x in vehicles if int(x.get_attribute('number_of_wheels')) == 4]
if not actor_perspective:
init_trans = carla.Transform(init_pos, init_rot)
spectator = world.get_spectator()
spectator.set_transform(init_trans)
print ("Position of actor")
print((agent_positions[0]))
agent_init_pos = carla.Location(x=agent_positions[0].item(0), y=agent_positions[0].item(1), z=PEDESTRIAN_OFFSET_Z)
print (agent_init_pos)
agent_init_rot = carla.Rotation(yaw=directions_angle[0], pitch=0, roll=0)
print (agent_init_rot)
walkersBPs = blueprint_library.filter(PEDESTRIAN_BLUEPRINT_FOR_MAIN_ACTOR)
bp = random.choice(walkersBPs)
transform = carla.Transform(agent_init_pos, agent_init_rot)
print ("Here")
#if not actor_perspective:
actor = world.try_spawn_actor(bp, transform)
if actor is not None:
print ("Initialized actor")
else:
"Failed to initialize actor"
# else:
# actor=None
# spectator = world.get_spectator()
# spectator.set_transform(transform)
#static.prop.box03
# Place out goal in CARLA coordinates
rotation = actor.get_transform().rotation
#actor.set_velocity(carla.Vector3D(directions[0][0]/np.linalg.norm(disp),directions[0][1]/np.linalg.norm(disp),0.0 ))
goal_box = None
goal_based=True
if goal_based:
bp_goal = random.choice(blueprint_library.filter('static.prop.box03'))
agent_goal_pos = carla.Location(x=agent_goal.item(0), y=agent_goal.item(1), z=0.3)
print (agent_goal_pos)
agent_goal_rot = carla.Rotation(yaw=0, pitch=0, roll=0)
print (agent_goal_rot)
transform_goal = carla.Transform(agent_goal_pos, agent_goal_rot)
goal_box = world.try_spawn_actor(bp_goal, transform_goal)
vehicle_map={}
pedestrian_map={}
vehicle_vel_map = {}
pedestrian_vel_map = {}
prev_pos=[copy.copy(agent_init_pos.x), copy.copy(agent_init_pos.y), copy.copy(agent_init_pos.z)]
camera_transform = carla.Transform(carla.Location(x=0.3,y=0.0, z=0),carla.Rotation(yaw=0, pitch=0, roll=0))
if not actor_perspective:
camera = world.spawn_actor(
blueprint_library.find('sensor.camera.rgb'),
camera_transform,
attach_to=spectator)
else:
camera = world.spawn_actor(
blueprint_library.find('sensor.camera.rgb'),
carla.Transform(carla.Location(x=agent_positions[0].item(0)-5.5, y=agent_positions[0].item(1), z=1+2.8), carla.Rotation(pitch=-15)))
#attach_to=actor)
if DONT_CREATE_OTHER_AGENTS:
pass
else:
# Create cars
for car_key, car in cars_dict[50].items():
npc=None
tries=0
while npc is None and tries<3:
bp=random.choice(car_bp)#blueprint_library.filter('vehicle.*'))#z=1
transform=carla.Transform(carla.Location(x=car['transform'][0], y=car['transform'][1], z=0.35),carla.Rotation(yaw=car['yaw'],pitch=0, roll=0))
npc = world.try_spawn_actor(bp, transform)
tries=tries+1
if npc is not None:
vehicle_map[car_key]=npc
print(('created %s' % car_key))#npc.type_id)
# Create pedestrians
ped_controllers={}
ped_rotations={}
for car_key, car in people_dict[50].items():
npc = None
tries = 0
while npc is None and tries < 3:
bp = random.choice(blueprint_library.filter('walker.pedestrian.0001'))
print("Height "+str(car['bounding_box'][2]))
# print "Typical yaw"+str(car['yaw'])
transform = carla.Transform(carla.Location(x=car['transform'][0], y=car['transform'][1], z=car['bounding_box'][2]+0.3),
carla.Rotation(yaw=car['yaw'], pitch=0, roll=0))
npc = world.try_spawn_actor(bp, transform)
tries = tries + 1
if npc is not None:
pedestrian_map[car_key] = npc
ped_controllers[car_key] = carla.WalkerControl()
ped_rotations[car_key]=npc.get_transform().rotation
print(('created %s' % npc.type_id))
# Make sync queue for sensor data.
image_queue = queue.Queue()
camera.listen(image_queue.put)
frame = None
my_frame=0
display = pygame.display.set_mode(
(800, 600),
pygame.HWSURFACE | pygame.DOUBLEBUF)
font = get_font()
clock = pygame.time.Clock()
while my_frame<len(agent_positions):
if my_frame >= FRAME_RECORDING_STOP_FRAME:
break
| |
continue
if role == rs[0]:
if count == 1:
vb = "is"
else:
vb = "are"
if count != 1:
if count == 0 and len(var.ORIGINAL_ROLES[role]) == 0:
continue
message.append("\u0002{0}\u0002 {1}".format(count if count else "\u0002no\u0002", plural(role)))
else:
message.append("\u0002{0}\u0002 {1}".format(count, role))
# Only show team affiliation, this may be different than what mystics
# and wolf mystics are told since neutrals are split off. Determination
# of what numbers are shown is the same as summing up counts in "accurate"
# as accurate, this contains no hidden information
elif var.STATS_TYPE == "team":
wolfteam = 0
villagers = 0
neutral = 0
for role, players in var.ROLES.items():
if role in var.CURRENT_GAMEMODE.SECONDARY_ROLES:
continue
if role in Wolfteam:
wolfteam += len(players)
elif role in Neutral:
neutral += len(players)
else:
villagers += len(players)
message.append("\u0002{0}\u0002 {1}".format(wolfteam if wolfteam else "\u0002no\u0002", "wolf" if wolfteam == 1 else "wolves"))
message.append("\u0002{0}\u0002 {1}".format(villagers if villagers else "\u0002no\u0002", "villager" if villagers == 1 else "villagers"))
message.append("\u0002{0}\u0002 {1}".format(neutral if neutral else "\u0002no\u0002", "neutral player" if neutral == 1 else "neutral players"))
vb = "is" if wolfteam == 1 else "are"
stats_mssg = "{0}It is currently {4}. There {3} {1}, and {2}.".format(_nick,
", ".join(message[0:-1]),
message[-1],
vb,
var.PHASE)
reply(cli, nick, chan, stats_mssg)
@handle_error
def hurry_up(gameid, change):
if var.PHASE != "day": return
if gameid:
if gameid != var.DAY_ID:
return
if not change:
event = Event("daylight_warning", {"message": "daylight_warning"})
event.dispatch(var)
channels.Main.send(messages[event.data["message"]])
return
var.DAY_ID = 0
chk_decision(var, timeout=True)
@cmd("fnight", flag="N")
def fnight(cli, nick, chan, rest):
"""Forces the day to end and night to begin."""
if var.PHASE != "day":
cli.notice(nick, messages["not_daytime"])
else:
hurry_up(0, True)
@cmd("fday", flag="N")
def fday(cli, nick, chan, rest):
"""Forces the night to end and the next day to begin."""
if var.PHASE != "night":
cli.notice(nick, messages["not_nighttime"])
else:
transition_day()
def stop_game(var, winner="", abort=False, additional_winners=None, log=True):
if abort:
channels.Main.send(messages["role_attribution_failed"])
elif not var.ORIGINAL_ROLES: # game already ended
return
if var.DAY_START_TIME:
now = datetime.now()
td = now - var.DAY_START_TIME
var.DAY_TIMEDELTA += td
if var.NIGHT_START_TIME:
now = datetime.now()
td = now - var.NIGHT_START_TIME
var.NIGHT_TIMEDELTA += td
daymin, daysec = var.DAY_TIMEDELTA.seconds // 60, var.DAY_TIMEDELTA.seconds % 60
nitemin, nitesec = var.NIGHT_TIMEDELTA.seconds // 60, var.NIGHT_TIMEDELTA.seconds % 60
total = var.DAY_TIMEDELTA + var.NIGHT_TIMEDELTA
tmin, tsec = total.seconds // 60, total.seconds % 60
gameend_msg = messages["endgame_stats"].format(tmin, tsec,
daymin, daysec,
nitemin, nitesec)
if not abort:
channels.Main.send(gameend_msg)
roles_msg = []
# squirrel away a copy of our original roleset for stats recording, as the following code
# modifies var.ORIGINAL_ROLES and var.ORIGINAL_MAIN_ROLES.
rolecounts = {role: len(players) for role, players in var.ORIGINAL_ROLES.items()}
# save some typing
rolemap = var.ORIGINAL_ROLES
mainroles = var.ORIGINAL_MAIN_ROLES
orig_main = {} # if get_final_role changes mainroles, we want to stash original main role
for player, role in mainroles.items():
evt = Event("get_final_role", {"role": var.FINAL_ROLES.get(player.nick, role)})
evt.dispatch(var, player, role)
if role != evt.data["role"]:
rolemap[role].remove(player)
rolemap[evt.data["role"]].add(player)
mainroles[player] = evt.data["role"]
orig_main[player] = role
# track if we already printed "was" for a role swap, e.g. The wolves were A (was seer), B (harlot)
# so that we can make the message a bit more concise
roleswap_key = "endgame_roleswap_long"
for role in role_order():
numrole = len(rolemap[role])
if numrole == 0:
continue
msg = []
for player in rolemap[role]:
# check if the player changed roles during game, and if so insert the "was X" message
player_msg = []
if mainroles[player] == role and player in orig_main:
player_msg.append(messages[roleswap_key].format(orig_main[player]))
roleswap_key = "endgame_roleswap_short"
evt = Event("get_endgame_message", {"message": player_msg})
evt.dispatch(var, player, role, is_mainrole=mainroles[player] == role)
if player_msg:
msg.append("\u0002{0}\u0002 ({1})".format(player, ", ".join(player_msg)))
else:
msg.append("\u0002{0}\u0002".format(player))
# FIXME: get rid of hardcoded English
if numrole == 2:
roles_msg.append("The {1} were {0[0]} and {0[1]}.".format(msg, plural(role)))
elif numrole == 1:
roles_msg.append("The {1} was {0[0]}.".format(msg, role))
else:
roles_msg.append("The {2} were {0}, and {1}.".format(", ".join(msg[0:-1]), msg[-1], plural(role)))
message = ""
count = 0
if not abort:
evt = Event("game_end_messages", {"messages": roles_msg})
evt.dispatch(var)
channels.Main.send(*roles_msg)
# map player: all roles of that player (for below)
allroles = {player: {role for role, players in rolemap.items() if player in players} for player in mainroles}
# "" indicates everyone died or abnormal game stop
if winner != "" or log:
winners = set()
player_list = []
if additional_winners is not None:
winners.update(additional_winners)
for plr, rol in mainroles.items():
splr = plr.nick # FIXME: for backwards-compat
pentry = {"version": 2,
"nick": None,
"account": None,
"ident": None,
"host": None,
"role": None,
"templates": [],
"special": [],
"won": False,
"iwon": False,
"dced": False}
if plr in var.DCED_LOSERS:
pentry["dced"] = True
pentry["account"] = plr.account
pentry["nick"] = plr.nick
pentry["ident"] = plr.ident
pentry["host"] = plr.host
pentry["mainrole"] = rol
pentry["allroles"] = allroles[plr]
won = False
iwon = False
survived = get_players()
if not pentry["dced"]:
# determine default win status (event can override)
                if rol in Wolfteam or (var.HIDDEN_ROLE == "cultist" and rol in Hidden):
if winner == "wolves":
won = True
iwon = plr in survived
elif rol not in Neutral and winner == "villagers":
won = True
iwon = plr in survived
# true neutral roles are handled via the event below
evt = Event("player_win", {"won": won, "iwon": iwon, "special": pentry["special"]})
evt.dispatch(var, plr, rol, winner, plr in survived)
won = evt.data["won"]
iwon = evt.data["iwon"]
# ensure that it is a) a list, and b) a copy (so it can't be mutated out from under us later)
pentry["special"] = list(evt.data["special"])
# special-case everyone for after the event
if winner == "everyone":
iwon = True
if pentry["dced"]:
# You get NOTHING! You LOSE! Good DAY, sir!
won = False
iwon = False
elif not iwon:
iwon = won and plr in survived # survived, team won = individual win
if winner == "":
pentry["won"] = False
pentry["iwon"] = False
else:
pentry["won"] = won
pentry["iwon"] = iwon
if won or iwon:
winners.add(plr.nick)
if not plr.is_fake:
# don't record fjoined fakes
player_list.append(pentry)
if winner == "":
winners = set()
if log:
game_options = {"role reveal": var.ROLE_REVEAL,
"stats": var.STATS_TYPE,
"abstain": "on" if var.ABSTAIN_ENABLED and not var.LIMIT_ABSTAIN else "restricted" if var.ABSTAIN_ENABLED else "off",
"roles": {}}
for role,pl in var.ORIGINAL_ROLES.items():
if len(pl) > 0:
game_options["roles"][role] = len(pl)
db.add_game(var.CURRENT_GAMEMODE.name,
len(survived) + len(var.DEAD),
time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(var.GAME_ID)),
time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()),
winner,
player_list,
game_options)
# spit out the list of winners
winners = sorted(winners)
if len(winners) == 1:
channels.Main.send(messages["single_winner"].format(winners[0]))
elif len(winners) == 2:
channels.Main.send(messages["two_winners"].format(winners[0], winners[1]))
elif len(winners) > 2:
nicklist = ("\u0002" + x + "\u0002" for x in winners[0:-1])
channels.Main.send(messages["many_winners"].format(", ".join(nicklist), winners[-1]))
# Message players in deadchat letting them know that the game has ended
if var.DEADCHAT_PLAYERS:
for user in var.DEADCHAT_PLAYERS:
user.queue_message(messages["endgame_deadchat"].format(channels.Main))
user.send_messages()
reset_modes_timers(var)
reset()
expire_tempbans()
# This must be after reset()
if var.AFTER_FLASTGAME is not None:
var.AFTER_FLASTGAME()
var.AFTER_FLASTGAME = None
if var.ADMIN_TO_PING is not None: # It was an flastgame
channels.Main.send("PING! {0}".format(var.ADMIN_TO_PING))
var.ADMIN_TO_PING = None
def chk_win(*, end_game=True, winner=None):
""" Returns True if someone won """
lpl = len(get_players())
if var.PHASE == "join":
if lpl == 0:
reset_modes_timers(var)
reset()
# This must be after reset()
if var.AFTER_FLASTGAME is not None:
var.AFTER_FLASTGAME()
var.AFTER_FLASTGAME = None
if var.ADMIN_TO_PING is not None: # It was an flastgame
channels.Main.send("PING! {0}".format(var.ADMIN_TO_PING))
var.ADMIN_TO_PING = None
return True
return False
if var.PHASE not in var.GAME_PHASES:
return False #some other thread already ended game probably
return chk_win_conditions(var.ROLES, var.MAIN_ROLES, end_game, winner)
def chk_win_conditions(rolemap, mainroles, end_game=True, winner=None):
"""Internal handler for the chk_win function."""
with var.GRAVEYARD_LOCK:
if var.PHASE == "day":
pl = set(get_players()) - get_absent(var)
lpl = len(pl)
else:
pl = set(get_players(mainroles=mainroles))
lpl = len(pl)
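        # Which roles count as wolves for the win check depends on the wolfchat
        # restriction flags: with RW_REM_NON_WOLVES only real wolves count (plus
        # the traitor, unless RW_TRAITOR_NON_WOLF treats traitors as non-wolves);
        # otherwise the full Wolfchat set is used.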
if var.RESTRICT_WOLFCHAT & var.RW_REM_NON_WOLVES:
if var.RESTRICT_WOLFCHAT & var.RW_TRAITOR_NON_WOLF:
wcroles = Wolf
else:
wcroles = Wolf | {"traitor"}
else:
wcroles = Wolfchat
wolves = set(get_players(wcroles, mainroles=mainroles))
lwolves = len(wolves & pl)
lrealwolves = len(get_players(Wolf & Killer, mainroles=mainroles))
message = ""
if lpl < 1:
message = messages["no_win"]
# still want people like jesters, dullahans, etc. to get wins if they fulfilled their win conds
winner = "no_team_wins"
# TODO: flip priority order (so that things like fool run last, and therefore override previous win conds)
# Priorities:
# 0 = fool, other roles that end game immediately
# 1 = things that could short-circuit game ending, such as cub growing up | |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .adc import _adc
from .dac import _quantize_dac
from .w2g import w2g
from .crossbarlayer import CrossbarLayer
quantize_input = _quantize_dac.apply
# quantize_weight = _quantize_dac.apply
adc = _adc.apply
class crxb_Conv2d(nn.Conv2d):
"""
    Custom conv layer that takes the non-ideal effects of a ReRAM crossbar into account. It has three functions:
    1) emulate the DAC at the input of the crossbar and quantize the input and weight tensors;
    2) map the quantized tensors onto the ReRAM crossbar arrays and include non-ideal effects such as noise,
       IR drop, and SAF;
    3) emulate the ADC at the output of the crossbar and convert the analog current back to a digital number
       for the input of the next layer.
    A standalone tiling sketch (for illustration only) follows this class definition.
    Args:
        scaler_dw(float): weight quantization scaler to reduce the influence of the IR drop.
        crxb_size(int): size of the crossbar.
        quantize(int): quantization resolution of the crossbar.
"""
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, groups=1,
bias=True, crxb_size=64, scaler_dw=1, **crxb_cfg):
super(crxb_Conv2d, self).__init__(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
        assert self.groups == 1, "grouped convolution is currently not supported by this custom conv layer"
# self.ir_drop = ir_drop
# self.device = device
################## Crossbar conversion #############################
self.crxb_size = crxb_size
# self.enable_ec_SAF = enable_ec_SAF
# self.nchout_index = nn.Parameter(torch.arange(self.out_channels), requires_grad=False)
self.register_buffer('nchout_index', torch.arange(self.out_channels))
weight_flatten_rows = self.in_channels * torch.cumprod(torch.tensor(self.kernel_size), 0)[-1].item()
weight_flatten_cols = self.out_channels
self.crxb_row, self.crxb_row_pads = self.num_pad(
weight_flatten_rows, self.crxb_size)
self.crxb_col, self.crxb_col_pads = self.num_pad(
weight_flatten_cols, self.crxb_size)
# p3d = (0, 1, 2, 1, 3, 3) # pad by (0, 1), (2, 1), and (3, 3)
self.w_pad = [0, self.crxb_row_pads, 0, self.crxb_col_pads]
self.input_pad = [0, 0, 0, self.crxb_row_pads]
weight_crxb_shape = torch.Size((self.crxb_col, self.crxb_row,
self.crxb_size, self.crxb_size))
################# Hardware conversion ##############################
# weight and input levels
        # Q(x) = round((2^{k-1} - 1) * x) / (2^{k-1} - 1)
# self.n_lvl = 2 ** quantize
# self.h_lvl = (self.n_lvl - 2) / 2
# ReRAM cells
        # 7-bit precision is achievable on state-of-the-art RRAM devices [9]
# [9] High precision tuning of state for memristive devices by
# adaptable variation-tolerant algorithm
# self.Gmax = gmax # max conductance
# self.Gmin = gmin # min conductance
# self.delta_g = (self.Gmax - self.Gmin) / (2 ** 7) # conductance step
# self.crxb = crxb_layer(ir_drop, device, gmax, gmin, gwire, gload, scaler_dw, vdd, enable_noise,
# freq, temp , crxb_size, quantize, enable_SAF, enable_ec_SAF)
self.crxb = CrossbarLayer(crxb_size=crxb_size, **crxb_cfg)
self.w2g = w2g(self.crxb.delta_g, Gmin=self.crxb.Gmin, G_SA0=self.crxb.Gmax,
G_SA1=self.crxb.Gmin, weight_shape=weight_crxb_shape, enable_SAF=self.crxb.enable_SAF)
# self.Gwire = gwire
# self.Gload = gload
# DAC
# self.Vdd = vdd # unit: volt
# self.delta_v = self.Vdd / (self.n_lvl - 1)
# self.delta_in_sum = nn.Parameter(torch.Tensor(1), requires_grad=False)
# self.delta_out_sum = nn.Parameter(torch.Tensor(1), requires_grad=False)
# self.counter = nn.Parameter(torch.Tensor(1), requires_grad=False)
self.register_buffer('delta_in_sum', torch.zeros(1))
self.register_buffer('delta_out_sum', torch.zeros(1))
self.register_buffer('counter', torch.zeros(1))
self.scaler_dw = scaler_dw
self.delta_w = 0 # self.weight.abs().max() / self.h_lvl * self.scaler_dw
self.delta_x = 0 # self.delta_in_sum.data / self.counter.data
self.h_out = None
self.w_out = None
def num_pad(self, source, target):
crxb_index = math.ceil(source / target)
num_padding = crxb_index * target - source
return crxb_index, num_padding
# Mapping the weights to the crossbar array
def mapping(self, input):
# 1. input data and weight quantization
# delta_x delta_w
with torch.no_grad():
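            # delta_w is the weight-quantizer step: the largest |weight| spread over
            # h_lvl levels, scaled by scaler_dw to leave headroom against IR drop
            # (see the class docstring for scaler_dw).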
self.delta_w = self.weight.abs().max() / self.crxb.h_lvl * self.scaler_dw
# trainable delta_x
if self.training:
self.counter.data += 1
self.delta_x = input.abs().max() / self.crxb.h_lvl
self.delta_in_sum.data += self.delta_x
else:
self.delta_x = self.delta_in_sum.data / self.counter.data
input_quan, weight_quan = self.crxb.quantize(input, self.delta_x, self.weight, self.delta_w)
# 2. Perform the computation between input voltage and weight conductance
# compute output feature size
if self.h_out is None and self.w_out is None:
self.h_out = int(
(input.shape[2] - self.kernel_size[0] + 2 * self.padding[0]) / self.stride[0] + 1)
self.w_out = int(
(input.shape[3] - self.kernel_size[0] + 2 * self.padding[0]) / self.stride[0] + 1)
num_block = self.h_out * self.w_out
block_size = input.shape[1] * torch.cumprod(torch.tensor(self.kernel_size), 0)[-1]
pad_block_size = block_size + self.input_pad[2] + self.input_pad[3]
# 2.1 flatten and unfold the weight and input
input_unfold = F.unfold(input_quan, kernel_size=self.kernel_size[0],
dilation=self.dilation, padding=self.padding,
stride=self.stride)
weight_flatten = weight_quan.view(self.out_channels, -1)
# 2.2. add paddings
input_padded = F.pad(input_unfold, self.input_pad,
mode='constant', value=0)
weight_padded = F.pad(weight_flatten, self.w_pad,
mode='constant', value=0)
# 2.3. reshape to crxb size
input_crxb = input_padded.view(input.shape[0], 1, self.crxb_row,
self.crxb_size, num_block)
weight_crxb = weight_padded.view(self.crxb_col, self.crxb_size,
self.crxb_row, self.crxb_size).transpose(1, 2)
# convert the floating point weight into conductance pair values
G_crxb = self.w2g(weight_crxb)
return input_crxb, G_crxb
def forward(self, input):
assert input.dim() == 4
# 1. input data and weight quantization
input_crxb, G_crxb = self.mapping(input)
# 2. Perform the computation between input voltage and weight conductance
# 2.1-2.3 Mapping
# 2.4. compute matrix multiplication
G_crxb = self.crxb.noise_injection(input_crxb, G_crxb)
# this block is to calculate the ir drop of the crossbar
output_crxb = self.crxb.solve_crxb(input_crxb, G_crxb)
# 3. perform ADC operation (i.e., current to digital conversion)
# input.shape[0], self.crxb_col, self.crxb_row,
# self.crxb_size, self.num_block
with torch.no_grad():
if self.training:
self.delta_i = output_crxb.abs().max() / (self.crxb.h_lvl)
self.delta_out_sum.data += self.delta_i
else:
self.delta_i = self.delta_out_sum.data / self.counter.data
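            # delta_y converts the quantized crossbar current back to the digital
            # conv result: inputs were mapped to voltages by delta_v / delta_x and
            # weights to conductances by delta_g / delta_w, so scaling the ADC step
            # delta_i by delta_w * delta_x / (delta_v * delta_g) undoes both mappings.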
self.delta_y = self.delta_w * self.delta_x * \
self.delta_i / (self.crxb.delta_v * self.crxb.delta_g)
# print('adc LSB ration:', self.delta_i/self.max_i_LSB)
output_adc = self.crxb.output_convet(output_crxb, self.delta_i, self.delta_y)
if self.w2g.enable_SAF:
            # assumes CrossbarLayer carries the enable_ec_SAF flag supplied via crxb_cfg
            if self.crxb.enable_ec_SAF:
G_pos_diff, G_neg_diff = self.w2g.error_compensation()
ec_scale = self.delta_y / self.delta_i
output_adc += (torch.matmul(G_pos_diff, input_crxb)
- torch.matmul(G_neg_diff, input_crxb)) * ec_scale
output_sum = torch.sum(output_adc, dim=2)
# input.shape[0], self.crxb_col,
# self.crxb_size, self.num_block
output = output_sum.view(input.shape[0],
output_sum.shape[1] * output_sum.shape[2],
self.h_out,
self.w_out).index_select(dim=1, index=self.nchout_index) # remove the padded columns
if self.bias is not None:
output += self.bias.unsqueeze(1).unsqueeze(1)
return output
def _reset_delta(self):
self.delta_in_sum.data[0] = 0
self.delta_out_sum.data[0] = 0
self.counter.data[0] = 0
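# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original model code): the flatten ->
# pad -> tile arithmetic that crxb_Conv2d.mapping() uses to place a conv
# weight and an unfolded input onto fixed-size crossbar sub-arrays. All shapes
# below are made-up toy values; only torch / F / math from the imports above
# are used, so the helper is self-contained and runnable.
def _demo_crossbar_tiling(crxb_size=64):
    """Toy walkthrough of the crossbar tiling used by crxb_Conv2d.mapping()."""
    out_channels, in_channels, k = 8, 3, 3
    weight = torch.randn(out_channels, in_channels, k, k)
    x = torch.randn(2, in_channels, 16, 16)  # (N, C, H, W)
    # rows of the flattened weight matrix = in_channels * k * k, cols = out_channels
    rows, cols = in_channels * k * k, out_channels
    crxb_row = math.ceil(rows / crxb_size)
    crxb_col = math.ceil(cols / crxb_size)
    row_pads = crxb_row * crxb_size - rows
    col_pads = crxb_col * crxb_size - cols
    # flatten the weight to 2D, zero-pad to a multiple of crxb_size, then tile
    weight_flat = weight.view(out_channels, -1)
    weight_padded = F.pad(weight_flat, [0, row_pads, 0, col_pads])
    weight_crxb = weight_padded.view(crxb_col, crxb_size,
                                     crxb_row, crxb_size).transpose(1, 2)
    # unfold the input into im2col columns and pad its rows the same way
    input_unfold = F.unfold(x, kernel_size=k)
    input_padded = F.pad(input_unfold, [0, 0, 0, row_pads])
    input_crxb = input_padded.view(x.shape[0], 1, crxb_row, crxb_size, -1)
    return weight_crxb.shape, input_crxb.shape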
class crxb_Linear(nn.Linear):
"""
    Custom linear layer that takes the non-ideal effects of a ReRAM crossbar into account. It has three functions:
    1) emulate the DAC at the input of the crossbar and quantize the input and weight tensors;
    2) map the quantized tensors onto the ReRAM crossbar arrays and include non-ideal effects such as noise,
       IR drop, and SAF;
    3) emulate the ADC at the output of the crossbar and convert the analog current back to a digital number
       for the input of the next layer.
    The weight is tiled onto the crossbars in the same way as in crxb_Conv2d (see the illustrative sketch above),
    minus the im2col unfold step.
    Args:
        scaler_dw(float): weight quantization scaler to reduce the influence of the IR drop.
        crxb_size(int): size of the crossbar.
        quantize(int): quantization resolution of the crossbar.
"""
def __init__(self, in_features, out_features,
bias=True, crxb_size=64, scaler_dw=1, **crxb_cfg):
super(crxb_Linear, self).__init__(in_features, out_features, bias)
# self.ir_drop = ir_drop
# self.device = device
################## Crossbar conversion #############################
self.crxb_size = crxb_size
# self.enable_ec_SAF = enable_ec_SAF
# self.out_index = nn.Parameter(torch.arange(out_features), requires_grad=False)
self.register_buffer('out_index', torch.arange(out_features))
self.crxb_row, self.crxb_row_pads = self.num_pad(
self.weight.shape[1], self.crxb_size)
self.crxb_col, self.crxb_col_pads = self.num_pad(
self.weight.shape[0], self.crxb_size)
# p3d = (0, 1, 2, 1, 3, 3) # pad by (0, 1), (2, 1), and (3, 3)
self.w_pad = [0, self.crxb_row_pads, 0, self.crxb_col_pads]
self.input_pad = [0, self.crxb_row_pads]
weight_crxb_shape = torch.Size((self.crxb_col, self.crxb_row,
self.crxb_size, self.crxb_size))
################# Hardware conversion ##############################
# weight and input levels
        # Q(x) = round((2^{k-1} - 1) * x) / (2^{k-1} - 1)
# self.n_lvl = 2 ** quantize
# self.h_lvl = (self.n_lvl - 2) / 2
# ReRAM cells
        # 7-bit precision is achievable on state-of-the-art RRAM devices [9]
# [9] High precision tuning of state for memristive devices by
# adaptable variation-tolerant algorithm
# self.Gmax = gmax # max conductance
# self.Gmin = gmin # min conductance
# self.delta_g = (self.Gmax - self.Gmin) / (2 ** 7) # conductance step
# self.crxb = crxb_layer(ir_drop, device, gmax, gmin, gwire, gload, scaler_dw, vdd, enable_noise,
# freq, temp , crxb_size, quantize, enable_SAF, enable_ec_SAF)
self.crxb = CrossbarLayer(crxb_size=crxb_size, **crxb_cfg)
self.w2g = w2g(self.crxb.delta_g, Gmin=self.crxb.Gmin, G_SA0=self.crxb.Gmax,
G_SA1=self.crxb.Gmin, weight_shape=weight_crxb_shape, enable_SAF=self.crxb.enable_SAF)
# self.Gwire = gwire
# self.Gload = gload
# DAC
# self.Vdd = vdd # unit: volt
# self.delta_v = self.Vdd / (self.n_lvl - 1)
# self.delta_in_sum = nn.Parameter(torch.Tensor(1), requires_grad=False)
# self.delta_out_sum = nn.Parameter(torch.Tensor(1), requires_grad=False)
# self.counter = nn.Parameter(torch.Tensor(1), requires_grad=False)
self.register_buffer('delta_in_sum', torch.zeros(1))
self.register_buffer('delta_out_sum', torch.zeros(1))
self.register_buffer('counter', torch.zeros(1))
self.scaler_dw = scaler_dw
self.delta_w = 0 # self.weight.abs().max() / self.h_lvl * self.scaler_dw
self.delta_x = 0 # self.delta_in_sum.data / self.counter.data
def num_pad(self, source, target):
crxb_index = math.ceil(source / target)
num_padding = crxb_index * target - source
return crxb_index, num_padding
# Mapping the weights to the crossbar array
def mapping(self, input):
# 1. input data and weight quantization
# delta_x delta_w
with torch.no_grad():
self.delta_w = self.weight.abs().max() / self.crxb.h_lvl * self.scaler_dw
# trainable delta_x
if self.training:
self.counter.data += | |
circuits, targets)
def test_sdg_gate_nondeterministic_waltz_basis_gates(self):
"""Test sdg-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_1q_clifford.sdg_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_1q_clifford.sdg_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_sdg_gate_nondeterministic_minimal_basis_gates(self):
"""Test sdg-gate gate circuits compiling to u3,cx"""
circuits = ref_1q_clifford.sdg_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_1q_clifford.sdg_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
# ---------------------------------------------------------------------
# Test cx-gate
# ---------------------------------------------------------------------
def test_cx_gate_deterministic_default_basis_gates(self):
"""Test cx-gate circuits compiling to backend default basis_gates."""
circuits = ref_2q_clifford.cx_gate_circuits_deterministic(
final_measure=False)
targets = ref_2q_clifford.cx_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_cx_gate_deterministic_waltz_basis_gates(self):
"""Test cx-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_2q_clifford.cx_gate_circuits_deterministic(
final_measure=False)
targets = ref_2q_clifford.cx_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_cx_gate_deterministic_minimal_basis_gates(self):
"""Test cx-gate gate circuits compiling to u3,cx"""
circuits = ref_2q_clifford.cx_gate_circuits_deterministic(
final_measure=False)
targets = ref_2q_clifford.cx_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_cx_gate_nondeterministic_default_basis_gates(self):
"""Test cx-gate circuits compiling to backend default basis_gates."""
circuits = ref_2q_clifford.cx_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_2q_clifford.cx_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_cx_gate_nondeterministic_waltz_basis_gates(self):
"""Test cx-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_2q_clifford.cx_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_2q_clifford.cx_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_cx_gate_nondeterministic_minimal_basis_gates(self):
"""Test cx-gate gate circuits compiling to u3,cx"""
circuits = ref_2q_clifford.cx_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_2q_clifford.cx_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
# ---------------------------------------------------------------------
# Test cz-gate
# ---------------------------------------------------------------------
def test_cz_gate_deterministic_default_basis_gates(self):
"""Test cz-gate circuits compiling to backend default basis_gates."""
circuits = ref_2q_clifford.cz_gate_circuits_deterministic(
final_measure=False)
targets = ref_2q_clifford.cz_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_cz_gate_deterministic_waltz_basis_gates(self):
"""Test cz-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_2q_clifford.cz_gate_circuits_deterministic(
final_measure=False)
targets = ref_2q_clifford.cz_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_cz_gate_deterministic_minimal_basis_gates(self):
"""Test cz-gate gate circuits compiling to u3,cx"""
circuits = ref_2q_clifford.cz_gate_circuits_deterministic(
final_measure=False)
targets = ref_2q_clifford.cz_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_cz_gate_nondeterministic_default_basis_gates(self):
"""Test cz-gate circuits compiling to backend default basis_gates."""
circuits = ref_2q_clifford.cz_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_2q_clifford.cz_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_cz_gate_nondeterministic_waltz_basis_gates(self):
"""Test cz-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_2q_clifford.cz_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_2q_clifford.cz_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_cz_gate_nondeterministic_minimal_basis_gates(self):
"""Test cz-gate gate circuits compiling to u3,cx"""
circuits = ref_2q_clifford.cz_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_2q_clifford.cz_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
# ---------------------------------------------------------------------
# Test swap-gate
# ---------------------------------------------------------------------
def test_swap_gate_deterministic_default_basis_gates(self):
"""Test swap-gate circuits compiling to backend default basis_gates."""
circuits = ref_2q_clifford.swap_gate_circuits_deterministic(
final_measure=False)
targets = ref_2q_clifford.swap_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_swap_gate_deterministic_waltz_basis_gates(self):
"""Test swap-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_2q_clifford.swap_gate_circuits_deterministic(
final_measure=False)
targets = ref_2q_clifford.swap_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_swap_gate_deterministic_minimal_basis_gates(self):
"""Test swap-gate gate circuits compiling to u3,cx"""
circuits = ref_2q_clifford.swap_gate_circuits_deterministic(
final_measure=False)
targets = ref_2q_clifford.swap_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_swap_gate_nondeterministic_default_basis_gates(self):
"""Test swap-gate circuits compiling to backend default basis_gates."""
circuits = ref_2q_clifford.swap_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_2q_clifford.swap_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_swap_gate_nondeterministic_waltz_basis_gates(self):
"""Test swap-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_2q_clifford.swap_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_2q_clifford.swap_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_swap_gate_nondeterministic_minimal_basis_gates(self):
"""Test swap-gate gate circuits compiling to u3,cx"""
circuits = ref_2q_clifford.swap_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_2q_clifford.swap_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
# ---------------------------------------------------------------------
# Test t-gate
# ---------------------------------------------------------------------
def test_t_gate_deterministic_default_basis_gates(self):
"""Test t-gate circuits compiling to backend default basis_gates."""
circuits = ref_non_clifford.t_gate_circuits_deterministic(
final_measure=False)
targets = ref_non_clifford.t_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_t_gate_deterministic_waltz_basis_gates(self):
"""Test t-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_non_clifford.t_gate_circuits_deterministic(
final_measure=False)
targets = ref_non_clifford.t_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_t_gate_deterministic_minimal_basis_gates(self):
"""Test t-gate gate circuits compiling to u3,cx"""
circuits = ref_non_clifford.t_gate_circuits_deterministic(
final_measure=False)
targets = ref_non_clifford.t_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_t_gate_nondeterministic_default_basis_gates(self):
"""Test t-gate circuits compiling to backend default basis_gates."""
circuits = ref_non_clifford.t_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_non_clifford.t_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_t_gate_nondeterministic_waltz_basis_gates(self):
"""Test t-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_non_clifford.t_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_non_clifford.t_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_t_gate_nondeterministic_minimal_basis_gates(self):
"""Test t-gate gate circuits compiling to u3,cx"""
circuits = ref_non_clifford.t_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_non_clifford.t_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
# ---------------------------------------------------------------------
# Test tdg-gate
# ---------------------------------------------------------------------
def test_tdg_gate_deterministic_default_basis_gates(self):
"""Test tdg-gate circuits compiling to backend default basis_gates."""
circuits = ref_non_clifford.tdg_gate_circuits_deterministic(
final_measure=False)
targets = ref_non_clifford.tdg_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_tdg_gate_deterministic_waltz_basis_gates(self):
"""Test tdg-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_non_clifford.tdg_gate_circuits_deterministic(
final_measure=False)
targets = ref_non_clifford.tdg_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_tdg_gate_deterministic_minimal_basis_gates(self):
"""Test tdg-gate gate circuits compiling to u3,cx"""
circuits = ref_non_clifford.tdg_gate_circuits_deterministic(
final_measure=False)
targets = ref_non_clifford.tdg_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_tdg_gate_nondeterministic_default_basis_gates(self):
"""Test tdg-gate circuits compiling to backend default basis_gates."""
circuits = ref_non_clifford.tdg_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_non_clifford.tdg_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_tdg_gate_nondeterministic_waltz_basis_gates(self):
"""Test tdg-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_non_clifford.tdg_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_non_clifford.tdg_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_tdg_gate_nondeterministic_minimal_basis_gates(self):
"""Test tdg-gate gate circuits compiling to u3,cx"""
circuits = ref_non_clifford.tdg_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_non_clifford.tdg_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
# ---------------------------------------------------------------------
# Test ccx-gate
# ---------------------------------------------------------------------
def test_ccx_gate_deterministic_default_basis_gates(self):
"""Test ccx-gate circuits compiling to backend default basis_gates."""
circuits = ref_non_clifford.ccx_gate_circuits_deterministic(
final_measure=False)
targets = ref_non_clifford.ccx_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_ccx_gate_deterministic_waltz_basis_gates(self):
"""Test ccx-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_non_clifford.ccx_gate_circuits_deterministic(
final_measure=False)
targets = ref_non_clifford.ccx_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_ccx_gate_deterministic_minimal_basis_gates(self):
"""Test ccx-gate gate circuits compiling to u3,cx"""
circuits = ref_non_clifford.ccx_gate_circuits_deterministic(
final_measure=False)
targets = ref_non_clifford.ccx_gate_statevector_deterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_ccx_gate_nondeterministic_default_basis_gates(self):
"""Test ccx-gate circuits compiling to backend default basis_gates."""
circuits = ref_non_clifford.ccx_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_non_clifford.ccx_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_ccx_gate_nondeterministic_waltz_basis_gates(self):
"""Test ccx-gate gate circuits compiling to u1,u2,u3,cx"""
circuits = ref_non_clifford.ccx_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_non_clifford.ccx_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
basis_gates=['u1', 'u2', 'u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
def test_ccx_gate_nondeterministic_minimal_basis_gates(self):
"""Test ccx-gate gate circuits compiling to u3,cx"""
circuits = ref_non_clifford.ccx_gate_circuits_nondeterministic(
final_measure=False)
targets = ref_non_clifford.ccx_gate_statevector_nondeterministic()
job = execute(circuits,
self.SIMULATOR,
shots=1,
                      basis_gates=['u3', 'cx'],
backend_options=self.BACKEND_OPTS)
result = job.result()
self.assertSuccess(result)
self.compare_statevector(result, circuits, targets)
# ---------------------------------------------------------------------
# Test unitary gate qobj instruction
# ---------------------------------------------------------------------
def test_unitary_gate(self):
"""Test simulation with unitary gate circuit | |
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import numpy as np
from astropy.io import fits
from astropy.convolution import convolve
import astropy.units as u
from scipy import ndimage as nd
import itertools as it
import operator as op
import os
from warnings import warn
try:
from spectral_cube import SpectralCube, LazyMask
from spectral_cube.wcs_utils import drop_axis
spectral_cube_flag = True
except ImportError:
warn("spectral-cube is not installed. Using Moments requires"
" spectral-cube to be installed.")
spectral_cube_flag = False
# try:
# from signal_id import Noise
# signal_id_flag = True
# except ImportError:
# warn("signal-id is not installed. Disabling associated functionality.")
# signal_id_flag = False
from ._moment_errs import (moment0_error, moment1_error, linewidth_sigma_err)
class Moments(object):
"""
A unified approach to deriving the noise level in a cube, applying a
mask, and deriving moments along with their errors. All the heavy lifting
is done with
`spectral_cube <http://spectral-cube.readthedocs.org/en/latest/>`_.
Parameters
----------
cube : SpectralCube or str
Either a SpectralCube object, or the filename of a cube readable
by spectral-cube.
scale : `~astropy.units.Quantity`, optional
The noise level in the cube. Used to estimate uncertainties of the
moment maps.
moment_method : {'slice', 'cube', 'ray'}, optional
The method to use for creating the moments. See the spectral-cube
docs for an explanation of the differences.
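    Examples
    --------
    Illustrative sketch only; ``"cube.fits"`` and the noise level below are
    placeholders, not data shipped with this package.
    >>> import astropy.units as u
    >>> mm = Moments("cube.fits", scale=0.5 * u.K)  # doctest: +SKIP
    >>> mm.make_moments()  # doctest: +SKIP
    >>> mm.make_moment_errors()  # doctest: +SKIP
    >>> mm.to_fits("cube_props")  # doctest: +SKIP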
"""
def __init__(self, cube, scale=None, moment_method='slice'):
super(Moments, self).__init__()
if not spectral_cube_flag:
            raise ImportError("Moments requires the spectral-cube package"
                              " to be installed: https://github.com/"
                              "radio-astro-tools/spectral-cube")
if isinstance(cube, SpectralCube):
self.cube = cube
self.save_name = None
else:
self.cube = SpectralCube.read(cube)
# Default save name to the cube name without the suffix.
self.save_name = ".".join(cube.split(".")[:-1])
if moment_method not in ['slice', 'cube', 'ray']:
raise TypeError("Moment method must be 'slice', 'cube', or 'ray'.")
self.moment_how = moment_method
self.scale = scale
self.prop_headers = None
self.prop_err_headers = None
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, value):
if value is None:
self._scale = value
else:
if not hasattr(value, 'unit'):
raise TypeError("Given scale must be an `astropy.Quantity`"
" with units matching the units of the cube.")
if not value.unit.is_equivalent(self.cube.unit):
raise u.UnitsError("Given scale must have units equivalent"
" to the units of the cube.")
if value.value < 0:
raise ValueError("Noise level is set to negative. The noise"
" must be zero (noiseless) or positive.")
self._scale = value
def apply_mask(self, mask):
'''
Apply a mask to the cube.
Parameters
----------
        mask : spectral-cube Mask or numpy.ndarray
            The mask to be applied to the data.
'''
# if mask is None:
# rad_mask = RadioMask(self.cube)
# mask = rad_mask.to_mask()
self.cube = self.cube.with_mask(mask)
def make_moments(self, axis=0, units=True):
'''
Calculate the moments.
Parameters
----------
axis : int, optional
The axis to calculate the moments along.
units : bool, optional
If enabled, the units of the arrays are kept.
'''
self._moment0 = self.cube.moment0(axis=axis, how=self.moment_how)
self._moment1 = self.cube.moment1(axis=axis, how=self.moment_how)
self._linewidth = \
self.cube.linewidth_sigma(how=self.moment_how)
if not units:
self._moment0 = self._moment0.value
self._moment1 = self._moment1.value
            self._linewidth = self._linewidth.value
def make_moment_errors(self, axis=0, scale=None):
'''
Calculate the errors in the moments.
Parameters
----------
axis : int, optional
The axis to calculate the moments along.
'''
if not hasattr(self, "_moment0"):
raise ValueError("Run Moments.make_moments first.")
if self.scale is None and scale is None:
warn("scale not set to the rms noise and will not be used in "
"error calculations.")
scale = 0.0 * self.cube.unit
self._moment0_err = np.zeros_like(self.moment0)
self._moment1_err = np.zeros_like(self.moment1)
self._linewidth_err = np.zeros_like(self.linewidth)
elif self.scale is not None:
scale = self.scale
self._moment0_err = moment0_error(self.cube, scale,
how=self.moment_how, axis=axis)
self._moment1_err = moment1_error(self.cube, scale,
how=self.moment_how,
axis=axis, moment0=self.moment0,
moment1=self.moment1)
self._linewidth_err = \
linewidth_sigma_err(self.cube, scale,
how=self.moment_how, moment0=self.moment0,
moment1=self.moment1,
moment1_err=self.moment1_err)
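    # Illustrative note (not from the original source): for N independent channels
    # of width dv with uniform rms noise sigma, simple propagation on the zeroth
    # moment M0 = sum_i I_i * dv gives sigma_M0 ~ sigma * dv * sqrt(N). The exact
    # expressions used above live in _moment_errs and also handle masking, so
    # treat this only as an order-of-magnitude sanity check.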
@property
def moment0(self):
return self._moment0
@property
def moment1(self):
return self._moment1
@property
def linewidth(self):
return self._linewidth
@property
def moment0_err(self):
return self._moment0_err
@property
def moment1_err(self):
return self._moment1_err
@property
def linewidth_err(self):
return self._linewidth_err
def all_moments(self):
return [self._moment0, self._moment1, self.linewidth]
def all_moment_errs(self):
return [self._moment0_err, self._moment1_err, self.linewidth_err]
def to_dict(self):
'''
Returns a dictionary with the cube and the property arrays.
'''
self.get_prop_hdrs()
prop_dict = {}
# Avoid reading in the whole cube when it is big, unless
# you set cube.allow_huge_operations=True
if self.cube._is_huge and not self.cube.allow_huge_operations:
raise ValueError("This will load the whole cube into memory. Set "
"``cube.allow_huge_operations=True`` to"
" allow this")
if _try_remove_unit(self.cube.filled_data[:]):
prop_dict['cube'] = [self.cube.filled_data[:].value,
self.cube.header]
else:
prop_dict['cube'] = [self.cube.filled_data[:], self.cube.header]
if _try_remove_unit(self.moment0):
prop_dict['moment0'] = [self.moment0.value, self.prop_headers[0]]
else:
prop_dict['moment0'] = [self.moment0, self.prop_headers[0]]
if _try_remove_unit(self.moment0_err):
prop_dict['moment0_error'] = [self.moment0_err.value,
self.prop_err_headers[0]]
else:
prop_dict['moment0_error'] = [self.moment0_err,
self.prop_err_headers[0]]
if _try_remove_unit(self.moment1):
prop_dict['centroid'] = [self.moment1.value, self.prop_headers[1]]
else:
prop_dict['centroid'] = [self.moment1, self.prop_headers[1]]
if _try_remove_unit(self.moment1_err):
prop_dict['centroid_error'] = [self.moment1_err.value,
self.prop_err_headers[1]]
else:
prop_dict['centroid_error'] = [self.moment1_err,
self.prop_err_headers[1]]
if _try_remove_unit(self.linewidth):
prop_dict['linewidth'] = [self.linewidth.value,
self.prop_headers[2]]
else:
prop_dict['linewidth'] = [self.linewidth,
self.prop_headers[2]]
if _try_remove_unit(self.linewidth_err):
prop_dict['linewidth_error'] = [self.linewidth_err.value,
self.prop_err_headers[2]]
else:
prop_dict['linewidth_error'] = [self.linewidth_err,
self.prop_err_headers[2]]
return prop_dict
def get_prop_hdrs(self):
'''
Generate headers for the moments.
'''
bunits = [self.cube.unit, self.cube.spectral_axis.unit,
self.cube.spectral_axis.unit,
self.cube.unit * self.cube.spectral_axis.unit]
comments = ["Image of the Zeroth Moment",
"Image of the First Moment",
"Image of the Second Moment",
"Image of the Integrated Intensity"]
self.prop_headers = []
self.prop_err_headers = []
for i in range(len(bunits)):
wcs = self.cube.wcs.copy()
new_wcs = drop_axis(wcs, -1)
hdr = new_wcs.to_header()
hdr_err = new_wcs.to_header()
hdr["BUNIT"] = bunits[i].to_string()
hdr_err["BUNIT"] = bunits[i].to_string()
hdr["COMMENT"] = comments[i]
hdr_err["COMMENT"] = comments[i] + " Error."
self.prop_headers.append(hdr)
self.prop_err_headers.append(hdr_err)
def to_fits(self, save_name=None, **kwargs):
'''
Save the property arrays as fits files.
Parameters
----------
save_name : str, optional
Prefix to use when saving the moment arrays.
If None is given, 'default' is used.
kwargs : Passed to `~astropy.io.fits.HDUList.writeto`.
'''
if self.prop_headers is None:
self.get_prop_hdrs()
if save_name is None:
if self.save_name is None:
                warn("No save_name has been specified, using 'default'")
self.save_name = 'default'
else:
self.save_name = save_name
labels = ["_moment0", "_centroid", "_linewidth"]
for i, (arr, err, hdr, hdr_err) in \
enumerate(zip(self.all_moments(), self.all_moment_errs(),
self.prop_headers, self.prop_err_headers)):
# Can't write quantities.
if _try_remove_unit(arr):
arr = arr.value
if _try_remove_unit(err):
err = err.value
hdu = fits.HDUList([fits.PrimaryHDU(arr, header=hdr),
fits.ImageHDU(err, header=hdr_err)])
hdu.writeto(self.save_name + labels[i] + ".fits",
**kwargs)
@staticmethod
def from_fits(fits_name, moments_prefix=None, moments_path=None,
mask_name=None, moment0=None, centroid=None, linewidth=None,
scale=None):
'''
Load pre-made moment arrays given a cube name. Saved moments must
match the naming of the cube for the automatic loading to work
(e.g. a cube called test.fits will have a moment 0 array with the name
test_moment0.fits). Otherwise, specify a path to one of the keyword
arguments.
Parameters
----------
fits_name : str
Filename of the cube or a SpectralCube object. If a filename is
given, it is also used as the prefix to the saved moment files.
moments_prefix : str, optional
If a SpectralCube object is given in ``fits_name``, the prefix
for the saved files can be provided here.
moments_path : str, optional
Path to where the moments are saved.
mask_name : str, optional
Filename of a saved mask to be applied to the data.
moment0 : str, optional
Filename of the moment0 array. Use if naming scheme is not valid
for automatic loading.
centroid : str, optional
Filename of the centroid array. Use if naming scheme is not valid
for automatic loading.
linewidth : str, optional
Filename of the linewidth array. Use if naming scheme is not valid
for automatic loading.
scale : `~astropy.units.Quantity`, optional
The noise level in the cube. Overrides estimation using
`signal_id <https://github.com/radio-astro-tools/signal-id>`_
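        Examples
        --------
        Sketch only; the file names below are placeholders for your own data.
        >>> mm = Moments.from_fits("test.fits")  # doctest: +SKIP
        >>> mm = Moments.from_fits("test.fits", moment0="other_mom0.fits")  # doctest: +SKIP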
'''
if not spectral_cube_flag:
            raise ImportError("Moments requires the spectral-cube package"
                              " to be installed: https://github.com/"
                              "radio-astro-tools/spectral-cube")
if moments_path is None:
moments_path = ""
if not isinstance(fits_name, SpectralCube):
root_name = os.path.basename(fits_name[:-5])
else:
root_name = moments_prefix
self = Moments(fits_name, scale=scale)
if mask_name is not None:
mask = fits.getdata(mask_name)
            self.apply_mask(mask=mask)
# Moment 0
if moment0 is not None:
mom0_name = moment0
else:
mom0_name = os.path.join(moments_path,
root_name + "_moment0.fits")
try:
with fits.open(mom0_name) as moment0:
self._moment0 = moment0[0].data
self._moment0_err = moment0[1].data
except IOError as e:
self._moment0 = None
self._moment0_err = None
print(e)
print("Moment 0 fits file not found.")
if centroid is not None:
mom1_name = centroid
else:
mom1_name = os.path.join(moments_path,
root_name + "_centroid.fits")
try:
with fits.open(mom1_name) as moment1:
self._moment1 = moment1[0].data
self._moment1_err = moment1[1].data
except IOError as e:
self._moment1 = None
self._moment1_err = None
print(e)
print("Centroid fits file not found.")
if linewidth is not None:
lwidth_name = linewidth
else:
lwidth_name = | |
valDef, setter=True)
# Attribute 'MinVoltageWarning' GUID 10049 Data type TYPE_UNSIGNED_NUMBER
# Minimum voltage warning level
def getMinVoltageWarning(self, moduleID):
guid = 10049
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
def setMinVoltageWarning(self, moduleID, value):
guid = 10049
portnumber = 0
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(value, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
# Attribute 'ActiveEnergyReset' GUID 10050 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
# Active Energy
def doActiveEnergyReset(self, moduleID):
guid = 10050
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(1, valDef))
return self._parent.getObjectFromData(data, valDef, setter=True)
# Attribute 'ApparentEnergyReset' GUID 10051 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
# Apparent Energy
def doApparentEnergyReset(self, moduleID, portnumber=1):
guid = 10051
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(1, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
# Attribute 'MinTemperatureWarning' GUID 10052 Data type
# TYPE_SIGNED_NUMBER
def getMinTemperatureWarning(self, moduleID):
guid = 10052
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
def setMinTemperatureWarning(self, moduleID, value):
guid = 10052
portnumber = 0
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(value, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
# Attribute 'MaxTemperatureWarning' GUID 10053 Data type
# TYPE_SIGNED_NUMBER
def getMaxTemperatureWarning(self, moduleID):
guid = 10053
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
def setMaxTemperatureWarning(self, moduleID, value):
guid = 10053
portnumber = 0
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(value, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
# Attribute 'Startuptime' GUID 10066 Data type TYPE_UNSIGNED_NUMBER
def getStartuptime(self, moduleID):
guid = 10066
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'PowerCycleTime' GUID 10099 Data type TYPE_UNSIGNED_NUMBER
# The time that power will be switched off when power cycle is started
def getPowerCycleTime(self, moduleID, portnumber=1, length=1):
guid = 10099
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
    def setPowerCycleTime(self, moduleID, value, portnumber=1):
guid = 10099
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(value, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
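    # Illustrative usage sketch (not part of the generated wrapper): every GUID
    # attribute follows the same client pattern; the module and port numbers below
    # are made up, and ``api`` stands for an instance of this wrapper class.
    #
    #   secs = api.getPowerCycleTime(moduleID=3, portnumber=2)
    #   api.setPowerCycleTime(moduleID=3, value=10, portnumber=2)
    #
    # Getters return parsed objects via getObjectFromData(); setters first pack the
    # value with convert.value2bin() against the GUID's value definition.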
# Attribute 'JumpBoot' GUID 40000 Data type TYPE_COMMAND
    # Enter bootloader mode. Normally this command is only sent to the application
    # program. When the bootloader is already running, this command will only
    # reply with a positive acknowledgement.
def getJumpBoot(self, moduleID):
guid = 40000
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
def setJumpBoot(self, moduleID, value):
guid = 40000
portnumber = 0
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(value, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
# Attribute 'GotoFactoryMode' GUID 40002 Data type TYPE_COMMAND
def setGotoFactoryMode(self, moduleID, value):
guid = 40002
portnumber = 0
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(value, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
# Attribute 'ApparentPower' GUID 15 Data type TYPE_UNSIGNED_NUMBER
# Apparent power (this is the product of the current and the voltage)
def getApparentPower(self, moduleID, portnumber=1, length=1):
guid = 15
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'ModInfo' GUID 40008 Data type TYPE_COMMAND
def getModInfo(self, moduleID):
guid = 40008
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'PowerFactor' GUID 16 Data type TYPE_UNSIGNED_NUMBER
# Powerfactor
def getPowerFactor(self, moduleID, portnumber=1, length=1):
guid = 16
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'MinCurrent' GUID 5010 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
# Minimum port current occurred since last reset
def getMinCurrent(self, moduleID, portnumber=1, length=1):
guid = 5010
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'MinPower' GUID 5011 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
    # Minimum port power occurred since last reset
def getMinPower(self, moduleID, portnumber=1, length=1):
guid = 5011
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'MinPowerFactor' GUID 5012 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
    # Minimum powerfactor occurred per port since last reset
def getMinPowerFactor(self, moduleID, portnumber=1, length=1):
guid = 5012
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'MaxPowerFactor' GUID 5013 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
    # Maximum powerfactor occurred per port since last reset
def getMaxPowerFactor(self, moduleID, portnumber=1, length=1):
guid = 5013
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'BootJumpApp' GUID 60001 Data type TYPE_COMMAND
# Jump to the application, which starts at 0x4000.
def setBootJumpApp(self, moduleID, value):
guid = 60001
portnumber = 0
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(value, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
# Attribute 'TotalCurrent' GUID 17 Data type TYPE_UNSIGNED_NUMBER
# Total current
def getTotalCurrent(self, moduleID):
guid = 17
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'TotalRealPower' GUID 18 Data type TYPE_UNSIGNED_NUMBER
# Total real power
def getTotalRealPower(self, moduleID):
guid = 18
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'TotalApparentPower' GUID 19 Data type TYPE_UNSIGNED_NUMBER
# Total apparent power
def getTotalApparentPower(self, moduleID):
guid = 19
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'TotalActiveEnergy' GUID 20 Data type TYPE_UNSIGNED_NUMBER
# Total active energy
def getTotalActiveEnergy(self, moduleID):
guid = 20
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'TotalApparentEnergy' GUID 21 Data type TYPE_UNSIGNED_NUMBER
# Total apparent energy
def getTotalApparentEnergy(self, moduleID):
guid = 21
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'TotalPowerFactor' GUID 22 Data type TYPE_UNSIGNED_NUMBER
# Total power factor
def getTotalPowerFactor(self, moduleID):
guid = 22
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'MinTotalCurrent' GUID 5014 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
# Minimum port current occurred since last reset
def getMinTotalCurrent(self, moduleID):
guid = 5014
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'MinTotalPower' GUID 5015 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
# Minimum port power occurred since last reset
def getMinTotalPower(self, moduleID):
guid = 5015
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'MinTotalPowerFactor' GUID 5016 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
# Minimum total power factor occurred since last reset
def getMinTotalPowerFactor(self, moduleID):
guid = 5016
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'MaxTotalPowerFactor' GUID 5017 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
# Maximum total power factor occurred since last reset
def getMaxTotalPowerFactor(self, moduleID):
guid = 5017
portnumber = 0
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
# Attribute 'ActiveTotalEnergyReset' GUID 10071 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
# Active Total Energy / time of reset + value at that time
def doActiveTotalEnergyReset(self, moduleID):
guid = 10071
portnumber = 0
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(1, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
# Attribute 'ApparentTotalEnergyReset' GUID 10072 Data type TYPE_UNSIGNED_NUMBER_WITH_TS
# Apparent Total Energy / time of reset + value at that time
def doApparentTotalEnergyReset(self, moduleID):
guid = 10072
portnumber = 0
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(1, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
# Attribute 'MonitorAutoRefresh' GUID 50010 Data type TYPE_POINTER
# Get the monitor values from the | |
"Playback speed\n",
"\n",
"Unmute\n",
"\n",
"Turn fullscreen on\n",
"likeloveinsightful\n",
"9,044\n",
"<NAME> and 9,043 others\n",
"257 comments\n",
"\n",
"Like\n",
"\n",
"Comment\n",
"\n",
"Share\n",
"\n",
"Send\n",
"Feed post number 1\n",
"Larange’s profile photo\n",
"<NAME> likes this\n",
"\n",
"<NAME>\n",
"<NAME>\n",
"• 2nd\n",
"Research Scientist at Google DeepMind • Advisor at AlphaSignal.ai\n",
"1d • Edited • 1 day ago\n",
"Follow\n",
"Hi all!\n",
"We're currently looking for a part time (4 hours/week) Computer Science PhD or Masters student to help us at https://alphasignal.ai\n",
"\n",
"A few criteria:\n",
"-Must be between timezones GMT+5 (India) to GMT+9 (Japan).\n",
"-Published at least one research paper in Machine Learning.\n",
"-Has some familiarity with Pytorch, Tensorflow.\n",
"-Knows the difference between RNN, CNN, LSTM, GANs.\n",
"-Familiarity with Deep Learning terms.\n",
"-Knows about Meta AI, Deepmind, Google AI, OpenAI and other big players.\n",
"\n",
"That's it!\n",
"If you know a student who fits the role send it my way :)\n",
"\n",
"It's definitely a resume-booster.\n",
"…see more\n",
"likesupportcurious\n",
"432\n",
"<NAME> and 431 others\n",
"102 comments\n",
"\n",
"Like\n",
"\n",
"Comment\n",
"\n",
"Share\n",
"\n",
"Send\n",
"Feed post number 2\n",
"<NAME> and <NAME> follow Spot by NetApp\n",
"\n",
"Spot by NetApp\n",
"Spot by NetApp\n",
"11,440 followers\n",
"Promoted\n",
"Download our free digital guide to Microsoft Azure. Learn best practices for architecting and managing efficient, scalable cloud infrastructure built to accelerate your business\n",
"…see more\n",
"\n",
"Getting Started with Azure Cloud Infrastructure – Get the PDF Guide\n",
"get.spot.io\n",
"\n",
"Download. View Sponsored Content\n",
"Download\n",
"like\n",
"30\n",
"\n",
"Like\n",
"\n",
"Comment\n",
"\n",
"Share\n",
"\n",
"Send\n",
"\n",
"Be the first to comment on this\n",
"\n",
"Feed post number 3\n",
"Everest’s profile photo\n",
"Everest K.C. likes this\n",
"\n",
"<NAME>\n",
"<NAME>\n",
"• 2nd\n",
"System Administrator\n",
"6h • Edited • 6 hours ago\n",
"<NAME> is an extremely experienced DevOps engineer. He has an intelligent teaching method, Start teaching from the very basics to taking to a stage where you build real-world solutions and debug challenges. The designed labs were great and worked as expected without a hitch. Thanks, owlocular to give me this certificate.\n",
"…see more\n",
"Your document has finished loading\n",
"likecelebratelove\n",
"6\n",
"Everest K.C. and 5 others\n",
"1 comment\n",
"\n",
"Like\n",
"\n",
"Comment\n",
"\n",
"Share\n",
"\n",
"Send\n",
"Feed post number 4\n",
"Lalit’s profile photo\n",
"<NAME> commented on this\n",
"\n",
"<NAME>\n",
"<NAME>\n",
"• 3rd+\n",
"Commissioning Manager - Process | Commissioning Expert | Troubleshooting Expert for O&M|\n",
"20h • 20 hours ago\n",
"Riyadh AldhaleaiStatus is reachable\n",
"Riy<NAME>\n",
"• 3rd+\n",
"Manager -Manpower Services at AlMansoori Specialized Engineering\n",
"3d • 3 days ago\n",
"Follow\n",
"small test for your focus\n",
"whats is the Number inside the circle?\n",
"\n",
"Image previewActivate to view larger image\n",
"like\n",
"4\n",
"18 comments\n",
"\n",
"Like\n",
"\n",
"Comment\n",
"\n",
"Share\n",
"\n",
"Send\n",
"<NAME>\n",
"\n",
"\n",
"\n",
"Open Emoji Keyboard\n",
"\n",
"Current selected sort order is Most relevant\n",
"Most relevant\n",
"See profile for Lalit SharmaStatus is reachable\n",
"L<NAME>\n",
" 1st degree connection1st\n",
"Team Leader cum Senior Highway Engineer\n",
"1h\n",
"45283\n",
"\n",
"\n",
"Like\n",
"\n",
"Reply\n",
"\n",
"Load more comments\n",
"Feed post number 5\n",
"Sammeer’s profile photo\n",
"<NAME> loves this\n",
"\n",
"<NAME>, Ph.D\n",
"<NAME>, Ph.D\n",
"• 2nd\n",
"Social Media Marketing|Please click FOLLOW button for the latest development in Civil Engineering || 230K+ Followers ||\n",
"23h • 23 hours ago\n",
"Follow\n",
" What Causes Breakdowns in Traffic Flow?\n",
"\n",
"The layman's definition of congestion is \"too many cars trying to use a highway at the same time\". Transportation engineers formalize this idea as capacity—the ability to move vehicles past a point over a given span of time. When the capacity of a highway section is exceeded, traffic flow breaks down, speeds drop, and vehicles crowd together. These actions cause traffic to back up behind the disruption.\n",
"\n",
"Basically, there are three types of traffic flow behavior that will cause traffic flow to break down:\n",
"\n",
"1- \"Bunching\" of vehicles as a result of reduced speed.\n",
"2- Intended Interruption to Traffic Flow.\n",
"3- Vehicle Merging Maneuvers. \n",
"\n",
"With all this information, I believe this dog got the concept and found the solution better than me as a Highway engineer ;)\n",
"\n",
"If you found the content informative, you may Follow me by <NAME>, Ph.D for more!\n",
"\n",
"Video Credit: IG @ earthdixe\n",
".......................................................................\n",
"All rights and credits are reserved to the respective owner(s). If you are the main copyright owner rather than the one mentioned here of this content, contact me to claim credit or content removal.\n",
"\n",
"Check out #mehrtashsoltani for educational and practical content in civil engineering!\n",
"…see more\n",
"\n",
"Pause\n",
"Back to start of video\n",
"Skip back 10 seconds\n",
"Skip ahead 10 seconds\n",
"Current time 0:04/Duration 0:18\n",
" \n",
"1x\n",
"\n",
"Playback speed\n",
"\n",
"Unmute\n",
"\n",
"Turn fullscreen on\n",
"likelovecelebrate\n",
"3,781\n",
"<NAME> and 3,780 others\n",
"178 comments\n",
"\n",
"Like\n",
"\n",
"Comment\n",
"\n",
"Share\n",
"\n",
"Send\n",
"Feed post number 6\n",
"Supermetrics\n",
"Supermetrics\n",
"13,205 followers\n",
"Promoted\n",
"\n",
"Supermetrics has been listed on G2 as the highest rated marketing data pipeline company of 2022.\n",
"\n",
"Over 17,000 companies already use Supermetrics to streamline marketing reports.\n",
"\n",
"Join our happy customers and start your free trial today!\n",
"…see more\n",
"\n",
"Start your free trial today\n",
"supermetrics.comRegister. View Sponsored Content\n",
"Register\n",
"likelove\n",
"61\n",
"1 comment\n",
"\n",
"Like\n",
"\n",
"Comment\n",
"\n",
"Share\n",
"\n",
"Send\n",
"Feed post number 7\n",
"Samundra’s profile photo\n",
"<NAME> loves this\n",
"\n",
"<NAME>\n",
"<NAME>\n",
"• 2nd\n",
"Here to write. If it goes viral, it's not because of me. It's because it's true.\n",
"3d • 3 days ago\n",
"Follow\n",
"\n",
"No alternative text description for this imageActivate to view larger image\n",
"likelovecelebrate\n",
"68,665\n",
"<NAME> and 68,664 others\n",
"1,154 comments\n",
"\n",
"Like\n",
"\n",
"Comment\n",
"\n",
"Share\n",
"\n",
"Send\n",
"\n",
"Show more results\n",
"Add to your feed\n",
"\n",
"National Association of REALTORS®;<EMAIL> \n",
"National Association of REALTORS®\n",
"Company • Real Estate\n",
"\n",
"Follow\n",
"<NAME>\n",
"<NAME>\n",
"Bestselling Author | Host of the Women & Money Podcast | Co-Founder of SecureSave\n",
"\n",
"Follow\n",
"National Association of Home Builders\n",
"National Association of Home Builders\n",
"Company • Construction\n",
"\n",
"Follow\n",
"View all recommendations \n",
"\n",
"About\n",
"Accessibility\n",
"Help Center\n",
"\n",
"Privacy & Terms \n",
"Ad Choices\n",
"Advertising\n",
"\n",
"Business Services \n",
"Get the LinkedIn app\n",
"More\n",
" LinkedIn Corporation © 2022 <EMAIL> \n",
"<NAME>Status is online\n",
"MessagingYou are on the messaging overlay. Press enter to open the list of conversations.\n",
"\n",
"\n",
"Compose message\n",
"\n",
"You are on the messaging overlay. Press enter to open the list of conversations.\n",
"\n"
]
}
],
"source": [
"\n",
"f = open(\"C:\\\\Users\\\\super\\Downloads\\\\Week 1 Test\\\\websiteData.txt\", \"r\", encoding=\"utf8\")\n",
"print(f.read())"
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"['<EMAIL>']\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"['<EMAIL>']\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"['<EMAIL>']\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"['<EMAIL>']\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
"[]\n",
| |
import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from discord.ext.commands import bot, has_permissions, CheckFailure
from discord import Spotify
import asyncio
import colorsys
import random
import platform
from discord import Game, Embed, Color, Status, ChannelType
import feedparser
from subprocess import check_output
import os
import functools
import time
import datetime
from datetime import date
from datetime import datetime as datetime1
import sqlite3
from io import BytesIO
import requests
from babel.numbers import format_currency
from babel.numbers import format_number
import locale
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import json
import pytz
from pytz import timezone
class Entertainment(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print('Entertainment is loaded')
@commands.command(pass_context=True)
async def movie(self, ctx, *, name:str=None):
await ctx.trigger_typing()
url = f"http://www.omdbapi.com/?s={name}&apikey=<KEY>"
data = requests.get(url).json()
if data['Response'] == "False":
await ctx.send("Oh no! There aren't any movies with this name in the database.")
else:
count = str(data['Search']).count('Title')
titles = list(data['Search'][x]['Title'] for x in range(count))
years = list(data['Search'][x]['Year'] for x in range(count))
list3 = list("%d. %s" % (n, a) for n, a in enumerate(titles, start=1))
list1 = list("{} ({})".format(title, year) for title, year in zip(list3, years))
list2 = '\n'.join(list1)
await ctx.send(f"Here are the results for your search:\n\n{list2}\n\nEnter the number of the movie to get more details. You've got 15 seconds to enter your option.")
try:
num = await self.client.wait_for('message', check=lambda message: message.author == ctx.author, timeout=15)
num2 = int(num.content)
movieid = data['Search'][num2-1]['imdbID']
url = "http://www.omdbapi.com/?i={}&apikey=4210fd67&plot=full".format(movieid)
x = requests.get(url).json()
plot = x['Plot']
embed = discord.Embed(title=x['Title'], description="Here is your movie, {}".format(ctx.message.author.name), color=ctx.author.color)
if x["Poster"] != "N/A":
embed.set_thumbnail(url = x["Poster"])
"""imdb = x['Ratings'][0]['Value']"""
"""rotten = x['Ratings'][1]['Value']"""
if len(plot) > 1024:
url2 = "http://www.omdbapi.com/?t={}&apikey=4210fd67&plot=short".format(movieid)
x = requests.get(url2).json()
plot = x['Plot']
else:
plot = plot
# OMDB returns ratings as a list of {"Source": ..., "Value": ...} entries;
# build a lookup so that any missing source falls back to "N/A".
ratings = {r['Source']: r['Value'] for r in x.get('Ratings', [])}
imdb = ratings.get('Internet Movie Database', "N/A")
rotten = ratings.get('Rotten Tomatoes', "N/A")
metacritic = ratings.get('Metacritic', "N/A")
embed.add_field(name = "__Title__", value = x["Title"])
embed.add_field(name = "__Released__", value = x["Released"])
embed.add_field(name = "__Rated__", value = x["Rated"])
embed.add_field(name = "__Runtime__", value = x["Runtime"])
embed.add_field(name = "__Genre__", value = x["Genre"])
embed.add_field(name = "__Director__", value = x["Director"])
embed.add_field(name = "__Writer__", value = x["Writer"])
embed.add_field(name = "__Actors__", value = x["Actors"])
embed.add_field(name = "__Plot__", value = plot)
embed.add_field(name = "__Countries__", value = x["Country"])
embed.add_field(name = "__Language(s) Used__", value = x["Language"])
embed.add_field(name = "__IMDB Votes__", value = x["imdbVotes"])
embed.add_field(name = f"__IMDB Rating__", value = imdb)
embed.add_field(name = f"__Rotten Tomatoes__", value = rotten)
embed.add_field(name = f'__Metacritics__', value = metacritic)
embed.add_field(name = "__Awards__", value = x['Awards'])
embed.add_field(name = "__Box Office__", value = x['BoxOffice'])
embed.add_field(name = "__DVD Release__", value = x["DVD"])
embed.add_field(name = "__Production__", value = x["Production"])
embed.add_field(name = "__Website__", value = x["Website"])
embed.add_field(name = "__Type__", value = x["Type"])
embed.timestamp = datetime.datetime.utcnow()
embed.set_footer(text = "Information from the OMDB API")
await ctx.trigger_typing()
await ctx.send(embed=embed)
except asyncio.TimeoutError:
await ctx.send("Oh No! Timeout expired. Please execute the command once again.")
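# Note on the OMDB flow above: the first request hits the search endpoint
# (?s=<title>), which returns up to 10 matches per page in a 'Search' list;
# the follow-up request uses the details endpoint (?i=<imdbID>) with
# plot=full, falling back to plot=short because a Discord embed field value
# is limited to 1024 characters.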
@commands.command(pass_context=True)
async def spotify(self, ctx):
local_tz = pytz.timezone('Asia/Calcutta')
user = ctx.author
for activity in user.activities:
if isinstance(activity, Spotify):
client_id = "3de4994a8c99485ab153804b7cfa6ff4"
client_secret = "<KEY>"
client_credentials_manager = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
result = sp.track(activity.track_id)
release_date = result['album']['release_date']
explicit = result['explicit']
embed = discord.Embed(description="**React to :thumbsup: in 20 seconds to get the song's lyrics into your DMs**")
embed.add_field(name="Artist(s)", value=", ".join(activity.artists))
embed.add_field(name="Album", value=activity.album)
embed.add_field(name="Track ID", value=activity.track_id)
embed.add_field(name="Release date", value = release_date)
embed.add_field(name="Explicit", value= explicit)
embed.add_field(name="Duration", value=str(activity.duration)[3:].split(".", 1)[0])
embed.add_field(name="Song started at", value=activity.start.astimezone(local_tz).strftime("%d-%m-%Y %I:%M %p %Z"))
embed.add_field(name="Song ending at", value=activity.end.astimezone(local_tz).strftime("%d-%m-%Y %I:%M %p %Z"))
embed.title = "**{}**".format(activity.title)
embed.set_thumbnail(url=activity.album_cover_url)
embed.url = "https://open.spotify.com/track/{}".format(activity.track_id)
embed.color = activity.color
embed.set_footer(text="{} - is currently playing this song".format(ctx.author))
message = await ctx.send(embed=embed)
await message.add_reaction(emoji="👍")
await asyncio.sleep(2)
artist = ", ".join(activity.artists)
try:
reaction, user = await self.client.wait_for('reaction_add', check=lambda reaction, user: reaction.emoji == '👍', timeout=20)
con = f"{activity.title} {artist}"
address = f"https://some-random-api.ml/lyrics?title={con}"
data = requests.get(address).json()
if 'error' in data:
await ctx.send(f"**{data['error']}**")
elif data['author'] in activity.artist:
lyrics = data['lyrics']
if len(lyrics) < 2048:
for chunk in [lyrics[i:i+2000] for i in range(0, len(lyrics), 2000)]:
embed = discord.Embed(title=data['author'], description=f"{chunk} \n [Source website]({data['links']['genius']})", color=0XFF69BF)
embed.set_author(name=data['title'], url=data['thumbnail']['genius'])
embed.set_thumbnail(url=data['thumbnail']['genius'])
embed.timestamp = datetime.datetime.utcnow()
await ctx.send(f"**{ctx.author.name},** the lyrics of **{data['author']} - {data['title']}** have been sent to your DMs, please check them...")
await ctx.author.send(embed=embed)
else:
await ctx.send(f"**{ctx.author.name},** the lyrics of **{data['author']} - {data['title']}** have been sent to your DMs, please check them...")
for chunk in [lyrics[i:i+2000] for i in range(0, len(lyrics), 2000)]:
await ctx.author.send(chunk)
else:
await ctx.send("**Sorry, I couldn't find the lyrics of this song...**")
except asyncio.TimeoutError:
return
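# Note: SpotifyClientCredentials implements Spotify's client-credentials
# OAuth flow, which is enough for public catalogue lookups such as
# sp.track(track_id) used above, but it cannot access user-specific data;
# the currently playing track itself comes from the Discord presence
# (discord.Spotify activity), not from the Spotify Web API.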
@commands.command(pass_context=True)
async def lyrics(self, ctx, *, track:str=None):
address = f"https://some-random-api.ml/lyrics?title={track}"
data = requests.get(address).json()
if 'error' in data:
await ctx.send(f"**{data['error']}**")
else:
lyrics = data['lyrics']
if len(lyrics) < 2048:
for chunk in [lyrics[i:i+2000] for i in range(0, len(lyrics), 2000)]:
embed = discord.Embed(title=data['title'], description=f"{chunk}", color=ctx.author.color)
embed.set_author(name=data['author'], url=data['thumbnail']['genius'])
embed.url = data['links']['genius']
embed.set_thumbnail(url=data['thumbnail']['genius'])
embed.timestamp = datetime.datetime.utcnow()
await ctx.send(f"**{ctx.author.name},** the lyrics of **{data['author']} - {data['title']}** have been sent to your DMs, please check them...")
await ctx.author.send(embed=embed)
else:
await ctx.send(f"**{ctx.author.name},** the lyrics of **{data['author']} - {data['title']}** have been sent to your DMs, please check them...")
for chunk in [lyrics[i:i+2000] for i in range(0, len(lyrics), 2000)]:
await ctx.author.send(chunk)
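# Note: Discord caps regular messages at 2000 characters, which is why the
# lyrics above are split into 2000-character chunks before sending. A minimal
# sketch of the same idea, with "text" as an illustrative variable:
#
#   chunks = [text[i:i + 2000] for i in range(0, len(text), 2000)]
#   for chunk in chunks:
#       await ctx.author.send(chunk)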
@commands.command(pass_context=True)
async def anime(self, ctx, *, name:str = None):
api_address = f"https://kitsu.io/api/edge/anime?filter[text]={name}"
data = requests.get(api_address).json()
url = data['data'][0]['links']['self']
data2 = requests.get(url).json()
end_date = data2['data']['attributes']['endDate']
synopsis = data2['data']['attributes']['synopsis']
if len(synopsis) > 1024:
synopsis = "Oh no! The synopsis is too long to display here."
else:
synopsis = synopsis
if end_date == None:
end_date = "Not finished yet"
else:
end_date = end_date
ytlink = data2['data']['attributes']['youtubeVideoId']
await ctx.trigger_typing()
embed = discord.Embed(title="Here's the anime show that you've searched for...", color=ctx.author.color)
embed.add_field(name="Name", value=f"{data2['data']['attributes']['titles']['en'] } ({data2['data']['attributes']['titles']['ja_jp']})")
embed.add_field(name="Synopsis", value=synopsis)
embed.add_field(name="Average Rating", value=data2['data']['attributes']['averageRating'])
embed.add_field(name="Start Date", value=data2['data']['attributes']['startDate'])
embed.add_field(name="End Date", value=end_date)
embed.add_field(name="Age Rating", value=data2['data']['attributes']['ageRating'])
embed.add_field(name="Age Rating Guide", value=data2['data']['attributes']['ageRatingGuide'])
embed.add_field(name="Type of Media", value=data2['data']['attributes']['subtype'])
embed.add_field(name="Status", value=data2['data']['attributes']['status'])
embed.set_thumbnail(url=data2['data']['attributes']['posterImage']['original'])
embed.add_field(name="Episode Count", value=data2['data']['attributes']['episodeCount'])
embed.add_field(name="Episode Length", value=data2['data']['attributes']['episodeLength'])
embed.add_field(name="Youtube link", value=f"[Click here for trailer](https://www.youtube.com/watch?v={ytlink})")
embed.add_field(name="NSFW", value=data2['data']['attributes']['nsfw'])
embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
embed.timestamp = datetime.datetime.utcnow()
embed.set_footer(text=f"Requested By | {ctx.author.name}")
await ctx.send(embed=embed)
@commands.command(pass_context=True)
async def series(self, ctx, *, name:str=None):
url = f"http://api.tvmaze.com/search/shows?q={name}"
data = requests.get(url).json()
count = str(data).count('score')
titles = list(data[x]['show']['name'] for x in range(count))
list1 = list("%d. %s" % (n, a) for n, a in enumerate(titles, start=1))
list2 = '\n'.join(list1)
await ctx.send(f"Here are the results for your search: \n\n{list2}\n\nEnter the number of the series to get more details. You've got 15 seconds to enter your option.")
num = await self.client.wait_for("message", check=lambda message: message.author == ctx.author, timeout=15)
try:
num2 = int(num.content)
seriesid = data[num2-1]['show']['id']
url = f"http://api.tvmaze.com/shows/{seriesid}?embed[]=cast&embed[]=akas&embed[]=seasons&embed[]=episodes&embed[]=crew"
x = requests.get(url).json()
seasons = str(x['_embedded']['seasons']).count('summary')
episodes = str(x['_embedded']['episodes']).count('summary')
name = x['name']
type = x['type']
language = x['language']
premiered = x['premiered']
if x['_embedded']['cast'] == []:
cast = "N/A"
else:
castcount = str(x['_embedded']['cast']).count('gender')
if castcount > 10:
castcount = 10
else:
castcount = castcount
list1 = list(x['_embedded']['cast'][y]['person']['name'] for y in range(castcount))
list2 = list(x['_embedded']['cast'][y]['character']['name'] for y in range(castcount))
list3 = list("{} ({})".format(name, character) for name, character in zip(list1, list2))
cast = ", ".join(list3)
if x['_embedded']['crew'] == []:
crew = "N/A"
else:
crewcount = str(x['_embedded']['crew']).count('gender')
if crewcount > 10:
crewcount = 10
else:
crewcount = crewcount
list1 = list(x['_embedded']['crew'][y]['person']['name'] for y in range(crewcount))
list2 = list(x['_embedded']['crew'][y]['type'] for y in range(crewcount))
list3 = list("{} ({})".format(name, role) for name, role in zip(list1, list2))
crew = ", ".join(list3)
if x['_embedded']['akas'] == []:
akas = "N/A"
else:
akascount = str(x['_embedded']['akas']).count('country')
if akascount > 10:
akascount = 10
else:
akascount = akascount
list1 = list(x['_embedded']['akas'][y]['name'] for y in range(akascount))
"""list2 = list(x['_embedded']['akas'][y]['country']['name'] for y in range(akascount))
list3 = list("{} ({})". format(name, country) for name, country in zip(list1, list2))"""
akas = " ,".join(list1)
if x['genres'] == []:
genres = "N/A"
else:
genres = ", ".join(x['genres'])
summary = x['summary'].replace("<p>", "").replace("</p>", "").replace("<b>", "").replace("</b>", "").replace("<i>", "").replace("</i>", "")
if len(summary) > 1024:
summary = "Oh no! The summary is too large to post here."
status = x['status']
runtime = x['runtime']
averageruntime = x['averageRuntime']
officialsite = x['officialSite']
if x['schedule']['time'] == "":
time = "N/A"
else:
time = x['schedule']['time']
if x['schedule']['days'] == []:
days = "N/A"
| |
= cap.read()
padding_size = 0
resized_width = 1360
video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
output_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1] + padding_size * 2))
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
writer = cv2.VideoWriter('result/'+ session["third"] +'.mp4', fourcc, cap.get(cv2.CAP_PROP_FPS), output_size)
m=-1
i=1
s=0
c=1
while True:
ret, img_bgr = cap.read()
if not ret:
break
img_bgr = cv2.resize(img_bgr, video_size)
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
dets = detector(img_bgr, 1)
for k, d in enumerate(dets):
shape = sp(img_rgb, d)
face_descriptor = facerec.compute_face_descriptor(img_rgb, shape)
last_found = {'name': 'unknown', 'dist': de, 'color': (0,0,255),'percent': 0}
for name, saved_desc in descs.items():
dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
if dist < last_found['dist']:
perce = (1-dist)*100
last_found = {'name': "Target", 'dist': dist, 'color': (255,255,255), 'percent': perce}
if dist< c:
c=dist
if m == -1:
s = i
m=0
cv2.rectangle(img_bgr, pt1=(d.left(), d.top()), pt2=(d.right(), d.bottom()), color=last_found['color'], thickness=2)
cv2.putText(img_bgr, last_found['name'] + " (" + str(last_found['percent']) + "%)" , org=(d.left(), d.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
i=i+1
writer.write(img_bgr)
cv2.imshow('img', img_bgr)
if cv2.waitKey(1) == ord('q'):
break
cap.release()
writer.release()
maxacc = (1-c)*100
print(maxacc)
time = convert(s/24)
print(convert(s/24))
count = Count.query.filter_by(id = 1).first()
count.Total_Real = count.Total_Real + 1
db.session.add(count)
db.session.commit()
flash("You have successfully processed the video",'success3')
return redirect(url_for('thirddashboard'))
else:
print("Error")
exit()
else:
return redirect(url_for('relogin'))
#end
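# The matching loop above compares dlib 128-d face descriptors using the
# Euclidean distance (np.linalg.norm); a smaller distance means a closer
# match, the on-screen percentage is derived as (1 - distance) * 100, and
# the smallest distance seen over all frames is kept to report the best
# match and the time at which it first appeared (frame index divided by an
# assumed 24 fps).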
#admin page
@app.route('/Admin')
def admindashboard():
if "admin" in session:
user = session["admin"]
admin = Admin.query.filter_by(usr_name = session["admin"]).first()
return render_template('admin/dashboard.html',user=user,admin=admin)
else:
return redirect(url_for('relogin'))
@app.route('/Admin/user')
def user():
if "admin" in session:
user = session["admin"]
ordinary = (db.session.query(Ordinary).filter(Ordinary.usr_name == Other.usr_name).join(Other,Other.admin_approval == 'no')).all()
authority = (db.session.query(Authority).filter(Authority.usr_name == Other.usr_name).join(Other,Other.admin_approval == 'no')).all()
admin = Admin.query.filter_by(usr_name = session["admin"]).first()
return render_template('admin/user.html',ordinary = ordinary,authority = authority,user=user,admin=admin)
else:
return redirect(url_for('relogin'))
@app.route('/Admin/user/verify/<path:username>/<path:value>')
def verify(username,value):
if "admin" in session:
user = session["admin"]
print(username)
result = value
print(result)
verify = Other.query.filter_by(usr_name = username).first()
if result == 'accept':
verify.admin_approval = 'accept'
verify.admin_id = 'Surej'
db.session.add(verify)
db.session.commit()
flash('Verified successfully')
return redirect(url_for('user',user=user))
elif result == 'reject':
verify.admin_approval = 'reject'
verify.admin_id = 'Surej'
db.session.add(verify)
db.session.commit()
flash('Verified successfully')
return redirect(url_for('user',user=user))
else:
return redirect(url_for('relogin'))
@app.route('/Admin/process')
def process():
if "admin" in session:
user = session["admin"]
succ = Other.query.filter_by(third_party_pending_order ='yes' ).all()
fail = Other.query.filter_by(third_party_pending_order ='reject' ).all()
processed = Other.query.filter_by(no_of_video_request = 2 ).all()
admin = Admin.query.filter_by(usr_name = session["admin"]).first()
print(succ)
print(fail)
print(processed)
return render_template('admin/process.html',succ = succ ,fail = fail ,processed = processed ,user=user,admin=admin)
else:
return redirect(url_for('relogin'))
@app.route('/Admin/processing/<path:uname>')
def processing(uname):
if "admin" in session:
descs = np.load('third_image/'+ uname +'.npy',allow_pickle=True)[()]
video_path = 'third_video/'+ uname + '.mp4'
print(video_path)
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
flash('Video could not be opened','error')
print("Video could not be opened")
return redirect(url_for('process'))
_, img_bgr = cap.read()
padding_size = 0
resized_width = 1920
video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
output_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1] + padding_size * 2))
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
writer = cv2.VideoWriter('result/'+ uname +'.mp4', fourcc, cap.get(cv2.CAP_PROP_FPS), output_size)
m=-1
i=1
s=0
c=1
while True:
ret, img_bgr = cap.read()
if not ret:
break
img_bgr = cv2.resize(img_bgr, video_size)
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
dets = detector(img_bgr, 1)
for k, d in enumerate(dets):
shape = sp(img_rgb, d)
face_descriptor = facerec.compute_face_descriptor(img_rgb, shape)
last_found = {'name': 'unknown', 'dist': de, 'color': (0,0,255),'percent': 0}
for name, saved_desc in descs.items():
dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
if dist < last_found['dist']:
perce = (1-dist)*100
last_found = {'name': "Target", 'dist': dist, 'color': (255,255,255), 'percent': perce}
if dist< c:
c=dist
if m == -1:
s = i
m=0
cv2.rectangle(img_bgr, pt1=(d.left(), d.top()), pt2=(d.right(), d.bottom()), color=last_found['color'], thickness=2)
cv2.putText(img_bgr, last_found['name'] + " (" + str(last_found['percent']) + "%)" , org=(d.left(), d.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
i=i+1
writer.write(img_bgr)
cap.release()
writer.release()
maxacc = (1-c)*100
print(maxacc)
time = convert(s/24)
print(convert(s/24))
success = "You have successfully processed the video"
send = Other.query.filter_by(usr_name = uname).first()
send.third_party_issue_id = ''
send.third_party_pending_order = ''
send.third_party_response = ''
send.date = ''
send.start_time = ''
send.end_time = ''
send.no_of_video_request = 2
db.session.add(send)
count = Count.query.filter_by(id = 1).first()
count.Total_request = count.Total_request + 1
db.session.add(count)
db.session.commit()
flash('You have successfully processed the video','procc')
return redirect(url_for('process'))
else:
return redirect(url_for('relogin'))
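# Note on the writer above: frames are rescaled to a fixed 1920-pixel width
# while preserving the aspect ratio, and the output keeps the source frame
# rate (cap.get(cv2.CAP_PROP_FPS)). The size passed to cv2.VideoWriter must
# match the frames handed to writer.write(); a mismatch can silently yield
# an unplayable file. Illustrative sizing only:
#
#   w = 1920
#   h = src_h * w // src_w   # integer height that preserves the aspect ratio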
@app.route('/Admin/Processed/result/<path:filename>', methods=['GET', 'POST'])
def download4(filename):
if "admin" in session:
return send_from_directory(directory='result', filename=filename)
else:
return redirect(url_for('relogin'))
@app.route('/third_video/<path:filename>', methods=['GET', 'POST'])
def download3(filename):
if "admin" in session:
return send_from_directory(directory='third_video', filename=filename)
else:
return redirect(url_for('relogin'))
@app.route('/ID_Proof/<path:filename>', methods=['GET', 'POST'])
def download2(filename):
if "admin" in session:
user = session["admin"]
return send_from_directory(directory='ID_Proof', filename=filename,user=user)
else:
return redirect(url_for('relogin'))
@app.route('/Admin/third_party', methods=["GET","POST"])
def third():
if "admin" in session:
user = session["admin"]
all = db.session.query(Third.dept.distinct()).all()
admin = Admin.query.filter_by(usr_name = session["admin"]).first()
len1 = len(all)
if request.method == "POST":
dept = request.form['firstList']
new = request.form['secondList']
name = request.form['thirdList']
phone = request.form['phone']
mail1 = request.form['fourthList']
if dept == 'Other':
dept = new
exists = Third.query.filter_by(mail = mail1).first()
if not exists:
v1 = randint(0, 1000)
v2 = randint(0, 1000)
value = Count.query.filter_by(id = 1).first()
uname = dept + '_' + str(value.Third_party+1)
third_party_id = uname
psw=str(v1)+name+str(v2)
user = User(username=uname,password=psw,type='Third_party')
register = Third(usr_name = uname, dept=dept, name=name, mail = mail1, phone=phone, third_party_id = third_party_id)
count = Count.query.filter_by(id = 1).first()
count.Third_party = count.Third_party + 1
db.session.add(user)
db.session.add(register)
db.session.add(count)
db.session.commit()
msg = Message('Welcome to Pinpoint Family', sender = '<EMAIL>', recipients = [mail1])
msg.html = '<h5>Hi,</h5><h3>You are added as a Third Party at PINPOINT.<br>Please log in to PINPOINT using the following details</h3><h5> Your Username : {} <br> Password : {}<br><br> Happy to connect with you <br> Thank you</h5>'.format(uname,psw)
mail.send(msg)
flash('A new Third Party added successfully','success')
return render_template('admin/add_third.html',all = all, user=user,admin=admin)
else:
flash('Already Registered','error')
if all != None:
return render_template('admin/add_third.html',all = all,user=user,admin=admin)
else:
return render_template('admin/add_third.html',user=user,admin=admin)
else:
return redirect(url_for('relogin'))
@app.route('/Admin/editprofile', methods=["POST"])
def editadmin():
if "admin" in session:
if request.method == "POST":
fname = request.form['fname']
lname = request.form['lname']
email = request.form['email']
phone = request.form['phone']
print(fname)
print(lname)
print(email)
print(phone)
admin = Admin.query.filter_by(usr_name = session["admin"]).first()
print(admin)
admin.fname = fname
admin.lname = lname
admin.mail = email
admin.phone = phone
db.session.add(admin)
db.session.commit()
flash("You have successfully updated your profile",'success')
return redirect(url_for('admindashboard'))
else:
return redirect(url_for('relogin'))
@app.route('/Admin/updatepassword', methods=["POST"])
def updateadminpass():
if "admin" in session:
if request.method == 'POST':
currentpass = request.form['oldpass']
newpassword = request.form['newpass']
confpassword = request.form['confpass']
if newpassword == confpassword:
user = User.query.filter_by(username = session["admin"],password = currentpass).first()
if user:
if user.password == newpassword:
flash("You have entered the same password, try another one",'error')
else:
user.password = newpassword
db.session.add(user)
db.session.commit()
flash("Successfully Changed Password",'success')
else:
flash("You Entered Wrong Password",'error')
else:
flash("New password is not matching",'error')
return redirect(url_for('admindashboard'))
else:
return redirect(url_for('relogin'))
@app.route('/Admin/editusername', methods=["POST"])
def edituseradmin():
if "admin" in session:
if request.method == 'POST':
currentuser = request.form['olduser']
newuser = request.form['newuser']
confuser = request.form['confuser']
if newuser == confuser:
if currentuser == session["admin"]:
user = User.query.filter_by(username = currentuser).first()
if user:
if User.query.filter_by(username = newuser).first():
flash("New Username Already exist, Try some other",'error')
return redirect(url_for('admindashboard'))
else:
if user.username == newuser:
flash("You have Entered same Username, Try some other",'error')
return redirect(url_for('admindashboard'))
else:
if user.type == "Admin":
ad = Admin.query.filter_by(usr_name = currentuser).first()
ad.usr_name = newuser
user.username = newuser
db.session.add(user)
db.session.add(ad)
db.session.commit()
session["admin"] = newuser
flash("Successfully Changed Username",'success')
return redirect(url_for('admindashboard'))
else:
flash("You Entered Wrong Username",'error')
return redirect(url_for('admindashboard'))
else:
flash("You Entered Wrong Username",'error')
return redirect(url_for('admindashboard'))
else:
flash("New Username is not matching",'error')
return redirect(url_for('admindashboard'))
else:
return redirect(url_for('relogin'))
@app.route('/Admin/delete', methods=['POST'])
def deleteadmin():
if "admin" in session:
if request.method == 'POST':
password = request.form['password']
user = User.query.filter_by(username = session["admin"],password = password ).first()
if user:
delete1 = db.session.query(User).filter(User.username == session["admin"]).first()
if user.type == "Admin":
delete2 = Admin.query.filter(Admin.usr_name == session["admin"]).first()
count = Count.query.filter(Count.id == 1).first()
count.Admin = count.Admin - 1
db.session.add(count)
db.session.delete(delete1)
db.session.delete(delete2)
db.session.commit()
session.clear()
return redirect(url_for('index'))
else:
flash("You have entered Wrong Password",'error')
return redirect(url_for('admindashboard'))
return ""
else:
return redirect(url_for('relogin'))
@app.route('/Admin/add_admin', methods=["GET","POST"])
def register():
if "admin" in session:
user = session["admin"]
admin = Admin.query.filter_by(usr_name = session["admin"]).first()
if request.method == "POST":
uname = request.form['uname']
email = request.form['mail']
fname = request.form['fname']
lname = request.form['lname']
phone = request.form['phone']
exists = User.query.filter_by(username = uname).first()
if not exists:
v1 = randint(0, 1000)
v2 = randint(100, 999)
psw=str(v1)+uname+str(v2)
value = Count.query.filter_by(id = 1).first()
admin_id = "Admin_" + str(value.Admin+1)
user = User(username=uname,password=psw,type='Admin')
register = Admin(usr_name = uname,fname=fname,lname=lname,mail = email, phone=phone, admin_id = admin_id)
count = Count.query.filter_by(id = 1).first()
count.Admin = count.Admin + 1
db.session.add(user)
db.session.add(register)
db.session.add(count)
db.session.commit()
msg = Message('Welcome to Pinpoint Family', sender = '<EMAIL>', recipients = [email])
msg.html = '<h5>Hi | |
exp)
rng = date_range('1/1/1990', periods=5)
iseries = Series(np.arange(5), rng) + 1
iseries.ix[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])
exp = Series([2, 1, 3, 5, 4, 6.0])
iranks = iseries.rank()
assert_series_equal(iranks, exp)
values = np.array(
[-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40
], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
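# Background note: Series.rank(pct=True) divides each rank by the number of
# non-NaN observations, which is why the expected values earlier in this
# test are iseries / 4.0 when one of the five entries is NaN. A minimal
# illustration (a sketch, not part of the test suite):
#
#   pd.Series([10, 20, 30]).rank(pct=True)   # -> 0.333..., 0.666..., 1.0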
def test_rank_signature(self):
s = Series([0, 1])
s.rank(method='average')
self.assertRaises(ValueError, s.rank, 'average')
def test_rank_inf(self):
raise nose.SkipTest('DataFrame.rank does not currently rank '
'np.inf and -np.inf properly')
values = np.array(
[-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10,
2, 40, np.inf], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_clip(self):
val = self.ts.median()
self.assertEqual(self.ts.clip_lower(val).min(), val)
self.assertEqual(self.ts.clip_upper(val).max(), val)
self.assertEqual(self.ts.clip(lower=val).min(), val)
self.assertEqual(self.ts.clip(upper=val).max(), val)
result = self.ts.clip(-0.5, 0.5)
expected = np.clip(self.ts, -0.5, 0.5)
assert_series_equal(result, expected)
tm.assertIsInstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [Series([np.nan, 1.0, 2.0, 3.0]), Series([None, 'a', 'b', 'c']),
Series(pd.to_datetime(
[np.nan, 1, 2, 3], unit='D'))]
for s in sers:
thresh = s[2]
l = s.clip_lower(thresh)
u = s.clip_upper(thresh)
self.assertEqual(l[notnull(l)].min(), thresh)
self.assertEqual(u[notnull(u)].max(), thresh)
self.assertEqual(list(isnull(s)), list(isnull(l)))
self.assertEqual(list(isnull(s)), list(isnull(u)))
def test_clip_against_series(self):
# GH #6966
s = Series([1.0, 1.0, 4.0])
threshold = Series([1.0, 2.0, 3.0])
assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0]))
assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0]))
lower = Series([1.0, 2.0, 3.0])
upper = Series([1.5, 2.5, 3.5])
assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
def test_clip_with_datetimes(self):
# GH 11838
# naive and tz-aware datetimes
t = Timestamp('2015-12-01 09:30:30')
s = Series([Timestamp('2015-12-01 09:30:00'), Timestamp(
'2015-12-01 09:31:00')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00'), Timestamp(
'2015-12-01 09:30:30')])
assert_series_equal(result, expected)
t = Timestamp('2015-12-01 09:30:30', tz='US/Eastern')
s = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:31:00', tz='US/Eastern')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:30:30', tz='US/Eastern')])
assert_series_equal(result, expected)
def test_cummethods_bool(self):
# GH 6270
# looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2
def cummin(x):
return np.minimum.accumulate(x)
def cummax(x):
return np.maximum.accumulate(x)
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {'cumsum': np.cumsum,
'cumprod': np.cumprod,
'cummin': cummin,
'cummax': cummax}
args = product((a, b, c, d), methods)
for s, method in args:
expected = Series(methods[method](s.values))
result = getattr(s, method)()
assert_series_equal(result, expected)
e = pd.Series([False, True, nan, False])
cse = pd.Series([0, 1, nan, 1], dtype=object)
cpe = pd.Series([False, 0, nan, 0])
cmin = pd.Series([False, False, nan, False])
cmax = pd.Series([False, True, nan, True])
expecteds = {'cumsum': cse,
'cumprod': cpe,
'cummin': cmin,
'cummax': cmax}
for method in methods:
res = getattr(e, method)()
assert_series_equal(res, expecteds[method])
def test_isin(self):
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
result = s.isin(['A', 'C'])
expected = Series([True, False, True, False, False, False, True, True])
assert_series_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH4763
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
with tm.assertRaises(TypeError):
s.isin('a')
with tm.assertRaises(TypeError):
s = Series(['aaa', 'b', 'c'])
s.isin('aaa')
def test_isin_with_i8(self):
# GH 5021
expected = Series([True, True, False, False, False])
expected2 = Series([False, True, False, False, False])
# datetime64[ns]
s = Series(date_range('jan-01-2013', 'jan-05-2013'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
assert_series_equal(result, expected)
# fails on dtype conversion in the first place
result = s.isin(s[0:2].values.astype('datetime64[D]'))
assert_series_equal(result, expected)
result = s.isin([s[1]])
assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
assert_series_equal(result, expected2)
# timedelta64[ns]
s = Series(pd.to_timedelta(lrange(5), unit='d'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
def test_timedelta64_analytics(self):
from pandas import date_range
# index min/max
td = Series(date_range('2012-1-1', periods=3, freq='D')) - \
Timestamp('20120101')
result = td.idxmin()
self.assertEqual(result, 0)
result = td.idxmax()
self.assertEqual(result, 2)
# GH 2982
# with NaT
td[0] = np.nan
result = td.idxmin()
self.assertEqual(result, 1)
result = td.idxmax()
self.assertEqual(result, 2)
# abs
s1 = Series(date_range('20120101', periods=3))
s2 = Series(date_range('20120102', periods=3))
expected = Series(s2 - s1)
# this fails as numpy returns timedelta64[us]
# result = np.abs(s1-s2)
# assert_frame_equal(result,expected)
result = (s1 - s2).abs()
assert_series_equal(result, expected)
# max/min
result = td.max()
expected = Timedelta('2 days')
self.assertEqual(result, expected)
result = td.min()
expected = Timedelta('1 days')
self.assertEqual(result, expected)
def test_idxmin(self):
# test idxmin
# _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
self.assertTrue(isnull(self.series.idxmin(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmin()], nona.min())
self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
nona.values.argmin())
# all NaNs
allna = self.series * nan
self.assertTrue(isnull(allna.idxmin()))
# datetime64[ns]
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmin()
self.assertEqual(result, 0)
s[0] = np.nan
result = s.idxmin()
self.assertEqual(result, 1)
def test_idxmax(self):
# test idxmax
# _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
self.assertTrue(isnull(self.series.idxmax(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmax()], nona.max())
self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
nona.values.argmax())
# all NaNs
allna = self.series * nan
self.assertTrue(isnull(allna.idxmax()))
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmax()
self.assertEqual(result, 5)
s[5] = np.nan
result = s.idxmax()
self.assertEqual(result, 4)
# Float64Index
# GH 5914
s = pd.Series([1, 2, 3], [1.1, 2.1, 3.1])
result = s.idxmax()
self.assertEqual(result, 3.1)
result = s.idxmin()
self.assertEqual(result, 1.1)
s = pd.Series(s.index, s.index)
result = s.idxmax()
self.assertEqual(result, 3.1)
result = s.idxmin()
self.assertEqual(result, 1.1)
def test_ptp(self):
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
self.assertEqual(np.ptp(ser), np.ptp(arr))
# GH11163
s = Series([3, 5, np.nan, -3, 10])
self.assertEqual(s.ptp(), 13)
self.assertTrue(pd.isnull(s.ptp(skipna=False)))
mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2, 3]])
s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi)
expected = pd.Series([6, 2], index=['a', 'b'], dtype=np.float64)
self.assert_series_equal(s.ptp(level=0), expected)
expected = pd.Series([np.nan, np.nan], index=['a', 'b'])
self.assert_series_equal(s.ptp(level=0, skipna=False), expected)
with self.assertRaises(ValueError):
s.ptp(axis=1)
s = pd.Series(['a', 'b', 'c', 'd', 'e'])
with self.assertRaises(TypeError):
s.ptp()
with self.assertRaises(NotImplementedError):
s.ptp(numeric_only=True)
def test_datetime_timedelta_quantiles(self):
# covers #9694
self.assertTrue(pd.isnull(Series([], dtype='M8[ns]').quantile(.5)))
self.assertTrue(pd.isnull(Series([], dtype='m8[ns]').quantile(.5)))
def test_empty_timeseries_redections_return_nat(self):
# covers #11245
for dtype in ('m8[ns]', 'm8[ns]', 'M8[ns]', 'M8[ns, UTC]'):
self.assertIs(Series([], dtype=dtype).min(), pd.NaT)
self.assertIs(Series([], dtype=dtype).max(), pd.NaT)
def test_unique_data_ownership(self):
# it works! #1807
Series(Series(["a", "c", "b"]).unique()).sort_values()
def test_replace(self):
N = 100
ser = Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
ser.replace([np.nan], -1, inplace=True)
exp = ser.fillna(-1)
assert_series_equal(ser, exp)
rs = ser.replace(0., np.nan)
ser[ser == 0.] = np.nan
assert_series_equal(rs, ser)
ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -1).all())
self.assertTrue((rs[20:30] == -1).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -2).all())
self.assertTrue((rs[20:30] == -3).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
self.assertTrue((ser[:5] == -1).all())
self.assertTrue((ser[6:10] == -1).all())
self.assertTrue((ser[20:30] == -1).all())
ser = Series([np.nan, 0, np.inf])
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
assert_series_equal(ser.replace(np.inf, 0), filled)
ser = Series(self.ts.index)
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])
# make sure that we aren't just masking a TypeError because bools don't
# implement indexing
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
ser.replace([1, 2], [np.nan, 0])
ser = Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
assert_series_equal(result, Series([4, 3, 2, 1, 0]))
# API change from 0.12?
# GH 5319
ser = Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
assert_series_equal(result, expected)
ser = Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
assert_series_equal(result, expected)
# GH 5797
ser = Series(date_range('20130101', periods=5))
expected = ser.copy()
expected.loc[2] = Timestamp('20120101')
result = ser.replace({Timestamp('20130103'): Timestamp('20120101')})
assert_series_equal(result, expected)
result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))
assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
assert_series_equal(result, Series([0, 0, 0, 0, 4]))
s = ser.copy()
s.replace([1, 2, 3], inplace=True)
assert_series_equal(s, Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted | |
<reponame>sherry0429/dash<filename>tests/integration/devtools/test_callback_validation.py
import flask
import pytest
import dash_core_components as dcc
import dash_html_components as html
from dash import Dash
from dash.dependencies import Input, Output, State, MATCH, ALL, ALLSMALLER
debugging = dict(
debug=True, use_reloader=False, use_debugger=True, dev_tools_hot_reload=False
)
def check_errors(dash_duo, specs):
# Order-agnostic check of all the errors shown.
# This is not fully general - despite the selectors below, it only applies
# to front-end errors with no back-end errors in the list.
cnt = len(specs)
dash_duo.wait_for_text_to_equal(dash_duo.devtools_error_count_locator, str(cnt))
found = []
for i in range(cnt):
msg = dash_duo.find_elements(".dash-fe-error__title")[i].text
dash_duo.find_elements(".test-devtools-error-toggle")[i].click()
dash_duo.wait_for_element(".dash-backend-error,.dash-fe-error__info")
has_BE = dash_duo.driver.execute_script(
"return document.querySelectorAll('.dash-backend-error').length"
)
txt_selector = ".dash-backend-error" if has_BE else ".dash-fe-error__info"
txt = dash_duo.wait_for_element(txt_selector).text
dash_duo.find_elements(".test-devtools-error-toggle")[i].click()
dash_duo.wait_for_no_elements(".dash-backend-error")
found.append((msg, txt))
orig_found = found[:]
for i, (message, snippets) in enumerate(specs):
for j, (msg, txt) in enumerate(found):
if msg == message and all(snip in txt for snip in snippets):
print(j)
found.pop(j)
break
else:
raise AssertionError(
(
"error {} ({}) not found with text:\n"
" {}\nThe found messages were:\n---\n{}"
).format(
i,
message,
"\n ".join(snippets),
"\n---\n".join(
"{}\n{}".format(msg, txt) for msg, txt in orig_found
),
)
)
# ensure the errors didn't leave items in the pendingCallbacks queue
assert dash_duo.driver.execute_script("return document.title") == "Dash"
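# Usage sketch: each spec is a (title, snippets) pair, where "title" must
# match an error card heading exactly and every string in "snippets" must
# appear in that card's expanded text, e.g.
#
#   check_errors(dash_duo, [
#       ["A callback is missing Inputs", ["there are no `Input` elements."]],
#   ])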
def test_dvcv001_blank(dash_duo):
app = Dash(__name__)
app.layout = html.Div()
@app.callback([], [])
def x():
return 42
dash_duo.start_server(app, **debugging)
check_errors(
dash_duo,
[
["A callback is missing Inputs", ["there are no `Input` elements."]],
[
"A callback is missing Outputs",
["Please provide an output for this callback:"],
],
],
)
def test_dvcv002_blank_id_prop(dash_duo):
# TODO: remove suppress_callback_exceptions after we move that part to FE
app = Dash(__name__, suppress_callback_exceptions=True)
app.layout = html.Div([html.Div(id="a")])
@app.callback([Output("a", "children"), Output("", "")], [Input("", "")])
def x(a):
return a
dash_duo.start_server(app, **debugging)
# the first one is just an artifact... the other 4 we care about
specs = [
["Same `Input` and `Output`", []],
[
"Callback item missing ID",
['Input[0].id = ""', "Every item linked to a callback needs an ID"],
],
[
"Callback property error",
[
'Input[0].property = ""',
"expected `property` to be a non-empty string.",
],
],
[
"Callback item missing ID",
['Output[1].id = ""', "Every item linked to a callback needs an ID"],
],
[
"Callback property error",
[
'Output[1].property = ""',
"expected `property` to be a non-empty string.",
],
],
]
check_errors(dash_duo, specs)
def test_dvcv003_duplicate_outputs_same_callback(dash_duo):
app = Dash(__name__)
app.layout = html.Div([html.Div(id="a"), html.Div(id="b")])
@app.callback(
[Output("a", "children"), Output("a", "children")], [Input("b", "children")]
)
def x(b):
return b, b
@app.callback(
[Output({"a": 1}, "children"), Output({"a": ALL}, "children")],
[Input("b", "children")],
)
def y(b):
return b, b
dash_duo.start_server(app, **debugging)
specs = [
[
"Overlapping wildcard callback outputs",
[
'Output 1 ({"a":ALL}.children)',
'overlaps another output ({"a":1}.children)',
"used in this callback",
],
],
[
"Duplicate callback Outputs",
["Output 1 (a.children) is already used by this callback."],
],
]
check_errors(dash_duo, specs)
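# Concept note: {"a": 1} overlaps {"a": ALL} because the ALL wildcard matches
# every value of the key "a", so both outputs could end up targeting the same
# component. Dash rejects such overlaps whether the two outputs live in one
# callback (above) or in two different callbacks (next test).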
def test_dvcv004_duplicate_outputs_across_callbacks(dash_duo):
app = Dash(__name__)
app.layout = html.Div([html.Div(id="a"), html.Div(id="b"), html.Div(id="c")])
@app.callback(
[Output("a", "children"), Output("a", "style")], [Input("b", "children")]
)
def x(b):
return b, b
@app.callback(Output("b", "children"), [Input("b", "style")])
def y(b):
return b
@app.callback(Output("a", "children"), [Input("b", "children")])
def x2(b):
return b
@app.callback(
[Output("b", "children"), Output("b", "style")], [Input("c", "children")]
)
def y2(c):
return c
@app.callback(
[Output({"a": 1}, "children"), Output({"b": ALL, "c": 1}, "children")],
[Input("b", "children")],
)
def z(b):
return b, b
@app.callback(
[Output({"a": ALL}, "children"), Output({"b": 1, "c": ALL}, "children")],
[Input("b", "children")],
)
def z2(b):
return b, b
dash_duo.start_server(app, **debugging)
specs = [
[
"Overlapping wildcard callback outputs",
[
# depending on the order callbacks get reported to the
# front end, either of these could have been registered first.
# so we use this order-independent form that just checks for
# both prop_id's and the string "overlaps another output"
'({"b":1,"c":ALL}.children)',
"overlaps another output",
'({"b":ALL,"c":1}.children)',
"used in a different callback.",
],
],
[
"Overlapping wildcard callback outputs",
[
'({"a":ALL}.children)',
"overlaps another output",
'({"a":1}.children)',
"used in a different callback.",
],
],
["Duplicate callback outputs", ["Output 0 (b.children) is already in use."]],
["Duplicate callback outputs", ["Output 0 (a.children) is already in use."]],
]
check_errors(dash_duo, specs)
def test_dvcv005_input_output_overlap(dash_duo):
app = Dash(__name__)
app.layout = html.Div([html.Div(id="a"), html.Div(id="b"), html.Div(id="c")])
@app.callback(Output("a", "children"), [Input("a", "children")])
def x(a):
return a
@app.callback(
[Output("b", "children"), Output("c", "children")], [Input("c", "children")]
)
def y(c):
return c, c
@app.callback(Output({"a": ALL}, "children"), [Input({"a": 1}, "children")])
def x2(a):
return [a]
@app.callback(
[Output({"b": MATCH}, "children"), Output({"b": MATCH, "c": 1}, "children")],
[Input({"b": MATCH, "c": 1}, "children")],
)
def y2(c):
return c, c
dash_duo.start_server(app, **debugging)
specs = [
[
"Same `Input` and `Output`",
[
'Input 0 ({"b":MATCH,"c":1}.children)',
"can match the same component(s) as",
'Output 1 ({"b":MATCH,"c":1}.children)',
],
],
[
"Same `Input` and `Output`",
[
'Input 0 ({"a":1}.children)',
"can match the same component(s) as",
'Output 0 ({"a":ALL}.children)',
],
],
[
"Same `Input` and `Output`",
["Input 0 (c.children)", "matches Output 1 (c.children)"],
],
[
"Same `Input` and `Output`",
["Input 0 (a.children)", "matches Output 0 (a.children)"],
],
]
check_errors(dash_duo, specs)
def test_dvcv006_inconsistent_wildcards(dash_duo):
app = Dash(__name__)
app.layout = html.Div()
@app.callback(
[Output({"b": MATCH}, "children"), Output({"b": ALL, "c": 1}, "children")],
[Input({"b": MATCH, "c": 2}, "children")],
)
def x(c):
return c, [c]
@app.callback(
[Output({"a": MATCH}, "children")],
[Input({"b": MATCH}, "children"), Input({"c": ALLSMALLER}, "children")],
[State({"d": MATCH, "dd": MATCH}, "children"), State({"e": ALL}, "children")],
)
def y(b, c, d, e):
return b + c + d + e
dash_duo.start_server(app, **debugging)
specs = [
[
"`Input` / `State` wildcards not in `Output`s",
[
'State 0 ({"d":MATCH,"dd":MATCH}.children)',
"has MATCH or ALLSMALLER on key(s) d, dd",
'where Output 0 ({"a":MATCH}.children)',
],
],
[
"`Input` / `State` wildcards not in `Output`s",
[
'Input 1 ({"c":ALLSMALLER}.children)',
"has MATCH or ALLSMALLER on key(s) c",
'where Output 0 ({"a":MATCH}.children)',
],
],
[
"`Input` / `State` wildcards not in `Output`s",
[
'Input 0 ({"b":MATCH}.children)',
"has MATCH or ALLSMALLER on key(s) b",
'where Output 0 ({"a":MATCH}.children)',
],
],
[
"Mismatched `MATCH` wildcards across `Output`s",
[
'Output 1 ({"b":ALL,"c":1}.children)',
"does not have MATCH wildcards on the same keys as",
'Output 0 ({"b":MATCH}.children).',
],
],
]
check_errors(dash_duo, specs)
def test_dvcv007_disallowed_ids(dash_duo):
app = Dash(__name__)
app.layout = html.Div()
@app.callback(
Output({"": 1, "a": [4], "c": ALLSMALLER}, "children"),
[Input({"b": {"c": 1}}, "children")],
)
def y(b):
return b
dash_duo.start_server(app, **debugging)
specs = [
[
"Callback wildcard ID error",
[
'Input[0].id["b"] = {"c":1}',
"Wildcard callback ID values must be either wildcards",
"or constants of one of these types:",
"string, number, boolean",
],
],
[
"Callback wildcard ID error",
[
'Output[0].id["c"] = ALLSMALLER',
"Allowed wildcards for Outputs are:",
"ALL, MATCH",
],
],
[
"Callback wildcard ID error",
[
'Output[0].id["a"] = [4]',
"Wildcard callback ID values must be either wildcards",
"or constants of one of these types:",
"string, number, boolean",
],
],
[
"Callback wildcard ID error",
['Output[0].id has key ""', "Keys must be non-empty strings."],
],
]
check_errors(dash_duo, specs)
def bad_id_app(**kwargs):
app = Dash(__name__, **kwargs)
app.layout = html.Div(
[
html.Div(
[html.Div(id="inner-div"), dcc.Input(id="inner-input")], id="outer-div"
),
dcc.Input(id="outer-input"),
],
id="main",
)
@app.callback(Output("nuh-uh", "children"), [Input("inner-input", "value")])
def f(a):
return a
@app.callback(Output("outer-input", "value"), [Input("yeah-no", "value")])
def g(a):
return a
@app.callback(
[Output("inner-div", "children"), Output("nope", "children")],
[Input("inner-input", "value")],
[State("what", "children")],
)
def g2(a):
return [a, a]
# the right way
@app.callback(Output("inner-div", "style"), [Input("inner-input", "value")])
def h(a):
return a
return app
# These ones are raised by bad_id_app whether suppressing callback exceptions or not
dispatch_specs = [
[
"A nonexistent object was used in an `Input` of a Dash callback. "
"The id of this object is `yeah-no` and the property is `value`. "
"The string ids in the current layout are: "
"[main, outer-div, inner-div, inner-input, outer-input]",
[],
],
[
"A nonexistent object was used in an `Output` of a Dash callback. "
"The id of this object is `nope` and the property is `children`. "
"The string ids in the current layout are: "
"[main, outer-div, inner-div, inner-input, outer-input]",
[],
],
]
def test_dvcv008_wrong_callback_id(dash_duo):
dash_duo.start_server(bad_id_app(), **debugging)
specs = [
[
"ID not found in layout",
[
"Attempting to connect a callback Input item to component:",
'"yeah-no"',
"but no components with that id exist in the layout.",
"If you are assigning callbacks to components that are",
"generated by other callbacks (and therefore not in the",
"initial layout), you can suppress this exception by setting",
"`suppress_callback_exceptions=True`.",
"This ID was used in the callback(s) for Output(s):",
"outer-input.value",
],
],
[
"ID not found in layout",
[
| |
passive (priority 0) replica set
members known to this connection. This does not include
hidden or slaveDelay members, or arbiters.
"""
return self.__hosts
@property
def primary(self):
"""The current primary of the replica set.
Returns None if there is no primary.
"""
return self.__writer
@property
def secondaries(self):
"""The secondary members known to this connection.
"""
return set(self.__readers)
@property
def arbiters(self):
"""The arbiters known to this connection.
"""
return self.__arbiters
@property
def max_pool_size(self):
"""The maximum pool size limit set for this connection.
"""
return self.__max_pool_size
def get_document_class(self):
"""document_class getter"""
return self.__document_class
def set_document_class(self, klass):
"""document_class setter"""
self.__document_class = klass
document_class = property(get_document_class, set_document_class,
doc="""Default class to use for documents
returned on this connection.
""")
@property
def tz_aware(self):
"""Does this connection return timezone-aware datetimes?
"""
return self.__tz_aware
@property
def max_bson_size(self):
"""Returns the maximum size BSON object the connected primary
accepts in bytes. Defaults to 4MB in server < 1.7.4. Returns
0 if no primary is available.
"""
if self.__writer:
return self.__pools[self.__writer]['max_bson_size']
return 0
def __simple_command(self, sock, dbname, spec):
"""Send a command to the server.
"""
rqst_id, msg, _ = message.query(0, dbname + '.$cmd', 0, -1, spec)
sock.sendall(msg)
response = self.__recv_msg(1, rqst_id, sock)
response = helpers._unpack_response(response)['data'][0]
msg = "command %r failed: %%s" % spec
helpers._check_command_response(response, None, msg)
return response
def __auth(self, sock, dbname, user, passwd):
"""Authenticate socket `sock` against database `dbname`.
"""
# Get a nonce
response = self.__simple_command(sock, dbname, {'getnonce': 1})
nonce = response['nonce']
key = helpers._auth_key(nonce, user, passwd)
# Actually authenticate
query = SON([('authenticate', 1),
('user', user), ('nonce', nonce), ('key', key)])
self.__simple_command(sock, dbname, query)
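    # A hedged sketch of what helpers._auth_key is assumed to compute for this
    # legacy (MONGODB-CR style) handshake; the helper name and call above come from
    # the original code, the body below is illustrative only.
    #
    #   import hashlib
    #   def _auth_key_sketch(nonce, user, passwd):
    #       # digest of "user:mongo:password" is the stored credential hash
    #       pwd_digest = hashlib.md5(user + ":mongo:" + passwd).hexdigest()
    #       # the key sent back to the server mixes in the one-time nonce
    #       return hashlib.md5(nonce + user + pwd_digest).hexdigest()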
def __is_master(self, host):
"""Directly call ismaster.
"""
mongo = pool.Pool(host, self.__max_pool_size,
self.__net_timeout, self.__conn_timeout,
self.__use_ssl)
sock = mongo.get_socket()[0]
response = self.__simple_command(sock, 'admin', {'ismaster': 1})
return response, mongo
def __update_pools(self):
"""Update the mapping of (host, port) pairs to connection pools.
"""
secondaries = []
for host in self.__hosts:
mongo = None
try:
if host in self.__pools:
mongo = self.__pools[host]
sock = self.__socket(mongo)
res = self.__simple_command(sock, 'admin', {'ismaster': 1})
else:
res, conn = self.__is_master(host)
bson_max = res.get('maxBsonObjectSize', MAX_BSON_SIZE)
self.__pools[host] = {'pool': conn,
'last_checkout': time.time(),
'max_bson_size': bson_max}
except (ConnectionFailure, socket.error):
if mongo:
mongo['pool'].discard_socket()
continue
# Only use hosts that are currently in 'secondary' state
# as readers.
if res['secondary']:
secondaries.append(host)
elif res['ismaster']:
self.__writer = host
self.__readers = secondaries
def refresh(self):
"""Iterate through the existing host list, or possibly the
seed list, to update the list of hosts and arbiters in this
replica set.
"""
errors = []
nodes = self.__hosts or self.__seeds
hosts = set()
for node in nodes:
mongo = None
try:
if node in self.__pools:
mongo = self.__pools[node]
sock = self.__socket(mongo)
response = self.__simple_command(sock, 'admin',
{'ismaster': 1})
else:
response, conn = self.__is_master(node)
# Check that this host is part of the given replica set.
set_name = response.get('setName')
# The 'setName' field isn't returned by mongod before 1.6.2
# so we can't assume that if it's missing this host isn't in
# the specified set.
if set_name and set_name != self.__name:
host, port = node
raise ConfigurationError("%s:%d is not a member of "
"replica set %s"
% (host, port, self.__name))
if "arbiters" in response:
self.__arbiters = set([_partition_node(h)
for h in response["arbiters"]])
if "hosts" in response:
hosts.update([_partition_node(h)
for h in response["hosts"]])
if "passives" in response:
hosts.update([_partition_node(h)
for h in response["passives"]])
except (ConnectionFailure, socket.error), why:
if mongo:
mongo['pool'].discard_socket()
errors.append("%s:%d: %s" % (node[0], node[1], str(why)))
if hosts:
self.__hosts = hosts
break
else:
if errors:
raise AutoReconnect(', '.join(errors))
raise ConfigurationError('No suitable hosts found')
self.__update_pools()
def __check_is_primary(self, host):
"""Checks if this host is the primary for the replica set.
"""
try:
mongo = None
if host in self.__pools:
mongo = self.__pools[host]
sock = self.__socket(mongo)
res = self.__simple_command(sock, 'admin', {'ismaster': 1})
else:
res, conn = self.__is_master(host)
bson_max = res.get('maxBsonObjectSize', MAX_BSON_SIZE)
self.__pools[host] = {'pool': conn,
'last_checkout': time.time(),
'max_bson_size': bson_max}
except (ConnectionFailure, socket.error), why:
if mongo:
mongo['pool'].discard_socket()
raise ConnectionFailure("%s:%d: %s" % (host[0], host[1], str(why)))
if res["ismaster"]:
return host
elif "primary" in res:
candidate = _partition_node(res["primary"])
# Don't report the same connect failure multiple times.
try:
return self.__check_is_primary(candidate)
except (ConnectionFailure, socket.error):
pass
raise AutoReconnect('%s:%d: not primary' % host)
def __find_primary(self):
"""Returns a connection to the primary of this replica set,
if one exists.
"""
if self.__writer:
return self.__pools[self.__writer]
# This is either the first connection or we had a failover.
self.refresh()
errors = []
for candidate in self.__hosts:
try:
self.__writer = self.__check_is_primary(candidate)
return self.__pools[self.__writer]
except (ConnectionFailure, socket.error), why:
errors.append(str(why))
# Couldn't find the primary.
raise AutoReconnect(', '.join(errors))
def __socket(self, mongo):
"""Get a socket from the pool.
If it's been > 1 second since the last time we checked out a
socket, we also check to see if the socket has been closed -
        this lets us avoid seeing *some*
:class:`~pymongo.errors.AutoReconnect` exceptions on server
hiccups, etc. We only do this if it's been > 1 second since
the last socket checkout, to keep performance reasonable - we
can't avoid those completely anyway.
"""
sock, authset = mongo['pool'].get_socket()
now = time.time()
if now - mongo['last_checkout'] > 1:
if _closed(sock):
mongo['pool'] = pool.Pool(mongo['pool'].host,
self.__max_pool_size,
self.__net_timeout,
self.__conn_timeout,
self.__use_ssl)
sock, authset = mongo['pool'].get_socket()
mongo['last_checkout'] = now
if self.__auth_credentials or authset:
self.__check_auth(sock, authset)
return sock
def disconnect(self):
"""Disconnect from the replica set primary.
"""
self.__writer = None
def close(self):
"""Disconnect from all set members.
"""
self.__writer = None
self.__pools = {}
def __check_response_to_last_error(self, response):
"""Check a response to a lastError message for errors.
`response` is a byte string representing a response to the message.
If it represents an error response we raise OperationFailure.
Return the response as a document.
"""
response = helpers._unpack_response(response)
assert response["number_returned"] == 1
error = response["data"][0]
helpers._check_command_response(error, self.disconnect)
error_msg = error.get("err", "")
if error_msg is None:
return error
if error_msg.startswith("not master"):
self.disconnect()
raise AutoReconnect(error_msg)
if "code" in error:
if error["code"] in [11000, 11001, 12582]:
raise DuplicateKeyError(error["err"])
else:
raise OperationFailure(error["err"], error["code"])
else:
raise OperationFailure(error["err"])
def __recv_data(self, length, sock):
"""Lowest level receive operation.
Takes length to receive and repeatedly calls recv until able to
return a buffer of that length, raising ConnectionFailure on error.
"""
chunks = []
while length:
chunk = sock.recv(length)
if chunk == "":
raise ConnectionFailure("connection closed")
length -= len(chunk)
chunks.append(chunk)
return "".join(chunks)
def __recv_msg(self, operation, request_id, sock):
"""Receive a message in response to `request_id` on `sock`.
Returns the response data with the header removed.
"""
header = self.__recv_data(16, sock)
length = struct.unpack("<i", header[:4])[0]
resp_id = struct.unpack("<i", header[8:12])[0]
assert resp_id == request_id, "ids don't match %r %r" % (resp_id,
request_id)
assert operation == struct.unpack("<i", header[12:])[0]
return self.__recv_data(length - 16, sock)
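    # For reference, the 16-byte header unpacked above follows the MongoDB wire
    # protocol layout of four little-endian int32 fields:
    #   bytes  0-3   messageLength (total size, header included)
    #   bytes  4-7   requestID     (id chosen by the sender of this reply)
    #   bytes  8-11  responseTo    (checked against the request_id we sent)
    #   bytes 12-15  opCode        (1 == OP_REPLY for query/getLastError responses)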
def __check_bson_size(self, msg, max_size):
"""Make sure the message doesn't include BSON documents larger
than the connected server will accept.
:Parameters:
- `msg`: message to check
"""
if len(msg) == 3:
request_id, data, max_doc_size = msg
if max_doc_size > max_size:
raise InvalidDocument("BSON document too large (%d bytes)"
" - the connected server supports"
" BSON document sizes up to %d"
" bytes." %
(max_doc_size, max_size))
return (request_id, data)
# get_more and kill_cursors messages
# don't include BSON documents.
return msg
def _send_message(self, msg, safe=False, _connection_to_use=None):
"""Say something to Mongo.
Raises ConnectionFailure if the message cannot be sent. Raises
OperationFailure if `safe` is ``True`` and the
response to the getLastError call returns an error. Return the
response from lastError, or ``None`` if `safe` is ``False``.
:Parameters:
- `msg`: message to send
- `safe`: check getLastError status after sending the message
"""
if _connection_to_use in (None, -1):
mongo = self.__find_primary()
else:
mongo = self.__pools[_connection_to_use]
try:
sock = self.__socket(mongo)
rqst_id, data = self.__check_bson_size(msg,
mongo['max_bson_size'])
sock.sendall(data)
# Safe mode. We pack the message together with a lastError
# message and send both. We then get the response (to the
# lastError) and raise OperationFailure if it is an error
# response.
if safe:
response = self.__recv_msg(1, rqst_id, sock)
return self.__check_response_to_last_error(response)
return None
except(ConnectionFailure, socket.error), why:
mongo['pool'].discard_socket()
if _connection_to_use in (None, -1):
self.disconnect()
raise AutoReconnect(str(why))
except:
mongo['pool'].discard_socket()
raise
mongo['pool'].return_socket()
def __send_and_receive(self, mongo, msg, **kwargs):
"""Send a message | |
<reponame>useEvil/husky-hustle-2013
import os
import csv
import pytz
import math
import base64
import urllib
import re as regexp
import datetime as date
import husky.bitly as bitly
import gdata.media as media
import gdata.photos.service as gdata
import gdata.calendar.client as cdata
from django.db import models
from django.db.models import Count, Sum, Avg, Max
from django.db.models.signals import post_save
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.contrib.auth.models import User
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.sites.models import Site
from django import forms
from paypal import PayPal
from picasa import PicasaField, PicasaStorage
from decimal import Decimal
from registration.forms import RegistrationForm
from socialregistration.contrib.facebook.models import FacebookProfile
from socialregistration.contrib.twitter.models import TwitterProfile
from socialregistration.contrib.openid.models import OpenIDProfile
from husky.helpers import *
DONATION_GOAL = 50000
MAX_BAR_LENGTH = 225
MAX_ARROW_HEIGHT = 275
BASE_ARROW_HEIGHT = 73
RAFFLE_TICKET_AMT = 25
# Field Classes
class CurrencyField(models.DecimalField):
def __init__(self, *args, **kwargs):
kwargs['max_digits'] = 10
kwargs['decimal_places'] = 2
super(CurrencyField, self).__init__(*args, **kwargs)
def to_python(self, value):
try:
return super(CurrencyField, self).to_python(value).quantize(Decimal('0.01'))
except AttributeError:
return None
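# Illustrative behaviour of CurrencyField.to_python (a sketch, not part of the
# original models): values are coerced to two decimal places, and anything that
# cannot be quantized (e.g. None) falls back to None.
#
#   f = CurrencyField()
#   f.to_python("12.3")   # -> Decimal('12.30')
#   f.to_python(None)     # -> None (AttributeError branch)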
# Google Classes
class GoogleCalendarConnect(object):
gd_client = cdata.CalendarClient(source=settings.PICASA_STORAGE_OPTIONS['source'])
gd_client.ClientLogin(settings.PICASA_STORAGE_OPTIONS['email'], settings.PICASA_STORAGE_OPTIONS['password'], gd_client.source)
def client(self):
return self.gd_client
class GooglePhotoConnect(object):
gd_client = gdata.PhotosService()
gd_client.email = settings.PICASA_STORAGE_OPTIONS['email']
gd_client.password = settings.PICASA_STORAGE_OPTIONS['password']
gd_client.source = settings.PICASA_STORAGE_OPTIONS['source']
gd_client.ProgrammaticLogin()
def client(self):
return self.gd_client
class Calendar(object):
gd_client = GoogleCalendarConnect().client()
def get_events(self):
query = cdata.CalendarEventQuery()
query.start_min = date.datetime.now().strftime('%Y-%m-%d')
# query.start_max = (date.datetime.now() + date.timedelta(days=14)).strftime('%Y-%m-%d')
feed = self.gd_client.GetCalendarEventFeed(q=query, visibility='public', sortorder='ascending', orderby='starttime')
return feed
class Photo(object):
gd_client = GooglePhotoConnect().client()
def get_photo(self, album_id=None, photo_id=None):
        if not album_id or not photo_id: return
photo = self.gd_client.GetFeed('/data/feed/api/user/default/albumid/%s/photoid/%s' % (album_id, photo_id))
return photo
def get_photos(self, album_id=None):
if not album_id: return
photos = self.gd_client.GetFeed('/data/feed/api/user/default/albumid/%s?kind=photo' % (album_id))
return photos
def get_photos_by_tags(self, tags='huskyhustle'):
photos = self.gd_client.GetFeed('/data/feed/api/all?kind=photo&tag=%s' % (tags))
return photos
class Album(object):
gd_client = GooglePhotoConnect().client()
def get_album(self, album_id=None):
if not album_id: return
photos = Photo().get_photos(album_id)
return photos
def get_albums(self):
albums = self.gd_client.GetUserFeed(user=settings.PICASA_STORAGE_OPTIONS['userid'])
return albums
# Create your models here.
class Content(models.Model):
page = models.CharField(max_length=100)
content = models.TextField(max_length=65000, blank=True, null=True)
date_added = models.DateTimeField(default=date.datetime.now())
class Blog(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(User)
content = models.TextField(max_length=4000)
date_added = models.DateTimeField(default=date.datetime.now())
class Message(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(User)
content = models.TextField(max_length=4000)
date_added = models.DateTimeField(default=date.datetime.now())
class Link(models.Model):
title = models.CharField(max_length=50)
url = models.CharField(max_length=255)
shorten = models.CharField(max_length=255)
status = models.IntegerField(blank=True, null=True, choices=((0,0), (1,1)))
def __unicode__(self):
return self.title
def shortened(self):
if not self.shorten:
api = bitly.Api(login=settings.BITLY_LOGIN, apikey=settings.BITLY_APIKEY)
self.shorten = api.shorten(self.url)
self.save()
return self.shorten
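    # Illustrative usage sketch (the title below is hypothetical; assumes
    # BITLY_LOGIN/BITLY_APIKEY are configured in settings):
    #
    #   link = Link.objects.get(title='Donate')
    #   link.shortened()   # first call hits the bit.ly API and stores the result
    #   link.shortened()   # later calls return the cached self.shorten value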
class Grade(models.Model):
grade = models.IntegerField(blank=True, null=True)
title = models.CharField(max_length=15)
def __unicode__(self):
return self.title
def get_all(self):
return Grade.objects.exclude(grade=-1).all()
def most_donations_avg(self):
avg = self.total_collected() / self.total_students()
return round(avg, 2)
def most_laps_avg(self):
avg = float(self.total_laps()) / self.total_students()
return round(avg, 2)
def total_students(self):
return Children.objects.filter(teacher__grade=self).count()
def total_laps(self):
results = Children.objects.filter(teacher__grade=self).exclude(disqualify=True).aggregate(num_laps=Sum('laps'))
return results['num_laps'] or 0
def total_donations(self):
results = Donation.objects.filter(child__teacher__grade=self).aggregate(total_donations=Sum('donated'))
return float(results['total_donations'] or 0)
def total_collected(self):
results = Children.objects.filter(teacher__grade=self).aggregate(total_collected=Sum('collected'))
return float(results['total_collected'] or 0)
def percent_completed(self):
percentage = 0
if self.total_collected() and self.total_donations():
percentage = self.total_collected() / self.total_donations()
return round(percentage, 3)
class Teacher(models.Model):
title = models.CharField(max_length=5, choices=(('Mrs.', 'Mrs.'), ('Ms.', 'Ms.'), ('Miss', 'Miss'), ('Mr.', 'Mr.')), default='Mrs.')
first_name = models.CharField(max_length=50, blank=True, null=True)
last_name = models.CharField(max_length=50)
email_address = models.CharField(max_length=100)
phone_number = models.CharField(max_length=25, blank=True, null=True, default='(714) 734-1878')
room_number = models.CharField(max_length=5)
website = models.CharField(max_length=255, blank=True, null=True)
shorten = models.CharField(max_length=255, blank=True, null=True)
grade = models.ForeignKey(Grade, related_name='teachers')
list_type = models.IntegerField(blank=True, null=True)
class Meta:
ordering = ['last_name', 'first_name']
def __unicode__(self):
return '%s (%s) %s' % (self.full_name(), self.room_number, self.grade)
def full_name(self):
return '%s %s %s' % (self.title, self.first_name, self.last_name)
def find(self, last_name=None):
try:
return Teacher.objects.filter(last_name__icontains=last_name).all()
except ObjectDoesNotExist, e:
return
def total_students(self, exclude=None):
if exclude and exclude == 1:
return Children.objects.filter(teacher=self, laps__gt=0).exclude(disqualify=True).count()
elif exclude and exclude == 2:
return Children.objects.filter(teacher=self, collected__gt=0).count()
else:
return Children.objects.filter(teacher=self).count()
def total_participation(self):
return Children.objects.filter(teacher=self, collected__gt=0).count()
def get_all(self):
return Teacher.objects.exclude(grade__grade=-1).all()
def get_donate_list(self):
return Teacher.objects.exclude(list_type=2).order_by('grade','room_number').all()
def get_list(self):
return Teacher.objects.exclude(list_type=3).order_by('grade','room_number').all()
def get_donations(self):
total = Donation.objects.filter(child__teacher=self).aggregate(donated=Sum('donated'))
return float(total['donated'] or 0)
def get_donations_list(self):
donators = []
sponsors = []
children = Children.objects.filter(teacher=self).all()
for child in children:
donation = child.total_sum()
donators.append({'name': child.list_name(), 'total': float(donation['total_sum'] or 0)})
donations = Donation.objects.filter(first_name__contains=self.last_name)
totals = { }
for index, donation in enumerate(donations):
full_name = donation.child.list_name()
if totals.has_key(full_name):
totals[full_name] += donation.donated or 0
else:
totals[full_name] = donation.donated or 0
for child, total in iter(sorted(totals.iteritems())):
sponsors.append({'name': child, 'total': float(total or 0)})
return donators, sponsors
def shortened(self):
if not self.shorten:
api = bitly.Api(login=settings.BITLY_LOGIN, apikey=settings.BITLY_APIKEY)
self.shorten = api.shorten(self.website)
self.save()
return self.shorten
def reports_url(self):
site = Site.objects.get_current()
reports_url = 'http://%s/admin/results/donations-by-teacher?id=%s' % (site.domain, self.id)
return reports_url
class Children(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
identifier = models.CharField(max_length=100, unique=True)
date_added = models.DateTimeField(default=date.datetime.now())
laps = models.IntegerField(blank=True, null=True)
age = models.IntegerField(blank=True, null=True)
gender = models.CharField(max_length=1, blank=True, null=True, choices=(('M', 'Boy'), ('F', 'Girl')))
collected = CurrencyField(blank=True, null=True)
disqualify = models.IntegerField(default=0, choices=((0, 'No'), (1, 'Yes')))
pledged = CurrencyField(blank=True, null=True)
teacher = models.ForeignKey(Teacher, related_name='students')
class Meta:
ordering = ['last_name', 'first_name']
def __unicode__(self):
return self.list_name()
def list_name(self):
return '%s, %s' % (self.last_name, self.first_name)
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
def find(self, parent_only=None, first_name=None, last_name=None):
try:
if parent_only == '1':
if first_name and last_name:
return Children.objects.filter(parents__first_name__icontains=first_name, parents__last_name__icontains=last_name).distinct().all()
elif first_name:
return Children.objects.filter(parents__first_name__icontains=first_name).distinct().all()
elif last_name:
return Children.objects.filter(parents__last_name__icontains=last_name).distinct().all()
else:
if first_name and last_name:
return Children.objects.filter(first_name__icontains=first_name, last_name__icontains=last_name).distinct().all()
elif first_name:
return Children.objects.filter(first_name__icontains=first_name).distinct().all()
elif last_name:
return Children.objects.filter(last_name__icontains=last_name).distinct().all()
except ObjectDoesNotExist, e:
return
def get_collected_list(self):
return Children.objects.filter(collected__gt=0).order_by('-collected').all()
def donate_url(self):
site = Site.objects.get_current()
donate_url = 'http://%s/make-donation/%s' % (site.domain, self.identifier)
return donate_url
def manage_url(self):
site = Site.objects.get_current()
manage_url = 'http://%s/account/%s' % (site.domain, self.identifier)
return manage_url
def facebook_share_url(self):
site = Site.objects.get_current()
params = 'app_id=' + settings.FACEBOOK_APP_ID + '&link=' + self.donate_url() + '&picture=' + ('http://%s/static/images/hickslogo-1.jpg' % site.domain) + '&name=' + urllib.quote('<NAME>') + '&caption=' + urllib.quote('Donate to %s' % self.full_name()) + '&description=' + urllib.quote("Donate and help further our children's education.") + '&redirect_uri=' + 'http://%s/' % site.domain
share_url = 'https://www.facebook.com/dialog/feed?' + params
return share_url
def twitter_share_url(self):
share_url = 'https://twitter.com/intent/tweet?button_hashtag=HuskyHustle&url=%s' % self.donate_url()
return share_url
def google_share_url(self):
share_url = 'https://plus.google.com/share?url=%s' % self.donate_url()
return share_url
def grades(self):
return Grade.objects.all()
def teachers(self):
return Teacher.objects.filter(grade=self.teacher.grade).all()
def donations(self):
return Donation.objects.filter(child=self).count()
def sponsors_flat(self):
return Donation.objects.filter(child=self, per_lap=False).exclude(last_name='teacher').all()
def sponsors_perlap(self):
return Donation.objects.filter(child=self, per_lap=True).all()
def sponsors_teacher(self):
return Donation.objects.filter(child=self, last_name='teacher').all()
def sponsored_principle(self):
try:
return Donation.objects.filter(child=self, first_name='<NAME>').get()
except ObjectDoesNotExist, e:
return
def parent(self):
try:
return self.parents.filter(parentchildren__default=1).get()
except ObjectDoesNotExist, e:
return
def is_default(self, parent):
return ParentChildren.objects.filter(children=self, parent=parent).get().default
def has_parents(self):
return ParentChildren.objects.filter(children=self).all()
def total_sum(self):
return Donation.objects.filter(child=self).aggregate(total_sum=Sum('donated'))
def total_for_laps(self):
total_due = 0
for sponsor in self.sponsors_perlap():
total_due += sponsor.total()
return total_due
def total_for_flat(self):
total_due = 0
for sponsor in self.sponsors_flat():
total_due += sponsor.total()
return total_due
def total_for_sponsors(self):
total_due = 0
for sponsor in self.sponsors_teacher():
total_due += sponsor.total()
return total_due
def total_raffle_tickets(self):
tickets = int(self.collected / RAFFLE_TICKET_AMT)
return tickets
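    # Worked example (illustrative only): with RAFFLE_TICKET_AMT = 25, a child who
    # collected $135.00 earns int(135 / 25) = 5 raffle tickets; anything under $25
    # earns none.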
def total_due(self):
total_due = 0
for sponsor in self.sponsors_flat():
if not sponsor.paid:
total_due += sponsor.total()
for sponsor in self.sponsors_perlap():
if not sponsor.paid:
total_due += sponsor.total()
for sponsor in self.sponsors_teacher():
if not sponsor.paid:
total_due += sponsor.total()
return total_due
def total_got(self):
total_got = 0
for sponsor in self.sponsors_flat():
if sponsor.paid:
total_got += sponsor.total()
for sponsor in self.sponsors_perlap():
if sponsor.paid:
total_got += sponsor.total()
for sponsor in self.sponsors_teacher():
if sponsor.paid:
total_got += sponsor.total()
return total_got
def grand_totals(self):
total_due = self.total_due()
total_got = self.total_got()
return [total_got, total_due, (total_due + total_got)]
def calculate_totals(self, id=None):
if id:
total_collected = 0
total_pledged = 0
child = Children.objects.get(pk=id)
for sponsor in child.sponsors.all():
total_pledged += sponsor.total() or 0
if sponsor.paid:
total_collected += sponsor.total() or 0
self.collected = total_collected
self.pledged = total_pledged
self.save()
else:
children = Children.objects.all()
for child in children:
total_collected = 0
total_pledged = 0
for sponsor in child.sponsors.all():
total_pledged += sponsor.total() or 0
if sponsor.paid:
total_collected += sponsor.total() or 0
child.collected = total_collected
child.pledged = total_pledged
child.save()
class Parent(models.Model):
GUARDIAN_CHOICES = ((1,'Mother'), (2,'Father'), (3,'Guardian'))
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
email_address = models.CharField(max_length=100)
phone_number = models.CharField(max_length=25, blank=True, null=True)
default = models.BooleanField(default=1)
guardian = models.IntegerField(default=1, choices=GUARDIAN_CHOICES)
activation_key = models.CharField(max_length=200)
key_expires = models.DateTimeField()
date_added = models.DateTimeField(default=date.datetime.now())
site = models.ForeignKey(Site)
user = models.OneToOneField(User, unique=True)
children = models.ManyToManyField(Children, related_name='parents', through='ParentChildren')
class Meta:
ordering = ['last_name', 'first_name']
def __unicode__(self):
return '%s (%s)' % (self.full_name(), self.email_address)
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
def get_my_children(self):
return Children.objects.filter(parent=self, parentchildren__default=1).all()
def activate_url(self):
site = Site.objects.get_current()
activate_url = 'http://%s/activate/%s' % (site.domain, self.activation_key)
return activate_url
def link_url(self, request_id=None):
if not request_id: request_id = self.id
site = Site.objects.get_current()
link_url = 'http://%s/link/%s' % (site.domain, request_id)
return link_url
def links(self):
try:
child = self.children.all()[0]
except IndexError:
return None
try:
return child.parents.exclude(pk=self.id).all()
except ObjectDoesNotExist, e:
return None
def num_children(self):
return self.children.count()
def facebook(self, user_id=None):
if not user_id:
user_id = self.user.id
try:
facebook = FacebookProfile.objects.filter(user_id=user_id).get()
except ObjectDoesNotExist, e:
return
| |
min(self.min_dn_open_price,x.openPrice)
total_price += x.openPrice*x.volume
total_volume += x.volume
if total_volume > 0:
self.avg_dn_open_price = total_price/total_volume
def count_avg_open_price(self, grid_list):
"""计算平均开仓价"""
total_price = EMPTY_FLOAT
total_volume = EMPTY_INT
avg_price = EMPTY_FLOAT
for g in grid_list:
total_price += g.openPrice * g.volume
total_volume += g.volume
if total_volume > EMPTY_INT:
avg_price = total_price / total_volume
return avg_price
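    # Worked example (illustrative only): grids opened at 100 for 2 lots and at
    # 104 for 1 lot give (100*2 + 104*1) / 3 = 101.33 as the volume-weighted
    # average open price returned here.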
def combineOpenedGrids(self,direction,type=EMPTY_STRING):
"""合并已开仓的网格"""
total_open_price = EMPTY_FLOAT
total_close_price = EMPTY_FLOAT
total_volume = EMPTY_INT
saved_grid = None
if direction == DIRECTION_SHORT:
opened_short_grids = self.getGrids(direction=direction, opened=True, ordered=False, type = type)
if len(opened_short_grids)<=1:
return
self.writeCtaLog(u'{}个空网格合并为1个'.format(len(opened_short_grids)))
saved_grid = opened_short_grids[-1]
for g in opened_short_grids:
total_open_price += g.openPrice * g.volume
total_close_price += g.closePrice * g.volume
total_volume += g.volume
if g != saved_grid:
self.writeCtaLog(u'删除空网格 {}=>{},v:{}'.format(g.openPrice, g.closePrice, g.volume))
self.upGrids.remove(g)
else:
self.writeCtaLog(u'保留空网格 {}=>{},v:{}'.format(g.openPrice, g.closePrice, g.volume))
# 更新网格的开仓价和仓位数量
saved_grid.openPrice = int((total_open_price / total_volume)/self.minDiff)*self.minDiff
saved_grid.volume = total_volume
saved_grid.closePrice = int((total_close_price / total_volume)/self.minDiff)*self.minDiff
self.writeCtaLog(u'合并后空网格为{}=>{},v:{}'.format(saved_grid.openPrice, saved_grid.closePrice, saved_grid.volume))
elif direction == DIRECTION_LONG:
opened_long_grids = self.getGrids(direction=direction, opened=True, ordered=False, type=type)
if len(opened_long_grids) <= 1:
return
self.writeCtaLog(u'{}个多网格合并为1个'.format(len(opened_long_grids)))
saved_grid = opened_long_grids[-1]
for g in opened_long_grids:
total_open_price += g.openPrice * g.volume
total_close_price += g.closePrice * g.volume
total_volume += g.volume
if g != saved_grid:
self.writeCtaLog(u'删除多网格 {}=>{},v:{}'.format(g.openPrice, g.closePrice, g.volume))
self.dnGrids.remove(g)
else:
self.writeCtaLog(u'保留多网格 {}=>{},v:{}'.format(g.openPrice, g.closePrice, g.volume))
# 更新网格的开仓价和仓位数量
saved_grid.openPrice = int((total_open_price / total_volume) / self.minDiff) * self.minDiff
saved_grid.volume = total_volume
saved_grid.closePrice = int((total_close_price / total_volume) / self.minDiff) * self.minDiff
self.writeCtaLog(
u'合并后多网格为{}=>{},v:{}'.format(saved_grid.openPrice, saved_grid.closePrice, saved_grid.volume))
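    # Worked example (illustrative only): with minDiff = 10, two opened grids of the
    # same direction, 3800 x 1 lot and 3820 x 2 lots, merge into one grid of volume 3
    # whose open price becomes int(((3800*1 + 3820*2) / 3) / 10) * 10 = 3810.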
def clearDuplicateGrids(self,direction=EMPTY_STRING,type=EMPTY_STRING):
"""去除重复开仓价的未开仓网格"""
if direction == DIRECTION_SHORT or direction==EMPTY_STRING:
if len(self.upGrids) < 2:
return
checking_grids = self.getGrids(direction=DIRECTION_SHORT, opened=False,ordered=False,type=type)
if len(checking_grids) < 2:
return
open_price_list = []
remove_grids = []
for g in checking_grids:
if g.openPrice in open_price_list:
remove_grids.append(g)
continue
open_price_list.append(g.openPrice)
for rg in remove_grids:
try:
self.upGrids.remove(rg)
except:
pass
if direction == DIRECTION_LONG or direction==EMPTY_STRING:
if len(self.dnGrids) < 2:
return
checking_grids = self.getGrids(direction=DIRECTION_LONG, opened=False, ordered=False, type=type)
if len(checking_grids) < 2:
return
open_price_list = []
remove_grids = []
for g in checking_grids:
if g.openPrice in open_price_list:
remove_grids.append(g)
continue
open_price_list.append(g.openPrice)
for rg in remove_grids:
try:
self.dnGrids.remove(rg)
except:
pass
def save(self, direction=None):
"""
保存网格至本地Json文件"
2017/11/23 update: 保存时,空的列表也保存
:param direction:
:return:
"""""
# 回测模式不保存
if self.strategy and getattr(self.strategy, 'backtesting', False):
return
# 更新开仓均价
self.recount_avg_open_price()
grids_save_path = self.get_data_folder()
# 确保json名字与策略一致
if self.jsonName != self.strategy.name:
self.writeCtaLog(u'JsonName {} 与 上层策略名{} 不一致.'.format(self.jsonName, self.strategy.name))
self.jsonName = self.strategy.name
# 移除旧版上/下网格列表
old_up_json_file = os.path.join(grids_save_path, u'{0}_upGrids.json'.format(self.jsonName))
old_dn_json_file = os.path.join(grids_save_path, u'{0}_dnGrids.json'.format(self.jsonName))
if os.path.exists(old_up_json_file):
try:
os.remove(old_up_json_file)
except:
pass
if os.path.exists(old_dn_json_file):
try:
os.remove(old_dn_json_file)
except:
pass
# 新版网格持久化文件
grid_json_file = os.path.join(grids_save_path, u'{}_Grids.json'.format(self.jsonName))
self.json_file_path = grid_json_file
data = {}
up_grids = []
for grid in self.upGrids:
up_grids.append(grid.toJson())
dn_grids = []
for grid in self.dnGrids:
dn_grids.append(grid.toJson())
data[u'up_grids'] = up_grids
data[u'dn_grids'] = dn_grids
with open(grid_json_file, 'w') as f:
json_data = json.dumps(data, indent=4)
f.write(json_data)
self.writeCtaLog(u'GrideTrade保存文件{}完成'.format(grid_json_file))
def load(self, direction, openStatusFilter=[]):
"""
        Load grids from the local JSON file.
        :param direction: DIRECTION_SHORT for the short-side grids; DIRECTION_LONG for the long-side grids
        :param openStatusFilter: empty by default (no filtering); True extracts only opened grids, False only unopened grids
:return:
"""
data = {}
grids_save_path = self.get_data_folder()
if self.jsonName != self.strategy.name:
self.writeCtaLog(u'JsonName {} 与 上层策略名{} 不一致.'.format(self.jsonName, self.strategy.name))
self.jsonName = self.strategy.name
# 移除旧版上/下网格列表
old_up_json_file = os.path.join(grids_save_path, u'{0}_upGrids.json'.format(self.jsonName))
old_dn_json_file = os.path.join(grids_save_path, u'{0}_dnGrids.json'.format(self.jsonName))
if os.path.exists(old_up_json_file):
try:
with open(old_up_json_file, 'r', encoding='utf8') as f:
# 解析json文件
data['up_grids'] = json.load(f)
except IOError:
self.writeCtaLog(u'读取网格{}出错'.format(old_up_json_file))
data['up_grids'] = []
try: # 移除旧版上网格文件
os.remove(old_up_json_file)
except:
pass
if os.path.exists(old_dn_json_file):
try:
with open(old_dn_json_file, 'r', encoding='utf8') as f:
# 解析json文件
data['dn_grids'] = json.load(f)
except IOError as ex:
self.writeCtaLog(u'读取网格{}出错,ex:{}'.format(old_dn_json_file,str(ex)))
data['dn_grids'] = []
try: # 移除旧版下网格文件
os.remove(old_dn_json_file)
except:
pass
# 若新版文件不存在,就保存;若存在,就优先使用新版数据文件
grid_json_file = os.path.join(grids_save_path, u'{}_Grids.json'.format(self.jsonName))
if not os.path.exists(grid_json_file):
if len(data) == 0:
data['up_grids'] = []
data['dn_grids'] = []
self.writeCtaLog(u'{}不存在,保存'.format(grid_json_file))
else:
self.writeCtaLog(u'{}不存在,保存'.format(grid_json_file))
try:
with open(grid_json_file, 'w') as f:
json_data = json.dumps(data, indent=4)
f.write(json_data)
except Exception as ex:
self.writeCtaLog(u'写入网格文件{}异常:{}'.format(grid_json_file,str(ex)))
else:
# 读取json文件
try:
with open(grid_json_file, 'r', encoding='utf8') as f:
data = json.load(f)
except Exception as ex:
self.writeCtaLog(u'读取网格文件{}异常:{}'.format(grid_json_file,str(ex)))
# 从文件获取数据
json_grids = []
if direction == DIRECTION_SHORT :
json_grids = data['up_grids'] if 'up_grids' in data else []
elif direction == DIRECTION_LONG:
json_grids = data['dn_grids'] if 'dn_grids' in data else []
grids = []
ids = []
for i in json_grids:
closePrice = float(i['closePrice'])
openPrice = float(i['openPrice'])
stopPrice = float(i['stopPrice'])
id = i.get('id')
self.writeCtaLog(u'load Grid:open:{0},close:{1},stop:{2}'.format(openPrice, closePrice, stopPrice))
grid = CtaGrid(direction=i['direction'], openprice=openPrice, closeprice=closePrice,
stopprice=stopPrice, volume=i['volume'])
if id is not None and id not in ids:
grid.id = id
ids.append(id)
grid.orderStatus = i['orderStatus'] # 挂单状态: True,已挂单,False,未挂单
grid.orderRef = i['orderRef'] # OrderId
grid.openStatus = i['openStatus'] # 开仓状态
grid.closeStatus = i['closeStatus'] # 平仓状态
strTime = i['openDatetime']
if strTime == EMPTY_STRING or type(strTime)==type(None):
grid.openDatetime = None
else:
grid.openDatetime = datetime.strptime(strTime, '%Y-%m-%d %H:%M:%S')
try:
grid.tradedVolume = i['tradedVolume'] # 已交易的合约数量
except KeyError:
grid.tradedVolume = EMPTY_INT
try:
grid.lockGrids = i['lockGrids']
except KeyError:
grid.lockGrids = []
try:
grid.type = i['type']
if grid.type == False:
grid.type = EMPTY_STRING
except KeyError:
grid.type = EMPTY_STRING
try:
grid.reuse = i['reuse']
except KeyError:
grid.reuse = False
try:
grid.openPrices = i['openPrices']
except KeyError:
grid.openPrices = {}
try:
grid.snapshot = i['snapshot']
except KeyError:
grid.snapshot = {}
self.writeCtaLog(grid.toStr())
# 增加对开仓状态的过滤,满足某些策略只提取已开仓的网格数据
if len(openStatusFilter) > 0:
if grid.openStatus not in openStatusFilter:
continue
grids.append(grid)
# 更新开仓均价
self.recount_avg_open_price()
return grids
def get_data_folder(self):
"""获取数据目录"""
# 工作目录
currentFolder = os.path.abspath(os.path.join(os.getcwd(), u'data'))
if os.path.isdir(currentFolder):
# 如果工作目录下,存在data子目录,就使用data子目录
return currentFolder
else:
# 否则,使用缺省保存目录 vnpy/trader/app/ctaStrategy/data
return os.path.abspath(os.path.join(os.path.dirname(__file__), u'data'))
def changeStrategyName(self, old_name, new_name):
"""
        Rename the strategy instance at runtime; the JSON persistence file has to be moved as well.
:param old_name:
:param new_name:
:return:
"""
if old_name == new_name:
self.writeCtaLog(u'更换策略实例名称失败,old:{} =>new:{}'.format(old_name, new_name))
return
data_folder = self.get_data_folder()
self.jsonName = new_name
# 旧文件
old_up_json_file = os.path.join(data_folder, u'{0}_upGrids.json'.format(old_name))
old_dn_json_file = os.path.join(data_folder, u'{0}_dnGrids.json'.format(old_name))
old_json_file = os.path.join(data_folder, u'{0}_Grids.json'.format(old_name))
# 新文件
self.json_file_path = os.path.join(data_folder, u'{0}_Grids.json'.format(new_name))
if os.path.isfile(self.json_file_path): # 新文件若存在,移除
try:
os.remove(self.json_file_path)
except Exception as ex:
self.writeCtaLog(u'GridTrade.changeStrategyName 删除文件:{}异常:{}'.format(old_up_json_file,str(ex)))
# 移动文件
if os.path.isfile(old_json_file):
try:
shutil.move(old_json_file, self.json_file_path)
return
except Exception as ex:
self.writeCtaLog(u'GridTrade.changeStrategyName 移动文件:{}=》{}异常:{}'.format(old_up_json_file, self.json_file_path, str(ex)))
else:
data = {}
if os.path.isfile(old_up_json_file):
try:
with open(old_up_json_file, 'r', encoding='utf8') as f:
# 解析json文件
data['up_grids'] = json.load(f)
except IOError:
self.writeCtaLog(u'读取网格{}出错'.format(old_up_json_file))
data['up_grids'] = []
try: # 移除旧版上网格文件
os.remove(old_up_json_file)
except IOError:
self.writeCtaLog(u'移除网格{}出错'.format(old_up_json_file))
else:
data['up_grids'] = []
if os.path.isfile(old_dn_json_file):
try:
with open(old_dn_json_file, 'r', encoding='utf8') as f:
# 解析json文件
data['dn_grids'] = json.load(f)
except IOError:
self.writeCtaLog(u'读取网格{}出错'.format(old_dn_json_file))
data['dn_grids'] = []
try: # 移除旧版上网格文件
os.remove(old_dn_json_file)
except IOError:
self.writeCtaLog(u'移除网格{}出错'.format(old_dn_json_file))
else:
data['dn_grids'] = []
try:
with open(self.json_file_path, 'w') as f:
json_data = json.dumps(data, indent=4)
f.write(json_data)
except IOError as ex:
self.writeCtaLog(u'写入网格文件{}异常:{}'.format(self.json_file_path, str(ex)))
def getJsonFilePath(self):
"""
        Return the file path of the up/down grid JSON file.
:return:
"""
return self.json_file_path
def getTypesOfOpenedGrids(self, direction, include_empty=False):
"""
        Get the list of grid types among the opened grids.
        :param direction:
        :param include_empty: whether to include the empty-string type
:return:
"""
grids = self.getOpenedGrids(direction)
type_list = []
for g in grids:
if g.type not in type_list and (g.type !=EMPTY_STRING if not include_empty else True):
type_list.append(g.type)
return type_list
ARBITRAGE_LONG = u'正套'
ARBITRAGE_SHORT = u'反套'
class ArbitrageGrid(object):
"""套利网格"""
def __init__(self,direction, openprice, closeprice, stopprice=EMPTY_FLOAT, type=EMPTY_STRING):
self.leg1 = None
self.leg2 = None
self.id = str(uuid.uuid1())
        self.direction = direction # long spread (ARBITRAGE_LONG) or short spread (ARBITRAGE_SHORT)
        self.openPrice = openprice # open price / price ratio
        self.closePrice = closeprice # close price / price ratio
        self.stopPrice = stopprice # stop-loss price / price ratio
        self.type = type # arbitrage type (user defined)
self.snapshot = {}
def update_leg1(self,grid):
"""
        Attach leg 1.
:param grid:
:return:
"""
if isinstance(grid, CtaGrid):
self.leg1 = grid
else:
print(u'leg1 不是CtaGrid类型')
def update_leg2(self, grid):
"""
        Attach leg 2.
:param grid:
:return:
"""
if isinstance(grid, CtaGrid):
self.leg2 = grid
else:
print(u'leg2 不是CtaGrid类型')
def toJson(self):
j = OrderedDict()
j['id'] = self.id
j['direction'] = self.direction
        j['openPrice'] = self.openPrice # key name aligned with what fromJson() reads back
        j['closePrice'] = self.closePrice
        j['stopPrice'] = self.stopPrice
        j['type'] = self.type
        j['snapshot'] = self.snapshot # snapshot (slice) data
try:
if self.leg1 is not None:
j['leg1'] = self.leg1.toJson()
if self.leg2 is not None:
j['leg2'] = self.leg2.toJson()
except Exception as ex:
print(u'Arbitrage Grid toJson exception:{} {}'.format(str(ex), traceback.format_exc()),file=sys.stderr)
return j
def fromJson(self,j):
if 'id' in j:
self.id = j.get('id')
self.direction = j.get('direction',EMPTY_STRING)
self.openPrice = j.get('openPrice',EMPTY_FLOAT)
self.closePrice = j.get('closePrice',EMPTY_FLOAT)
self.stopPrice = j.get('stopPrice',EMPTY_FLOAT)
self.type = j.get('type',EMPTY_STRING)
self.snapshot = j.get('snapshot',{})
if 'leg1' in j:
if self.leg1 is None:
self.leg1 = CtaGrid(direction=EMPTY_STRING,openprice=EMPTY_FLOAT,closeprice=EMPTY_FLOAT)
self.leg1.fromJson(j.get('leg1'))
if 'leg2' in j:
if self.leg2 is None:
self.leg2 = CtaGrid(direction=EMPTY_STRING,openprice=EMPTY_FLOAT,closeprice=EMPTY_FLOAT)
self.leg2.fromJson(j.get('leg2'))
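    # Hedged round-trip sketch (values are illustrative; assumes the keys written
    # by toJson() line up with the keys read back here):
    #
    #   g = ArbitrageGrid(direction=ARBITRAGE_LONG, openprice=1.02, closeprice=1.05)
    #   g2 = ArbitrageGrid(direction=EMPTY_STRING, openprice=EMPTY_FLOAT, closeprice=EMPTY_FLOAT)
    #   g2.fromJson(g.toJson())
    #   # g2 now carries the same id, direction and price levels as g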
class ArbitrageTrade(object):
"""
    Arbitrage trade grids, used only to persist spread/ratio, cross-market and cash-futures arbitrage records.
    It holds two queues: long-spread grids and short-spread grids.
"""
def __init__(self, strategy, leg1_settings, leg2_settings):
"""
        Constructor
        :param strategy: the parent strategy that uses this object
"""
self.strategy = strategy
        # traded contracts
        self.leg1_symbol = leg1_settings.get('vtSymbol', EMPTY_STRING)
        self.leg2_symbol = leg2_settings.get('vtSymbol', EMPTY_STRING)
        # contract sizes (multipliers) of the traded contracts
        self.leg1_size = leg1_settings.get('size', 1)
        self.leg2_size = leg2_settings.get('size', 1)
        # long-spread (正套) queue
        self.long_list = []
        # short-spread (反套) queue
self.short_list = []
def writeCtaLog(self, log):
"""
        Write a log message.
:param log:
:return:
"""
if self.strategy and hasattr(self.strategy,'writeCtaLog'):
self.strategy.writeCtaLog(log)
else:
print(log)
def writeCtaError(self, log):
"""
        Write an error log message.
:param log:
:return:
"""
if self.strategy and hasattr(self.strategy, 'writeCtaError'):
self.strategy.writeCtaError(log)
else:
print(log,file=sys.stderr)
def toJson(self):
"""
=> json | |
median filtere
path_to_filter : full path to the boxcar filter binary file, including
the file name.
Returns
-------
str
        Full path of the concatenated file.
"""
# construct stime and etime for one-day worthy of data
stime = ctr_date
etime = ctr_date + dt.timedelta(days=1)
    # Create the tmpdir folder if it does not exist
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
# extract info from the localdict argument
radcode = localdict["radar"]
ftype = localdict["ftype"]
channel = localdict["channel"]
# fetch the data for a given day
#file_list = fetch_local_files(stime, etime, localdirfmt, localdict, tmpdir, fnamefmt)
###################
    # Due to a bug related to davitpy, here is a workaround to find the list of files needed
# Note: the .bz files have to be manually copied from sd-data to the folder defined by localdirfmt
file_list = fetch_local_files(stime, etime, localdirfmt, localdict, tmpdir, fnamefmt)
#file_list = glob.glob(os.path.join(tmpdir, '*bz2'))
###################
# Make sure all the fetched files have desired ftype
file_list = [x for x in file_list if ftype in x]
# check if we have found files
if len(file_list) != 0:
# concatenate the files into a single file
print("Concatenating all the " + ftype + " files into one")
        # choose a temp file name with time span info for caching
if (channel is None) or (channel == "."):
fname = '%s%s.%s.%s.%s.%s.%s' % \
(tmpdir, stime.strftime("%Y%m%d"),
stime.strftime("%H%M%S"),
etime.strftime("%Y%m%d"),
etime.strftime("%H%M%S"), radcode, ftype)
else:
fname = '%s%s.%s.%s.%s.%s.%s.%s' % \
(tmpdir, stime.strftime("%Y%m%d"),
stime.strftime("%H%M%S"),
etime.strftime("%Y%m%d"),
etime.strftime("%H%M%S"),
radcode, channel, ftype)
logging.debug('cat ' + string.join(file_list) + ' > ' + fname)
os.system('cat ' + string.join(file_list) + ' > ' + fname)
# remove the unneeded files from the tmpdir
if remove_extra_file:
print("removing unneeded " + ftype + " files")
for fn in file_list:
logging.debug('rm ' + fn)
os.system('rm ' + fn)
os.system('rm ' + fn+".bz2")
#os.system('rm ' + fn+".gz")
else:
fname = None
# Boxcar filter
if median_filter:
fname = boxcar_filter(fname, path_to_filter)
return fname
def boxcar_filter(fname, path_to_filter):
"""Does boxcar median filtering to data in a file.
Parameters
-----------
fname : str
Full path of a file (fitacf, fitex).
path_to_filter : full path to the boxcar filter binary file, including
the file name.
Returns
-------
ffname : str
Full path of a data file that is boxcar median filtered.
The filtered file name will be fname+"f"
"""
if fname is not None:
# extract the data type (e.g., fitacf, fitex, etc.) from fname
ftype = fname.split(".")[-1]
if not ftype+'f' in fname:
try:
print("boxcar filtering the data")
# do boxcar filtering
ffname = fname + 'f'
command = path_to_filter + ' ' + fname + ' > ' + ffname
logging.debug("performing: {:s}".format(command))
os.system(command)
logging.debug("done filtering")
except Exception, e:
estr = 'problem filtering file, using the unfiltered one'
logging.warning(estr)
else:
print("file " + fname + " exists")
ffname = fname
else:
ffname = None
return ffname
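# Illustrative usage (hedged; assumes the fitexfilter binary has been built and the
# concatenated fitacf file exists locally):
#
#   filtered = boxcar_filter("./data/tmp/20121205.000000.20121206.000000.bks.fitacf",
#                            "./fitexfilter")
#   # -> "./data/tmp/20121205.000000.20121206.000000.bks.fitacff"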
def dmap_to_csv(fname, stime, etime=None, sep="|",
fileType="fitacf", readOnly=False):
"""Reads data from a dmap file and writes it to
a csv file.
Parameter
---------
fname : str
Full path of a dmap file (fitacf, fitex).
stime : datetime.datetime
The start time of interest
etime : datetime.datetime
The end time of interest
sep : str
Delimiter to use
fileType : str
SuperDARN fit data type (e.g., fitacf)
Returns
-------
fname_csv : str
Full path (including the file name) of a csv file
"""
    # Get a file pointer
myPtr = radDataPtr(sTime=stime, eTime=etime, fileName=fname, fileType=fileType)
# Parameter names in a fitacf file
header = sep.join(["time", "bmnum", "channel", "stid", "cp", "lmfit" , "fitex",
"exflg", "iqflg", "offset", "lmflg", "rawflg", "fType",
"acflg", "fitacf", # upto here are params in myBeam
"elv", "gflg", "nlag", "npnts", "p_l", "p_l_e", "p_s",
"p_s_e", "phi0", "phi0_e", "pwr0", "qflg", "slist", "v",
"v_e", "w_l", "w_l_e", "w_s", "w_s_e", # upto here are params in myBeam.fit
"bmazm", "frang", "ifmode", "inttsc", "inttus", "lagfr",
"ltab", "mpinc", "mplgexs", "mplgs", "mppul", "nave", "noisemean",
"noisesearch", "noisesky", "nrang", "ptab", "rsep", "rxrise",
"scan", "smsep", "tfreq", "txpl", "xcf"]) # upto here are params in myBeam.prm
# Output file name
fname_csv = fname + ".csv"
# Read the parameters of interest.
try:
myPtr.rewind()
except Exception as e:
logging.error(e)
myBeam = myPtr.readRec()
with open(fname_csv, "w") as f:
f.write(header +"\n")
while(myBeam is not None):
if(myBeam.time > myPtr.eTime): break
if(myPtr.sTime <= myBeam.time):
# Params in myBeam
time = str(myBeam.time).split(".")[0] # Remove millisecond part
bmnum = str(myBeam.bmnum)
stid = str(myBeam.stid)
cp = str(myBeam.cp)
channel = str(myBeam.channel)
lmfit = str(myBeam.lmfit)
fitex = str(myBeam.fitex)
exflg = str(myBeam.exflg)
iqflg = str(myBeam.iqflg)
offset = str(myBeam.offset)
lmflg = str(myBeam.lmflg)
rawflg = str(myBeam.rawflg)
fType = str(myBeam.fType)
acflg = str(myBeam.acflg)
fitacf = str(myBeam.fitacf)
# Params in myBeam.fit
elv = "[]" if myBeam.fit.elv is None else str(myBeam.fit.elv)
gflg = "[]" if myBeam.fit.gflg is None else str(myBeam.fit.gflg)
nlag = "[]" if myBeam.fit.nlag is None else str(myBeam.fit.nlag)
npnts = str(myBeam.fit.npnts)
p_l = "[]" if myBeam.fit.p_l is None else str(myBeam.fit.p_l)
p_l_e = "[]" if myBeam.fit.p_l_e is None else str(myBeam.fit.p_l_e)
p_l_e = p_l_e.replace("inf", "999999")
p_s = "[]" if myBeam.fit.p_s is None else str(myBeam.fit.p_s)
p_s_e = "[]" if myBeam.fit.p_s_e is None else str(myBeam.fit.p_s_e)
p_s_e = p_s_e.replace("inf", "999999")
phi0 = "[]" if myBeam.fit.phi0 is None else str(myBeam.fit.phi0)
phi0_e = "[]" if myBeam.fit.phi0_e is None else str(myBeam.fit.phi0_e)
phi0_e = phi0_e.replace("inf", "999999")
pwr0 = "[]" if myBeam.fit.pwr0 is None else str(myBeam.fit.pwr0)
qflg = "[]" if myBeam.fit.qflg is None else str(myBeam.fit.qflg)
slist = str(myBeam.fit.slist)
v = "[]" if myBeam.fit.v is None else str(myBeam.fit.v)
v_e = "[]" if myBeam.fit.v_e is None else str(myBeam.fit.v_e)
v_e = v_e.replace("inf", "999999")
w_l = "[]" if myBeam.fit.w_l is None else str(myBeam.fit.w_l)
w_l = w_l.replace("inf", "999999")
w_l_e = "[]" if myBeam.fit.w_l_e is None else str(myBeam.fit.w_l_e)
w_l_e = w_l_e.replace("inf", "999999")
w_s = "[]" if myBeam.fit.w_s is None else str(myBeam.fit.w_s)
w_s = w_s.replace("inf", "999999")
w_s_e = "[]" if myBeam.fit.w_s_e is None else str(myBeam.fit.w_s_e)
w_s_e = w_s_e.replace("inf", "999999")
# Params in myBeam.prm
bmazm = str(myBeam.prm.bmazm)
frang = str(myBeam.prm.frang)
ifmode = str(myBeam.prm.ifmode)
inttsc = str(myBeam.prm.inttsc)
inttus = str(myBeam.prm.inttus)
lagfr = str(myBeam.prm.lagfr)
ltab = "[]" if myBeam.prm.ltab is None else str(myBeam.prm.ltab)
mpinc = str(myBeam.prm.mpinc)
mplgexs = str(myBeam.prm.mplgexs)
mplgs = str(myBeam.prm.mplgs)
mppul = str(myBeam.prm.mppul)
nave = str(myBeam.prm.nave)
noisemean = str(myBeam.prm.noisemean)
noisesearch = str(myBeam.prm.noisesearch)
noisesky = str(myBeam.prm.noisesky)
nrang = str(myBeam.prm.nrang)
ptab = "[]" if myBeam.prm.ptab is None else str(myBeam.prm.ptab)
rsep = str(myBeam.prm.rsep)
rxrise = str(myBeam.prm.rxrise)
scan = str(myBeam.prm.scan)
smsep = str(myBeam.prm.smsep)
tfreq = str(myBeam.prm.tfreq)
txpl = str(myBeam.prm.txpl)
xcf = str(myBeam.prm.xcf)
# Params in myBeam.rawacf
#NOTE: add if needed
# Params in myBeam.iqdat
#NOTE: add if needed
# Params in myBeam.fPtr
#NOTE: add if needed
# Write the current lbeam record to fname_csv
line = sep.join([time, bmnum, channel, stid, cp, lmfit , fitex,
exflg, iqflg, offset, lmflg, rawflg, fType,
acflg, fitacf, # upto here are params in myBeam
elv, gflg, nlag, npnts, p_l, p_l_e, p_s,
p_s_e, phi0, phi0_e, pwr0, qflg, slist, v,
v_e, w_l, w_l_e, w_s, w_s_e, # upto here are params in myBeam.fit
bmazm, frang, ifmode, inttsc, inttus, lagfr,
ltab, mpinc, mplgexs, mplgs, mppul, nave, noisemean,
noisesearch, noisesky, nrang, ptab, rsep, rxrise,
scan, smsep, tfreq, txpl, xcf]) # upto here are params in myBeam.prm
f.write(line +"\n")
# Read the next beam record
myBeam = myPtr.readRec()
return fname_csv
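# Illustrative follow-up (a sketch, not part of the original script): the csv is
# pipe-delimited, so it can be loaded with pandas for further analysis.
#
#   import pandas as pd
#   df = pd.read_csv(fname_csv, sep="|")
#   print(df[["time", "bmnum", "tfreq", "nrang"]].head())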
# run the code
def main():
# Set the logging level
logging.getLogger().setLevel(logging.WARNING)
# input parameters
#ctr_date = dt.datetime(2012,12,31)
#ctr_date = dt.datetime(2014,11,02)
ctr_date = dt.datetime(2012,12,05)
stime = ctr_date
etime = ctr_date + dt.timedelta(days=1)
#etime = None
#rad = "fhe"
rad = "bks"
#rad = "ade"
channel = "."
ftype = "fitacf"
    csv_sep = "|" # used to separate variables in a csv file
remove_extra_file = True
median_filter=False
path_to_filter = './fitexfilter'
#localdirfmt = "/sd-data/{year}/{ftype}/{radar}/"
localdirfmt = "./sd-data/{year}/{ftype}/{radar}/"
#localdirfmt = ".data/tmp/"
localdict = {"ftype" : ftype, "radar" : rad, "channel" : channel}
tmpdir = "./data/tmp/"
fnamefmt = ['{date}.{hour}......{radar}.{channel}.{ftype}',\
'{date}.{hour}......{radar}.{ftype}']
# Fetch and concatenate files
fname = fetch_concat(ctr_date, localdirfmt, localdict, tmpdir, fnamefmt,
remove_extra_file=remove_extra_file,
median_filter=median_filter,
path_to_filter=path_to_filter)
# Convert dmap format to csv
#fname = "./data/tmp/20121231.000000.20130101.000000.fhe.fitacf"
#fname = "./data/tmp/20141101.000000.20141102.000000.bks.fitacf"
#fname = "./data/tmp/20121204.000000.20121205.000000.bks.fitacf"
#fname = "./data/tmp/20121205.000000.20121206.000000.bks.fitacf"
print("Converting from dmap format to | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
datetime enumerations module.
"""
from pyrin.core.enumerations import CoreEnum
class TimezoneEnum(CoreEnum):
"""
timezone enum.
"""
AFRICA_ABIDJAN = 'Africa/Abidjan'
AFRICA_ACCRA = 'Africa/Accra'
AFRICA_ADDIS_ABABA = 'Africa/Addis_Ababa'
AFRICA_ALGIERS = 'Africa/Algiers'
AFRICA_ASMARA = 'Africa/Asmara'
AFRICA_ASMERA = 'Africa/Asmera'
AFRICA_BAMAKO = 'Africa/Bamako'
AFRICA_BANGUI = 'Africa/Bangui'
AFRICA_BANJUL = 'Africa/Banjul'
AFRICA_BISSAU = 'Africa/Bissau'
AFRICA_BLANTYRE = 'Africa/Blantyre'
AFRICA_BRAZZAVILLE = 'Africa/Brazzaville'
AFRICA_BUJUMBURA = 'Africa/Bujumbura'
AFRICA_CAIRO = 'Africa/Cairo'
AFRICA_CASABLANCA = 'Africa/Casablanca'
AFRICA_CEUTA = 'Africa/Ceuta'
AFRICA_CONAKRY = 'Africa/Conakry'
AFRICA_DAKAR = 'Africa/Dakar'
AFRICA_DAR_ES_SALAAM = 'Africa/Dar_es_Salaam'
AFRICA_DJIBOUTI = 'Africa/Djibouti'
AFRICA_DOUALA = 'Africa/Douala'
AFRICA_EL_AAIUN = 'Africa/El_Aaiun'
AFRICA_FREETOWN = 'Africa/Freetown'
AFRICA_GABORONE = 'Africa/Gaborone'
AFRICA_HARARE = 'Africa/Harare'
AFRICA_JOHANNESBURG = 'Africa/Johannesburg'
AFRICA_JUBA = 'Africa/Juba'
AFRICA_KAMPALA = 'Africa/Kampala'
AFRICA_KHARTOUM = 'Africa/Khartoum'
AFRICA_KIGALI = 'Africa/Kigali'
AFRICA_KINSHASA = 'Africa/Kinshasa'
AFRICA_LAGOS = 'Africa/Lagos'
AFRICA_LIBREVILLE = 'Africa/Libreville'
AFRICA_LOME = 'Africa/Lome'
AFRICA_LUANDA = 'Africa/Luanda'
AFRICA_LUBUMBASHI = 'Africa/Lubumbashi'
AFRICA_LUSAKA = 'Africa/Lusaka'
AFRICA_MALABO = 'Africa/Malabo'
AFRICA_MAPUTO = 'Africa/Maputo'
AFRICA_MASERU = 'Africa/Maseru'
AFRICA_MBABANE = 'Africa/Mbabane'
AFRICA_MOGADISHU = 'Africa/Mogadishu'
AFRICA_MONROVIA = 'Africa/Monrovia'
AFRICA_NAIROBI = 'Africa/Nairobi'
AFRICA_NDJAMENA = 'Africa/Ndjamena'
AFRICA_NIAMEY = 'Africa/Niamey'
AFRICA_NOUAKCHOTT = 'Africa/Nouakchott'
AFRICA_OUAGADOUGOU = 'Africa/Ouagadougou'
AFRICA_PORTO_NOVO = 'Africa/Porto-Novo'
AFRICA_SAO_TOME = 'Africa/Sao_Tome'
AFRICA_TIMBUKTU = 'Africa/Timbuktu'
AFRICA_TRIPOLI = 'Africa/Tripoli'
AFRICA_TUNIS = 'Africa/Tunis'
AFRICA_WINDHOEK = 'Africa/Windhoek'
AMERICA_ADAK = 'America/Adak'
AMERICA_ANCHORAGE = 'America/Anchorage'
AMERICA_ANGUILLA = 'America/Anguilla'
AMERICA_ANTIGUA = 'America/Antigua'
AMERICA_ARAGUAINA = 'America/Araguaina'
AMERICA_ARGENTINA_BUENOS_AIRES = 'America/Argentina/Buenos_Aires'
AMERICA_ARGENTINA_CATAMARCA = 'America/Argentina/Catamarca'
AMERICA_ARGENTINA_COMODRIVADAVIA = 'America/Argentina/ComodRivadavia'
AMERICA_ARGENTINA_CORDOBA = 'America/Argentina/Cordoba'
AMERICA_ARGENTINA_JUJUY = 'America/Argentina/Jujuy'
AMERICA_ARGENTINA_LA_RIOJA = 'America/Argentina/La_Rioja'
AMERICA_ARGENTINA_MENDOZA = 'America/Argentina/Mendoza'
AMERICA_ARGENTINA_RIO_GALLEGOS = 'America/Argentina/Rio_Gallegos'
AMERICA_ARGENTINA_SALTA = 'America/Argentina/Salta'
AMERICA_ARGENTINA_SAN_JUAN = 'America/Argentina/San_Juan'
AMERICA_ARGENTINA_SAN_LUIS = 'America/Argentina/San_Luis'
AMERICA_ARGENTINA_TUCUMAN = 'America/Argentina/Tucuman'
AMERICA_ARGENTINA_USHUAIA = 'America/Argentina/Ushuaia'
AMERICA_ARUBA = 'America/Aruba'
AMERICA_ASUNCION = 'America/Asuncion'
AMERICA_ATIKOKAN = 'America/Atikokan'
AMERICA_ATKA = 'America/Atka'
AMERICA_BAHIA = 'America/Bahia'
AMERICA_BAHIA_BANDERAS = 'America/Bahia_Banderas'
AMERICA_BARBADOS = 'America/Barbados'
AMERICA_BELEM = 'America/Belem'
AMERICA_BELIZE = 'America/Belize'
AMERICA_BLANC_SABLON = 'America/Blanc-Sablon'
AMERICA_BOA_VISTA = 'America/Boa_Vista'
AMERICA_BOGOTA = 'America/Bogota'
AMERICA_BOISE = 'America/Boise'
AMERICA_BUENOS_AIRES = 'America/Buenos_Aires'
AMERICA_CAMBRIDGE_BAY = 'America/Cambridge_Bay'
AMERICA_CAMPO_GRANDE = 'America/Campo_Grande'
AMERICA_CANCUN = 'America/Cancun'
AMERICA_CARACAS = 'America/Caracas'
AMERICA_CATAMARCA = 'America/Catamarca'
AMERICA_CAYENNE = 'America/Cayenne'
AMERICA_CAYMAN = 'America/Cayman'
AMERICA_CHICAGO = 'America/Chicago'
AMERICA_CHIHUAHUA = 'America/Chihuahua'
AMERICA_CORAL_HARBOUR = 'America/Coral_Harbour'
AMERICA_CORDOBA = 'America/Cordoba'
AMERICA_COSTA_RICA = 'America/Costa_Rica'
AMERICA_CRESTON = 'America/Creston'
AMERICA_CUIABA = 'America/Cuiaba'
AMERICA_CURACAO = 'America/Curacao'
AMERICA_DANMARKSHAVN = 'America/Danmarkshavn'
AMERICA_DAWSON = 'America/Dawson'
AMERICA_DAWSON_CREEK = 'America/Dawson_Creek'
AMERICA_DENVER = 'America/Denver'
AMERICA_DETROIT = 'America/Detroit'
AMERICA_DOMINICA = 'America/Dominica'
AMERICA_EDMONTON = 'America/Edmonton'
AMERICA_EIRUNEPE = 'America/Eirunepe'
AMERICA_EL_SALVADOR = 'America/El_Salvador'
AMERICA_ENSENADA = 'America/Ensenada'
AMERICA_FORT_NELSON = 'America/Fort_Nelson'
AMERICA_FORT_WAYNE = 'America/Fort_Wayne'
AMERICA_FORTALEZA = 'America/Fortaleza'
AMERICA_GLACE_BAY = 'America/Glace_Bay'
AMERICA_GODTHAB = 'America/Godthab'
AMERICA_GOOSE_BAY = 'America/Goose_Bay'
AMERICA_GRAND_TURK = 'America/Grand_Turk'
AMERICA_GRENADA = 'America/Grenada'
AMERICA_GUADELOUPE = 'America/Guadeloupe'
AMERICA_GUATEMALA = 'America/Guatemala'
AMERICA_GUAYAQUIL = 'America/Guayaquil'
AMERICA_GUYANA = 'America/Guyana'
AMERICA_HALIFAX = 'America/Halifax'
AMERICA_HAVANA = 'America/Havana'
AMERICA_HERMOSILLO = 'America/Hermosillo'
AMERICA_INDIANA_INDIANAPOLIS = 'America/Indiana/Indianapolis'
AMERICA_INDIANA_KNOX = 'America/Indiana/Knox'
AMERICA_INDIANA_MARENGO = 'America/Indiana/Marengo'
AMERICA_INDIANA_PETERSBURG = 'America/Indiana/Petersburg'
AMERICA_INDIANA_TELL_CITY = 'America/Indiana/Tell_City'
AMERICA_INDIANA_VEVAY = 'America/Indiana/Vevay'
AMERICA_INDIANA_VINCENNES = 'America/Indiana/Vincennes'
AMERICA_INDIANA_WINAMAC = 'America/Indiana/Winamac'
AMERICA_INDIANAPOLIS = 'America/Indianapolis'
AMERICA_INUVIK = 'America/Inuvik'
AMERICA_IQALUIT = 'America/Iqaluit'
AMERICA_JAMAICA = 'America/Jamaica'
AMERICA_JUJUY = 'America/Jujuy'
AMERICA_JUNEAU = 'America/Juneau'
AMERICA_KENTUCKY_LOUISVILLE = 'America/Kentucky/Louisville'
AMERICA_KENTUCKY_MONTICELLO = 'America/Kentucky/Monticello'
AMERICA_KNOX_IN = 'America/Knox_IN'
AMERICA_KRALENDIJK = 'America/Kralendijk'
AMERICA_LA_PAZ = 'America/La_Paz'
AMERICA_LIMA = 'America/Lima'
AMERICA_LOS_ANGELES = 'America/Los_Angeles'
AMERICA_LOUISVILLE = 'America/Louisville'
AMERICA_LOWER_PRINCES = 'America/Lower_Princes'
AMERICA_MACEIO = 'America/Maceio'
AMERICA_MANAGUA = 'America/Managua'
AMERICA_MANAUS = 'America/Manaus'
AMERICA_MARIGOT = 'America/Marigot'
AMERICA_MARTINIQUE = 'America/Martinique'
AMERICA_MATAMOROS = 'America/Matamoros'
AMERICA_MAZATLAN = 'America/Mazatlan'
AMERICA_MENDOZA = 'America/Mendoza'
AMERICA_MENOMINEE = 'America/Menominee'
AMERICA_MERIDA = 'America/Merida'
AMERICA_METLAKATLA = 'America/Metlakatla'
AMERICA_MEXICO_CITY = 'America/Mexico_City'
AMERICA_MIQUELON = 'America/Miquelon'
AMERICA_MONCTON = 'America/Moncton'
AMERICA_MONTERREY = 'America/Monterrey'
AMERICA_MONTEVIDEO = 'America/Montevideo'
AMERICA_MONTREAL = 'America/Montreal'
AMERICA_MONTSERRAT = 'America/Montserrat'
AMERICA_NASSAU = 'America/Nassau'
AMERICA_NEW_YORK = 'America/New_York'
AMERICA_NIPIGON = 'America/Nipigon'
AMERICA_NOME = 'America/Nome'
AMERICA_NORONHA = 'America/Noronha'
AMERICA_NORTH_DAKOTA_BEULAH = 'America/North_Dakota/Beulah'
AMERICA_NORTH_DAKOTA_CENTER = 'America/North_Dakota/Center'
AMERICA_NORTH_DAKOTA_NEW_SALEM = 'America/North_Dakota/New_Salem'
AMERICA_OJINAGA = 'America/Ojinaga'
AMERICA_PANAMA = 'America/Panama'
AMERICA_PANGNIRTUNG = 'America/Pangnirtung'
AMERICA_PARAMARIBO = 'America/Paramaribo'
AMERICA_PHOENIX = 'America/Phoenix'
AMERICA_PORT_AU_PRINCE = 'America/Port-au-Prince'
AMERICA_PORT_OF_SPAIN = 'America/Port_of_Spain'
AMERICA_PORTO_ACRE = 'America/Porto_Acre'
AMERICA_PORTO_VELHO = 'America/Porto_Velho'
AMERICA_PUERTO_RICO = 'America/Puerto_Rico'
AMERICA_RAINY_RIVER = 'America/Rainy_River'
AMERICA_RANKIN_INLET = 'America/Rankin_Inlet'
AMERICA_RECIFE = 'America/Recife'
AMERICA_REGINA = 'America/Regina'
AMERICA_RESOLUTE = 'America/Resolute'
AMERICA_RIO_BRANCO = 'America/Rio_Branco'
AMERICA_ROSARIO = 'America/Rosario'
AMERICA_SANTA_ISABEL = 'America/Santa_Isabel'
AMERICA_SANTAREM = 'America/Santarem'
AMERICA_SANTIAGO = 'America/Santiago'
AMERICA_SANTO_DOMINGO = 'America/Santo_Domingo'
AMERICA_SAO_PAULO = 'America/Sao_Paulo'
AMERICA_SCORESBYSUND = 'America/Scoresbysund'
AMERICA_SHIPROCK = 'America/Shiprock'
AMERICA_SITKA = 'America/Sitka'
AMERICA_ST_BARTHELEMY = 'America/St_Barthelemy'
AMERICA_ST_JOHNS = 'America/St_Johns'
AMERICA_ST_KITTS = 'America/St_Kitts'
AMERICA_ST_LUCIA = 'America/St_Lucia'
AMERICA_ST_THOMAS = 'America/St_Thomas'
AMERICA_ST_VINCENT = 'America/St_Vincent'
AMERICA_SWIFT_CURRENT = 'America/Swift_Current'
AMERICA_TEGUCIGALPA = 'America/Tegucigalpa'
AMERICA_THULE = 'America/Thule'
AMERICA_THUNDER_BAY = 'America/Thunder_Bay'
AMERICA_TIJUANA = 'America/Tijuana'
AMERICA_TORONTO = 'America/Toronto'
AMERICA_TORTOLA = 'America/Tortola'
AMERICA_VANCOUVER = 'America/Vancouver'
AMERICA_VIRGIN = 'America/Virgin'
AMERICA_WHITEHORSE = 'America/Whitehorse'
AMERICA_WINNIPEG = 'America/Winnipeg'
AMERICA_YAKUTAT = 'America/Yakutat'
AMERICA_YELLOWKNIFE = 'America/Yellowknife'
ANTARCTICA_CASEY = 'Antarctica/Casey'
ANTARCTICA_DAVIS = 'Antarctica/Davis'
ANTARCTICA_DUMONTDURVILLE = 'Antarctica/DumontDUrville'
ANTARCTICA_MACQUARIE = 'Antarctica/Macquarie'
ANTARCTICA_MAWSON = 'Antarctica/Mawson'
ANTARCTICA_MCMURDO = 'Antarctica/McMurdo'
ANTARCTICA_PALMER = 'Antarctica/Palmer'
ANTARCTICA_ROTHERA = 'Antarctica/Rothera'
ANTARCTICA_SOUTH_POLE = 'Antarctica/South_Pole'
ANTARCTICA_SYOWA = 'Antarctica/Syowa'
ANTARCTICA_TROLL = 'Antarctica/Troll'
ANTARCTICA_VOSTOK = 'Antarctica/Vostok'
ARCTIC_LONGYEARBYEN = 'Arctic/Longyearbyen'
ASIA_ADEN = 'Asia/Aden'
ASIA_ALMATY = 'Asia/Almaty'
ASIA_AMMAN = 'Asia/Amman'
ASIA_ANADYR = 'Asia/Anadyr'
ASIA_AQTAU = 'Asia/Aqtau'
ASIA_AQTOBE = 'Asia/Aqtobe'
ASIA_ASHGABAT = 'Asia/Ashgabat'
ASIA_ASHKHABAD = 'Asia/Ashkhabad'
ASIA_BAGHDAD = 'Asia/Baghdad'
ASIA_BAHRAIN = 'Asia/Bahrain'
ASIA_BAKU = 'Asia/Baku'
ASIA_BANGKOK = 'Asia/Bangkok'
ASIA_BARNAUL = 'Asia/Barnaul'
ASIA_BEIRUT = 'Asia/Beirut'
ASIA_BISHKEK = 'Asia/Bishkek'
ASIA_BRUNEI = 'Asia/Brunei'
ASIA_CALCUTTA = 'Asia/Calcutta'
ASIA_CHITA = 'Asia/Chita'
ASIA_CHOIBALSAN = 'Asia/Choibalsan'
ASIA_CHONGQING = 'Asia/Chongqing'
ASIA_CHUNGKING = 'Asia/Chungking'
ASIA_COLOMBO = 'Asia/Colombo'
ASIA_DACCA = 'Asia/Dacca'
ASIA_DAMASCUS = 'Asia/Damascus'
ASIA_DHAKA = 'Asia/Dhaka'
ASIA_DILI = 'Asia/Dili'
ASIA_DUBAI = 'Asia/Dubai'
ASIA_DUSHANBE = 'Asia/Dushanbe'
ASIA_GAZA = 'Asia/Gaza'
ASIA_HARBIN = 'Asia/Harbin'
ASIA_HEBRON = 'Asia/Hebron'
ASIA_HO_CHI_MINH = 'Asia/Ho_Chi_Minh'
ASIA_HONG_KONG = 'Asia/Hong_Kong'
ASIA_HOVD = 'Asia/Hovd'
ASIA_IRKUTSK = 'Asia/Irkutsk'
ASIA_ISTANBUL = 'Asia/Istanbul'
ASIA_JAKARTA = 'Asia/Jakarta'
ASIA_JAYAPURA = 'Asia/Jayapura'
ASIA_JERUSALEM = 'Asia/Jerusalem'
ASIA_KABUL = 'Asia/Kabul'
ASIA_KAMCHATKA = 'Asia/Kamchatka'
ASIA_KARACHI = 'Asia/Karachi'
ASIA_KASHGAR = 'Asia/Kashgar'
ASIA_KATHMANDU = 'Asia/Kathmandu'
ASIA_KATMANDU = 'Asia/Katmandu'
ASIA_KHANDYGA = 'Asia/Khandyga'
ASIA_KOLKATA = 'Asia/Kolkata'
ASIA_KRASNOYARSK = 'Asia/Krasnoyarsk'
ASIA_KUALA_LUMPUR = 'Asia/Kuala_Lumpur'
ASIA_KUCHING = 'Asia/Kuching'
ASIA_KUWAIT = 'Asia/Kuwait'
ASIA_MACAO = 'Asia/Macao'
ASIA_MACAU = 'Asia/Macau'
ASIA_MAGADAN = 'Asia/Magadan'
ASIA_MAKASSAR = 'Asia/Makassar'
ASIA_MANILA = 'Asia/Manila'
ASIA_MUSCAT = 'Asia/Muscat'
ASIA_NICOSIA = 'Asia/Nicosia'
ASIA_NOVOKUZNETSK = 'Asia/Novokuznetsk'
ASIA_NOVOSIBIRSK = 'Asia/Novosibirsk'
ASIA_OMSK = 'Asia/Omsk'
ASIA_ORAL = 'Asia/Oral'
ASIA_PHNOM_PENH = 'Asia/Phnom_Penh'
ASIA_PONTIANAK = 'Asia/Pontianak'
ASIA_PYONGYANG = 'Asia/Pyongyang'
ASIA_QATAR = 'Asia/Qatar'
ASIA_QYZYLORDA = 'Asia/Qyzylorda'
ASIA_RANGOON = 'Asia/Rangoon'
ASIA_RIYADH = 'Asia/Riyadh'
ASIA_SAIGON = 'Asia/Saigon'
ASIA_SAKHALIN = 'Asia/Sakhalin'
ASIA_SAMARKAND = 'Asia/Samarkand'
ASIA_SEOUL = 'Asia/Seoul'
ASIA_SHANGHAI = 'Asia/Shanghai'
ASIA_SINGAPORE = 'Asia/Singapore'
ASIA_SREDNEKOLYMSK = 'Asia/Srednekolymsk'
ASIA_TAIPEI = 'Asia/Taipei'
ASIA_TASHKENT = 'Asia/Tashkent'
ASIA_TBILISI = 'Asia/Tbilisi'
ASIA_TEHRAN = 'Asia/Tehran'
ASIA_TEL_AVIV = 'Asia/Tel_Aviv'
ASIA_THIMBU = 'Asia/Thimbu'
ASIA_THIMPHU = 'Asia/Thimphu'
ASIA_TOKYO = 'Asia/Tokyo'
ASIA_TOMSK = 'Asia/Tomsk'
ASIA_UJUNG_PANDANG = 'Asia/Ujung_Pandang'
ASIA_ULAANBAATAR = 'Asia/Ulaanbaatar'
ASIA_ULAN_BATOR = 'Asia/Ulan_Bator'
ASIA_URUMQI = 'Asia/Urumqi'
ASIA_UST_NERA = 'Asia/Ust-Nera'
ASIA_VIENTIANE = 'Asia/Vientiane'
ASIA_VLADIVOSTOK = 'Asia/Vladivostok'
ASIA_YAKUTSK = 'Asia/Yakutsk'
ASIA_YEKATERINBURG = 'Asia/Yekaterinburg'
ASIA_YEREVAN = 'Asia/Yerevan'
ATLANTIC_AZORES = 'Atlantic/Azores'
ATLANTIC_BERMUDA = 'Atlantic/Bermuda'
ATLANTIC_CANARY = 'Atlantic/Canary'
ATLANTIC_CAPE_VERDE = 'Atlantic/Cape_Verde'
ATLANTIC_FAEROE = 'Atlantic/Faeroe'
ATLANTIC_FAROE = 'Atlantic/Faroe'
ATLANTIC_JAN_MAYEN = 'Atlantic/Jan_Mayen'
ATLANTIC_MADEIRA = 'Atlantic/Madeira'
ATLANTIC_REYKJAVIK = 'Atlantic/Reykjavik'
ATLANTIC_SOUTH_GEORGIA = 'Atlantic/South_Georgia'
ATLANTIC_ST_HELENA = 'Atlantic/St_Helena'
ATLANTIC_STANLEY = 'Atlantic/Stanley'
AUSTRALIA_ACT = 'Australia/ACT'
AUSTRALIA_ADELAIDE = 'Australia/Adelaide'
AUSTRALIA_BRISBANE = 'Australia/Brisbane'
AUSTRALIA_BROKEN_HILL = 'Australia/Broken_Hill'
AUSTRALIA_CANBERRA = 'Australia/Canberra'
AUSTRALIA_CURRIE = 'Australia/Currie'
AUSTRALIA_DARWIN = 'Australia/Darwin'
AUSTRALIA_EUCLA = 'Australia/Eucla'
AUSTRALIA_HOBART = 'Australia/Hobart'
AUSTRALIA_LHI = 'Australia/LHI'
AUSTRALIA_LINDEMAN = 'Australia/Lindeman'
AUSTRALIA_LORD_HOWE = 'Australia/Lord_Howe'
AUSTRALIA_MELBOURNE = 'Australia/Melbourne'
AUSTRALIA_NSW = 'Australia/NSW'
AUSTRALIA_NORTH = 'Australia/North'
AUSTRALIA_PERTH = 'Australia/Perth'
AUSTRALIA_QUEENSLAND = 'Australia/Queensland'
AUSTRALIA_SOUTH = 'Australia/South'
AUSTRALIA_SYDNEY = 'Australia/Sydney'
AUSTRALIA_TASMANIA = 'Australia/Tasmania'
AUSTRALIA_VICTORIA = 'Australia/Victoria'
AUSTRALIA_WEST = 'Australia/West'
AUSTRALIA_YANCOWINNA = 'Australia/Yancowinna'
BRAZIL_ACRE = 'Brazil/Acre'
BRAZIL_DENORONHA = 'Brazil/DeNoronha'
BRAZIL_EAST = 'Brazil/East'
BRAZIL_WEST = 'Brazil/West'
CET = 'CET'
CST6CDT = 'CST6CDT'
CANADA_ATLANTIC = 'Canada/Atlantic'
CANADA_CENTRAL = 'Canada/Central'
CANADA_EAST_SASKATCHEWAN = 'Canada/East-Saskatchewan'
CANADA_EASTERN = 'Canada/Eastern'
CANADA_MOUNTAIN = 'Canada/Mountain'
CANADA_NEWFOUNDLAND = 'Canada/Newfoundland'
CANADA_PACIFIC = 'Canada/Pacific'
CANADA_SASKATCHEWAN = 'Canada/Saskatchewan'
CANADA_YUKON = 'Canada/Yukon'
CHILE_CONTINENTAL = 'Chile/Continental'
CHILE_EASTERISLAND = 'Chile/EasterIsland'
CUBA = 'Cuba'
EET = 'EET'
EST = 'EST'
EST5EDT = 'EST5EDT'
EGYPT = 'Egypt'
EIRE = 'Eire'
ETC_GMT = 'Etc/GMT'
ETC_GMT_PLUS_0 = 'Etc/GMT+0'
ETC_GMT_PLUS_1 = 'Etc/GMT+1'
ETC_GMT_PLUS_10 = 'Etc/GMT+10'
ETC_GMT_PLUS_11 = 'Etc/GMT+11'
ETC_GMT_PLUS_12 = 'Etc/GMT+12'
ETC_GMT_PLUS_2 = 'Etc/GMT+2'
ETC_GMT_PLUS_3 = 'Etc/GMT+3'
ETC_GMT_PLUS_4 = 'Etc/GMT+4'
ETC_GMT_PLUS_5 = 'Etc/GMT+5'
ETC_GMT_PLUS_6 = 'Etc/GMT+6'
ETC_GMT_PLUS_7 = 'Etc/GMT+7'
ETC_GMT_PLUS_8 = 'Etc/GMT+8'
ETC_GMT_PLUS_9 = 'Etc/GMT+9'
ETC_GMT_MINUS_0 = 'Etc/GMT-0'
ETC_GMT_MINUS_1 = 'Etc/GMT-1'
ETC_GMT_MINUS_10 = 'Etc/GMT-10'
ETC_GMT_MINUS_11 = 'Etc/GMT-11'
ETC_GMT_MINUS_12 = 'Etc/GMT-12'
ETC_GMT_MINUS_13 = 'Etc/GMT-13'
ETC_GMT_MINUS_14 = 'Etc/GMT-14'
ETC_GMT_MINUS_2 = 'Etc/GMT-2'
ETC_GMT_MINUS_3 = 'Etc/GMT-3'
ETC_GMT_MINUS_4 = 'Etc/GMT-4'
ETC_GMT_MINUS_5 = 'Etc/GMT-5'
ETC_GMT_MINUS_6 = 'Etc/GMT-6'
ETC_GMT_MINUS_7 = 'Etc/GMT-7'
ETC_GMT_MINUS_8 = 'Etc/GMT-8'
ETC_GMT_MINUS_9 = 'Etc/GMT-9'
ETC_GMT0 = 'Etc/GMT0'
ETC_GREENWICH = 'Etc/Greenwich'
ETC_UCT = 'Etc/UCT'
ETC_UTC = 'Etc/UTC'
ETC_UNIVERSAL = 'Etc/Universal'
ETC_ZULU = 'Etc/Zulu'
EUROPE_AMSTERDAM = 'Europe/Amsterdam'
EUROPE_ANDORRA = 'Europe/Andorra'
<gh_stars>10-100
#!/usr/bin/env python
#
# $Id: kfssetup.py 36 2007-11-12 02:43:36Z sriramsrao $
#
# Copyright 2007 Kosmix Corp.
#
# This file is part of Kosmos File System (KFS).
#
# Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Script to setup KFS servers on a set of nodes
# This scripts reads a machines.cfg file that describes the meta/chunk
# servers configurations and installs the binaries/scripts and creates
# the necessary directory hierarchy.
#
import os,sys,os.path,getopt
import socket,threading,popen2
import md5
from ConfigParser import ConfigParser
# Use the python config parser to parse out machines setup
# Input file format for machines.cfg
# [metaserver]
# type: metaserver
# clusterkey: <cluster name>
# node: <value>
# rundir: <dir>
# baseport: <port>
#
# [chunkserver1]
# node: <value>
# rundir: <dir>
# baseport: <port>
# space: <space exported by the server> (n m/g)
# {chunkdir: <dir>}
# [chunkserver2]
# ...
# [chunkserverN]
# ...
#
# where, space is expressed in units of MB/GB or bytes.
#
# Install on each machine with the following directory hierarchy:
# rundir/
# bin/ -- binaries, config file, kfscp/kfslog/kfschunk dirs
# logs/ -- log output from running the binary
# scripts/ -- all the helper scripts
# If a path for storing the chunks isn't specified, then it defaults to bin
#
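# A minimal illustrative machines.cfg (hostnames, ports, paths and sizes below are
# made-up placeholders, not taken from any real deployment):
#
#   [metaserver]
#   type: metaserver
#   clusterkey: examplecluster
#   node: meta-host
#   rundir: /home/kfs/meta
#   baseport: 20000
#
#   [chunkserver1]
#   node: chunk-host1
#   rundir: /home/kfs/chunk
#   baseport: 30000
#   space: 30 G
#   chunkdir: /mnt/data1/kfschunk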
unitsScale = {'g' : 1 << 30, 'm' : 1 << 20, 'k' : 1 << 10, 'b' : 1}
maxConcurrent = 25
chunkserversOnly = 0
tarProg = 'gtar'
md5String = ""
def which(program):
import os
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def setupMeta(section, config, outputFn, packageFn):
""" Setup the metaserver binaries/config files on a node. """
global chunkserversOnly
if chunkserversOnly > 0:
print "Chunkservers only is set; not doing meta"
return
key = config.get(section, 'clusterkey')
baseport = config.getint(section, 'baseport')
rundir = config.get(section, 'rundir')
fh = open(outputFn, 'w')
print >> fh, "metaServer.clientPort = %d" % baseport
print >> fh, "metaServer.chunkServerPort = %d" % (baseport + 100)
print >> fh, "metaServer.clusterKey = %s" % (key)
print >> fh, "metaServer.cpDir = %s/bin/kfscp" % rundir
print >> fh, "metaServer.logDir = %s/bin/kfslog" % rundir
if config.has_option(section, 'loglevel'):
print >> fh, "metaServer.loglevel = %s" % config.get(section, 'loglevel')
if config.has_option(section, 'worm'):
print >> fh, "metaServer.wormMode = 1"
if config.has_option(section, 'numservers'):
print >> fh, "metaServer.minChunkservers = %s" % config.get(section, 'numservers')
if config.has_option(section, 'md5sumfilename'):
print >> fh, "metaServer.md5sumFilename = %s" % config.get(section, 'md5sumfilename')
fh.close()
if config.has_option(section, 'webuiConfFile'):
confFile = config.get(section, 'webuiConfFile')
fh = open(confFile, 'w')
print >> fh, "[webserver]"
print >> fh, "webServer.metaserverPort = %d" % baseport
print >> fh, "webServer.port = %d" % (baseport + 50)
print >> fh, "webServer.allMachinesFn = %s/webui/all-machines.txt" % rundir
print >> fh, "webServer.docRoot = %s/webui/files" % rundir
fh.close()
cmd = "%s -zcf %s bin/logcompactor bin/metaserver %s lib webui scripts/*" % (tarProg, packageFn, outputFn)
os.system(cmd)
installArgs = "-r %s -d %s -m" % (tarProg, rundir)
return installArgs
def setupChunkConfig(section, config, outputFn):
""" Setup the chunkserver binaries/config files on a node. """
metaNode = config.get('metaserver', 'node')
metaToChunkPort = config.getint('metaserver', 'baseport') + 100
hostname = config.get(section, 'node')
# for rack-aware replication, we assume that nodes on different racks are on different subnets
s = socket.gethostbyname(hostname)
ipoctets = s.split('.')
rackId = int(ipoctets[2])
#
fh = open (outputFn, 'w')
print >> fh, "chunkServer.metaServer.hostname = %s" % metaNode
print >> fh, "chunkServer.metaServer.port = %d" % metaToChunkPort
print >> fh, "chunkServer.clientPort = %d" % config.getint(section, 'baseport')
print >> fh, "chunkServer.clusterKey = %s" % config.get('metaserver', 'clusterkey')
print >> fh, "chunkServer.rackId = %d" % (rackId)
print >> fh, "chunkServer.md5sum = %s" % (md5String)
if config.has_option(section, 'sorterport'):
print >> fh, "chunkServer.sorter.port = %s" % config.get(section, 'sorterport')
if config.has_option(section, 'loglevel'):
print >> fh, "chunkServer.loglevel = %s" % config.get(section, 'loglevel')
space = config.get(section, 'space')
s = space.split()
if (len(s) >= 2):
units = s[1].lower()
else:
units = 'b'
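# Illustrative space values (hypothetical): '30 G' -> 30 * (1 << 30) bytes,
# '512 m' -> 512 * (1 << 20) bytes, and a bare '1048576' is taken as bytes.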
value = int(s[0]) * unitsScale[ units[0] ]
print >> fh, "chunkServer.totalSpace = %d" % value
rundir = config.get(section, 'rundir')
if config.has_option(section, 'chunkdir'):
chunkDir = config.get(section, 'chunkdir')
else:
chunkDir = "%s/bin/kfschunk" % (rundir)
print >> fh, "chunkServer.chunkDir = %s" % (chunkDir)
print >> fh, "chunkServer.logDir = %s/bin/kfslog" % (rundir)
if config.has_option(section, 'loglevel'):
print >> fh, "chunkServer.loglevel = %s" % config.get(section, 'loglevel')
fh.close()
def setupChunk(section, config, outputFn, packageFn):
""" Setup the chunkserver binaries/config files on a node. """
setupChunkConfig(section, config, outputFn)
cmd = "%s -zcf %s bin/chunkscrubber bin/chunkserver bin/sailfish/* %s lib scripts/*" % (tarProg, packageFn, outputFn)
os.system(cmd)
rundir = config.get(section, 'rundir')
if config.has_option(section, 'chunkdir'):
chunkDir = config.get(section, 'chunkdir')
else:
chunkDir = "%s/bin/kfschunk" % (rundir)
installArgs = "-r %s -d %s -c \"%s\" " % (tarProg, rundir, chunkDir)
return installArgs
def usage():
""" Print out the usage for this program. """
print "%s [-f, --file <server.cfg>] [-m , --machines <chunkservers.txt>] [-r, --tar <tar|gtar>] \
[-w, --webui <webui dir>] [ [-b, --bin <dir with binaries>] {-u, --upgrade} | [-U, --uninstall] ]\n" % sys.argv[0]
return
def copyDir(srcDir, dstDir):
""" Copy files from src to dest"""
cmd = "cp -r %s %s" % (srcDir, dstDir)
os.system(cmd)
def computeMD5(datadir, digest):
"""Update the MD5 digest using the MD5 of all the files in a directory"""
files = os.listdir(datadir)
for f in sorted(files):
path = os.path.join(datadir, f)
if os.path.isdir(path):
continue
fh = open(path, 'r')
while 1:
buf = fh.read(4096)
if buf == "":
break
digest.update(buf)
def getFiles(buildDir, webuidir):
""" Copy files from buildDir/bin, buildDir/lib and . to ./bin, ./lib, and ./scripts
respectively."""
global md5String
cmd = "mkdir -p ./scripts; cp ./* scripts; chmod u+w scripts/*"
os.system(cmd)
s = "%s/bin" % buildDir
if (os.path.exists(s + "/amd64")):
s += "/amd64"
copyDir(s, './bin')
digest = md5.new()
computeMD5('./bin', digest)
s = "%s/lib" % buildDir
if (os.path.exists(s + "/amd64")):
s += "/amd64"
copyDir(s, './lib')
computeMD5('./lib', digest)
md5String = digest.hexdigest()
copyDir(webuidir, './webui')
def cleanup(fn):
""" Cleanout the dirs we created. """
cmd = "rm -rf ./scripts ./bin ./lib ./webui %s " % fn
os.system(cmd)
class InstallWorker(threading.Thread):
"""InstallWorker thread that runs a command on remote node"""
def __init__(self, sec, conf, tmpdir, i, m):
threading.Thread.__init__(self)
self.section = sec
self.config = conf
self.tmpdir = tmpdir
self.id = i
self.mode = m
self.doBuildPkg = 1
def singlePackageForAll(self, packageFn, installArgs):
self.doBuildPkg = 0
self.packageFn = packageFn
self.installArgs = installArgs
def buildPackage(self):
if (self.section == 'metaserver'):
self.installArgs = setupMeta(self.section, self.config, self.configOutputFn, self.packageFn)
else:
self.installArgs = setupChunk(self.section, self.config, self.configOutputFn, self.packageFn)
def doInstall(self):
fn = os.path.basename(self.packageFn)
if (self.section == 'metaserver'):
if chunkserversOnly > 0:
return
c = "scp -pr -o StrictHostKeyChecking=no -q %s kfsinstall.sh %s:/tmp/; ssh -o StrictHostKeyChecking=no %s 'mv /tmp/%s /tmp/kfspkg.tgz; sh /tmp/kfsinstall.sh %s %s ' " % \
(self.packageFn, self.dest, self.dest, fn, self.mode, self.installArgs)
else:
# chunkserver
configFn = os.path.basename(self.configOutputFn)
c = "scp -pr -o StrictHostKeyChecking=no -q %s kfsinstall.sh %s %s:/tmp/; ssh -o StrictHostKeyChecking=no %s 'mv /tmp/%s /tmp/kfspkg.tgz; mv /tmp/%s /tmp/ChunkServer.prp; sh /tmp/kfsinstall.sh %s %s ' " % \
(self.packageFn, self.configOutputFn, self.dest, self.dest, fn, configFn, self.mode, self.installArgs)
p = popen2.Popen3(c, True)
for out in p.fromchild:
if len(out) > 1:
print '[%s]: %s' % (self.dest, out[:-1])
def cleanup(self):
if self.doBuildPkg > 0:
# if we built the package, nuke it
c = "rm -f %s %s" % (self.configOutputFn, self.packageFn)
else:
c = "rm -f %s" % (self.configOutputFn)
os.system(c)
c = "ssh -o StrictHostKeyChecking=no %s 'rm -f /tmp/install.sh /tmp/kfspkg.tgz' " % self.dest
popen2.Popen3(c, True)
def run(self):
self.configOutputFn = "%s/fn.%d" % (self.tmpdir, self.id)
if self.doBuildPkg > 0:
self.packageFn = "%s/kfspkg.%d.tgz" % (self.tmpdir, self.id)
self.buildPackage()
else:
setupChunkConfig(self.section, self.config, self.configOutputFn)
self.dest = config.get(self.section, 'node')
self.doInstall()
self.cleanup()
def doInstall(config, builddir, tmpdir, webuidir, upgrade, serialMode):
if not config.has_section('metaserver'):
raise config.NoSectionError, "No metaserver section"
if not os.path.exists(builddir):
print "%s : directory doesn't exist\n" % builddir
sys.exit(-1)
getFiles(builddir, webuidir)
# convert the value to a string:
setattr(f, x.name, str(arg[x.name]))
elif isinstance(x.type(), ZwiBase):
# we expect that f.<value> here is a suitable dict
f0 = getattr(f, x.name)
if f0 is not None and x.name in arg:
debug(2, f'dict: {f0=} {arg[x.name]=}')
f0.traverse(ZwiBase.from_dict, arg[x.name])
else:
raise SystemExit('oops')
pass
else:
raise SystemExit(f'Unexpected type: {x.type=}')
pass
return arg
@staticmethod
def to_dict(f, x, arg):
"""Assing values to a supplied dict().
Does not construct the dict or add any missing fields.
"""
if x.name in arg:
# print(f'{x.name=} {x.type=} {arg[x.name]=}')
if x.type in (int, bool):
arg[x.name] = getattr(f, x.name)
elif x.type is str:
arg[x.name] = getattr(f, x.name)
elif isinstance(x.type(), ZwiBase):
# we expect that self.<value> here is a suitable dict
f0 = getattr(f, x.name)
if f0 is not None:
debug(2, f'dict: {f0=} {arg[x.name]=}')
f0.traverse(f.to_dict, arg[x.name])
pass
pass
else:
raise SystemExit(f'Unexpected type: {x.type=}')
pass
return arg
@staticmethod
def from_seq(f, x, arg):
"""Assign values from a sequence.
We perform a depth-first traversal.
"""
# print(f'{x.name=} {x.type=}')
if x.type in (int, bool):
setattr(f, x.name, arg[0][arg[1]])
arg[1] += 1
elif x.type is str:
setattr(f, x.name, arg[0][arg[1]])
arg[1] += 1
elif isinstance(x.type(), ZwiBase):
# decide what we need the recursion to do:
f0 = getattr(f, x.name)
debug(2, f'from_seq: {x.name=} {f0}')
if f0 is not None:
return f0.traverse(f0.from_seq, arg)
pass
debug(0, f'from_seq: {x.name=} {f0}')
raise SystemExit('logic error')
else:
raise SystemExit(f'Unexpected type: {x.type=}')
return arg
pass
@dataclass
class ZwiFollowers(ZwiBase):
def __eq__(self, other):
rv = self.difference(other, ignore=['userAgent', 'addDate', 'delDate'])
if isinstance(rv, bool):
return rv
return (len(rv) == 0)
def __ne__(self, other):
return not self.__eq__(other)
followerId: int = 0
followeeId: int = 0
status: str = ''
isFolloweeFavoriteOfFollower: bool = False
@dataclass
class FollowerProfile(ZwiBase):
publicId: str = ''
firstName: str = ''
lastName: str = ''
male: bool = True
imageSrc: str = ''
imageSrcLarge: str = ''
playerType: str = ''
countryAlpha3: str = ''
countryCode: int = 0
useMetric: bool = True
riding: bool = False
@dataclass
class Privacy(ZwiBase):
approvalRequired: bool = False
displayWeight: bool = False
minor: bool = False
privateMessaging: bool = False
defaultFitnessDataPrivacy: bool = False
suppressFollowerNotification: bool = False
displayAge: bool = True
defaultActivityPrivacy: str = ''
pass
privacy: Privacy = Privacy()
@dataclass
class SocialFacts(ZwiBase):
followersCount: int = 0
followeesCount: int = 0
followeesInCommonWithLoggedInPlayer: int = 0
followerStatusOfLoggedInPlayer: str = ''
followeeStatusOfLoggedInPlayer: str = ''
isFavoriteOfLoggedInPlayer: bool = True
pass
socialFacts: SocialFacts = SocialFacts()
worldId: int = 0
enrolledZwiftAcademy: bool = False
playerTypeId: int = 0
playerSubTypeId: int = 0
currentActivityId: int = 0
likelyInGame: bool = False
pass
profile: FollowerProfile = FollowerProfile()
addDate: str = f'''{datetime.now().isoformat(timespec='minutes')}'''
delDate: str = 'not yet'
@classmethod
def wers(cls, data):
"""Init a followers-type entry."""
if isinstance(data, dict):
# assumed to be per the Zwift format.
assert 'followerProfile' in data
data['profile'] = data['followerProfile']
e = ZwiFollowers()
e.traverse(ZwiBase.from_dict, data)
return e
if isinstance(data, tuple) or isinstance(data, list):
# assumed to be the internal format
e = ZwiFollowers()
e.traverse(ZwiBase.from_seq, [data, 0])
return e
raise Exception(f'funny type ({type(data)=}) of {data!r}')
@classmethod
def wees(cls, data):
"""Init a followees-type entry."""
if isinstance(data, dict):
# assumed to be per the Zwift format.
assert 'followeeProfile' in data
data['profile'] = data['followeeProfile']
e = ZwiFollowers()
e.traverse(ZwiBase.from_dict, data)
return e
if isinstance(data, tuple) or isinstance(data, list):
# assumed to be the internal format
e = ZwiFollowers()
e.traverse(ZwiBase.from_seq, [data, 0])
return e
raise Exception(f'funny type of {data!r}')
@classmethod
def column_names(cls, pk='', create=False):
"""generate the columnt list for a DB create."""
tmap = {int: 'INT', bool: 'INT', str: 'TEXT'}
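# Illustrative output (abbreviated): column_names(pk='followerId', create=True) starts
# with ['followerId INT PRIMARY KEY', 'followeeId INT', 'status TEXT', ...], with nested
# ZwiBase fields (profile, privacy, socialFacts) flattened into the same list.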
def fun(f, x, arg):
"""Function to enumerate the column names."""
if x.type in (int, bool, str):
if not create: # INSERT/SELECT usage
arg.append(f'{x.name}') # .. no type
elif x.name == pk: # CREATE and PRIMARY
arg.append(f'{x.name} {tmap[x.type]} PRIMARY KEY')
else: # CREATE, no PRIMARY
arg.append(f'{x.name} {tmap[x.type]}')
elif isinstance(x.type(), ZwiBase):
f0 = getattr(f, x.name)
if f0 is not None:
f0.traverse(fun, arg)
pass
pass
pass
return cls().traverse(fun, list())
def column_values(self):
"""generate the values list for a DB insert."""
def fun(f, x, arg):
"""Function to enumerate the column values."""
val = getattr(f, x.name)
if x.type in (int, bool):
if val is None:
val = 0
else:
val = int(val)
pass
arg.append(f'{val}')
elif x.type is str:
# We need to replace all single ' with double ''
val = str(val).replace("'", "''")
arg.append(f"'{val}'")
elif isinstance(x.type(), ZwiBase):
f0 = getattr(f, x.name)
if f0 is not None:
f0.traverse(fun, arg)
pass
pass
pass
return self.traverse(fun, list())
pass
@dataclass
class ZwiProfile(ZwiBase):
def __eq__(self, other):
rv = self.difference(other, ignore=['addDate', 'delDate'])
if isinstance(rv, bool):
return rv
return (len(rv) == 0)
def __ne__(self, other):
return not self.__eq__(other)
id: int = 0
publicId: str = ''
firstName: str = ''
lastName: str = ''
male: bool = True
imageSrc: str = ''
imageSrcLarge: str = ''
playerType: str = ''
countryAlpha3: str = ''
countryCode: int = 0
useMetric: bool = True
riding: bool = False
@dataclass
class Privacy(ZwiBase):
approvalRequired: bool = False
displayWeight: bool = False
minor: bool = False
privateMessaging: bool = False
defaultFitnessDataPrivacy: bool = False
suppressFollowerNotification: bool = False
displayAge: bool = True
defaultActivityPrivacy: str = ''
pass
privacy: Privacy = Privacy()
@dataclass
class SocialFacts(ZwiBase):
followersCount: int = 0
followeesCount: int = 0
followeesInCommonWithLoggedInPlayer: int = 0
followerStatusOfLoggedInPlayer: str = ''
followeeStatusOfLoggedInPlayer: str = ''
isFavoriteOfLoggedInPlayer: bool = True
pass
socialFacts: SocialFacts = SocialFacts()
worldId: int = 0
enrolledZwiftAcademy: bool = False
playerTypeId: int = 0
playerSubTypeId: int = 0
currentActivityId: int = 0
likelyInGame: bool = False
address: str = None
age: int = 0
bodyType: int = 0
connectedToStrava: bool = False
connectedToTrainingPeaks: bool = False
connectedToTodaysPlan: bool = False
connectedToUnderArmour: bool = False
connectedToWithings: bool = False
connectedToFitbit: bool = False
connectedToGarmin: bool = False
connectedToRuntastic: bool = False
connectedToZwiftPower: bool = False
stravaPremium: bool = False
bt: str = None
dob: str = None
emailAddress: str = None
height: int = 0
location: str = ''
preferredLanguage: str = ''
mixpanelDistinctId: str = ''
profileChanges: bool = False
weight: int = 0
b: bool = False
createdOn: str = ''
source: str = ''
origin: str = ''
launchedGameClient: str = ''
ftp: int = 0
userAgent: str = ''
runTime1miInSeconds: int = 0
runTime5kmInSeconds: int = 0
runTime10kmInSeconds: int = 0
runTimeHalfMarathonInSeconds: int = 0
runTimeFullMarathonInSeconds: int = 0
cyclingOrganization: str = None
licenseNumber: str = None
bigCommerceId: str = ''
marketingConsent: str = None
achievementLevel: int = 0
totalDistance: int = 0
totalDistanceClimbed: int = 0
totalTimeInMinutes: int = 0
totalInKomJersey: int = 0
totalInSprintersJersey: int = 0
totalInOrangeJersey: int = 0
totalWattHours: int = 0
totalExperiencePoints: int = 0
totalGold: int = 0
runAchievementLevel: int = 0
totalRunDistance: int = 0
totalRunTimeInMinutes: int = 0
totalRunExperiencePoints: int = 0
totalRunCalories: int = 0
powerSourceType: str = ''
powerSourceModel: str = ''
virtualBikeModel: str = ''
numberOfFolloweesInCommon: int = 0
affiliate: str = None
avantlinkId: str = None
fundraiserId: str = None
addDate: str = f'''{datetime.now().isoformat(timespec='minutes')}'''
@classmethod
def from_zwift(cls, data):
"""Init from Zwift API response."""
if isinstance(data, dict):
# assumed to be per the Zwift format.
e = ZwiProfile()
e.traverse(ZwiBase.from_dict, data)
return e
raise Exception(f'funny type of {data!r}')
@classmethod
def from_seq(cls, data):
"""Init from sequence."""
if isinstance(data, tuple) or isinstance(data, list):
# assumed to be the internal format
e = ZwiProfile()
e.traverse(ZwiBase.from_seq, [data, 0])
return e
raise Exception(f'funny type of {data!r}')
@classmethod
def column_names(cls, pk='', create=False):
"""generate the columnt list for a DB create."""
tmap = {int: 'INT', bool: 'INT', str: 'TEXT'}
def fun(f, x, arg):
"""Function to enumerate the column names."""
if x.type in (int, bool, str):
if not create: # INSERT/SELECT usage
arg.append(f'{x.name}') # .. no type
elif x.name == pk: # CREATE and PRIMARY
arg.append(f'{x.name} {tmap[x.type]} PRIMARY KEY')
else: # CREATE, no PRIMARY
arg.append(f'{x.name} {tmap[x.type]}')
elif isinstance(x.type(), ZwiBase):
f0 = getattr(f, x.name)
if f0 is not None:
f0.traverse(fun, arg)
pass
pass
pass
return cls().traverse(fun, list())
def column_values(self):
"""generate the values list for a DB insert."""
variant_ids = variant_ids[mask]
tss_distance = tss_distance[mask]
if interaction_df is not None:
genotypes_t, mask_t = filter_maf_interaction(genotypes_t, interaction_mask_t=interaction_mask_t,
maf_threshold_interaction=maf_threshold_interaction)
mask = mask_t.cpu().numpy()
variant_ids = variant_ids[mask]
tss_distance = tss_distance[mask]
n = len(variant_ids)
if genotypes_t.shape[0] > 0:
# process first phenotype in group
phenotype_id = phenotype_ids[0]
phenotype_t = torch.tensor(phenotypes[0], dtype=torch.float).to(device)
if interaction_df is None:
res = calculate_cis_nominal(genotypes_t, phenotype_t, residualizer=residualizer)
tstat, slope, slope_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
else:
res = calculate_interaction_nominal(genotypes_t, phenotype_t.unsqueeze(0), interaction_t,
residualizer=residualizer, return_sparse=False,
variant_ids=variant_ids)
tstat, b, b_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
px = [phenotype_id]*n
# iterate over remaining phenotypes in group
for phenotype, phenotype_id in zip(phenotypes[1:], phenotype_ids[1:]):
phenotype_t = torch.tensor(phenotype, dtype=torch.float).to(device)
if interaction_df is None:
res = calculate_cis_nominal(genotypes_t, phenotype_t, residualizer=residualizer)
tstat0, slope0, slope_se0, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
else:
res = calculate_interaction_nominal(genotypes_t, phenotype_t.unsqueeze(0), interaction_t,
residualizer=residualizer, return_sparse=False,
variant_ids=variant_ids)
tstat0, b0, b_se0, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
# find associations that are stronger for current phenotype
if interaction_df is None:
ix = np.where(np.abs(tstat0) > np.abs(tstat))[0]
else:
ix = np.where(np.abs(tstat0[:,2]) > np.abs(tstat[:,2]))[0]
# update relevant positions
for j in ix:
px[j] = phenotype_id
if interaction_df is None:
tstat[ix] = tstat0[ix]
slope[ix] = slope0[ix]
slope_se[ix] = slope_se0[ix]
else:
tstat[ix] = tstat0[ix]
b[ix] = b0[ix]
b_se[ix] = b_se0[ix]
chr_res['phenotype_id'].extend(px)
chr_res['variant_id'].extend(variant_ids)
chr_res['tss_distance'][start:start+n] = tss_distance
chr_res['af'][start:start+n] = af
chr_res['ma_samples'][start:start+n] = ma_samples
chr_res['ma_count'][start:start+n] = ma_count
if interaction_df is None:
chr_res['pval_nominal'][start:start+n] = tstat
chr_res['slope'][start:start+n] = slope
chr_res['slope_se'][start:start+n] = slope_se
else:
chr_res['pval_g'][start:start+n] = tstat[:,0]
chr_res['b_g'][start:start+n] = b[:,0]
chr_res['b_g_se'][start:start+n] = b_se[:,0]
chr_res['pval_i'][start:start+n] = tstat[:,1:1+ni]
chr_res['b_i'][start:start+n] = b[:,1:1+ni]
chr_res['b_i_se'][start:start+n] = b_se[:,1:1+ni]
chr_res['pval_gi'][start:start+n] = tstat[:,1+ni:]
chr_res['b_gi'][start:start+n] = b[:,1+ni:]
chr_res['b_gi_se'][start:start+n] = b_se[:,1+ni:]
# top association for the group
if interaction_df is not None:
ix = np.nanargmax(np.abs(tstat[:,1+ni:]).max(1)) # top association among all interactions tested
# index order: 0, 1, 1+ni, 2, 2+ni, 3, 3+ni, ...
order = [0] + [i if j % 2 == 0 else i+ni for i in range(1,ni+1) for j in range(2)]
top_s = [chr_res['phenotype_id'][start:start+n][ix], variant_ids[ix],
tss_distance[ix], af[ix], ma_samples[ix], ma_count[ix]]
for i in order:
top_s += [tstat[ix,i], b[ix,i], b_se[ix,i]]
top_s = pd.Series(top_s, index=col_order)
top_s['num_phenotypes'] = len(phenotype_ids)
if run_eigenmt: # compute eigenMT correction
top_s['tests_emt'] = eigenmt.compute_tests(genotypes_t, var_thresh=0.99, variant_window=200)
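# tests_emt is eigenMT's estimate of the effective number of independent tests in the
# cis-window; it is used further down to Bonferroni-scale the top interaction p-value
# into pval_emt (capped at 1).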
best_assoc.append(top_s)
start += n # update pointer
logger.write(f' time elapsed: {(time.time()-start_time)/60:.2f} min')
# convert to dataframe, compute p-values and write current chromosome
if start < len(chr_res['af']):
for x in chr_res:
chr_res[x] = chr_res[x][:start]
if write_stats:
if interaction_df is not None:
cols = ['pval_i', 'b_i', 'b_i_se', 'pval_gi', 'b_gi', 'b_gi_se']
if ni == 1: # squeeze columns
for k in cols:
chr_res[k] = chr_res[k][:,0]
else: # split interactions
for i in range(0, ni): # fix order
for k in cols:
chr_res[k.replace('i', f"i{i+1}")] = None
for k in cols:
for i in range(0, ni):
chr_res[k.replace('i', f"i{i+1}")] = chr_res[k][:,i]
del chr_res[k]
chr_res_df = pd.DataFrame(chr_res)
if interaction_df is None:
m = chr_res_df['pval_nominal'].notnull()
chr_res_df.loc[m, 'pval_nominal'] = 2*stats.t.cdf(-chr_res_df.loc[m, 'pval_nominal'].abs(), dof)
else:
if ni == 1:
m = chr_res_df['pval_gi'].notnull()
chr_res_df.loc[m, 'pval_g'] = 2*stats.t.cdf(-chr_res_df.loc[m, 'pval_g'].abs(), dof)
chr_res_df.loc[m, 'pval_i'] = 2*stats.t.cdf(-chr_res_df.loc[m, 'pval_i'].abs(), dof)
chr_res_df.loc[m, 'pval_gi'] = 2*stats.t.cdf(-chr_res_df.loc[m, 'pval_gi'].abs(), dof)
else:
m = chr_res_df['pval_gi1'].notnull()
chr_res_df.loc[m, 'pval_g'] = 2*stats.t.cdf(-chr_res_df.loc[m, 'pval_g'].abs(), dof)
for i in range(1, ni+1):
chr_res_df.loc[m, f'pval_i{i}'] = 2*stats.t.cdf(-chr_res_df.loc[m, f'pval_i{i}'].abs(), dof)
chr_res_df.loc[m, f'pval_gi{i}'] = 2*stats.t.cdf(-chr_res_df.loc[m, f'pval_gi{i}'].abs(), dof)
# substitute column headers
chr_res_df.rename(columns=var_dict, inplace=True)
print(' * writing output')
chr_res_df.to_parquet(os.path.join(output_dir, f'{prefix}.cis_qtl_pairs.{chrom}.parquet'))
if interaction_df is not None and len(best_assoc) > 0:
best_assoc = pd.concat(best_assoc, axis=1, sort=False).T.set_index('phenotype_id').infer_objects()
m = best_assoc['pval_g'].notnull()
best_assoc.loc[m, 'pval_g'] = 2*stats.t.cdf(-best_assoc.loc[m, 'pval_g'].abs(), dof)
if ni == 1:
best_assoc.loc[m, 'pval_i'] = 2*stats.t.cdf(-best_assoc.loc[m, 'pval_i'].abs(), dof)
best_assoc.loc[m, 'pval_gi'] = 2*stats.t.cdf(-best_assoc.loc[m, 'pval_gi'].abs(), dof)
else:
for i in range(1, ni+1):
best_assoc.loc[m, f'pval_i{i}'] = 2*stats.t.cdf(-best_assoc.loc[m, f'pval_i{i}'].abs(), dof)
best_assoc.loc[m, f'pval_gi{i}'] = 2*stats.t.cdf(-best_assoc.loc[m, f'pval_gi{i}'].abs(), dof)
if run_eigenmt and ni == 1: # leave correction of specific p-values up to user for now (TODO)
if group_s is None:
best_assoc['pval_emt'] = np.minimum(best_assoc['tests_emt']*best_assoc['pval_gi'], 1)
else:
best_assoc['pval_emt'] = np.minimum(best_assoc['num_phenotypes']*best_assoc['tests_emt']*best_assoc['pval_gi'], 1)
best_assoc['pval_adj_bh'] = eigenmt.padjust_bh(best_assoc['pval_emt'])
if ni > 1: # substitute column headers
best_assoc.rename(columns=var_dict, inplace=True)
if write_top:
best_assoc.to_csv(os.path.join(output_dir, f'{prefix}.cis_qtl_top_assoc.txt.gz'),
sep='\t', float_format='%.6g')
else:
return best_assoc
logger.write('done.')
def prepare_cis_output(r_nominal, r2_perm, std_ratio, g, num_var, dof, variant_id, tss_distance, phenotype_id, nperm=10000):
"""Return nominal p-value, allele frequencies, etc. as pd.Series"""
r2_nominal = r_nominal*r_nominal
pval_perm = (np.sum(r2_perm>=r2_nominal)+1) / (nperm+1)
slope = r_nominal * std_ratio
tstat2 = dof * r2_nominal / (1 - r2_nominal)
slope_se = np.abs(slope) / np.sqrt(tstat2)
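# Identities used above: t^2 = dof * r^2 / (1 - r^2) for the correlation r, the slope is
# r rescaled by std_ratio, and se(slope) = |slope| / |t|; pval_perm is the add-one
# permutation p-value, (count of r2_perm >= r2_nominal + 1) / (nperm + 1).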
n2 = 2*len(g)
af = np.sum(g) / n2
if af <= 0.5:
ma_samples = np.sum(g>0.5)
ma_count = np.sum(g[g>0.5])
else:
ma_samples = np.sum(g<1.5)
ma_count = n2 - np.sum(g[g>0.5])
res_s = pd.Series(OrderedDict([
('num_var', num_var),
('beta_shape1', np.NaN),
('beta_shape2', np.NaN),
('true_df', np.NaN),
('pval_true_df', np.NaN),
('variant_id', variant_id),
('tss_distance', tss_distance),
('ma_samples', ma_samples),
('ma_count', ma_count),
('af', af),
('pval_nominal', pval_from_corr(r2_nominal, dof)),
('slope', slope),
('slope_se', slope_se),
('pval_perm', pval_perm),
('pval_beta', np.NaN),
]), name=phenotype_id)
return res_s
def _process_group_permutations(buf, variant_df, tss, dof, group_id, nperm=10000, beta_approx=True):
"""
Merge results for grouped phenotypes
buf: [r_nominal, std_ratio, var_ix, r2_perm, g, num_var, phenotype_id]
"""
# select phenotype with strongest nominal association
max_ix = np.argmax(np.abs([b[0] for b in buf]))
r_nominal, std_ratio, var_ix = buf[max_ix][:3]
g, num_var, phenotype_id = buf[max_ix][4:]
# select best phenotype correlation for each permutation
r2_perm = np.max([b[3] for b in buf], 0)
# return r_nominal, std_ratio, var_ix, r2_perm, g, num_var, phenotype_id
variant_id = variant_df.index[var_ix]
tss_distance = variant_df['pos'].values[var_ix] - tss
res_s = prepare_cis_output(r_nominal, r2_perm, std_ratio, g, num_var, dof, variant_id, tss_distance, phenotype_id, nperm=nperm)
if beta_approx:
res_s[['pval_beta', 'beta_shape1', 'beta_shape2', 'true_df', 'pval_true_df']] = calculate_beta_approx_pval(r2_perm, r_nominal*r_nominal, dof*0.25)
res_s['group_id'] = group_id
res_s['group_size'] = len(buf)
return res_s
def map_cis(genotype_df, variant_df, phenotype_df, phenotype_pos_df, covariates_df=None,
group_s=None, maf_threshold=0, beta_approx=True, nperm=10000,
window=1000000, random_tiebreak=False, logger=None, seed=None,
verbose=True, warn_monomorphic=True):
"""Run cis-QTL mapping"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if logger is None:
logger = SimpleLogger()
logger.write('cis-QTL mapping: empirical p-values for phenotypes')
logger.write(f' * {phenotype_df.shape[1]} samples')
logger.write(f' * {phenotype_df.shape[0]} phenotypes')
if group_s is not None:
logger.write(f' * {len(group_s.unique())} phenotype groups')
group_dict = group_s.to_dict()
if covariates_df is not None:
assert np.all(phenotype_df.columns==covariates_df.index), 'Sample names in phenotype matrix columns and covariate matrix rows do not match!'
assert ~(covariates_df.isnull().any().any()), f'Missing or null values in covariates matrix, in columns {",".join(covariates_df.columns[covariates_df.isnull().any(axis=0)].astype(str))}'
logger.write(f' * {covariates_df.shape[1]} covariates')
residualizer = Residualizer(torch.tensor(covariates_df.values, dtype=torch.float32).to(device))
dof = phenotype_df.shape[1] - 2 - covariates_df.shape[1]
else:
residualizer = None
dof = phenotype_df.shape[1] - 2
logger.write(f' * {genotype_df.shape[0]} variants')
if maf_threshold > 0:
logger.write(f' * applying in-sample {maf_threshold} MAF filter')
if random_tiebreak:
logger.write(f' * randomly selecting top variant in case of ties')
genotype_ix = np.array([genotype_df.columns.tolist().index(i) for i in phenotype_df.columns])
genotype_ix_t = torch.from_numpy(genotype_ix).to(device)
# permutation indices
n_samples = phenotype_df.shape[1]
ix = np.arange(n_samples)
if seed is not None:
logger.write(f' * using seed {seed}')
np.random.seed(seed)
permutation_ix_t = torch.LongTensor(np.array([np.random.permutation(ix) for i in range(nperm)])).to(device)
res_df = []
igc = genotypeio.InputGeneratorCis(genotype_df, variant_df, phenotype_df, phenotype_pos_df, group_s=group_s, window=window)
if igc.n_phenotypes == 0:
raise ValueError('No valid phenotypes found.')
start_time = time.time()
logger.write(' * computing permutations')
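# Permutation scheme (summary): for each phenotype, sample labels are shuffled nperm
# times and the best squared correlation across the cis-window is kept per permutation;
# those maxima form the empirical null behind pval_perm and, via a fitted beta
# distribution, pval_beta.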
if group_s is None:
for k, (phenotype, genotypes, genotype_range, phenotype_id) in enumerate(igc.generate_data(verbose=verbose), 1):
# copy genotypes to GPU
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes_t = genotypes_t[:,genotype_ix_t]
impute_mean(genotypes_t)
if maf_threshold > 0:
maf_t = calculate_maf(genotypes_t)
mask_t = maf_t >= maf_threshold
genotypes_t = genotypes_t[mask_t]
mask = mask_t.cpu().numpy().astype(bool)
genotype_range = genotype_range[mask]
# filter monomorphic variants
mono_t = (genotypes_t == genotypes_t[:, [0]]).all(1)
if mono_t.any():
genotypes_t = genotypes_t[~mono_t]
genotype_range = genotype_range[~mono_t.cpu()]
if warn_monomorphic:
logger.write(f' * WARNING: excluding {mono_t.sum()} monomorphic variants')
if genotypes_t.shape[0] == 0:
logger.write(f'WARNING: skipping {phenotype_id} (no valid variants)')
continue
phenotype_t = torch.tensor(phenotype, dtype=torch.float).to(device)
res = calculate_cis_permutations(genotypes_t, phenotype_t, permutation_ix_t,
residualizer=residualizer, random_tiebreak=random_tiebreak)
r_nominal, std_ratio, var_ix, r2_perm, g = [i.cpu().numpy() for i in res]
var_ix = genotype_range[var_ix]
variant_id = variant_df.index[var_ix]
tss_distance = variant_df['pos'].values[var_ix] - igc.phenotype_tss[phenotype_id]
res_s = prepare_cis_output(r_nominal, r2_perm, std_ratio, g, genotypes_t.shape[0], dof, variant_id, tss_distance, phenotype_id, nperm=nperm)
if beta_approx:
res_s[['pval_beta', 'beta_shape1', 'beta_shape2', 'true_df', 'pval_true_df']] = calculate_beta_approx_pval(r2_perm, r_nominal*r_nominal, dof)
res_df.append(res_s)
else: # grouped mode
for k, (phenotypes, genotypes, genotype_range, phenotype_ids, group_id) in enumerate(igc.generate_data(verbose=verbose), 1):
# copy genotypes to GPU
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes_t = genotypes_t[:,genotype_ix_t]
impute_mean(genotypes_t)
if maf_threshold > 0:
maf_t = calculate_maf(genotypes_t)
mask_t = maf_t >= maf_threshold
genotypes_t = genotypes_t[mask_t]
mask = mask_t.cpu().numpy().astype(bool)
genotype_range = genotype_range[mask]
# filter monomorphic variants
mono_t = (genotypes_t == genotypes_t[:, [0]]).all(1)
if mono_t.any():
genotypes_t = genotypes_t[~mono_t]
genotype_range = genotype_range[~mono_t.cpu()]
if warn_monomorphic:
logger.write(f' * WARNING: excluding {mono_t.sum()} monomorphic variants')
and obj.has_multi_state_intersection(
self.multi_state_available
):
multi_state_allowed = True
return state_allowed and multi_state_allowed
class SubAction(ADCMModel):
action = models.ForeignKey(Action, on_delete=models.CASCADE)
name = models.CharField(max_length=160)
display_name = models.CharField(max_length=160, blank=True)
script = models.CharField(max_length=160)
script_type = models.CharField(max_length=16, choices=SCRIPT_TYPE)
state_on_fail = models.CharField(max_length=64, blank=True)
params = models.JSONField(default=dict)
class HostComponent(ADCMModel):
cluster = models.ForeignKey(Cluster, on_delete=models.CASCADE)
host = models.ForeignKey(Host, on_delete=models.CASCADE)
service = models.ForeignKey(ClusterObject, on_delete=models.CASCADE)
component = models.ForeignKey(ServiceComponent, on_delete=models.CASCADE)
state = models.CharField(max_length=64, default='created')
class Meta:
unique_together = (('host', 'service', 'component'),)
CONFIG_FIELD_TYPE = (
('string', 'string'),
('text', 'text'),
('password', 'password'),
('secrettext', 'secrettext'),
('json', 'json'),
('integer', 'integer'),
('float', 'float'),
('option', 'option'),
('variant', 'variant'),
('boolean', 'boolean'),
('file', 'file'),
('list', 'list'),
('map', 'map'),
('structure', 'structure'),
('group', 'group'),
)
class PrototypeConfig(ADCMModel):
prototype = models.ForeignKey(Prototype, on_delete=models.CASCADE)
action = models.ForeignKey(Action, on_delete=models.CASCADE, null=True, default=None)
name = models.CharField(max_length=160)
subname = models.CharField(max_length=160, blank=True)
default = models.TextField(blank=True)
type = models.CharField(max_length=16, choices=CONFIG_FIELD_TYPE)
display_name = models.CharField(max_length=160, blank=True)
description = models.TextField(blank=True)
limits = models.JSONField(default=dict)
ui_options = models.JSONField(blank=True, default=dict)
required = models.BooleanField(default=True)
group_customization = models.BooleanField(null=True)
class Meta:
unique_together = (('prototype', 'action', 'name', 'subname'),)
class PrototypeExport(ADCMModel):
prototype = models.ForeignKey(Prototype, on_delete=models.CASCADE)
name = models.CharField(max_length=160)
class Meta:
unique_together = (('prototype', 'name'),)
class PrototypeImport(ADCMModel):
prototype = models.ForeignKey(Prototype, on_delete=models.CASCADE)
name = models.CharField(max_length=160)
min_version = models.CharField(max_length=80)
max_version = models.CharField(max_length=80)
min_strict = models.BooleanField(default=False)
max_strict = models.BooleanField(default=False)
default = models.JSONField(null=True, default=None)
required = models.BooleanField(default=False)
multibind = models.BooleanField(default=False)
class Meta:
unique_together = (('prototype', 'name'),)
class ClusterBind(ADCMModel):
cluster = models.ForeignKey(Cluster, on_delete=models.CASCADE)
service = models.ForeignKey(ClusterObject, on_delete=models.CASCADE, null=True, default=None)
source_cluster = models.ForeignKey(
Cluster, related_name='source_cluster', on_delete=models.CASCADE
)
source_service = models.ForeignKey(
ClusterObject,
related_name='source_service',
on_delete=models.CASCADE,
null=True,
default=None,
)
__error_code__ = 'BIND_NOT_FOUND'
class Meta:
unique_together = (('cluster', 'service', 'source_cluster', 'source_service'),)
JOB_STATUS = (
('created', 'created'),
('running', 'running'),
('success', 'success'),
('failed', 'failed'),
)
class UserProfile(ADCMModel):
login = models.CharField(max_length=32, unique=True)
profile = models.JSONField(default=str)
class Role(ADCMModel):
name = models.CharField(max_length=32, unique=True)
description = models.TextField(blank=True)
permissions = models.ManyToManyField(Permission, blank=True)
user = models.ManyToManyField(User, blank=True)
group = models.ManyToManyField(Group, blank=True)
class TaskLog(ADCMModel):
object_id = models.PositiveIntegerField()
object_type = models.ForeignKey(ContentType, null=True, on_delete=models.CASCADE)
task_object = GenericForeignKey('object_type', 'object_id')
action = models.ForeignKey(Action, on_delete=models.SET_NULL, null=True, default=None)
pid = models.PositiveIntegerField(blank=True, default=0)
selector = models.JSONField(default=dict)
status = models.CharField(max_length=16, choices=JOB_STATUS)
config = models.JSONField(null=True, default=None)
attr = models.JSONField(default=dict)
hostcomponentmap = models.JSONField(null=True, default=None)
hosts = models.JSONField(null=True, default=None)
verbose = models.BooleanField(default=False)
start_date = models.DateTimeField()
finish_date = models.DateTimeField()
lock = models.ForeignKey('ConcernItem', null=True, on_delete=models.SET_NULL, default=None)
def lock_affected(self, objects: Iterable[ADCMEntity]) -> None:
if self.lock:
return
first_job = JobLog.obj.filter(task=self).order_by('id').first()
reason = MessageTemplate.get_message_from_template(
MessageTemplate.KnownNames.LockedByJob.value,
job=first_job,
target=self.task_object,
)
self.lock = ConcernItem.objects.create(
type=ConcernType.Lock.value,
name=None,
reason=reason,
blocking=True,
owner=self.task_object,
cause=ConcernCause.Job.value,
)
self.save()
for obj in objects:
obj.add_to_concerns(self.lock)
def unlock_affected(self) -> None:
if not self.lock:
return
lock = self.lock
self.lock = None
self.save()
lock.delete()
class JobLog(ADCMModel):
task = models.ForeignKey(TaskLog, on_delete=models.SET_NULL, null=True, default=None)
action = models.ForeignKey(Action, on_delete=models.SET_NULL, null=True, default=None)
sub_action = models.ForeignKey(SubAction, on_delete=models.SET_NULL, null=True, default=None)
pid = models.PositiveIntegerField(blank=True, default=0)
selector = models.JSONField(default=dict)
log_files = models.JSONField(default=list)
status = models.CharField(max_length=16, choices=JOB_STATUS)
start_date = models.DateTimeField()
finish_date = models.DateTimeField(db_index=True)
__error_code__ = 'JOB_NOT_FOUND'
class GroupCheckLog(ADCMModel):
job = models.ForeignKey(JobLog, on_delete=models.SET_NULL, null=True, default=None)
title = models.TextField()
message = models.TextField(blank=True, null=True)
result = models.BooleanField(blank=True, null=True)
class Meta:
constraints = [models.UniqueConstraint(fields=['job', 'title'], name='unique_group_job')]
class CheckLog(ADCMModel):
group = models.ForeignKey(GroupCheckLog, blank=True, null=True, on_delete=models.CASCADE)
job = models.ForeignKey(JobLog, on_delete=models.SET_NULL, null=True, default=None)
title = models.TextField()
message = models.TextField()
result = models.BooleanField()
LOG_TYPE = (
('stdout', 'stdout'),
('stderr', 'stderr'),
('check', 'check'),
('custom', 'custom'),
)
FORMAT_TYPE = (
('txt', 'txt'),
('json', 'json'),
)
class LogStorage(ADCMModel):
job = models.ForeignKey(JobLog, on_delete=models.CASCADE)
name = models.TextField(default='')
body = models.TextField(blank=True, null=True)
type = models.CharField(max_length=16, choices=LOG_TYPE)
format = models.CharField(max_length=16, choices=FORMAT_TYPE)
class Meta:
constraints = [
models.UniqueConstraint(
fields=['job'], condition=models.Q(type='check'), name='unique_check_job'
)
]
# Stage: Temporary tables to load bundle
class StagePrototype(ADCMModel):
type = models.CharField(max_length=16, choices=PROTO_TYPE)
parent = models.ForeignKey("self", on_delete=models.CASCADE, null=True, default=None)
name = models.CharField(max_length=160)
path = models.CharField(max_length=160, default='')
display_name = models.CharField(max_length=160, blank=True)
version = models.CharField(max_length=80)
edition = models.CharField(max_length=80, default='community')
license_path = models.CharField(max_length=160, default=None, null=True)
license_hash = models.CharField(max_length=64, default=None, null=True)
required = models.BooleanField(default=False)
shared = models.BooleanField(default=False)
constraint = models.JSONField(default=get_default_constraint)
requires = models.JSONField(default=list)
bound_to = models.JSONField(default=dict)
adcm_min_version = models.CharField(max_length=80, default=None, null=True)
description = models.TextField(blank=True)
monitoring = models.CharField(max_length=16, choices=MONITORING_TYPE, default='active')
config_group_customization = models.BooleanField(default=False)
__error_code__ = 'PROTOTYPE_NOT_FOUND'
def __str__(self):
return str(self.name)
class Meta:
unique_together = (('type', 'parent', 'name', 'version'),)
class StageUpgrade(ADCMModel):
name = models.CharField(max_length=160, blank=True)
description = models.TextField(blank=True)
min_version = models.CharField(max_length=80)
max_version = models.CharField(max_length=80)
min_strict = models.BooleanField(default=False)
max_strict = models.BooleanField(default=False)
from_edition = models.JSONField(default=get_default_from_edition)
state_available = models.JSONField(default=list)
state_on_success = models.CharField(max_length=64, blank=True)
class StageAction(AbstractAction):
prototype = models.ForeignKey(StagePrototype, on_delete=models.CASCADE)
class StageSubAction(ADCMModel):
action = models.ForeignKey(StageAction, on_delete=models.CASCADE)
name = models.CharField(max_length=160)
display_name = models.CharField(max_length=160, blank=True)
script = models.CharField(max_length=160)
script_type = models.CharField(max_length=16, choices=SCRIPT_TYPE)
state_on_fail = models.CharField(max_length=64, blank=True)
params = models.JSONField(default=dict)
class StagePrototypeConfig(ADCMModel):
prototype = models.ForeignKey(StagePrototype, on_delete=models.CASCADE)
action = models.ForeignKey(StageAction, on_delete=models.CASCADE, null=True, default=None)
name = models.CharField(max_length=160)
subname = models.CharField(max_length=160, blank=True)
default = models.TextField(blank=True)
type = models.CharField(max_length=16, choices=CONFIG_FIELD_TYPE)
display_name = models.CharField(max_length=160, blank=True)
description = models.TextField(blank=True)
limits = models.JSONField(default=dict)
ui_options = models.JSONField(blank=True, default=dict)
required = models.BooleanField(default=True)
group_customization = models.BooleanField(null=True)
class Meta:
unique_together = (('prototype', 'action', 'name', 'subname'),)
class StagePrototypeExport(ADCMModel):
prototype = models.ForeignKey(StagePrototype, on_delete=models.CASCADE)
name = models.CharField(max_length=160)
class Meta:
unique_together = (('prototype', 'name'),)
class StagePrototypeImport(ADCMModel):
prototype = models.ForeignKey(StagePrototype, on_delete=models.CASCADE)
name = models.CharField(max_length=160)
min_version = models.CharField(max_length=80)
max_version = models.CharField(max_length=80)
min_strict = models.BooleanField(default=False)
max_strict = models.BooleanField(default=False)
default = models.JSONField(null=True, default=None)
required = models.BooleanField(default=False)
multibind = models.BooleanField(default=False)
class Meta:
unique_together = (('prototype', 'name'),)
class DummyData(ADCMModel):
date = models.DateTimeField(auto_now=True)
class MessageTemplate(ADCMModel):
"""
Templates for `ConcernItem.reason`.
There are two sources of templates - they are pre-created in migrations or loaded from bundles.
Expected template format is:
{
'message': 'Lorem ${ipsum} dolor sit ${amet}',
'placeholder': {
'ipsum': {'type': 'cluster'},
'amet': {'type': 'action'}
}
}
placeholder fill functions have unified interface:
@classmethod
def _func(cls, placeholder_name, **kwargs) -> dict
TODO: load from bundle
TODO: check consistency on creation
TODO: separate JSON processing logic from model
"""
name = models.CharField(max_length=160, unique=True)
template = models.JSONField()
class KnownNames(Enum):
LockedByJob = 'locked by running job on target' # kwargs=(job, target)
ConfigIssue = 'object config issue' # kwargs=(source, )
RequiredServiceIssue = 'required service issue' # kwargs=(source, )
RequiredImportIssue = 'required import issue' # kwargs=(source, )
HostComponentIssue = 'host component issue' # kwargs=(source, )
class PlaceHolderType(Enum):
Action = 'action'
ADCMEntity = 'adcm_entity'
ADCM = 'adcm'
Cluster = 'cluster'
Service = 'service'
Component = 'component'
Provider = 'provider'
Host = 'host'
Job = 'job'
@classmethod
def get_message_from_template(cls, name: str, **kwargs) -> dict:
"""Find message template by its name and fill placeholders"""
tpl = cls.obj.get(name=name).template
filled_placeholders = {}
try:
for ph_name, ph_data in tpl['placeholder'].items():
filled_placeholders[ph_name] = cls._fill_placeholder(ph_name, ph_data, **kwargs)
except (KeyError, AttributeError, TypeError, AssertionError) as ex:
if isinstance(ex, KeyError):
msg = f'Message templating KeyError: "{ex.args[0]}" not found'
elif isinstance(ex, AttributeError):
msg = f'Message templating AttributeError: "{ex.args[0]}"'
elif isinstance(ex, TypeError):
msg = f'Message templating TypeError: "{ex.args[0]}"'
elif isinstance(ex, AssertionError):
msg = 'Message templating AssertionError: expected kwargs were not found'
else:
msg = None
raise AdcmEx('MESSAGE_TEMPLATING_ERROR', msg=msg) from ex
tpl['placeholder'] = filled_placeholders
return tpl
@classmethod
def _fill_placeholder(cls, ph_name: str, ph_data: dict, **ph_source_data) -> dict:
type_map = {
cls.PlaceHolderType.Action.value: cls._action_placeholder,
cls.PlaceHolderType.ADCMEntity.value: cls._adcm_entity_placeholder,
cls.PlaceHolderType.ADCM.value: cls._adcm_entity_placeholder,
cls.PlaceHolderType.Cluster.value: cls._adcm_entity_placeholder,
cls.PlaceHolderType.Service.value: cls._adcm_entity_placeholder,
cls.PlaceHolderType.Component.value: cls._adcm_entity_placeholder,
cls.PlaceHolderType.Provider.value: cls._adcm_entity_placeholder,
cls.PlaceHolderType.Host.value: cls._adcm_entity_placeholder,
cls.PlaceHolderType.Job.value: cls._job_placeholder,
}
return type_map[ph_data['type']](ph_name, **ph_source_data)
@classmethod
def _action_placeholder(cls, _, **kwargs) -> dict:
action = kwargs.get('action')
assert action
target = kwargs.get('target')
assert target
ids = target.get_id_chain()
ids['action'] = action.pk
return {
'type': cls.PlaceHolderType.Action.value,
'name': action.display_name,
'ids': ids,
}
@classmethod
def _adcm_entity_placeholder(cls, ph_name, **kwargs) -> dict:
obj = kwargs.get(ph_name)
assert obj
return {
'type': obj.prototype.type,
'name': obj.display_name,
'ids': obj.get_id_chain(),
}
@classmethod
def _job_placeholder(cls, _, **kwargs) -> dict:
job = kwargs.get('job')
assert job
action = job.sub_action or job.action
return {
'type': cls.PlaceHolderType.Job.value,
'name': action.display_name or action.name,
'ids': job.id,
}
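# Illustrative usage (the stored template body below is an assumption; real
# templates are pre-created in migrations or loaded from bundles):
#
#   reason = MessageTemplate.get_message_from_template(
#       MessageTemplate.KnownNames.ConfigIssue.value, source=cluster)
#
# Given a stored template such as
#   {'message': 'Config of ${source} is invalid',
#    'placeholder': {'source': {'type': 'cluster'}}}
# the call returns the same dict with 'placeholder' replaced by filled data, e.g.
#   {'source': {'type': 'cluster', 'name': cluster.display_name, 'ids': {...}}}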
class ConcernType(models.TextChoices):
Lock = 'lock', 'lock'
Issue = 'issue', 'issue'
Flag = 'flag', 'flag'
class ConcernCause(models.TextChoices):
Config = 'config', 'config'
Job = 'job', 'job'
HostComponent = 'host-component', 'host-component'
Import = 'import', 'import'
Service = 'service', 'service'
class ConcernItem(ADCMModel):
"""
Representation for object's lock/issue/flag
Many-to-many from ADCMEntities
One-to-one from TaskLog
...
`type` is literally type of concern
`name` is used for (un)setting flags from ansible playbooks
`reason` is used to display/notify on front-end, text template and data for URL generation
should be generated from pre-created templates model `MessageTemplate`
`blocking` blocks actions from running
`owner` is object-origin of concern
`cause` is owner's parameter causing concern
`related_objects` are back-refs from affected ADCMEntities.concerns
"""
type = models.CharField(max_length=8, choices=ConcernType.choices, default=ConcernType.Lock)
name = models.CharField(max_length=160, null=True, unique=True)
reason = models.JSONField(default=dict)
blocking = models.BooleanField(default=True)
owner_id = models.PositiveIntegerField(null=True)
owner_type = models.ForeignKey(ContentType, null=True, on_delete=models.CASCADE)
owner | |
import matplotlib as mpl
from matplotlib import cm, colors
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import argparse
import csv
import sys
from scipy.sparse import csc_matrix, dia_matrix, linalg as sla
from scipy.stats import norm
from smoothfdr.smoothed_fdr import SmoothedFdr, GaussianKnown, calc_plateaus
from smoothfdr.normix import GridDistribution, predictive_recursion, empirical_null
import smoothfdr.signal_distributions as signal_distributions
from smoothfdr.utils import *
from smoothfdr.plotutils import *
def calculate_1d_signal_weights(split_points, split_weights):
'''Generate signal weights from the user-specified splits.'''
signal_weights = np.zeros((split_points[-1] + 1, 1))
cur_split = 0
cur_point = 0
while cur_split < len(split_weights):
cur_weight = split_weights[cur_split]
while cur_point < split_points[cur_split]:
signal_weights[cur_point] = cur_weight
cur_point += 1
cur_split += 1
return signal_weights
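# Illustrative example: calculate_1d_signal_weights([0, 2, 4], [0.0, 0.5, 0.02])
# returns a (5, 1) array whose entries 0-1 are 0.5 and entries 2-3 are 0.02,
# while the final entry keeps its initial 0.0; the leading split point of 0
# only anchors the start of the line (see the --split_points help below).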
def calculate_2d_signal_weights(width, height, default_weight, x_min, x_max, y_min, y_max, weights):
'''Generate signal weights from the user-specified splits.'''
signal_weights = np.zeros((width, height)) + default_weight
for region in zip(x_min, x_max, y_min, y_max, weights):
signal_weights[region[0]:region[1]+1,region[2]:region[3]+1] = region[4]
return signal_weights
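# Illustrative example: calculate_2d_signal_weights(4, 4, 0.02, [1], [2], [1], [2], [0.5])
# returns a 4x4 grid filled with 0.02 except the block rows 1-2 x cols 1-2,
# which is set to 0.5 (the +1 on the upper index makes region bounds inclusive).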
def calculate_3d_signal_weights(width, height, depth, default_weight, x_min, x_max, y_min, y_max, z_min, z_max, weights):
'''Generate signal weights from the user-specified splits.'''
signal_weights = np.zeros((width, height, depth)) + default_weight
for region in zip(x_min, x_max, y_min, y_max, z_min, z_max, weights):
signal_weights[region[0]:region[1]+1,region[2]:region[3]+1,region[4]:region[5]+1] = region[6]
return signal_weights
def load_data(filename, header=True):
'''Loads a CSV file containing the z-scores.'''
with open(filename, 'r') as f:
reader = csv.reader(f)
data = []
# skip the header line
if header:
next(reader)
# read in all the rows
for line in reader:
data.append(np.array([float(x) if x != 'True' and x != 'False' else (1 if x == 'True' else 0) for x in line], dtype='double'))
# Return the matrix of z-scores
return np.array(data, dtype='double') if len(data) > 1 else data[0]
def load_neurodata(filename, header=True):
'''Loads a CSV file containing the z-scores of neuro-image data that is not rectangular.'''
with open(filename, 'r') as f:
reader = csv.reader(f)
# skip the header line
if header:
next(reader)
rows = []
for line in reader:
if len(line) == 0:
continue
rows.append(np.array([float(x) for x in line]))
return np.array(rows).T
def save_sweeporder(data, sweeporder, filename):
'''Saves the sweeporder to file.'''
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(['Index', 'Z-Score'])
for s in sweeporder:
writer.writerow([s, data[s]])
def save_plateaus(plateaus, filename):
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(['PlateauID','Level','NodeIDs'])
for plateau,(level,nodes) in enumerate(plateaus):
nodes = list(nodes)
if type(nodes[0]) is tuple:
nodes = [';'.join([str(x) for x in y]) for y in nodes]
writer.writerow([plateau, level] + list(nodes))
def load_plateaus(filename):
with open(filename, 'r') as f:
reader = csv.reader(f)
next(reader)
plateaus = []
for line in reader:
vals = line[2:]
plateaus.append([tuple([int(y) for y in x.split(';')]) for x in vals])
return plateaus
def calc_signal_weights(args):
if args.dimensions == '3d':
# Get the weights for each point in the 3d grid
return calculate_3d_signal_weights(args.width, args.height, args.depth,
args.default_weight,
args.region_min_x, args.region_max_x,
args.region_min_y, args.region_max_y,
args.region_min_z, args.region_max_z,
args.region_weights)
elif args.dimensions == '2d':
# Get the weights for each point in the 2d grid
return calculate_2d_signal_weights(args.width, args.height,
args.default_weight,
args.region_min_x, args.region_max_x,
args.region_min_y, args.region_max_y,
args.region_weights)
elif args.dimensions == '1d':
# Get the weights for each point along the line
return calculate_1d_signal_weights(args.split_points, args.split_weights)
else:
raise Exception('Only 1-, 2-, and 3-dimensional data are supported.')
def main():
parser = argparse.ArgumentParser(description='Runs the smoothed FDR algorithm.')
parser.add_argument('--verbose', type=int, default=0, help='Print detailed progress information to the console. 0=none, 1=outer-loop only, 2=all details.')
parser.add_argument('--data_file', help='The file containing the raw z-score data.')
parser.add_argument('--no_data_header', action='store_true', help='Specifies that there is no header line in the data file.')
parser.add_argument('--signals_file', help='The file containing the true signal points.')
parser.add_argument('--generate_data', dest='generate_data', action='store_true', help='Generate synthetic data and save it to the file specified.')
parser.add_argument('--save_weights', help='The file where the resulting smoothed weights will be saved.')
parser.add_argument('--save_final_weights', help='The file where the resulting smoothed weights after any postprocessing will be saved.')
parser.add_argument('--save_posteriors', help='The file where the resulting smoothed posteriors will be saved.')
parser.add_argument('--save_plateaus', help='The file where the resulting smoothed plateaus will be saved.')
parser.add_argument('--save_signal', help='The file where the estimated signal will be saved.')
parser.add_argument('--save_final_posteriors', help='The file where the final resulting posteriors after any postprocessing will be saved.')
parser.add_argument('--save_oracle_posteriors', help='The file where the oracle posteriors will be saved.')
parser.add_argument('--save_discoveries', help='The file where the inferred discoveries will be saved.')
# Null and signal distribution settings
parser.add_argument('--empirical_null', dest='empirical_null', action='store_true', help='Estimate the null distribution empirically (recommended).')
parser.add_argument('--null_mean', type=float, default=0., help='The mean of the null distribution.')
parser.add_argument('--null_stdev', type=float, default=1., help='The standard deviation of the null distribution.')
parser.add_argument('--signal_mean', type=float, default=0., help='The mean of the signal distribution.')
parser.add_argument('--signal_stdev', type=float, default=3., help='The standard deviation of the signal distribution.')
parser.add_argument('--signal_dist_name', help='The name of the signal distribution. This will dynamically call it by name. It must be in the signal_distributions.py file and have both the foo_pdf and foo_sample functions defined.')
# Predictive recursion settings
parser.add_argument('--estimate_signal', dest='estimate_signal', action='store_true', help='Use predictive recursion to estimate the signal distribution.')
parser.add_argument('--pr_grid_x', nargs=3, type=int, default=[-7,7,57], help='The grid parameters (min, max, points) for the predictive recursion approximate distribution.')
parser.add_argument('--pr_sweeps', type=int, default=50, help='The number of randomized sweeps to make over the data.')
parser.add_argument('--pr_nullprob', type=float, default=1.0, help='The initial guess for the marginal probability of coming from the null distribution.')
parser.add_argument('--pr_decay', type=float, default=-0.67, help='The exponential decay rate for the recursive update weights.')
parser.add_argument('--pr_save_sweeporder', help='Save the sweep orders to the specified file.')
# Plot settings
parser.add_argument('--plot_data', help='The file to which the scatterplot of the data will be saved.')
parser.add_argument('--plot_results', help='The file to which the results will be plotted.')
parser.add_argument('--plot_signal', help='The file to which the estimated signal distribution will be plotted.')
parser.add_argument('--plot_true_signal', dest='plot_true_signal', action='store_true', help='Plot the true signal distribution along with the estimated one in plot_signal.')
parser.add_argument('--plot_signal_bounds', nargs=2, type=int, default=[-7, 7], help='The min and max values over which to plot the signal.')
parser.add_argument('--plot_path', help='The file to which the solution path of the penalty (lambda) will be plotted.')
parser.add_argument('--plot_adaptive', help='The file to which the results of the adaptive lasso solution will be plotted.')
parser.add_argument('--plot_final', help='The file to which the results of the final solution will be plotted.')
parser.add_argument('--plot_discoveries', help='The file to which the final discoveries (the ones after post-processing) will be plotted.')
parser.add_argument('--plot_final_discoveries', help='The file to which the discoveries will be plotted.')
parser.add_argument('--plot_path_results', help='The file format of the intermediate results plots along the path.')
# Penalty (lambda) settings
parser.add_argument('--solution_path', dest='solution_path', action='store_true', help='Use the solution path of the generalized lasso to find a good value for the penalty weight (lambda).')
parser.add_argument('--min_penalty_weight', type=float, default=0.2, help='The minimum amount the lambda penalty can take in the solution path.')
parser.add_argument('--max_penalty_weight', type=float, default=1.5, help='The maximum amount the lambda penalty can take in the solution path.')
parser.add_argument('--penalty_bins', type=int, default=30, help='The number of lambda penalty values in the solution path.')
parser.add_argument('--dof_tolerance', type=float, default=1e-4, help='The difference threshold for calculating the degrees of freedom.')
parser.add_argument('--penalty_weight', '--lambda', type=float, default=0.3, help='The lambda penalty that controls the sparsity (only used if --solution_path is not specified).')
parser.add_argument('--adaptive_lasso', dest='adaptive_lasso', action='store_true', help='Use an adaptive lasso value that re-weights the penalties to be inversely proportional to the size of the solution path choice with the uniform penalty.')
parser.add_argument('--adaptive_lasso_gamma', type=float, default=1.0, help='The exponent to use for the adaptive lasso weights.')
parser.add_argument('--postprocess_plateaus', dest='postprocess_plateaus', action='store_true', help='Perform unpenalized regression on each plateau as a final post-processing step.')
# Smoothed FDR optimization settings
parser.add_argument('--converge', type=float, default=1e-6, help='The convergence threshold for the main optimization loop.')
parser.add_argument('--max_steps', type=int, default=100, help='The maximum number of steps for the main optimization loop.')
parser.add_argument('--m_converge', type=float, default=1e-6, help='The convergence threshold for the q-step <-> m-step loop.')
parser.add_argument('--m_max_steps', type=float, default=1, help='The maximum number of steps for the q-step <-> m-step loop.')
parser.add_argument('--cd_converge', type=float, default=1e-6, help='The convergence threshold for the inner loop.')
parser.add_argument('--cd_max_steps', type=float, default=100000, help='The maximum number of steps for the inner loop.')
parser.add_argument('--admm_alpha', type=float, default=1.8, help='The step size value for the ADMM solver (if used).')
parser.add_argument('--admm_adaptive', dest='admm_adaptive', action='store_true', help='Use an adaptive soft-thresholding value instead of the constant penalty value.')
parser.add_argument('--admm_inflate', type=float, default=2., help='The inflation/deflation rate for the ADMM step size.')
parser.add_argument('--dual_solver', choices=['cd', 'sls', 'lbfgs', 'admm', 'graph'], default='admm', help='The method used to solve the fused lasso problem in the M-step.')
# FDR reporting settings
parser.add_argument('--fdr_level', type=float, default=0.1, help='The false discovery rate level to use when reporting discoveries.')
subparsers = parser.add_subparsers(dest='dimensions', help='The dimensions of the dataset (1d, 2d, 3d, or fmri).')
# 1D data settings
parser_1d = subparsers.add_parser('1d', help='Settings for 1-dimensional data.')
parser_1d.add_argument('--split_points', nargs='+', type=int, default=[0, 250, 500, 750, 1000], help='The locations at which the signal weight changes. The first split point should always be 0.')
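# An illustrative invocation (the entry-point name is an assumption; every flag
# shown here is defined in the parser above):
#   python -m smoothfdr --data_file z_scores.csv --empirical_null --estimate_signal \
#       --solution_path --fdr_level 0.1 1d --split_points 0 250 500 750 1000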
| |
= ''
acc_txt = '{:.2f}%'.format(float(acc))
score_txt = '{:,}'.format(int(plays[i]['score']))
if var_attr == 'max_combo':
var_txt = max_combo_txt
elif var_attr == 'score':
var_txt = score_txt
else:
var_txt = acc_txt
# other components
rank_txt = '{}'.format(self.RANK_EMOTES[plays[i]['rank']])
misses_txt = '{}m'.format(plays[i]['count_miss'])
mods_txt = '+{}'.format(utils.fix_mods(''.join(mods_list)))
is_spaced = True
if gamemode == 3:
is_spaced = False
score_breakdown = self._get_score_breakdown(plays[i], gamemode, spaced=is_spaced)
# print('Play ', plays[i]) # **
if 'pp' in plays[i] and plays[i]['pp'] is not None and \
float(plays[i]['pp']) != 0:
pp_txt = '{:}pp'.format(round(plays[i]['pp']))
else: # otherwise calculate play pp (like for recent/failed)
# play_pp = round(full_beatmap_info['extra_info']['play_pp'])
pp_txt = '-pp'# .format(play_pp)
play_time = datetime.datetime.strptime(plays[i]["date"], '%Y-%m-%d %H:%M:%S')
try:
timeago = utils.time_ago(datetime.datetime.utcnow(), play_time,
shift=0, abbr=True)
except:
try:
timeago = utils.time_ago(datetime.datetime.utcnow(), play_time,
shift=2, abbr=True)
except:
timeago = utils.time_ago(datetime.datetime.utcnow(), play_time,
shift=8, abbr=True)
timeago += ' ago'
# put the information together
info = '`{:<3}`{}`{:<42}{:<10}`\n`{:<3}{:<28}{:<9}{:<8}{:<10}`\n'.format(
number_txt, rank_txt, full_map_txt, mods_txt,
'▸', score_breakdown, var_txt, pp_txt, timeago)
desc += info
em = discord.Embed(description=desc, colour=server_user.colour)
# online_status = await self._get_online_icon(user, api)
sort_txt = ""
title = header_txt
page_txt = ''
if page_info:
page_txt = ' | Page {} of {}'.format(page_info[0], page_info[1])
profile_link_url = self.owoAPI.get_user_url(user['user_id'], api=api)
server_name = self.owoAPI.get_server_name(api)
em.set_author(name = title, url=profile_link_url, icon_url=flag_url)
em.set_footer(text = "On osu! {} Server{}".format(server_name, page_txt),
icon_url=self.owoAPI.get_server_avatar(api))
em.set_thumbnail(url=avatar_url)
return em
# Gives a user profile image with some information
async def create_play_list_embed(self, ctx,
user, plays, beatmaps, gamemode, api='bancho', header_txt=None, page_info=None):
server_user = ctx.message.author
server = ctx.message.guild
avatar_url = await self.owoAPI.get_user_avatar(user['user_id'], api)
flag_url = self.owoAPI.get_country_flag_url(user['country'])
gamemode_text = utils.get_gamemode_text(gamemode)
msg = ''
desc = ''
# takes in the processed userbest
for i in range(len(plays)):
# handle mods
mod_num = int(plays[i]['enabled_mods'])
mods_list = utils.num_to_mod(mod_num)
if not mods_list:
mods_list.append('No Mod')
# print('mod list', mods_list)
beatmap_url = self.owoAPI.get_beatmap_url(beatmaps[i])
extra_info = {}
extra_info['play_info'] = plays[i]
# print('test 3') # **
# only don't force cache for recent
if 'recent' in header_txt.lower():
force_osu_cache=False
else:
force_osu_cache=True
try:
# print(beatmaps[i])
full_beatmap_info, bmap_raw, bmap_file_path = await self.owoAPI.get_full_beatmap_info(
beatmaps[i], extra_info=extra_info, mods=mod_num, force_osu_cache=force_osu_cache)
except: # if the beatmap no longer exists
# print(sys.exc_info())
continue
# print(full_beatmap_info)
info = ''
# generate the title portion
if 'Number' not in header_txt or len(plays) != 1:
number_txt = '{}. '.format(str(plays[i]['play_idx']))
else:
number_txt = ''
if gamemode == 0:
if 'stars_mod' in full_beatmap_info.keys():
star_str = '{:.2f}'.format(float(full_beatmap_info['stars_mod']))
else:
star_str = '{:.2f}'.format(float(full_beatmap_info['difficulty_rating']))
else:
star_str = self.adjust_val_str_mod(
full_beatmap_info, "difficulty_rating",
int(plays[i]['enabled_mods']), gamemode)
star_str = '{}★'.format(star_str)
star_str = self._fix_star_arrow(star_str)
title_txt = '**{}[{} [{}]{}]({}) +{}** [{}]\n'.format(
number_txt , beatmaps[i]['title'].replace('*','\*'),
beatmaps[i]['version'].replace('*','\*'),
self._get_keys(beatmaps[i], gamemode, beatmaps[i]['version']),
beatmap_url, utils.fix_mods(''.join(mods_list)), star_str)
info += title_txt
# create details section
choke_text = '' # choke text
# if there is a max combo available in the beatmap, compare with play.
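# (this "choke" hint is only shown for osu!standard plays with FC pp info
# available that either have a miss or sit below ~95% of the map's max combo
# while ranked S)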
if (('max_combo' in full_beatmap_info) and \
('extra_info' in full_beatmap_info and full_beatmap_info['extra_info']) and \
('fc_pp' in full_beatmap_info['extra_info']) and \
(full_beatmap_info['max_combo'] is not None)) and \
(gamemode == 0) and \
(int(plays[i]['count_miss']) >= 1 or (int(plays[i]['max_combo']) <= 0.95*int(full_beatmap_info['max_combo']) and 'S' in plays[i]['rank'])):
choke_text += ' _({:.2f}pp for {:.2f}% FC)_'.format(
float(full_beatmap_info['extra_info']['fc_pp']),
float(full_beatmap_info['extra_info']['fc_acc']))
# max combo text
max_combo_txt = 'x{:,}/{:,}'.format(
int(plays[i]['max_combo']), int(full_beatmap_info['max_combo'])) \
if 'max_combo' in full_beatmap_info and full_beatmap_info['max_combo'] \
else "x{:,}".format(int(plays[i]['max_combo']))
# add droid pp if necessary
droid_pp_txt = ''
if api == 'droid':
try:
droid_calc_info, _, _ = await self.owoAPI.get_full_beatmap_info(
beatmaps[i], extra_info=extra_info, mods=mod_num, api='droid')
droid_pp_txt = ' | **{:.2f}DPP**'.format(
float(droid_calc_info['extra_info']['play_pp']))
except:
pass
# check if pp is in plays
# print('Play ', plays[i]) # **
if 'pp' in plays[i] and plays[i]['pp'] is not None and \
float(plays[i]['pp']) != 0:
pp_txt = '**{:.2f}PP**{}{}'.format(float(plays[i]['pp']),
droid_pp_txt, choke_text)
elif 'extra_info' in full_beatmap_info: # otherwise calculate play pp (like for recent/failed)
# check if it's non-FC
play_pp = float(full_beatmap_info['extra_info']['play_pp'])
if ('max_combo' in full_beatmap_info and full_beatmap_info['max_combo'] is not None) and \
(int(plays[i]['count_miss']) >= 1 or (int(plays[i]['max_combo']) <= 0.96*int(full_beatmap_info['max_combo']) and 'S' in plays[i]['rank'])) and \
(abs(play_pp - float(full_beatmap_info['extra_info']['fc_pp'])) > 3):
pp_txt = '**{:.2f}PP**{}{}'.format(play_pp,
droid_pp_txt, choke_text)
else:
pp_txt = '**{:.2f}PP**{} (_Unofficial_)'.format(
play_pp, droid_pp_txt)
else:
pp_txt = '**-PP**{} (_Unofficial_)'.format(droid_pp_txt)
# calculate accuracy if needed, depends on api
if 'accuracy' not in plays[i]:
acc = utils.calculate_acc(plays[i], gamemode)
else:
acc = plays[i]['accuracy']
# define acc text
if gamemode == 3:
if float(plays[i]['count_300']) != 0:
ratio_300 = float(plays[i]['count_geki'])/float(plays[i]['count_300'])
acc_txt = '{:.2f}% ▸ {:.2f}:1'.format(round(acc, 2), ratio_300)
else:
acc_txt = '{:.2f}% ▸ ∞:1'.format(round(acc, 2))
else:
acc_txt = '{:.2f}%'.format(round(acc, 2))
info += '▸ **{}** ▸ {} ▸ {}\n'.format(
self.RANK_EMOTES[plays[i]['rank']], pp_txt, acc_txt)
info += '▸ {:,} ▸ {} ▸ {}\n'.format(
int(plays[i]['score']), max_combo_txt,
self._get_score_breakdown(plays[i], gamemode))
# whatever this mess is. deals with displaying time
play_time = datetime.datetime.strptime(plays[i]["date"], '%Y-%m-%d %H:%M:%S')
try:
timeago = utils.time_ago(datetime.datetime.utcnow(), play_time, shift=0)
except:
try:
timeago = utils.time_ago(datetime.datetime.utcnow(), play_time, shift=2)
except:
timeago = utils.time_ago(datetime.datetime.utcnow(), play_time, shift=8)
info += '▸ Score Set {}Ago\n'.format(timeago)
desc += info
em = discord.Embed(description=desc, colour=server_user.colour)
# online_status = await self._get_online_icon(user, api)
title = header_txt
page_txt = ''
if page_info:
page_txt = ' | Page {} of {}'.format(page_info[0], page_info[1])
profile_link_url = self.owoAPI.get_user_url(user['user_id'], api=api)
server_name = self.owoAPI.get_server_name(api)
em.set_author(name = title, url=profile_link_url, icon_url=flag_url)
em.set_footer(text = "On osu! {} Server{}".format(server_name, page_txt),
icon_url=self.owoAPI.get_server_avatar(api))
em.set_thumbnail(url=avatar_url)
return (msg, em)
# Gives a user profile image with some information
async def create_no_choke_list_embed(self, ctx,
user, plays, beatmaps, gamemode, api='bancho', header_txt=None, page_info=None):
server_user = ctx.message.author
server = ctx.message.guild
avatar_url = await self.owoAPI.get_user_avatar(user['user_id'], api)
flag_url = self.owoAPI.get_country_flag_url(user['country'])
gamemode_text = utils.get_gamemode_text(gamemode)
msg = ''
desc = ''
# takes in the processed userbest
for i in range(len(plays)):
# handle mods
mod_num = int(plays[i]['enabled_mods'])
mods_list = utils.num_to_mod(mod_num)
if not mods_list:
mods_list.append('No Mod')
# print('mod list', mods_list)
beatmap_url = self.owoAPI.get_beatmap_url(beatmaps[i])
extra_info = {}
extra_info['play_info'] = plays[i]
# print('test 3') # **
try: # unfortunately, have to calculate this again
# print(beatmaps[i])
full_beatmap_info, bmap_raw, bmap_file_path = await self.owoAPI.get_full_beatmap_info(
beatmaps[i], extra_info=extra_info, mods=mod_num, force_osu_cache=True)
except: # if the beatmap no longer exists
# print(sys.exc_info())
continue
# print(full_beatmap_info)
info = ''
# generate the title portion
if 'Number' not in header_txt or len(plays) != 1:
number_txt = '{} `[{}]`. '.format(
str(plays[i]['play_idx']), str(plays[i]['original_idx']))
else:
number_txt = ''
if gamemode == 0:
if 'stars_mod' in full_beatmap_info.keys():
star_str = '{:.2f}'.format(float(full_beatmap_info['stars_mod']))
else:
star_str = '{:.2f}'.format(float(full_beatmap_info['difficulty_rating']))
else:
star_str = self.adjust_val_str_mod(
full_beatmap_info, "difficulty_rating",
int(plays[i]['enabled_mods']), gamemode)
title_txt = '**{}[{} [{}]{}]({}) +{}** [{}★]\n'.format(
number_txt , beatmaps[i]['title'].replace('*','\*'),
beatmaps[i]['version'].replace('*','\*'),
self._get_keys(beatmaps[i], gamemode, beatmaps[i]['version']),
beatmap_url, utils.fix_mods(''.join(mods_list)), star_str)
info += title_txt
if plays[i]['original'] is not None:
# max combo text
max_combo_txt = 'x{:,} ➔ **x{:,}**/{:,}'.format(
int(plays[i]['original']['max_combo']),
int(plays[i]['max_combo']),
int(plays[i]['max_combo']))
pp_txt = '{:.2f} ➔ **{:.2f}PP**'.format(
float(plays[i]['original']['pp']),
float(plays[i]['pp']))
acc_txt = '{:.2f}% ➔ **{:.2f}%**'.format(
round(plays[i]['original']['accuracy'], 2), round(plays[i]['accuracy'], 2))
info += '▸ {} ➔ **{}** ▸ {} ▸ {}\n'.format(
self.RANK_EMOTES[plays[i]['original']['rank']],
self.RANK_EMOTES[plays[i]['rank']], pp_txt, acc_txt)
info += '▸ {} ▸ {} ➔ **{}**\n'.format(
max_combo_txt,
self._get_score_breakdown(plays[i]['original'], gamemode),
self._get_score_breakdown(plays[i], gamemode))
else:
# max combo text
max_combo_txt = 'x{:,}/{:,}'.format(
int(plays[i]['max_combo']),
int(bmap_raw.max_combo()))
pp_txt = '{:.2f}PP'.format(
float(plays[i]['pp']))
acc_txt = '{:.2f}%'.format(
round(plays[i]['accuracy'], 2))
info += '▸ {} ▸ {} ▸ {}\n'.format(
self.RANK_EMOTES[plays[i]['rank']], pp_txt, acc_txt)
info += '▸ {} ▸ {}\n'.format(
max_combo_txt,
self._get_score_breakdown(plays[i], gamemode))
desc += info
em = discord.Embed(description=desc, colour=server_user.colour)
# online_status = await self._get_online_icon(user, api)
title = header_txt
page_txt = ''
if page_info:
page_txt = ' | Page {} of {}'.format(page_info[0], page_info[1])
profile_link_url = self.owoAPI.get_user_url(user['user_id'], api=api)
server_name = self.owoAPI.get_server_name(api)
em.set_author(name = title, url=profile_link_url, icon_url=flag_url)
em.set_footer(text = "On osu! {} Server{}".format(server_name, page_txt),
icon_url=self.owoAPI.get_server_avatar(api))
em.set_thumbnail(url=avatar_url)
return (msg, em)
def _get_score_breakdown(self, play, gamemode, spaced=False):
gamemode = int(gamemode)
if spaced:
if gamemode == 0 or gamemode == 2:
ret = "[ {} / {} / {} / {} ]".format(play['count_300'], play['count_100'], play['count_50'], play['count_miss'])
elif gamemode == 1:
ret = "[ {} / {} / {} ]".format(play['count_300'], play['count_100'], play['count_miss'])
elif gamemode == 3:
ret = "[ {} / {} / {} / {} / {} / {} ]".format(play['count_geki'], play['count_300'],
play['count_katu'], play['count_100'], play['count_50'], play['count_miss'])
else:
if gamemode == 0 or gamemode == 2:
ret = "[{}/{}/{}/{}]".format(play['count_300'], play['count_100'], play['count_50'], play['count_miss'])
elif gamemode == 1:
ret = "[{}/{}/{}]".format(play['count_300'], play['count_100'], play['count_miss'])
elif gamemode == 3:
ret = "[{}/{}/{}/{}/{}/{}]".format(play['count_geki'], play['count_300'],
play['count_katu'], play['count_100'], play['count_50'], play['count_miss'])
return ret
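# Illustrative example: for a standard-mode (gamemode 0) play with count_300=500,
# count_100=12, count_50=1 and count_miss=2 this returns "[500/12/1/2]",
# or "[ 500 / 12 / 1 / 2 ]" when spaced=True.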
def _get_keys(self, beatmap, gamemode, version):
# cs is the key number
ret = ""
if gamemode == 3:
if "{}k".format(beatmap["cs"]) not in version.lower():
| |
#!/usr/bin/python
# ========================================================
# Python script for PiBot-A: maze solver
# Version 1.0 - by <NAME> - www.retas.de
# ========================================================
# ----------------- TERMINOLOGY OF NODES -----------------
#
# --|-- crossing, +
#
# --| T-junction, T
#
# --. turn
# |
#
# -- dead end
#
# --O disk (target)
#
# ----------------- TERMINOLOGY OF MOVES -----------------
#
# L = left (at + or T or turn)
# R = right (at + or T or turn)
# S = straight (at + or T)
# T = turn (at dead end)
#
# --------------------------------------------------------
from __future__ import print_function
from pololu_drv8835_rpi import motors, MAX_SPEED
from time import sleep
from sys import exit, argv
import RPi.GPIO as GPIO
# Signal handler for SIGTERM
import signal
def sigterm_handler(signal, frame):
motors.setSpeeds(0, 0)
exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
# --------------------------------------------------------
# Some constants
# --------------------------------------------------------
# GPIO pins of sensors
GPIO.setmode(GPIO.BCM)
GPIO_right = 21
GPIO_middle = 20
GPIO_left = 19
GPIO.setup(GPIO_right, GPIO.IN)
GPIO.setup(GPIO_middle, GPIO.IN)
GPIO.setup(GPIO_left, GPIO.IN)
# Three speed constants for different purposes
v3 = MAX_SPEED # = 480
v2 = 380
v1 = 150
# Loop period
delay = 0.001
# ========================================================
# Functions
# ========================================================
# If an argument is given, print some diagnostic values
# and pause until a key is hit.
def pause(*args):
if pausing:
motors.setSpeeds(0, 0)
print(way, *args)
raw_input(read_sensors())
# --------------------------------------------------------
# Read sensor input
def read_sensors(*sensor):
L = GPIO.input(GPIO_left)
M = GPIO.input(GPIO_middle)
R = GPIO.input(GPIO_right)
if len(sensor) == 0: return (L, M, R)
elif sensor[0] == "left": return L
elif sensor[0] == "middle": return M
elif sensor[0] == "right": return R
# --------------------------------------------------------
# Drive some distance, time to sleep is calculated from
# given value (val) and result of calibration (cal).
def drive(val):
sec = val * cal/500
sleep (sec)
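# Illustrative example: if calibrate() measures cal = 500 loops, drive(0.1)
# sleeps 0.1 s; on a slower robot with cal = 600 the same call sleeps 0.12 s,
# so the distance covered stays roughly the same.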
# --------------------------------------------------------
# Calibrate: Drive two 180 degree turns in both directions
# and measure time needed.
def calibrate():
tl1 = turn("left")
tl2 = turn("left")
tr1 = turn("right")
tr2 = turn("right")
cal = (tl1 + tl2 + tr1 + tr2) / 4
print ("CAL:", tl1, tl2, tr1, tr2, "=>", cal)
return cal
# --------------------------------------------------------
# Turn left or right: at first leave the black line just
# under the sensors (if there is a line), then continue
# turning until the black line is reached again.
def turn(dir):
if dir == "left":
motors.setSpeeds(-v3, v3)
else:
motors.setSpeeds(v3, -v3)
# Start with a short turn to ensure that we will
# leave the line under (or next to) the sensors.
sleep (100 * delay)
# Count loops while turning (for calibration)
turn = 100
# Turn until line is lost
while read_sensors("middle") == 1:
turn += 1
sleep (delay)
# Turn until line is reached again
while read_sensors("middle") == 0:
turn += 1
sleep (delay)
return turn
# --------------------------------------------------------
# Drive some distance until the node is under the axle.
def axle_to_node():
# Number of loops depend on calibration
cnt = (cal - 100) / 2
# Correct drifts to the left or right while driving
while cnt:
(L, M, R) = read_sensors()
if L == 1 and R == 0:
motors.setSpeeds(v1, v2)
elif R == 1 and L == 0:
motors.setSpeeds(v2, v1)
else:
motors.setSpeeds(v2, v2)
sleep (delay)
cnt -= 1
motors.setSpeeds(v2, v2)
# --------------------------------------------------------
# Show lists of nodes and moves.
def show_steps(nodes, moves):
print ("------------------")
for i in range (0, len(nodes)):
print ("%2d %-12s %s" % (i, nodes[i], moves[i]))
print ("------------------")
# --------------------------------------------------------
# Finish: stop moving.
def finish(*result):
# Stand still for a moment
print (*result)
motors.setSpeeds(0, 0)
sleep (0.5)
# Depending on success ("HOORAY!") or failure ...
f = 1 if result[0] == "HOORAY!" else -1
# ... nod or shake your head 4 times.
for x in range (0, 4):
motors.setSpeeds(f * -v1, -v1)
drive (0.1)
motors.setSpeeds(f * v1, v1)
drive (0.1)
motors.setSpeeds(0, 0)
# Loop forever
while True: sleep (1)
# --------------------------------------------------------
# Determine type of node by comparing sensors at the node
# with sensors behind the node. Example: (0, 1, 1) at node
# and (0, 1, 0) behind node results in: T-junction right.
def type_of_node(sensors):
# Status at node
if sensors == (0, 1, 1): at_node = "line_right"
elif sensors == (1, 1, 0): at_node = "line_left"
elif sensors == (1, 1, 1): at_node = "line_xing"
else:
finish("UNEXPECTED NODE:", sensors)
pause("AT_NODE:", at_node)
# Drive until node is under the axle
axle_to_node()
# Read sensors behind the node
sensors = read_sensors()
if sensors == (0, 0, 0): behind_node = "blank"
elif sensors == (1, 1, 1): behind_node = "black"
else: behind_node = "line"
# Determine type of node
n = (at_node, behind_node)
if behind_node == "black": node = "disk"
elif n == ("line_right", "blank"): node = "turn_right"
elif n == ("line_left", "blank"): node = "turn_left"
elif n == ("line_xing", "line"): node = "crossing"
elif n == ("line_xing", "blank"): node = "T_straight"
elif n == ("line_right", "line"): node = "T_right"
elif n == ("line_left", "line"): node = "T_left"
return (node)
# --------------------------------------------------------
# Calculate shortest path: make a copy of lists "nodes"
# and "moves" with values of the shortest path ("nodes_sp"
# and "moves_sp").
def calculate_path():
global remaining_turns
# Remove turns, but remember the number of turns
# before first node is reached (remaining_turns),
# needed for the second last section of the way back.
behind_first_node = False
for i in range (0, len(nodes)):
if nodes[i] == "turn_left" \
or nodes[i] == "turn_right":
if not behind_first_node:
remaining_turns += 1
else:
behind_first_node = True
moves_sp.append(moves[i])
nodes_sp.append(nodes[i])
print ("AFTER REMOVING TURNS:")
show_steps (nodes_sp, moves_sp)
# Remove dead ends by substituting each sequence
# of moves that contains a turn at a dead end (T)
# by an adequate sequence that does not.
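# Illustrative example (values assumed): the recorded moves ['L', 'S', 'T', 'L', 'R']
# contain a dead-end turn at index 2; the surrounding sequence "STL" is replaced
# by "R", leaving the shorter, equivalent path ['L', 'R', 'R'].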
while "T" in moves_sp:
i = moves_sp.index("T")
seq = "".join(moves_sp[i-1:i+2])
if seq == "STL": subst = "R"
elif seq == "STR": subst = "L"
elif seq == "LTL": subst = "S"
elif seq == "RTR": subst = "S"
elif seq == "LTS": subst = "R"
elif seq == "RTL": subst = "T"
elif seq == "LTR": subst = "T"
elif seq == "STS": subst = "T"
else:
finish("UNEXPECTED SEQ:", seq)
print ("SUBST:", i, seq, "=>", subst)
moves_sp[i-1] = subst
del moves_sp[i:i+2]
del nodes_sp[i:i+2]
print ("SHORTEST PATH:")
show_steps (nodes_sp, moves_sp)
print ("REMAINING TURNS:", remaining_turns)
print ("REMAINING LOOPS:", remaining_loops)
# ========================================================
# MAIN
# ========================================================
# If an argument is given, pause at pause()
pausing = True if len(argv) == 2 else False
nodes = [] # list of nodes on way to disk
moves = [] # list of moves on way to disk
nodes_sp = [] # list of nodes in shortest path
moves_sp = [] # list of moves in shortest path
remaining_turns = 0 # turns after last + or T node
remaining_loops = 0 # loops (main loop) after last turn
way = "to_first_node" # current way
moving = "straight" # current moving
try:
# After calibration start driving straight ahead
cal = calibrate()
motors.setSpeeds(v3, v3)
while True: # Main loop
# Repeat this loop every delay seconds
sleep (delay)
# Read sensors at current position
(L, M, R) = read_sensors()
# ------------------------------------------------
# Simply drive
# ------------------------------------------------
# Count loops until the first node is reached
# (needed for the very last section of the way
# back).
if way == "to_first_node":
remaining_loops += 1
# Decrement remaining loops (see above) on the
# last section of way back (behind the last node).
elif way == "remaining_loops":
remaining_loops -= 1
if remaining_loops == 0:
drive(0.15) # length of robot
finish("HOORAY!")
# Drive and correct drifts to the left or right
if (L, M, R) == (0, 1, 0):
# We are on the line: go straight ahead
motors.setSpeeds(v3, v3)
moving = "straight"
continue
elif L == 1 and R == 0:
# Deviation to the right: correct to the left
motors.setSpeeds(v1, v3)
moving = "left"
continue
elif R == 1 and L == 0:
# Deviation to the left: correct to the right
motors.setSpeeds(v3, v1)
moving = "right"
continue
# ------------------------------------------------
# At some node
# ------------------------------------------------
# If we are in the very last section of way back:
# ignore the node and just pass or cross it
# (should be an exceptional case).
if way == "remaining_loops":
continue
# If we were on the way to the first node: we are
# now on the way to the disk.
elif way == "to_first_node":
way = "to_disk"
# ------------------------------------------------
# Dead end: drive axle over the node and turn
# ------------------------------------------------
if (L, M, R) == (0, 0, 0):
axle_to_node()
turn("left")
nodes.append("dead_end")
moves.append("T")
motors.setSpeeds(v3, v3)
moving = "straight"
continue
# ------------------------------------------------
# Other type of node
# ------------------------------------------------
# All sensors are black. This can be a crossing
# line (+ or T), or it is because lastly we made
# a correction after a deviation to left or right.
# Then we have to sway to the opposite side and
# read the outer sensor there. After that we have
# to compensate these movements to adjust our
# orientation to straight ahead again.
(L1, M1, R1) = (L, M, R)
if moving == "left":
drive(0.04)
motors.setSpeeds(v2, -v2)
drive(0.1)
R1 = read_sensors("right")
motors.setSpeeds(-v2, v2)
drive(0.07)
elif moving == "right":
drive(0.04)
motors.setSpeeds(-v2, v2)
drive(0.1)
L1 = read_sensors("left")
motors.setSpeeds(v2, -v2)
drive(0.07)
sensors = (L1, M1, R1)
# Determine type of node
node = type_of_node(sensors)
# ------------------------------------------------
# At disk: turn and calculate shortest path
# ------------------------------------------------
if node == "disk":
drive(0.15)
motors.setSpeeds(0, 0)
sleep (1.0)
turn("left")
way = "back"
print ("WAY TO DISK:")
show_steps (nodes, moves)
calculate_path()
continue
# ------------------------------------------------
# Other node: decide on movement depending on way
# ------------------------------------------------
motors.setSpeeds(v3, v3)
# ------------------------------------------------
# On the way to the disk: search for the disk
# following the left-hand rule and fill lists
# "nodes" and "moves".
if way == "to_disk":
nodes.append(node)
pause ("NODE:", node)
if node == "turn_right":
turn("right")
moves.append("R")
elif node != | |
import os
import shutil
import random
import math
import time
import argparse
import yaml
from collections import OrderedDict
import sys
# for import parent utils
sys.path.append('../')
import utils
import modules
import datasets
import models
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.utils.checkpoint as torch_cp
import torch_optimizer as toptim
import transformers
from transformers import Pipeline, BertTokenizer, AlbertModel, \
AutoTokenizer, AutoModel, AlbertConfig, AutoConfig
class Args:
pass
# XXX: could change all class init params to a single context object,
# but then the code would be harder to reuse and less transparent
class Context:
def __init__(self):
self.config = Args()
self.vocab = None
self.persona_vocab = None
class Trainer:
def __init__(self):
self.context = Context()
args = self.parse_args()
self.args = args
self.best_valid_loss = float('inf')
self.device = utils.get_device(args.device)
utils.set_random_seed(self.args.seed, self.device)
self.logger = utils.create_logger(self.args.log_path, 'trainer')
self.ensure_deps()
# self.grad_util = utils.Grads()
self.logger.info('Build vocab and embeddings...')
self.pretrain_feature_model = None
self.tokenizer = None
self.persona_vocab = None
if self.args.use_mem_n2n:
self.build_persona_vocab()
if self.args.pretrain_feature:
self.build_pretrain_feature_model()
else:
self.build_vocab_and_embeddings()
self.logger.info('Build dataloaders...')
self.build_dataloaders()
self.logger.info('Build model...')
self.build_model()
self.logger.info('Build loss fns...')
self.build_loss_fns()
def parse_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', default='configs/large.yaml', type=str, required=False,
help='Provide config in config_file or as other commandline args')
parser.add_argument('--experiment_name', default='', type=str, required=False, help='')
parser.add_argument('--device', default='cuda', type=str, required=False, help='use cpu for easy debug')
parser.add_argument('--seed', default=42, type=int, required=False, help='')
parser.add_argument('--n_epochs', default=10, type=int, required=False, help='')
parser.add_argument('--n_epochs_early_stage', default=0, type=int, required=False, help='')
parser.add_argument('--batch_size', default=128, type=int, required=False, help='')
parser.add_argument('--limit_example_length', default=256, type=int, required=False, help='')
parser.add_argument('--max_seq_length', default=300, type=int, required=False, help='')
parser.add_argument('--max_context_size', default=10, type=int, required=False, help='')
parser.add_argument('--shuffle_data', action='store_true', required=False, help='')
parser.add_argument('--max_vocab_size', default=40000, type=int, required=False, help='')
parser.add_argument('--pretrain_emb', action='store_true', required=False, help='')
parser.add_argument('--share_encoder_decoder', action='store_true', required=False, help='')
parser.add_argument('--pretrain_feature', action='store_true', required=False, help='')
parser.add_argument('--pretrain_feature_model_name', default='', type=str, required=False, help='')
parser.add_argument('--pretrain_feature_type', default='emb', type=str, required=False, help='')
parser.add_argument('--emb_freeze', action='store_true', required=False, help='')
parser.add_argument('--emb_dim', default=200, type=int, required=False, help='')
parser.add_argument('--persona_vocab_size', type=int, required=False, help='Will auto fill')
parser.add_argument('--dropout', default=0.1, type=float, required=False, help='')
parser.add_argument('--num_layers', default=6, type=int, required=False, help='')
parser.add_argument('--num_groups', default=1, type=int, required=False, help='')
parser.add_argument('--n_head', default=8, type=int, required=False, help='')
parser.add_argument('--d_model', default=512, type=int, required=False, help='')
parser.add_argument('--d_ff', default=2048, type=int, required=False, help='')
parser.add_argument('--attn_alpha', default=1, type=int, required=False, help='')
parser.add_argument('--adapter_d_ff', default=2048, type=int, required=False, help='')
parser.add_argument('--factor_ff', action='store_true', required=False, help='')
parser.add_argument('--use_rezero', action='store_true', required=False, help='')
parser.add_argument('--use_mem_n2n', action='store_true', required=False, help='')
parser.add_argument('--mem_n2n_hops', default=3, type=int, required=False, help='')
parser.add_argument('--mem_n2n_layer_share', default='adjacent', type=str, required=False, help='')
parser.add_argument('--lr', default=0.5, type=float, required=False, help='')
parser.add_argument('--weight_decay', default=0.99, type=float, required=False, help='')
parser.add_argument('--clip_grad', default=1, type=int, required=False, help='')
parser.add_argument('--use_scheduler', default=False, type=bool, required=False, help='')
parser.add_argument('--warmup_steps', default=2000, type=int, required=False, help='')
parser.add_argument('--gradient_accumulation', default=1, type=int, required=False, help='')
parser.add_argument('--adapter_finetune', default=False, type=bool, required=False, help='')
parser.add_argument('--auxiliary_task', default='MLM', type=str, required=False, help='')
parser.add_argument('--alpha', default=0.5, type=float, required=False, help='LM loss weight')
parser.add_argument('--model_path', default='models/', type=str, required=False, help='')
parser.add_argument('--pretrained_fname', type=str, required=False, help='')
parser.add_argument('--data_path', default='datas/', type=str, required=False, help='')
parser.add_argument('--cache_path', default='caches/', type=str, required=False, help='')
parser.add_argument('--log_path', default='logs/', type=str, required=False, help='')
parser.add_argument('--corpus_fname', default='datas/corpus.txt', type=str, required=False, help='')
parser.add_argument('--vec_fname', default='models/vec.txt', type=str, required=False, help='')
parser.add_argument('--vocab_fname', default='models/vocab.txt', type=str, required=False, help='')
parser.add_argument('--persona_vocab_fname', default='', type=str, required=False, help='')
parser.add_argument('--lr_finder', action='store_true', required=False, help='')
# TODO: let commandline temp args override args in config_file
args = parser.parse_args()
if args.config_file != '':
parser.set_defaults(**yaml.safe_load(open(args.config_file)))
args = parser.parse_args()
return args
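# An illustrative config file (the real configs/large.yaml is not shown here);
# keys must match the argparse option names above because the file is expanded
# into parser.set_defaults(**config):
#
#   n_epochs: 20
#   batch_size: 64
#   d_model: 512
#   pretrain_feature: true
#   data_path: datas/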
def ensure_deps(self):
if self.args.pretrain_emb:
try:
v = '3.8.3'
import gensim
assert gensim.__version__ >= v
except:
raise Exception('If pretrain_emb enabled, please install gensim>=%s' % v)
def build_persona_vocab(self):
self.persona_vocab = datasets.PersonaVocab(self.args.persona_vocab_fname)
self.args.persona_vocab_size = len(self.persona_vocab)
def build_vocab_and_embeddings(self):
args = self.args
if args.pretrain_emb and (
not os.path.exists(args.vec_fname)
or not os.path.exists(args.vocab_fname)
):
self.logger.info('Pretraining word2vec...')
utils.build_word2vec(args.corpus_fname, args.vec_fname, args.vocab_fname,
max_vocab_size=args.max_vocab_size, emb_dim=args.d_model)
embeddings, gensim_vocab = None, None
if args.pretrain_emb:
self.logger.info('Loading word2vec...')
embeddings, gensim_vocab = utils.load_embeddings_and_vocab(args.vec_fname, args.vocab_fname)
embeddings = embeddings.to(self.device)
self.vocab = utils.Vocab(gensim_vocab, args.data_path)
self.input_dim = len(self.vocab)
if args.pretrain_emb:
elen = embeddings.shape[0]
if self.input_dim > elen:
args.emb_freeze = False
append = torch.nn.init.kaiming_uniform_(
torch.zeros(self.input_dim - elen, embeddings.shape[1])).to(self.device)
embeddings = torch.cat([embeddings, append], dim=0)
self.pad_idx = self.vocab.stoi(utils.PAD)
self.embeddings = embeddings
def build_pretrain_feature_model(self):
mn = self.args.pretrain_feature_model_name
if 'albert' in mn:
pretrain_feature_tokenizer = BertTokenizer.from_pretrained(mn)
config = AlbertConfig.from_pretrained(mn)
config.output_hidden_states = True
self.pretrain_feature_model = AlbertModel.from_pretrained(mn,
config=config).to(self.device)
else:
pretrain_feature_tokenizer = AutoTokenizer.from_pretrained(mn)
config = AutoConfig.from_pretrained(mn)
config.output_hidden_states = True
self.pretrain_feature_model = AutoModel.from_pretrained(mn,
config=config).to(self.device)
self.pretrain_feature_model.requires_grad_(False)
# self.pretrain_feature_model.requires_grad_(True)
# pipeline input is raw data; we already have ids, so use the model directly
# self.pretrain_feature_pipeline = Pipeline('feature-extraction',
# model=self.pretrain_feature_model, tokenizer=pretrain_feature_tokenizer)
# TODO: pre-compute the features and save them to a file; that uses less memory during training and is faster
# XXX: only this tokenizer's vocab is used, not its byte-pair splitting; tokens are currently split by space
utils.add_special_tokens_(self.pretrain_feature_model, pretrain_feature_tokenizer)
# FIXME: these changed args should be saved to the checkpoint file
if self.args.pretrain_feature_type == 'mem_n2n':
self.args.emb_dim = self.pretrain_feature_model.config.hidden_size
self.args.d_model = self.pretrain_feature_model.config.hidden_size
elif self.args.pretrain_feature_type == 'feature':
self.args.emb_dim = self.pretrain_feature_model.config.hidden_size
else:
if self.pretrain_feature_model.base_model_prefix != 'bert':
self.args.emb_dim = self.pretrain_feature_model.config.embedding_size
else:
self.args.emb_dim = self.pretrain_feature_model.config.hidden_size
# XXX: for 'xlnet'
# self.args.d_model = self.pretrain_feature_model.config.hidden_size
if 'weight' in self.args.pretrain_feature_type:
# few effects
self.args.d_model = self.pretrain_feature_model.config.hidden_size
self.args.n_head = self.pretrain_feature_model.config.num_attention_heads
self.args.d_ff = self.pretrain_feature_model.config.intermediate_size
self.args.factor_ff = False
self.vocab = datasets.ChatVocab(pretrain_feature_tokenizer)
self.input_dim = len(self.vocab)
self.pad_idx = self.vocab.stoi(utils.PAD)
self.embeddings = None
# too slow
# self.tokenizer = pretrain_feature_tokenizer.tokenize
self.tokenizer = None
def build_dataloaders(self):
args = self.args
is_mlm = self.args.auxiliary_task == 'MLM'
gb = lambda batch: datasets.generate_batch(batch,
self.vocab, self.persona_vocab, is_mlm)
gb_lm = lambda batch: datasets.generate_lm_batch(batch, self.vocab, is_mlm)
if args.n_epochs_early_stage > 0:
dp = datasets.LMDataProcesser(limit_length=args.limit_example_length,
max_seq_length=args.max_seq_length,
tokenizer=self.tokenizer)
ds = utils.PersonaDataset(
self.vocab, args.max_seq_length, args.limit_example_length,
data_path=args.data_path, cache_path=args.cache_path,
data_processer=dp, mode='train_lm')
self.logger.info('---------------------------------')
self.logger.info('datasets len: %s' % len(ds))
self.train_iter = DataLoader(ds, batch_size=args.batch_size,
collate_fn=gb_lm, shuffle=True)
else:
dp = datasets.ChatDataProcesser(limit_length=args.limit_example_length,
max_seq_length=args.max_seq_length, max_context_size=args.max_context_size,
vocab=self.vocab, persona_vocab=self.persona_vocab,
tokenizer=self.tokenizer)
ds = utils.PersonaDataset(
self.vocab, args.max_seq_length, args.limit_example_length,
data_path=args.data_path, cache_path=args.cache_path,
data_processer=dp, mode='train_char')
self.logger.info('---------------------------------')
self.logger.info('datasets len: %s' % len(ds))
# when Dataset is stream, try utils.DataLoaderX (prefetch_generator), https://github.com/IgorSusmelj/pytorch-styleguide/issues/5
self.train_iter = DataLoader(ds, batch_size=args.batch_size,
collate_fn=gb, shuffle=args.shuffle_data)
dp = datasets.ChatDataProcesser(limit_length=args.limit_example_length,
max_seq_length=args.max_seq_length, max_context_size=args.max_context_size,
vocab=self.vocab, persona_vocab=self.persona_vocab,
tokenizer=self.tokenizer)
ds = utils.PersonaDataset(
self.vocab, args.max_seq_length, args.limit_example_length,
data_path=args.data_path, cache_path=args.cache_path,
data_processer=dp, mode='valid_char')
self.valid_iter = DataLoader(ds, batch_size=args.batch_size,
collate_fn=gb, shuffle=False)
dp = datasets.ChatDataProcesser(limit_length=args.limit_example_length,
max_seq_length=args.max_seq_length, max_context_size=args.max_context_size,
vocab=self.vocab, persona_vocab=self.persona_vocab,
tokenizer=self.tokenizer)
ds = utils.PersonaDataset(
self.vocab, args.max_seq_length, args.limit_example_length,
data_path=args.data_path, cache_path=args.cache_path,
data_processer=dp, mode='test_char')
self.test_iter = DataLoader(ds, batch_size=args.batch_size,
collate_fn=gb, shuffle=False)
def build_model(self):
args = self.args
output_dim = self.input_dim
input_dim = self.input_dim
self.best_model = None
# TODO: change all modules param to single config,
# change input_dim and output_dim to args.vocab_size
self.model = models.AR.build(args, input_dim,
output_dim, self.vocab, self.embeddings,
self.pretrain_feature_model).to(self.device)
self.optimizer = transformers.AdamW(self.model.parameters(), lr=args.lr, correct_bias=True,
#self.optimizer = optim.AdamW(self.model.parameters(), lr=args.lr,
#self.optimizer = toptim.Lamb(self.model.parameters(), lr=args.lr,
weight_decay=args.weight_decay)
self.logger.info(self.model)
self.logger.info(f'The model has {utils.count_parameters(self.model):,} trainable parameters')
if args.use_scheduler:
#self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, 1.0, gamma=0.95)
#self.scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(self.optimizer, 2)
if args.warmup_steps == 0:
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,
mode='min', factor=0.5, min_lr=1.5e-4, patience=60, verbose=True)
else:
# XXX: scheduler will run once at start, even if has no scheduler.step()
total_steps = int(len(self.train_iter.dataset) * args.n_epochs
/ args.batch_size / args.gradient_accumulation)
self.scheduler = transformers.get_linear_schedule_with_warmup(self.optimizer,
num_warmup_steps=args.warmup_steps, num_training_steps=total_steps)
if args.pretrained_fname is None:
pass
# pytorch module will auto init_weights with uniform
# self.model.apply(models.init_weights)
else:
self.logger.info('')
self.logger.info(f'Load pretrained model {args.pretrained_fname}...')
self.load_model()
def build_loss_fns(self):
self.out_loss_fn = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
def run_early_stage(self):
for epoch in range(self.args.n_epochs_early_stage):
start_time = time.time()
train_loss = self.train_lm(epoch)
self.save_model(epoch, 'lm')
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
self.logger.info('-' * 89)
self.logger.info('Experiment %s: ' % self.args.experiment_name)
self.logger.info(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
self.logger.info(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
def run(self):
if self.args.n_epochs_early_stage > 0:
self.logger.info('Run early stage...')
self.run_early_stage()
# after fin, rerun with pretrained model
return
self.logger.info('Run main stage...')
best_val_loss = float("inf")
for epoch in range(self.args.n_epochs):
start_time = time.time()
train_loss = self.train(epoch)
valid_loss = self.eval(self.valid_iter)
if valid_loss < best_val_loss:
best_val_loss = valid_loss
self.best_model = self.model
self.save_model(epoch)
# scheduler.step()
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
self.logger.info('-' * 89)
self.logger.info('Experiment %s: ' % self.args.experiment_name)
self.logger.info(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
self.logger.info(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
self.logger.info(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
test_loss = self.eval(self.test_iter)
self.logger.info(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |')
# self.grad_util.plot()
def train_lm(self, epoch):
self.model.train()
epoch_loss = 0
for batch_idx, feature in enumerate(self.train_iter):
start_time = time.time()
self.optimizer.zero_grad()
utils.feature_to_device(feature, self.device)
out = self.model(feature)
loss = self.out_loss_fn(out.view(-1, out.shape[-1]),
feature.y.view(-1))
# utils.self.logger.info_backward_graph(loss)
loss.backward()
if self.args.clip_grad is not None:
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_grad)
self.optimizer.step()
iloss = loss.item()
epoch_loss += iloss
end_time = time.time()
secs = end_time - start_time
self.logger.info(f'Step {batch_idx+1}/{epoch+1:02} | Train Loss: {iloss:.3f} | Train PPL: {math.exp(iloss):7.3f} | Time: {secs:.3f}s\n')
return epoch_loss / len(self.train_iter)
def train(self, epoch, data_iter=None):
self.model.train()
if data_iter is None:
data_iter = self.train_iter
epoch_loss = 0
for batch_idx, feature in enumerate(data_iter):
regex = r'^[A-Za-z0-9 !$&\'*\-,.?^_`{}~#+%]*$'
return len(name) <= 64 and re.match(regex, name)
def get_uwhr_person(self, eid, source='uwhr'):
"""
Returns an irws.UWhrPerson object for the given eid.
If the person is not an employee, returns None.
If the netid isn't found, throws IRWSNotFound.
If there is an error contacting IRWS, throws DataFailureException.
"""
eid = self._clean(eid)
source = self._clean(source)
url = "/%s/v2/person/%s/%s" % (self._service_name, source, eid)
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status == 404:
return None
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
person_data = json.loads(response.data)['person'][0]
return UWhrPerson(**person_data)
def get_sdb_person(self, sid):
"""
Returns an irws.SdbPerson object for the given sid.
If the person is not a student, returns None.
If the netid isn't found, throws IRWSNotFound.
If there is an error contacting IRWS, throws DataFailureException.
"""
sid = self._clean(sid)
url = "/%s/v2/person/sdb/%s" % (self._service_name, sid)
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status == 404:
return None
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
person_data = json.loads(response.data)['person'][0]
return SdbPerson(**person_data)
def get_cascadia_person(self, id):
"""
Returns an irws.CascadiaPerson object for the given id.
If the netid isn't found, throws IRWSNotFound.
If there is an error contacting IRWS, throws DataFailureException.
"""
id = self._clean(id)
url = "/%s/v2/person/cascadia/%s" % (self._service_name, id)
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status == 404:
return None
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return self._cascadia_person_from_json(response.data)
def get_scca_person(self, id):
"""
Returns an irws.SccaPerson object for the given id.
If the netid isn't found, throws IRWSNotFound.
If there is an error contacting IRWS, throws DataFailureException.
"""
id = self._clean(id)
url = "/%s/v2/person/scca/%s" % (self._service_name, id)
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status == 404:
return None
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return self._scca_person_from_json(response.data)
def get_supplemental_person(self, id):
"""
Returns an irws.SupplementalPerson object for the given id.
If the netid isn't found, throws IRWSNotFound.
If there is an error contacting IRWS, throws DataFailureException.
"""
id = self._clean(id)
url = "/%s/v2/person/supplemental/%s" % (self._service_name, id)
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status == 404:
return None
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)['person'][0]
return SupplementalPerson(**data)
def get_generic_person(self, uri):
"""
Returns an irws.GenericPerson object for the given uri.
The uris come in from values in irws.Person.identifiers.
Raises DataFailureExeption on error.
"""
uri = quote(uri, '/')
url = '/%s/v2%s' % (self._service_name, uri)
response = self.dao.getURL(url, {'Accept': 'application/json'})
if response.status == 404:
return None
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return self._generic_person_from_json(response.data)
def get_subscription(self, netid, subscription):
"""
Returns an irws.Subscription object for the given netid. If the
netid isn't found, nothing will be returned. If there is an error
communicating with the IRWS, a DataFailureException will be thrown.
"""
netid = self._clean(netid)
url = "/%s/v2/subscription?uwnetid=%s&subscription=%d" % (self._service_name, netid.lower(), subscription)
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status == 404:
return None
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return self._subscription_from_json(response.data)
def get_pdsentry_by_netid(self, netid):
"""
Returns an irws.PdsEntry object for the given netid.
If the person doesn't have a pds entry, returns None.
If the netid isn't found, throws #TODO
If there is an error contacting IRWS, throws DataFailureException.
"""
netid = self._clean(netid)
url = "/%s/v2/pdsentry/validid=uwnetid=%s" % (self._service_name, netid)
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status == 404:
return None
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return self._pdsentry_from_json(response.data)
def put_pac(self, eid, source='uwhr'):
"""
Creates a PAC for the employee. Returns the Pac.
"""
eid = self._clean(eid)
source = self._clean(source)
url = "/%s/v2/person/%s/%s/pac?-force" % (self._service_name, source, eid)
response = self.dao.putURL(url, {"Accept": "application/json"}, '')
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return self._pac_from_json(response.data)
def verify_sdb_pac(self, sid, pac):
"""
Verifies a permanent student PAC. Returns 200 (ok), 400 (wrong PAC) or 404 (not found).
"""
sid = self._clean(sid)
pac = self._clean(pac)
url = "/%s/v2/person/sdb/%s?pac=%s" % (self._service_name, sid, pac)
response = self.dao.getURL(url, {"Accept": "application/json"})
if (response.status == 200 or response.status == 400 or response.status == 404):
return response.status
raise DataFailureException(url, response.status, response.data)
def verify_sc_pin(self, netid, pin):
"""
Verifies a service center one-time pin. Returns 200 (ok), 400 (bad pin),
or 404 (no pending pin subscription). OK clears the pin.
"""
netid = self._clean(netid)
pin = self._clean(pin)
# make sure there is a pin subscription
url = "/%s/v2/subscription/63/%s" % (self._service_name, netid)
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status == 200:
sub = json.loads(response.data)['subscription'][0]
# verify pending subscription and unexpired, unused pac
if sub['status_code'] != '23' or sub['pac'] != 'Y':
return 404
else:
return response.status
url = "/%s/v2/subscribe/63/%s?action=1&pac=%s" % (self._service_name, netid, pin)
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status == 200:
# delete the pac
url = "/%s/v2/subscribe/63/%s?action=2" % (self._service_name, netid)
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status != 200:
# the pin was good. we return OK, but note the error
logger.error('Delete SC pin failed: %d' % response.status)
return 200
if (response.status == 400 or response.status == 404):
return response.status
raise DataFailureException(url, response.status, response.data)
def get_qna(self, netid):
"""
Returns a list irws.QnA for the given netid.
"""
netid = self._clean(netid)
url = "/%s/v2/qna?uwnetid=%s" % (self._service_name, netid)
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status == 404:
return None
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return self._qna_from_json(response.data)
def get_verify_qna(self, netid, answers):
"""
Verifies that all answers are present and that all are correct.
answers: ordered list of answers
"""
questions = self.get_qna(netid)
if len(questions) != len(answers):
return False
for index, answer in enumerate(answers, start=1):
answer = re.sub(r'\W+', '', answer)
url = "/%s/v2/qna/%s/%s/check?ans=%s" % (self._service_name, index, quote(netid), quote(answer))
response = self.dao.getURL(url, {"Accept": "application/json"})
if response.status in (400, 404):
logger.debug('qna wrong answer #{}, status = {}'.format(index, response.status))
return False
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return True
def verify_person_attribute(self, netid, attribute, value):
"""
Verify that the given attribute (eg birthdate) matches the value for the netid.
Rather than chase all of the person identifier urls client-side, irws will return the
list of identifiers. For birthdate, IRWS has the added value of discarding silly
birthdates and matching on partial birthdates.
"""
netid = self._clean(netid)
attribute = self._clean(attribute)
value = self._clean(value)
url = "/%s/v2/person?uwnetid=%s&%s=%s" % (self._service_name, netid, attribute, value)
return self.dao.getURL(url, {'Accept': 'application/json'}).status == 200
def _cascadia_person_from_json(self, data):
"""
Internal method, for creating the CascadiaPerson object.
"""
person_data = json.loads(data)['person'][0]
person = CascadiaPerson()
person.validid = person_data['validid']
person.regid = person_data['regid']
person.lname = person_data['lname']
person.categories = person_data['categories']
if 'birthdate' in person_data:
person.birthdate = person_data['birthdate']
if 'department' in person_data:
person.department = person_data['department']
if 'in_feed' in person_data:
person.in_feed = person_data['in_feed']
person.status_code = person_data.get('status_code', person.__class__.status_code)
return person
def _scca_person_from_json(self, data):
"""
Internal method, for creating the SccaPerson object.
"""
person_data = json.loads(data)['person'][0]
person = SccaPerson()
person.validid = person_data['validid']
person.regid = person_data['regid']
person.lname = person_data['lname']
person.categories = person_data['categories']
if 'birthdate' in person_data:
person.birthdate = person_data['birthdate']
if 'scca_company' in person_data:
person.scca_company = person_data['scca_company']
if 'scca_cca_eppn' in person_data:
person.scca_cca_eppn = person_data['scca_cca_eppn']
if 'scca_fhc_eppn' in person_data:
person.scca_fhc_eppn = person_data['scca_fhc_eppn']
if 'in_feed' in person_data:
person.in_feed = person_data['in_feed']
person.status_code = person_data.get('status_code', person.__class__.status_code)
return person
def _person_from_json(self, data):
persj = json.loads(data)['person'][0]
idj = persj['identity']
person = Person()
person.regid = idj['regid']
if 'lname' in idj:
person.lname = idj['lname']
if 'fname' in idj:
person.fname = idj['fname']
if 'identifiers' in idj:
person.identifiers = copy.deepcopy(idj['identifiers'])
return person
def _regid_from_json(self, data):
rj = json.loads(data)['regid'][0]
regid = Regid()
regid.regid = rj['regid']
regid.entity_code = rj['entity_code']
regid.entity_name = rj['entity_name']
regid.status_code = rj['status_code']
regid.status_name = rj['status_name']
return regid
def _pw_recover_from_json(self, data):
info = json.loads(data)['profile'][0]
ret = Profile()
if 'validid' in info:
ret.validid = info['validid']
if 'recover_contacts' in info:
ret.recover_contacts = info['recover_contacts']
if 'recover_block_reasons' in info:
ret.recover_block_reasons = info['recover_block_reasons']
return ret
def _uwnetid_from_json_obj(self, id_data):
uwnetid = UWNetId()
uwnetid.uwnetid = id_data['uwnetid']
uwnetid.validid = id_data['validid']
uwnetid.uid = id_data['uid']
uwnetid.disenfran = id_data['disenfran']
return uwnetid
def _subscription_from_json(self, data):
sub_data = json.loads(data)['subscription'][0]
subscription = Subscription()
subscription.uwnetid = sub_data['uwnetid']
subscription.subscription_code = sub_data['subscription_code']
subscription.subscription_name = sub_data['subscription_name']
subscription.subscription_data = sub_data.get('subscription_data', None)
subscription.status_code = sub_data.get('status_code', None)
subscription.status_name = sub_data.get('status_name', None)
return subscription
def _pdsentry_from_json(self, data):
"""
Internal method, for creating the PDSEntry object.
"""
person_data = json.loads(data)['pdsentry'][0]['entry']
person = PDSEntry()
person.regid = person_data.get('uwRegID', '')
person.objectclass = person_data.get('objectClass', [])
person.test = person_data.get('uwTest', '')
person.eid =
than INFO
>>> with logInfo() :
>>> ...do something...
"""
return logLevel ( logging.INFO - 1 )
# =============================================================================
# Temporarily enable/disable all loggers with level less than WARNING
# @code
# with logWarning() :
# ...do something...
# @endcode
def logWarning () :
"""Temporarily disable all loggers with level less then WARNING
>>> with logWarning() :
>>> ...do something...
"""
return logLevel ( logging.WARNING - 1 )
# =============================================================================
# Temporarily enable/disable all loggers with level less than ERROR
# @code
# with logError() :
# ...do something...
# @endcode
def logError () :
"""Temporarily disable all loggers with level less then ERROR
>>> with logWarning() :
>>> ...do something...
"""
return logLevel ( logging.ERROR - 1 )
# =============================================================================
# Temporarily enable/disable all loggers with level less than FATAL
# @code
# with logFatal() :
# ...do something...
# @endcode
def logFatal () :
"""Temporarily disable all loggers with level less then ERROR
>>> with logWarning() :
>>> ...do something...
"""
return logLevel ( logging.FATAL - 1 )
# =============================================================================
## BASIC colorization
# =============================================================================
from ostap.logger.colorized import ( with_colors ,
colored_string ,
attention ,
allright ,
infostr ,
decolorize )
# =============================================================================
__colored_logger = []
# =============================================================================
## reset colorization of logging
def reset_colors () :
"""Reset colorization of logging
>>> reset_colors()
"""
for a in logging_levels :
logging.addLevelName ( a , logging_levels [ a ] )
#
while __colored_logger :
__colored_logger.pop()
from ostap.logger.colorized import set_with_colors
set_with_colors ( False )
return with_colors()
# =============================================================================
## make colors
def make_colors () :
"""Colorize logging
"""
if __colored_logger : return
from ostap.logger.colorized import set_with_colors
set_with_colors ( True )
if not with_colors () : return
def makeName ( level , fg = None , bg = None , blink = False , underline = False , bgb = False , fgb = False ) :
name = logging.getLevelName ( level )
bold = fg is None and bg is None and not underline
bold = True
return colored_string ( name , fg , bg , bold , blink , underline , fg_bright = fgb , bg_bright = bgb )
from ostap.logger.colorized import RED , BLUE , YELLOW , GREEN , WHITE
logging.addLevelName ( logging.CRITICAL , makeName ( logging.CRITICAL , fg = RED , bg = BLUE , blink = True ) )
logging.addLevelName ( logging.WARNING , makeName ( logging.WARNING , fg = RED , bg = YELLOW , underline = True , bgb = True ) )
logging.addLevelName ( logging.ERROR , makeName ( logging.ERROR , fg = YELLOW , bg = RED , blink = True , bgb = True , fgb = True ) )
logging.addLevelName ( logging.INFO , makeName ( logging.INFO , bg = BLUE , fg = WHITE ) )
logging.addLevelName ( logging.DEBUG , makeName ( logging.DEBUG , bg = GREEN , fg = WHITE ) )
logging.addLevelName ( logging.VERBOSE , makeName ( logging.VERBOSE , bg = YELLOW , fg = WHITE , bgb = True , fgb = True ) )
__colored_logger.append ( 1 )
return with_colors()
# =============================================================================
## @class ColorLogging
# Simple context manager to switch on coloring
# @code
# with ColorLogging():
# ... do something ...
# @endcode
class ColorLogging(object) :
"""Simple context manager to swith on coloring
>>> with ColorLogging() :
... do something ...
"""
def __init__ ( self , color = True ) :
self.color = color
def __enter__ ( self ) :
self.with_color = with_colors()
if self.color and not self.with_color : make_colors ()
elif self.with_color and not self.color : reset_colors ()
return self
def __exit__ ( self , *_ ) :
if self.color and not self.with_color : reset_colors ()
elif self.with_color and not self.color : make_colors ()
# =============================================================================
## simple context manager to switch on color logging
# @code
# with logColor() :
# ... do something ...
# @endcode
def logColor ( color = True ) :
"""Simple context manager to switch on coloring
>>> with logColor () :
... do something ...
"""
return ColorLogging ( color )
# =============================================================================
## simple context manager to switch off color logging
# @code
# with logNoColor() :
# ... do something ...
# @endcode
def logNoColor () :
"""Simple context manager to switch on coloring
>>> with logNoColor () :
... do something ...
"""
return ColorLogging ( False )
# =============================================================================
## simple context manager to switch off color logging
# @code
# with noColor() :
# ... do something ...
# @endcode
def noColor () :
"""Simple context manager to switch on coloring
>>> with noColor () :
... do something ...
"""
return ColorLogging ( False )
# =============================================================================
## @class KeepColorLogging
# Simple context manager to preserve coloring
# @code
# with KeepColorLogging():
# ... do something ...
# @endcode
class KeepColorLogging(object) :
"""Simple context manager to preserve coloring
>>> with KeepColorLogging() :
... do something ...
"""
def __enter__ ( self ) :
self.with_color = with_colors()
return self
def __exit__ ( self , *_ ) :
if self.with_color and not with_colors() : make_colors ()
elif with_colors() and not self.with_color : reset_colors ()
# =============================================================================
## simple context manager to preserve color logging
# @code
# with keepColor() :
# ... do something ...
# @endcode
def keepColor () :
"""Simple context manager to preserve color logging
>>> with keepColor () :
... do something ...
"""
return KeepColorLogging ()
# =============================================================================
# Actions!
# =============================================================================
## reset colors
if isatty () : make_colors()
## define the default logging thresholds as 'INFO'
setLogging ( 3 )
# =============================================================================
# Log file?
# =============================================================================
log_file = os.getenv ( 'OSTAP_LOGFILE' , '' )
if log_file :
## set buffering to be 1-line and decolorize the output
class LogHandler(logging.FileHandler) :
def __init__(self, filename, mode='w', encoding=None, delay=0):
logging.FileHandler.__init__ ( self , filename , mode , encoding, delay )
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
stream = open(self.baseFilename, self.mode, buffering = 1 )
return stream
def emit(self, record):
"""Emit an ddecolorize the record
"""
lname = logging_levels.get ( record.levelno , '' )
if not lname : lname = '%s' % record.levelno
record.levelname = lname
if with_colors () : record.msg = decolorize ( record.msg )
return logging.FileHandler.emit ( self , record )
loglev = os.getenv ( 'OSTAP_LOGLEVEL' , '%s' % logging.INFO )
try :
loglev = int ( loglev )
if not loglev in logging_levels : loglev = logging.INFO
except :
loglev = logging.INFO
log_handler = LogHandler ( log_file , mode = 'w' )
log_handler.setLevel ( loglev )
formatter = logging.Formatter ( logging_file_format , logging_date_format )
log_handler.setFormatter ( formatter )
logging.root.addHandler ( log_handler )
if log_file :
logger = getLogger('ostap.logger.logger')
func = lambda : logger.info ( 'Log-file is %s' % log_file )
func ()
import atexit
atexit.register ( func )
logging.disable ( logging.INFO - 1 )
# =============================================================================
if __name__ == '__main__' :
setLogging ( 0 )
logger = getLogger ( 'ostap.logger.logger')
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
with noColor() : pass
logger.verbose ( 'This is VERBOSE message' )
logger.debug ( 'This is DEBUG message' )
logger.info ( 'This is INFO message' )
logger.warning ( 'This is WARNING message' )
logger.error ( 'This is ERROR message' )
logger.fatal ( 'This is FATAL message' )
logger.critical ( 'This is CRITICAL message' )
with logColor() :
logger.verbose ( 'This is VERBOSE message' )
logger.debug ( 'This is DEBUG message' )
logger.info ( 'This is INFO message' )
logger.warning ( 'This is WARNING message' )
logger.error ( 'This is ERROR message' )
logger.fatal ( 'This is FATAL message' )
logger.critical ( 'This is CRITICAL message' )
with noColor () :
logger.verbose ( 'This is VERBOSE message' )
logger.debug ( 'This is DEBUG message' )
logger.info ( 'This is INFO message' )
int(x) * int(y), p)
cnt += remain // int_p
else:
break
print('[72]: ', cnt)
return
'''
Problem 73
Consider the fraction, n/d, where n and d are positive integers. If n<d and HCF(n,d)=1, it is called a reduced proper fraction.
If we list the set of reduced proper fractions for d ≤ 8 in ascending order of size, we get:
1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8
It can be seen that there are 3 fractions between 1/3 and 1/2.
How many fractions lie between 1/3 and 1/2 in the sorted set of reduced proper fractions for d ≤ 12,000?
'''
def p73():
L = 12000 + 1
ret = [0 for x in range(L)]
for d in range(2, L):
if d % 2 == 0:
n = 1 if d % 3 == 0 else 3
else: # d % 2 != 0
n = 2 if d % 3 == 0 else 6
left = ((d * n // 3) // n) + 1
right = (((d * n // 2) - 1) // n) + 1
for i in range(left, right):
# count +1 for coprime i and d
if gcd(i, d) == 1:
ret[d] += 1
print('[73]: ', sum(ret))
return
'''
Problem 74
The number 145 is well known for the property that the sum of the factorial of its digits is equal to 145:
1! + 4! + 5! = 1 + 24 + 120 = 145
Perhaps less well known is 169, in that it produces the longest chain of numbers that link back to 169;
it turns out that there are only three such loops that exist:
169 → 363601 → 1454 → 169
871 → 45361 → 871
872 → 45362 → 872
It is not difficult to prove that EVERY starting number will eventually get stuck in a loop. For example,
69 → 363600 → 1454 → 169 → 363601 (→ 1454)
78 → 45360 → 871 → 45361 (→ 871)
540 → 145 (→ 145)
Starting with 69 produces a chain of five non-repeating terms,
but the longest non-repeating chain with a starting number below one million is sixty terms.
How many chains, with a starting number below one million, contain exactly sixty non-repeating terms?
'''
def check_74_factorial(factorials, i, str_i):
check = []
check.append(i)
cnt = 0
while (1):
cnt += 1
if cnt > 60:
return 0
sums = [factorials[int(str_i[j])] for j in range(0, len(str_i))]
res = sum(sums)
if res not in check:
check.append(res)
else:
return cnt
str_i = str(res)
def p74():
factorials = [factorial(i) for i in range(0, 10)]
ret = [i for i in range(1000000, 1, -1) if check_74_factorial(factorials, i, str(i)) == 60]
print('[74]: ', len(ret))
'''
Problem 75
It turns out that 12 cm is the smallest length of wire that can be bent to form an integer sided right angle triangle in exactly one way, but there are many more examples.
12 cm: (3,4,5)
24 cm: (6,8,10)
30 cm: (5,12,13)
36 cm: (9,12,15)
40 cm: (8,15,17)
48 cm: (12,16,20)
In contrast, some lengths of wire, like 20 cm, cannot be bent to form an integer sided right angle triangle,
and other lengths allow more than one solution to be found; for example,
using 120 cm it is possible to form exactly three different integer sided right angle triangles.
120 cm: (30,40,50), (20,48,52), (24,45,51)
Given that L is the length of the wire, for how many values of L ≤ 1,500,000 can exactly one integer sided right angle triangle be formed?
'''
def p75():
# a = m * m - n * n
# b = 2 * m * n
# c = m * m + n * n
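# perimeter = a + b + c = 2*m*m + 2*m*n = 2*m*(m + n), which is the `ts` computed below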
L = 1500000
right_triangle = set()
for n in count(1, 1):
if n > L / 3:
break
for m in count(n + 1, 1):
ts = (2 * (m ** 2)) + (2 * (m * n))
if ts > L:
break
if (m + n) % 2 == 0:
continue
if gcd(m, n) != 1:
continue
right_triangle.add(ts)
checks = [0 for x in range(0, L + 1)]
for r in right_triangle:
for i in count(1):
pos = r * i
if pos > L:
break
checks[pos] += 1
ret = [x for x in range(len(checks)) if checks[x] == 1]
print('[75]: ', len(ret))
return
'''
Problem 76
It is possible to write five as a sum in exactly six different ways:
4 + 1
3 + 2
3 + 1 + 1
2 + 2 + 1
2 + 1 + 1 + 1
1 + 1 + 1 + 1 + 1
How many different ways can one hundred be written as a sum of at least two positive integers?
'''
def p76():
target = 100
ways = [1] + [0] * target
for i in range(1, target): # number set
for j in range(i, target + 1):
ways[j] += ways[j - i]
print('[76]: ', ways[target])
'''
Problem 77
It is possible to write ten as the sum of primes in exactly five different ways:
7 + 3
5 + 5
5 + 3 + 2
3 + 3 + 2 + 2
2 + 2 + 2 + 2 + 2
What is the first value which can be written as the sum of primes in over five thousand different ways?
'''
# def p31_dreashire():
# target = 200
# coins = [1, 2, 5, 10, 20, 50, 100, 200]
# ways = [1] + [0]*target
# for coin in coins:
# for i in range(coin, target+1):
# ways[i] += ways[i-coin]
# print("Ways to make change =", ways[target])
def p77():
target = 100
primes = prime_sieve(target)
ways = [1] + [0] * target
for p in primes:
for i in range(p, target + 1):
ways[i] += ways[i - p]
print('[77]: ', [i for i, w in enumerate(ways) if w > 5000][0])
return
'''
Problem 78
Let p(n) represent the number of different ways in which n coins can be separated into piles. For example, five coins can be separated into piles in exactly seven different ways, so p(5)=7.
OOOOO <-- different from problem 76
OOOO O
OOO OO
OOO O O
OO OO O
OO O O O
O O O O O
Find the least value of n for which p(n) is divisible by one million.
'''
def pentagonal_seq(n): # https://oeis.org/A001318
if n == 0:
return 0
if n == 1:
return 1
return n * (3 * n - 1) // 2
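# Euler's pentagonal number theorem gives the recurrence used below:
# p(n) = sum_{k>=1} (-1)**(k+1) * ( p(n - k*(3k-1)/2) + p(n - k*(3k+1)/2) )
# `penta` interleaves g(k) and g(-k), so terms are added in pairs and subtracted
# in pairs - hence the idx % 4 test.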
def partition(n, part, penta): # https://oeis.org/A000041
if n == 0:
return 1
if n == 1:
return 1
ret = 0
for idx, p in enumerate(penta):
if n - p < 0:
break
if idx % 4 == 0 or idx % 4 == 1:
ret += part[n - p]
else:
ret -= part[n - p]
return ret
def p78(): # Answer 55374 6.31 sec
part = [1, 1]
penta = []
for i in range(1, 1000000):
penta.append(pentagonal_seq(i))
penta.append(pentagonal_seq(i * -1))
for n in count(2, 1):
ret = partition(n, part, penta)
part.append(ret)
if ret % 1000000 == 0:
print('[78]: ', n)
return
'''
Problem 79
A common security method used for online banking is to ask the user for three random characters from a passcode.
For example, if the passcode was 531278, they may ask for the 2nd, 3rd, and 5th characters; the expected reply would be: 317.
The text file, keylog.txt, contains fifty successful login attempts.
Given that the three characters are always asked for in order, analyse the file so as to determine the shortest possible secret passcode of unknown length.
'''
def p79():
print('[79]: ', 73162890) # by hand
# Simple algorithm:
#
# 1. Work out all the digits necessary, put them in a unordered list
# 2. Use the logins to do a normal bubble-sort of the digits by:
# a. for each two-digit pair to compare:
# b. find a login that contains both digits
# c. use the ordering in the login to order the digits correctly
#
# Python:
#
# def read_logins():
# f = open("p079_keylog.txt")
# l = f.read().split('\n')
# f.close()
# return l
#
# def get_digits(logins):
# digits = set()
# result = []
# for login in logins:
# for digit in login:
# | |
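# A self-contained sketch of the bubble-sort idea described above (hedged: the real
# keylog.txt data is not included here, and the names below are illustrative only):
#
# def shortest_passcode(logins):
#     digits = sorted({d for login in logins for d in login})
#     def comes_before(a, b):
#         # digit a precedes digit b if some login shows a before b
#         for login in logins:
#             if a in login and b in login:
#                 return login.index(a) < login.index(b)
#         return True  # no evidence either way; keep the current order
#     changed = True
#     while changed:
#         changed = False
#         for i in range(len(digits) - 1):
#             if not comes_before(digits[i], digits[i + 1]):
#                 digits[i], digits[i + 1] = digits[i + 1], digits[i]
#                 changed = True
#     return ''.join(digits)
#
# With the fifty keylog attempts this should reproduce the hand-derived answer 73162890.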
if new_image in all_images:
return create_new_image()
else:
image_count = len(all_images)
genesis_count = len(genesis_arr)
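# The blocks below sprinkle the 'genesis' war paint across each tenth of the collection,
# each decile using its own modulus and a cumulative cap (90, 215, 315, ... 1000), so at
# most 1000 genesis images exist in total.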
if image_count < TOTAL_IMAGES / 10 and image_count % 7 == 0 and genesis_count < 90:
new_image['War_paint'] = create_genesis(image_count)
if TOTAL_IMAGES / 10 <= image_count and image_count < 2 * TOTAL_IMAGES / 10 and image_count % 9 == 0 and genesis_count < 215:
new_image['War_paint'] = create_genesis(image_count)
if 2 * TOTAL_IMAGES / 10 <= image_count and image_count < 3 * TOTAL_IMAGES / 10 and image_count % 8 == 5 and genesis_count < 315:
new_image['War_paint'] = create_genesis(image_count)
if 3 * TOTAL_IMAGES / 10 <= image_count and image_count < 4 * TOTAL_IMAGES / 10 and image_count % 5 == 3 and genesis_count < 390:
new_image['War_paint'] = create_genesis(image_count)
if 4 * TOTAL_IMAGES / 10 <= image_count and image_count < 5 * TOTAL_IMAGES / 10 and image_count % 7 == 0 and genesis_count < 500:
new_image['War_paint'] = create_genesis(image_count)
if 5 * TOTAL_IMAGES / 10 <= image_count and image_count < 6 * TOTAL_IMAGES / 10 and image_count % 6 == 2 and genesis_count < 575:
new_image['War_paint'] = create_genesis(image_count)
if 6 * TOTAL_IMAGES / 10 <= image_count and image_count < 7 * TOTAL_IMAGES / 10 and image_count % 6 == 4 and genesis_count < 600:
new_image['War_paint'] = create_genesis(image_count)
if 7 * TOTAL_IMAGES / 10 <= image_count and image_count < 8 * TOTAL_IMAGES / 10 and image_count % 4 == 0 and genesis_count < 800:
new_image['War_paint'] = create_genesis(image_count)
if 8 * TOTAL_IMAGES / 10 <= image_count and image_count < 9 * TOTAL_IMAGES / 10 and image_count % 7 == 3 and genesis_count < 910:
new_image['War_paint'] = create_genesis(image_count)
if 9 * TOTAL_IMAGES / 10 <= image_count and image_count < TOTAL_IMAGES and image_count % 9 == 8 and genesis_count < 1000:
new_image['War_paint'] = create_genesis(image_count)
# test 500/50-genesis
# if image_count < TOTAL_IMAGES / 10 and image_count % 4 == 0 and genesis_count < 3:
# new_image['War_paint'] = create_genesis()
# if TOTAL_IMAGES/10 <= image_count and image_count < 2 * TOTAL_IMAGES / 10 and image_count % 4 == 0 and genesis_count < 7:
# new_image['War_paint'] = create_genesis()
# if 2 * TOTAL_IMAGES / 10 <= image_count and image_count < 3 * TOTAL_IMAGES / 10 and image_count % 4 == 2 and genesis_count < 11:
# new_image['War_paint'] = create_genesis()
# if 3 * TOTAL_IMAGES / 10 <= image_count and image_count < 4 * TOTAL_IMAGES / 10 and image_count % 4 == 3 and genesis_count < 15:
# new_image['War_paint'] = create_genesis()
# if 4 * TOTAL_IMAGES / 10 <= image_count and image_count < 5 * TOTAL_IMAGES / 10 and image_count % 4 == 0 and genesis_count < 20:
# new_image['War_paint'] = create_genesis()
# if 5 * TOTAL_IMAGES / 10 <= image_count and image_count < 6 * TOTAL_IMAGES / 10 and image_count % 4 == 2 and genesis_count < 25:
# new_image['War_paint'] = create_genesis()
# if 6 * TOTAL_IMAGES / 10 <= image_count and image_count < 7 * TOTAL_IMAGES / 10 and image_count % 4 == 2 and genesis_count < 30:
# new_image['War_paint'] = create_genesis()
# if 7 * TOTAL_IMAGES / 10 <= image_count and image_count < 8 * TOTAL_IMAGES / 10 and image_count % 4 == 0 and genesis_count < 35:
# new_image['War_paint'] = create_genesis()
# if 8 * TOTAL_IMAGES / 10 <= image_count and image_count < 9 * TOTAL_IMAGES / 10 and image_count % 4 == 3 and genesis_count < 42:
# new_image['War_paint'] = create_genesis()
# if 9 * TOTAL_IMAGES / 10 <= image_count and image_count < TOTAL_IMAGES and image_count % 4 == 3 and genesis_count < 50:
# new_image['War_paint'] = create_genesis()
# if new_image['War_paint'] == 'war_paint-genesis':
# print('--------------------', genesis_count)
return new_image
# Generate the unique combinations based on trait weightings
for i in range(TOTAL_IMAGES):
new_trait_image = create_new_image()
all_images.append(new_trait_image)
# Returns true if all images are unique
def all_images_unique(all_images):
seen = list()
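# list.append() returns None (falsy), so the generator below only yields True for an
# item that is already in `seen`; any() then reports the first duplicate found.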
return not any(i in seen or seen.append(i) for i in all_images)
print("Are all images unique?", all_images_unique(all_images))
# Add token Id to each image
i = 0
for item in all_images:
item["tokenId"] = i
i = i + 1
# print(all_images)
# # Get Trait Counts
# background_count = {}
# for item in background:
# background_count[item] = 0
# weapon_back_count = {}
# for item in weapon_back:
# weapon_back_count[item] = 0
# helm_backpieces_count = {}
# for item in helm_backpieces:
# helm_backpieces_count[item] = 0
# armor_back_pieces_count = {}
# for item in armor_back_pieces:
# armor_back_pieces_count[item] = 0
# base_body_count = {}
# for item in base_body:
# base_body_count[item] = 0
# tattoos_count = {}
# for item in tattoos:
# tattoos_count[item] = 0
# battle_armors_count = {}
# for item in battle_armors:
# battle_armors_count[item] = 0
# scars_count = {}
# for item in scars:
# scars_count[item] = 0
# facial_expressions_count = {}
# for item in facial_expressions:
# facial_expressions_count[item] = 0
# war_paint_count = {}
# for item in war_paint:
# war_paint_count[item] = 0
# eyebrows_count = {}
# for item in eyebrows:
# eyebrows_count[item] = 0
# hair_count = {}
# for item in hair:
# hair_count[item] = 0
# mage_hoods_count = {}
# for item in mage_hoods:
# mage_hoods_count[item] = 0
# beards_count = {}
# for item in beards:
# beards_count[item] = 0
# arms_count = {}
# for item in arms:
# arms_count[item] = 0
# weapon_count = {}
# for item in weapon:
# weapon_count[item] = 0
# arm_armor_count = {}
# for item in arm_armor:
# arm_armor_count[item] = 0
# weapon_mage_effect_count = {}
# for item in weapon_mage_effect:
# weapon_mage_effect_count[item] = 0
# extras_count = {}
# for item in extras:
# extras_count[item] = 0
# head_pieces_count = {}
# for item in head_pieces:
# head_pieces_count[item] = 0
# for image in all_images:
# background_count[image["Background"]] += 1
# weapon_back_count[image["Weapon_back"]] += 1
# helm_backpieces_count[image["Helm_backpieces"]] += 1
# armor_back_pieces_count[image["Armor_back_pieces"]] += 1
# base_body_count[image["Base_body"]] += 1
# tattoos_count[image["Tattoos"]] += 1
# battle_armors_count[image["Battle_armors"]] += 1
# scars_count[image["Scars"]] += 1
# facial_expressions_count[image["Facial_expressions"]] += 1
# war_paint_count[image["War_paint"]] += 1
# eyebrows_count[image["Eyebrows"]] += 1
# hair_count[image["Hair"]] += 1
# mage_hoods_count[image["Mage_hoods"]] += 1
# beards_count[image["Beards"]] += 1
# arms_count[image["Arms"]] += 1
# weapon_count[image["Weapon"]] += 1
# arm_armor_count[image["Arm_armor"]] += 1
# weapon_mage_effect_count[image["Weapon_mage_effect"]] += 1
# extras_count[image["Extras"]] += 1
# head_pieces_count[image["Head_pieces"]] += 1
# print(background_count)
# print(weapon_back_count)
# print(helm_backpieces_count)
# print(armor_back_pieces_count)
# print(base_body_count)
# print(tattoos_count)
# print(battle_armors_count)
# print(scars_count)
# print(facial_expressions_count)
# print(war_paint_count)
# print(eyebrows_count)
# print(hair_count)
# print(mage_hoods_count)
# print(beards_count)
# print(arms_count)
# print(weapon_count)
# print(arm_armor_count)
# print(weapon_mage_effect_count)
# print(extras_count)
# print(head_pieces_count)
#### Generate Metadata for all Traits
METADATA_FILE_NAME = './metadata/all-traits.json'
with open(METADATA_FILE_NAME, 'w') as outfile:
json.dump(all_images, outfile, indent=4)
#### Generate Images
for item in all_images:
im1 = Image.open(f'./trait-layers/1. BACKGROUNDS/{background_files[item["Background"]]}.png').convert('RGBA')
im3 = Image.open(f'./trait-layers/3. HELM BACKPIECES/{helm_backpieces_files[item["Helm_backpieces"]]}.png').convert('RGBA')
im4 = Image.open(f'./trait-layers/4. ARMOR BACK PIECES/{armor_back_pieces_files[item["Armor_back_pieces"]]}.png').convert('RGBA')
im5 = Image.open(f'./trait-layers/5. BASE BODY/{base_body_files[item["Base_body"]]}.png').convert('RGBA')
im6 = Image.open(f'./trait-layers/6. TATTOOS/{tattoos_files[item["Tattoos"]]}.png').convert('RGBA')
im7 = Image.open(f'./trait-layers/9. FACIAL EXPRESSIONS/{facial_expressions_files[item["Facial_expressions"]]}.png').convert('RGBA')
im8 = Image.open(f'./trait-layers/10. WAR PAINT/{war_paint_files[item["War_paint"]]}.png').convert('RGBA')
im9 = Image.open(f'./trait-layers/12. HAIR/{hair_files[item["Hair"]]}.png').convert('RGBA')
im10 = Image.open(f'./trait-layers/8. SCARS/{scars_files[item["Scars"]]}.png').convert('RGBA')
im11 = Image.open(f'./trait-layers/11. EYEBROWS/{eyebrows_files[item["Eyebrows"]]}.png').convert('RGBA')
im12 = Image.open(f'./trait-layers/14. BEARDS/{beards_files[item["Beards"]]}.png').convert('RGBA')
im13 = Image.open(f'./trait-layers/7. BATTLE ARMORS/{battle_armors_files[item["Battle_armors"]]}.png').convert('RGBA')
im14 = Image.open(f'./trait-layers/13. MAGE HOODS/{mage_hoods_files[item["Mage_hoods"]]}.png').convert('RGBA')
im15 = Image.open(f'./trait-layers/15. ARMS/{arms_files[item["Arms"]]}.png').convert('RGBA')
im16 = Image.open(f'./trait-layers/16. EXTRAS/{extras_files[item["Extras"]]}.png').convert('RGBA')
im17 = Image.open(f'./trait-layers/18. WEAPON/{weapon_files[item["Weapon"]]}.png').convert('RGBA')
im18 = Image.open(f'./trait-layers/19. ARM ARMOR/{arm_armor_files[item["Arm_armor"]]}.png').convert('RGBA')
im_no_image = Image.open(f'./trait-layers/19. ARM ARMOR/no_arm_armor.png').convert('RGBA')
# im19 = Image.open(f'./trait-layers/20. WEAPON MAGE EFFECT/{weapon_mage_effect_files[item["Weapon_mage_effect"]]}.png').convert('RGBA')
# im20 = Image.open(f'./trait-layers/21. HEAD PIECES/{head_pieces_files[item["Head_pieces"]]}.png').convert('RGBA')
#Create each composite
if 'enclosed_armors' in item['Battle_armors'] or not 'no_mage_hoods' in item['Mage_hoods']:
com1 = Image.alpha_composite(im1, im3)
else:
com1 = Image.alpha_composite(im1, im_no_image)
if 'weapon_one_hand-back' in item['Weapon'] or 'weapon_back' in item['Weapon']:
com1 = Image.alpha_composite(im1, im17)
com2 = Image.alpha_composite(im4, im5)
com3 = Image.alpha_composite(im6, im7)
com4 = Image.alpha_composite(im8, im10)
com5 = Image.alpha_composite(im9, im11)
if 'enclosed_armors' in item['Battle_armors']:
com6 = Image.alpha_composite(im12, im13)
if not 'extras-10' in item['Extras']:
com6 = Image.alpha_composite(com6, im_no_image)
com6 = Image.alpha_composite(com6, im16)
com6 = Image.alpha_composite(com6, im13)
else:
com6 = Image.alpha_composite(im13, im12)
if not 'extras-10' in item['Extras']:
com6 = Image.alpha_composite(com6, im_no_image)
com6 = Image.alpha_composite(com6, im16)
com6 = Image.alpha_composite(com6, im12)
# Mage hand CAN ONLY be with mage hood
if not 'no_mage_hoods' in item['Mage_hoods'] and 'helmetless' in item['Battle_armors']:
com7 = Image.alpha_composite(im13, im14)
else:
com7 = Image.alpha_composite(im15, im15)
com8 = Image.alpha_composite(im_no_image, im15)
com9 = Image.alpha_composite(im_no_image, im_no_image)
if not 'no_battle_armors' in item['Battle_armors']:
com10 = Image.alpha_composite(im_no_image, im18)
else:
com10 = Image.alpha_composite(im_no_image, im15)
com11 = Image.alpha_composite(com1, com2)
com12 = Image.alpha_composite(com3, com4)
com13 = Image.alpha_composite(com5, com6)
com14 = Image.alpha_composite(com7, com8)
com15 = Image.alpha_composite(com9, com10)
com16 = Image.alpha_composite(com11, com12)
com17 = Image.alpha_composite(com13, com14)
com18 | |
# encoding: utf-8
"""
protocol.py
Created by <NAME> on 2009-08-25.
Copyright (c) 2009-2012 Exa Networks. All rights reserved.
Modified by Orange - 2014
"""
#import os
#import copy
import time
import socket
from struct import unpack
from bagpipe.exabgp.rib.table import Table
from bagpipe.exabgp.rib.delta import Delta
from bagpipe.exabgp.utils import hexa
from bagpipe.exabgp.structure.address import AFI,SAFI
from bagpipe.exabgp.structure.ip import BGPPrefix,Inet,to_IP
from bagpipe.exabgp.structure.vpn import VPNLabelledPrefix
from bagpipe.exabgp.structure.evpn import EVPNNLRI
from bagpipe.exabgp.structure.rtc import RouteTargetConstraint
from bagpipe.exabgp.structure.asn import ASN,AS_TRANS
from bagpipe.exabgp.network.connection import Connection
from bagpipe.exabgp.message import Message,defix,Failure
from bagpipe.exabgp.message.nop import NOP
from bagpipe.exabgp.message.open import Open,Unknown,Parameter,Capabilities,RouterID,MultiProtocol,RouteRefresh,CiscoRouteRefresh,MultiSession,Graceful
from bagpipe.exabgp.message.update import Update
from bagpipe.exabgp.message.update.eor import EOR
from bagpipe.exabgp.message.keepalive import KeepAlive
from bagpipe.exabgp.message.notification import Notification, Notify #, NotConnected
from bagpipe.exabgp.message.update.route import ReceivedRoute # ,Route
from bagpipe.exabgp.message.update.attributes import Attributes
from bagpipe.exabgp.message.update.attribute import AttributeID
from bagpipe.exabgp.message.update.attribute.flag import Flag
from bagpipe.exabgp.message.update.attribute.origin import Origin
from bagpipe.exabgp.message.update.attribute.aspath import ASPath,AS4Path
from bagpipe.exabgp.message.update.attribute.nexthop import NextHop
from bagpipe.exabgp.message.update.attribute.med import MED
from bagpipe.exabgp.message.update.attribute.localpref import LocalPreference
from bagpipe.exabgp.message.update.attribute.communities import Community,Communities,ECommunity,ECommunities
from bagpipe.exabgp.message.update.attribute.originator_id import OriginatorId
from bagpipe.exabgp.message.update.attribute.pmsi_tunnel import PMSITunnel
#from bagpipe.exabgp.message.update.attribute.mprnlri import MPRNLRI
#from bagpipe.exabgp.message.update.attribute.mpurnlri import MPURNLRI
from bagpipe.exabgp.processes import ProcessError
from bagpipe.exabgp.log import Logger
logger = Logger()
MAX_BACKLOG = 200000
# README: Move all the old packet decoding in another file to clean up the includes here, as it is not used anyway
class Protocol (object):
decode = True
strict = False
def __init__ (self,peer,connection=None):
self.peer = peer
self.neighbor = peer.neighbor
self.connection = connection
self._delta = Delta(Table(peer))
self._asn4 = False
self._messages = {}
self._frozen = 0
self.message_size = 4096
# XXX: we use self.peer.neighbor.peer_address when we could use self.neighbor.peer_address
def me (self,message):
return "Peer %15s ASN %-7s %s" % (self.peer.neighbor.peer_address,self.peer.neighbor.peer_as,message)
def connect (self):
# allows to test the protocol code using modified StringIO with a extra 'pending' function
if not self.connection:
peer = self.neighbor.peer_address
local = self.neighbor.local_address
md5 = self.neighbor.md5
ttl = self.neighbor.ttl
self.connection = Connection(peer,local,md5,ttl)
message = 'neighbor %s connected\n' % self.peer.neighbor.peer_address
try:
proc = self.peer.supervisor.processes
for name in proc.notify(self.neighbor.peer_address):
proc.write(name,message)
except ProcessError:
raise Failure('Could not send message(s) to helper program(s) : %s' % message)
def check_keepalive (self):
left = int (self.connection.last_read + self.neighbor.hold_time - time.time())
if left <= 0:
raise Notify(4,0)
return left
def close (self):
#self._delta.last = 0
if self.connection:
# must be first otherwise we could have a loop caused by the raise in the below
self.connection.close()
self.connection = None
message = 'neighbor %s down\n' % self.peer.neighbor.peer_address
try:
proc = self.peer.supervisor.processes
for name in proc.notify(self.neighbor.peer_address):
proc.write(name,message)
except ProcessError:
raise Failure('Could not send message(s) to helper program(s) : %s' % message)
# Read from network .......................................................
def read_message (self):
# This call resets the time used for the read timeout
if not self.connection.pending(True):
return NOP('')
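# A BGP message header is 19 octets: 16-octet marker, 2-octet length, 1-octet type (RFC 4271)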
length = 19
data = ''
while length:
if self.connection.pending():
delta = self.connection.read(length)
data += delta
length -= len(delta)
# The socket is closed
if not data:
raise Failure('The TCP connection is closed')
if data[:16] != Message.MARKER:
# We are speaking BGP - send us a valid Marker
raise Notify(1,1,'The packet received does not contain a BGP marker')
raw_length = data[16:18]
length = unpack('!H',raw_length)[0]
msg = data[18]
if ( length < 19 or length > 4096):
# BAD Message Length
raise Notify(1,2)
if (
(msg == Open.TYPE and length < 29) or
(msg == Update.TYPE and length < 23) or
(msg == Notification.TYPE and length < 21) or
(msg == KeepAlive.TYPE and length != 19)
):
# MUST send the faulty length back
raise Notify(1,2,raw_length)
#(msg == RouteRefresh.TYPE and length != 23)
length -= 19
data = ''
while length:
if self.connection.pending():
delta = self.connection.read(length)
data += delta
length -= len(delta)
# The socket is closed
if not data:
raise Failure('The TCP connection is closed')
if msg == Notification.TYPE:
raise Notification(ord(data[0]),ord(data[1]))
if msg == KeepAlive.TYPE:
return self.KeepAliveFactory(data)
if msg == Open.TYPE:
return self.OpenFactory(data)
if msg == Update.TYPE:
if self.neighbor.parse_routes:
update = self.UpdateFactory(data)
return update
else:
return NOP('')
if self.strict:
raise Notify(1,3,msg)
return NOP(data)
def read_open (self,_open,ip):
message = self.read_message()
if message.TYPE == NOP.TYPE:
return message
if message.TYPE != Open.TYPE:
raise Notify(5,1,'The first packet received is not an open message (%s)' % message)
if _open.asn.asn4() and not message.capabilities.announced(Capabilities.FOUR_BYTES_ASN):
raise Notify(2,0,'We have an ASN4 and you do not speak it. bye.')
self._asn4 = message.capabilities.announced(Capabilities.FOUR_BYTES_ASN)
if message.asn == AS_TRANS:
peer_as = message.capabilities[Capabilities.FOUR_BYTES_ASN]
else:
peer_as = message.asn
if peer_as != self.neighbor.peer_as:
raise Notify(2,2,'ASN in OPEN (%d) did not match ASN expected (%d)' % (message.asn,self.neighbor.peer_as))
# RFC 6286 : http://tools.ietf.org/html/rfc6286
#if message.router_id == RouterID('0.0.0.0'):
# message.router_id = RouterID(ip)
if message.router_id == RouterID('0.0.0.0'):
raise Notify(2,3,'0.0.0.0 is an invalid router_id according to RFC6286')
if message.router_id == self.neighbor.router_id and message.asn == self.neighbor.local_as:
raise Notify(2,3,'BGP Identifier collision (%s) on IBGP according to RFC 6286' % message.router_id)
if message.hold_time < 3:
raise Notify(2,6,'Hold Time is invalid (%d)' % message.hold_time)
if message.hold_time >= 3:
self.neighbor.hold_time = min(self.neighbor.hold_time,message.hold_time)
# XXX: Does not work as the capa is not yet defined
if message.capabilities.announced(Capabilities.EXTENDED_MESSAGE):
# untested !
if self.peer.bgp.message_size:
self.message_size = self.peer.bgp.message_size
# README: This limit what we are announcing may cause some issue if you add new family and SIGHUP
# README: So it is commented until I make my mind to add it or not (as Juniper complain about mismatch capabilities)
# # Those are the capacity we need to announce those routes
# for family in _open.capabilities[Capabilities.MULTIPROTOCOL_EXTENSIONS]:
# # if the peer does not support them, tear down the session
# if family not in message.capabilities[Capabilities.MULTIPROTOCOL_EXTENSIONS]:
# afi,safi = family
# raise Notify(2,0,'Peers does not speak %s %s' % (afi,safi))
return message
def read_keepalive (self):
message = self.read_message()
if message.TYPE == NOP.TYPE:
return message
if message.TYPE != KeepAlive.TYPE:
raise Notify(5,2)
return message
# Sending message to peer .................................................
# we do not buffer those message in purpose
def new_open (self,restarted,asn4):
if asn4:
asn = self.neighbor.local_as
else:
asn = AS_TRANS
o = Open(4,asn,self.neighbor.router_id.ip,Capabilities().default(self.neighbor,restarted),self.neighbor.hold_time)
if not self.connection.write(o.message()):
raise Failure('Could not send open')
return o
def new_keepalive (self,force=False):
left = int(self.connection.last_write + self.neighbor.hold_time.keepalive() - time.time())
k = KeepAlive()
m = k.message()
if force:
written = self.connection.write(k.message())
if not written:
logger.message(self.me(">> KEEPALIVE buffered"))
self._messages[self.neighbor.peer_as].append(('KEEPALIVE',m))
else:
self._frozen = 0
return left,k
if left <= 0:
written = self.connection.write(k.message())
if not written:
logger.message(self.me(">> KEEPALIVE buffered"))
self._messages[self.neighbor.peer_as].append(('KEEPALIVE',m))
else:
self._frozen = 0
return left,k
return left,None
def new_notification (self,notification):
return self.connection.write(notification.message())
# messages buffered in case of failure
def buffered (self):
return self._messages.get(self.neighbor.peer_as,[]) != []
def _backlog (self,maximum=0):
backlog = self._messages.get(self.neighbor.peer_as,[])
if backlog:
if not self._frozen:
self._frozen = time.time()
if self._frozen and self._frozen + (self.neighbor.hold_time) < time.time():
raise Failure('peer %s not reading on socket - killing session' % self.neighbor.peer_as)
logger.message(self.me("unable to send route for %d second (maximum allowed %d)" % (time.time()-self._frozen,self.neighbor.hold_time)))
nb_backlog = len(backlog)
if nb_backlog > MAX_BACKLOG:
raise Failure('over %d routes buffered for peer %s - killing session' % (MAX_BACKLOG,self.neighbor.peer_as))
logger.message(self.me("backlog of %d/%d routes" % (nb_backlog,MAX_BACKLOG)))
count = 0
while backlog:
count += 1
name,update = backlog[0]
written = self.connection.write(update)
if not written:
break
logger.message(self.me(">> DEBUFFERED %s" % name))
backlog.pop(0)
self._frozen = 0
yield count
if maximum and count >= maximum:
break
self._messages[self.neighbor.peer_as] = backlog
def _announce (self,name,generator):
def chunked (generator,size):
chunk = ''
for data in generator:
if len(data) > size:
raise Failure('Can not send BGP update larger than %d bytes on this connection.' % size)
if len(chunk) + len(data) <= size:
chunk += data
continue
yield chunk
chunk = data
if chunk:
yield chunk
count = 0
# The message size is the whole BGP message INCLUDING headers !
for update in chunked(generator,self.message_size-19):
count += 1
if self._messages[self.neighbor.peer_as]:
logger.message(self.me(">> %s could not be sent, some messages are still in the buffer" % name))
self._messages[self.neighbor.peer_as].append((name,update))
continue
written = self.connection.write(update)
if not written:
logger.message(self.me(">> %s buffered" % name))
self._messages[self.neighbor.peer_as].append((name,update))
yield count
def new_announce (self):
for answer in self._backlog():
yield answer
asn4 = not not self.peer.open.capabilities.announced(Capabilities.FOUR_BYTES_ASN)
for answer in self._announce('UPDATE',self._delta.announce(asn4,self.neighbor.local_as,self.neighbor.peer_as)):
yield answer
def new_update (self):
for answer in self._backlog():
yield answer
asn4 = not not self.peer.open.capabilities.announced(Capabilities.FOUR_BYTES_ASN)
for answer in self._announce('UPDATE',self._delta.update(asn4,self.neighbor.local_as,self.neighbor.peer_as)):
yield answer
def new_eors (self,families):
for answer in self._backlog():
pass
eor = EOR()
eors = eor.eors(families)
for answer in self._announce('EOR',eors):
pass
# Message Factory .................................................
def KeepAliveFactory (self,data):
return KeepAlive()
def _key_values (self,name,data):
if len(data) < 2:
raise Notify(2,0,"Bad length for OPEN %s (<2) %s" % (name,hexa(data)))
l = ord(data[1])
boundary = l+2
if len(data) < boundary:
raise Notify(2,0,"Bad length for OPEN %s (buffer underrun) %s" % (name,hexa(data)))
key = ord(data[0])
value = data[2:boundary]
rest = data[boundary:]
return key,value,rest
def CapabilitiesFactory (self,data):
capabilities = Capabilities()
option_len = ord(data[0])
if option_len:
data = data[1:]
while data:
key,value,data = self._key_values('parameter',data)
# Paramaters must only be sent once.
if key == Parameter.AUTHENTIFICATION_INFORMATION:
raise Notify(2,5)
if key == Parameter.CAPABILITIES:
while value:
k,capv,value = self._key_values('capability',value)
# Multiple Capabilities can be present in a single attribute
#if r:
# raise Notify(2,0,"Bad length for OPEN %s (size mismatch) %s" % ('capability',hexa(value)))
if k == Capabilities.MULTIPROTOCOL_EXTENSIONS:
if k not in capabilities:
capabilities[k] = MultiProtocol()
afi = AFI(unpack('!H',capv[:2])[0])
safi = SAFI(ord(capv[3]))
capabilities[k].append((afi,safi))
continue
if k == Capabilities.GRACEFUL_RESTART:
restart = unpack('!H',capv[:2])[0]
restart_flag = restart >> 12
restart_time = restart & Graceful.TIME_MASK
value_gr = capv[2:]
families = []
while value_gr:
afi = AFI(unpack('!H',value_gr[:2])[0])
safi = SAFI(ord(value_gr[2]))
flag_family = ord(value_gr[0])
families.append((afi,safi,flag_family))
value_gr = value_gr[4:]
capabilities[k] = Graceful(restart_flag,restart_time,families)
continue
if k == Capabilities.FOUR_BYTES_ASN:
capabilities[k] = ASN(unpack('!L',capv[:4])[0])
continue
if k == Capabilities.ROUTE_REFRESH:
capabilities[k] = RouteRefresh()
continue
if k == Capabilities.CISCO_ROUTE_REFRESH:
capabilities[k] = CiscoRouteRefresh()
continue
if k == Capabilities.MULTISESSION_BGP:
capabilities[k] = MultiSession()
continue
if k == Capabilities.MULTISESSION_BGP_RFC:
capabilities[k] = MultiSession()
continue
if k not in capabilities:
capabilities[k] = Unknown(k,[ord(_) for _ in capv])
else:
raise Notify(2,0,'Unknown OPEN parameter %s' % hex(key))
return capabilities
def OpenFactory (self,data):
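# OPEN body layout (RFC 4271): version (1 octet), my AS (2), hold time (2),
# BGP identifier (4), optional parameters length (1) followed by the parameters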
version = ord(data[0])
if version != 4:
# Only version 4 is supported nowadays.
raise Notify(2,1,data[0])
asn = unpack('!H',data[1:3])[0]
hold_time = unpack('!H',data[3:5])[0]
numeric = unpack('!L',data[5:9])[0]
router_id = "%d.%d.%d.%d" % (numeric>>24,(numeric>>16)&0xFF,(numeric>>8)&0xFF,numeric&0xFF)
capabilities = self.CapabilitiesFactory(data[9:])
return Open(version,asn,router_id,capabilities,hold_time)
def UpdateFactory (self,data):
length = len(data)
# withdrawn
lw,withdrawn,data = defix(data)
if len(withdrawn) != lw:
raise Notify(3,1)
la,attribute,announced = defix(data)
if len(attribute) != la:
raise Notify(3,1)
# The RFC check ...
#if lw + la + 23 > length:
if 2 + lw + 2+ la + len(announced) != length:
raise Notify(3,1)
routes = []
while withdrawn:
nlri = BGPPrefix(AFI.ipv4,withdrawn)
route = ReceivedRoute(nlri,'withdraw')
withdrawn = withdrawn[len(nlri):]
routes.append(route)
self.mp_routes = []
attributes = self.AttributesFactory(attribute)
routes.extend(self.mp_routes)
while announced:
nlri = BGPPrefix(AFI.ipv4,announced)
route = ReceivedRoute(nlri,'announce')
# XXX: Should this be a deep copy
route.attributes = attributes
announced = announced[len(nlri):]
routes.append(route)
#logger.info(self.me('Received route %s' % nlri))
#print "routes", routes
#print "attributes", attributes
if routes:
return Update(routes)
return NOP('')
def AttributesFactory (self,data):
try:
self.attributes = Attributes()
return self._AttributesFactory(data).attributes
except IndexError:
raise Notify(3,2,data)
def __new_ASPath (self,data,asn4=False):
if len(data) == 0:
return ASPath(asn4)
if asn4:
size = | |
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
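# Illustrative usage of quote_xml (not part of the generated module); assuming
# the standard generateDS entity replacements above:
#   quote_xml('a < b & c > d')  ->  'a &lt; b &amp; c &gt; d'
#   quote_xml(None)             ->  ''
#   quote_xml(5)                ->  '5'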
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
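# Illustrative note (assumes an lxml element, which provides node.nsmap): a
# prefixed attribute name such as 'xsi:type' is resolved through the node's
# namespace map, so find_attr_value_('xsi:type', node) looks up
# '{http://www.w3.org/2001/XMLSchema-instance}type' in node.attrib, while a
# plain name like 'latitude' is read directly from the attribute dictionary.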
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class corner(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, location=None, latitude=None, longitude=None):
self.location = _cast(None, location)
self.latitude = _cast(float, latitude)
self.longitude = _cast(float, longitude)
pass
def factory(*args_, **kwargs_):
if corner.subclass:
return corner.subclass(*args_, **kwargs_)
else:
return corner(*args_, **kwargs_)
factory = staticmethod(factory)
def get_location(self): return self.location
def set_location(self, location): self.location = location
def get_latitude(self): return self.latitude
def set_latitude(self, latitude): self.latitude = latitude
def get_longitude(self): return self.longitude
def set_longitude(self, longitude): self.longitude = longitude
def validate_cornerType(self, value):
# Validate type cornerType, a restriction on xs:string.
pass
def validate_latAngleType(self, value):
# Validate type latAngleType, a restriction on xs:float.
pass
def validate_longAngleType(self, value):
# Validate type longAngleType, a restriction on xs:float.
pass
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='corner', namespacedef_='', pretty_print=True):
# Check if we are at the root level and output the XML header
if level == 0:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('\n')
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
# Check if we are at the root level and output attributes first before namespacedef
if level == 0:
outfile.write('<%s%s' % (namespace_, name_))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='corner')
outfile.write('%s' % (namespacedef_ and ' ' + namespacedef_ or ''))
else:
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='corner')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='corner', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='corner'):
if self.location is not None and 'location' not in already_processed:
already_processed.add('location')
outfile.write(' location=%s' % (quote_attrib(self.location), ))
if self.latitude is not None and 'latitude' not in already_processed:
already_processed.add('latitude')
outfile.write(' latitude="%s"' % self.gds_format_float(self.latitude, input_name='latitude'))
if self.longitude is not None and 'longitude' not in already_processed:
already_processed.add('longitude')
outfile.write(' longitude="%s"' % self.gds_format_float(self.longitude, input_name='longitude'))
def exportChildren(self, outfile, level, namespace_='', name_='corner', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='corner'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.location is not None and 'location' not in already_processed:
already_processed.add('location')
showIndent(outfile, level)
outfile.write('location="%s",\n' % (self.location,))
if self.latitude is not None and 'latitude' not in already_processed:
already_processed.add('latitude')
showIndent(outfile, level)
outfile.write('latitude=%f,\n' % (self.latitude,))
if self.longitude is not None and 'longitude' not in already_processed:
already_processed.add('longitude')
showIndent(outfile, level)
outfile.write('longitude=%f,\n' % (self.longitude,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('location', node)
if value is not None and 'location' not in already_processed:
already_processed.add('location')
self.location = value
self.validate_cornerType(self.location) # validate type cornerType
value = find_attr_value_('latitude', node)
if value is not None and 'latitude' not in already_processed:
already_processed.add('latitude')
try:
self.latitude = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (latitude): %s' % exp)
self.validate_latAngleType(self.latitude) # validate type latAngleType
value = find_attr_value_('longitude', node)
if value is not None and 'longitude' not in already_processed:
already_processed.add('longitude')
try:
self.longitude = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (longitude): %s' % exp)
self.validate_longAngleType(self.longitude) # validate type longAngleType
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class corner
class bounding_coordinates(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, west=None, east=None, north=None, south=None):
self.west = west
self.east = east
self.north = north
self.south = south
def factory(*args_, **kwargs_):
if bounding_coordinates.subclass:
return bounding_coordinates.subclass(*args_, **kwargs_)
else:
return bounding_coordinates(*args_, **kwargs_)
factory = staticmethod(factory)
def get_west(self): return self.west
def set_west(self, west): self.west = west
def get_east(self): return self.east
def set_east(self, east): self.east = east
def get_north(self): return self.north
def set_north(self, north): self.north = north
def get_south(self): return | |
# Copyright 2012-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser, os, platform, re, shlex, shutil, subprocess
from . import coredata
from .linkers import ArLinker, VisualStudioLinker
from . import mesonlib
from .mesonlib import EnvironmentException, Popen_safe
from . import mlog
from . import compilers
from .compilers import (
CLANG_OSX,
CLANG_STANDARD,
CLANG_WIN,
GCC_CYGWIN,
GCC_MINGW,
GCC_OSX,
GCC_STANDARD,
ICC_STANDARD,
is_assembly,
is_header,
is_library,
is_llvm_ir,
is_object,
is_source,
)
from .compilers import (
ArmCCompiler,
ArmCPPCompiler,
ArmclangCCompiler,
ArmclangCPPCompiler,
ClangCCompiler,
ClangCPPCompiler,
ClangObjCCompiler,
ClangObjCPPCompiler,
G95FortranCompiler,
GnuCCompiler,
GnuCPPCompiler,
GnuFortranCompiler,
GnuObjCCompiler,
GnuObjCPPCompiler,
ElbrusCCompiler,
ElbrusCPPCompiler,
ElbrusFortranCompiler,
IntelCCompiler,
IntelCPPCompiler,
IntelFortranCompiler,
JavaCompiler,
MonoCompiler,
VisualStudioCsCompiler,
NAGFortranCompiler,
Open64FortranCompiler,
PathScaleFortranCompiler,
PGIFortranCompiler,
RustCompiler,
SunFortranCompiler,
ValaCompiler,
VisualStudioCCompiler,
VisualStudioCPPCompiler,
)
build_filename = 'meson.build'
known_cpu_families = (
'aarch64',
'arm',
'e2k',
'ia64',
'mips',
'mips64',
'parisc',
'ppc',
'ppc64',
'sparc64',
'x86',
'x86_64'
)
def detect_gcovr(version='3.1', log=False):
gcovr_exe = 'gcovr'
try:
p, found = Popen_safe([gcovr_exe, '--version'])[0:2]
except (FileNotFoundError, PermissionError):
# Doesn't exist in PATH or isn't executable
return None, None
found = search_version(found)
if p.returncode == 0:
if log:
mlog.log('Found gcovr-{} at {}'.format(found, shlex.quote(shutil.which(gcovr_exe))))
return gcovr_exe, mesonlib.version_compare(found, '>=' + version)
return None, None
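# Illustrative behaviour of detect_gcovr (an informal summary, not Meson docs):
# it returns ('gcovr', True) when a gcovr satisfying the minimum version is on
# PATH, ('gcovr', False) when gcovr runs but is too old, and (None, None) when
# the binary is missing or not executable.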
def find_coverage_tools():
gcovr_exe, gcovr_new_rootdir = detect_gcovr()
lcov_exe = 'lcov'
genhtml_exe = 'genhtml'
if not mesonlib.exe_exists([lcov_exe, '--version']):
lcov_exe = None
if not mesonlib.exe_exists([genhtml_exe, '--version']):
genhtml_exe = None
return gcovr_exe, gcovr_new_rootdir, lcov_exe, genhtml_exe
def detect_ninja(version='1.5', log=False):
for n in ['ninja', 'ninja-build']:
try:
p, found = Popen_safe([n, '--version'])[0:2]
except (FileNotFoundError, PermissionError):
# Doesn't exist in PATH or isn't executable
continue
found = found.strip()
# Perhaps we should add a way for the caller to know the failure mode
# (not found or too old)
if p.returncode == 0 and mesonlib.version_compare(found, '>=' + version):
if log:
mlog.log('Found ninja-{} at {}'.format(found, shlex.quote(shutil.which(n))))
return n
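# Illustrative behaviour of detect_ninja (informal summary): it returns the
# first of 'ninja' or 'ninja-build' whose reported version satisfies the
# minimum, and falls through to an implicit None otherwise.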
def detect_native_windows_arch():
"""
The architecture of Windows itself: x86 or amd64
"""
# These env variables are always available. See:
# https://msdn.microsoft.com/en-us/library/aa384274(VS.85).aspx
# https://blogs.msdn.microsoft.com/david.wang/2006/03/27/howto-detect-process-bitness/
arch = os.environ.get('PROCESSOR_ARCHITEW6432', '').lower()
if not arch:
try:
# If this doesn't exist, something is messing with the environment
arch = os.environ['PROCESSOR_ARCHITECTURE'].lower()
except KeyError:
raise EnvironmentException('Unable to detect native OS architecture')
return arch
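# Sketch of the expected results (an assumption based on the docstring above):
#   32-bit Windows                         -> 'x86'
#   64-bit Windows (including under WOW64) -> 'amd64'
# An EnvironmentException is raised only if neither environment variable is set.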
def detect_windows_arch(compilers):
"""
Detecting the 'native' architecture of Windows is not a trivial task. We
cannot trust that the architecture that Python is built for is the 'native'
one because you can run 32-bit apps on 64-bit Windows using WOW64 and
people sometimes install 32-bit Python on 64-bit Windows.
We also can't rely on the architecture of the OS itself, since it's
perfectly normal to compile and run 32-bit applications on Windows as if
they were native applications. It's a terrible experience to require the
user to supply a cross-info file to compile 32-bit applications on 64-bit
Windows. Thankfully, the only way to compile things with Visual Studio on
Windows is by entering the 'msvc toolchain' environment, which can be
easily detected.
In the end, the sanest method is as follows:
1. Check if we're in an MSVC toolchain environment, and if so, return the
MSVC toolchain architecture as our 'native' architecture.
2. If not, check environment variables that are set by Windows and WOW64 to
find out the architecture that Windows is built for, and use that as our
'native' architecture.
"""
os_arch = detect_native_windows_arch()
if os_arch != 'amd64':
return os_arch
# If we're on 64-bit Windows, 32-bit apps can be compiled without
# cross-compilation. So if we're doing that, just set the native arch as
# 32-bit and pretend like we're running under WOW64. Else, return the
# actual Windows architecture that we deduced above.
for compiler in compilers.values():
        # Check if we're using MSVC and are inside an MSVC toolchain environment
if compiler.id == 'msvc' and 'VCINSTALLDIR' in os.environ:
if float(compiler.get_toolset_version()) < 10.0:
# On MSVC 2008 and earlier, check 'BUILD_PLAT', where
# 'Win32' means 'x86'
platform = os.environ.get('BUILD_PLAT', 'x86')
if platform == 'Win32':
return 'x86'
else:
# On MSVC 2010 and later 'Platform' is only set when the
# target arch is not 'x86'. It's 'x64' when targeting
# x86_64 and 'arm' when targeting ARM.
platform = os.environ.get('Platform', 'x86').lower()
if platform == 'x86':
return platform
if compiler.id == 'gcc' and compiler.has_builtin_define('__i386__'):
return 'x86'
return os_arch
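# Minimal sketch of the decision order implemented above (illustrative only):
#   Windows itself is not amd64                  -> that architecture as-is
#   MSVC env (VCINSTALLDIR) targeting x86/Win32  -> 'x86'
#   GCC that defines __i386__                    -> 'x86'
#   otherwise                                    -> 'amd64'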
def detect_cpu_family(compilers):
"""
Python is inconsistent in its platform module.
It returns different values for the same cpu.
For x86 it might return 'x86', 'i686' or somesuch.
Do some canonicalization.
"""
if mesonlib.is_windows():
trial = detect_windows_arch(compilers)
else:
trial = platform.machine().lower()
if trial.startswith('i') and trial.endswith('86'):
return 'x86'
if trial.startswith('arm'):
return 'arm'
if trial.startswith('ppc64'):
return 'ppc64'
if trial in ('amd64', 'x64'):
trial = 'x86_64'
if trial == 'x86_64':
# On Linux (and maybe others) there can be any mixture of 32/64 bit
# code in the kernel, Python, system etc. The only reliable way
# to know is to check the compiler defines.
for c in compilers.values():
try:
if c.has_builtin_define('__i386__'):
return 'x86'
except mesonlib.MesonException:
# Ignore compilers that do not support has_builtin_define.
pass
return 'x86_64'
# Add fixes here as bugs are reported.
if trial not in known_cpu_families:
mlog.warning('Unknown CPU family {!r}, please report this at '
                     'https://github.com/mesonbuild/meson/issues/new with the '
'output of `uname -a` and `cat /proc/cpuinfo`'.format(trial))
return trial
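# Canonicalization examples (illustrative; real results depend on the platform
# and compilers): 'i686' -> 'x86', 'armv7l' -> 'arm', 'ppc64le' -> 'ppc64',
# 'amd64'/'x64' -> 'x86_64', and 'x86_64' is reported as 'x86' when a compiler
# defines __i386__ (i.e. a 32-bit toolchain on a 64-bit kernel).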
def detect_cpu(compilers):
if mesonlib.is_windows():
trial = detect_windows_arch(compilers)
else:
trial = platform.machine().lower()
if trial in ('amd64', 'x64'):
trial = 'x86_64'
if trial == 'x86_64':
# Same check as above for cpu_family
for c in compilers.values():
try:
if c.has_builtin_define('__i386__'):
return 'i686' # All 64 bit cpus have at least this level of x86 support.
except mesonlib.MesonException:
pass
return 'x86_64'
if trial == 'e2k':
# Make more precise CPU detection for Elbrus platform.
trial = platform.processor().lower()
# Add fixes here as bugs are reported.
return trial
def detect_system():
system = platform.system().lower()
if system.startswith('cygwin'):
return 'cygwin'
return system
def detect_msys2_arch():
if 'MSYSTEM_CARCH' in os.environ:
return os.environ['MSYSTEM_CARCH']
return None
def search_version(text):
# Usually of the type 4.1.4 but compiler output may contain
# stuff like this:
# (Sourcery CodeBench Lite 2014.05-29) 4.8.3 20140320 (prerelease)
# Limiting major version number to two digits seems to work
# thus far. When we get to GCC 100, this will break, but
# if we are still relevant when that happens, it can be
# considered an achievement in itself.
#
# This regex is reaching magic levels. If it ever needs
# to be updated, do not complexify but convert to something
# saner instead.
version_regex = '(?<!(\d|\.))(\d{1,2}(\.\d+)+(-[a-zA-Z0-9]+)?)'
match = re.search(version_regex, text)
if match:
return match.group(0)
return 'unknown version'
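# Example inputs and outputs for search_version (illustrative, derived from the
# comment above the regex):
#   '4.1.4'                                                -> '4.1.4'
#   '(Sourcery CodeBench Lite 2014.05-29) 4.8.3 20140320'  -> '4.8.3'
#   'no version digits here'                               -> 'unknown version'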
class Environment:
private_dir = 'meson-private'
log_dir = 'meson-logs'
def __init__(self, source_dir, build_dir, options):
self.source_dir = source_dir
self.build_dir = build_dir
self.scratch_dir = os.path.join(build_dir, Environment.private_dir)
self.log_dir = os.path.join(build_dir, Environment.log_dir)
os.makedirs(self.scratch_dir, exist_ok=True)
os.makedirs(self.log_dir, exist_ok=True)
try:
self.coredata = coredata.load(self.get_build_dir())
self.first_invocation = False
except FileNotFoundError:
# WARNING: Don't use any values from coredata in __init__. It gets
# re-initialized with project options by the interpreter during
# build file parsing.
self.coredata = coredata.CoreData(options)
# Used by the regenchecker script, which runs meson
self.coredata.meson_command = mesonlib.meson_command
self.first_invocation = True
if self.coredata.cross_file:
self.cross_info = CrossBuildInfo(self.coredata.cross_file)
else:
self.cross_info = None
self.cmd_line_options = options.cmd_line_options.copy()
# List of potential compilers.
if mesonlib.is_windows():
self.default_c = ['cl', 'cc', 'gcc', 'clang']
self.default_cpp = ['cl', 'c++', 'g++', 'clang++']
else:
self.default_c = ['cc', 'gcc', 'clang']
self.default_cpp = ['c++', 'g++', 'clang++']
if mesonlib.is_windows():
self.default_cs = ['csc', 'mcs']
else:
self.default_cs = ['mcs', 'csc']
self.default_objc = ['cc']
self.default_objcpp = ['c++']
self.default_fortran = ['gfortran', 'g95', 'f95', 'f90', 'f77', 'ifort']
self.default_rust = ['rustc']
self.default_static_linker = ['ar']
self.vs_static_linker = ['lib']
self.gcc_static_linker = ['gcc-ar']
self.clang_static_linker = ['llvm-ar']
# Various prefixes and suffixes for import libraries, shared libraries,
# static libraries, and executables.
# Versioning is added to these names in the backends as-needed.
cross = self.is_cross_build()
if (not cross and mesonlib.is_windows()) \
or | |
"""
Copyright (c) 2020, 2021 Oracle and/or its affiliates.
Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""
import unittest
import os
import shutil
import tempfile
from java.util.logging import Level
from oracle.weblogic.deploy.compare import CompareException
from oracle.weblogic.deploy.logging import SummaryHandler
from oracle.weblogic.deploy.util import PyWLSTException
from compare_model import ModelFileDiffer
from wlsdeploy.aliases.model_constants import APPLICATION
from wlsdeploy.aliases.model_constants import APP_DEPLOYMENTS
from wlsdeploy.aliases.model_constants import LIBRARY
from wlsdeploy.aliases.model_constants import MAX_THREADS_CONSTRAINT
from wlsdeploy.aliases.model_constants import MIN_THREADS_CONSTRAINT
from wlsdeploy.aliases.model_constants import RESOURCES
from wlsdeploy.aliases.model_constants import SELF_TUNING
from wlsdeploy.aliases.model_constants import SOURCE_PATH
from wlsdeploy.aliases.model_constants import TOPOLOGY
from wlsdeploy.aliases.model_constants import WORK_MANAGER
from wlsdeploy.logging.platform_logger import PlatformLogger
from wlsdeploy.util import dictionary_utils
from wlsdeploy.util import model_helper
from wlsdeploy.util.model_context import ModelContext
from wlsdeploy.util.model_translator import FileToPython
class CompareModelTestCase(unittest.TestCase):
_resources_dir = '../../test-classes'
_tests_dir = '../../unit-tests'
_results_dir = _tests_dir + '/compare'
_use_ordering = True
def setUp(self):
self.name = 'CompareModelTestCase'
self._logger = PlatformLogger('wlsdeploy.compare_model')
self._program_name = 'CompareModelTestCase'
if not os.path.isdir(self._tests_dir):
os.mkdir(self._tests_dir)
if not os.path.isdir(self._results_dir):
os.mkdir(self._results_dir)
# add summary handler to validate logger to check results
self._summary_handler = SummaryHandler()
PlatformLogger('wlsdeploy.validate').logger.addHandler(self._summary_handler)
def tearDown(self):
# remove summary handler for next test suite
PlatformLogger('wlsdeploy.validate').logger.removeHandler(self._summary_handler)
def testCompareModelFull(self):
_method_name = 'testCompareModelFull'
_variables_file = self._resources_dir + '/compare_model_model1.10.properties'
_new_model_file = self._resources_dir + '/compare_model_model2.yaml'
_old_model_file = self._resources_dir + '/compare_model_model1.yaml'
_temp_dir = os.path.join(tempfile.gettempdir(), _method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
os.mkdir(_temp_dir)
mw_home = os.environ['MW_HOME']
args_map = {
'-oracle_home': mw_home,
'-variable_file': _variables_file,
'-output_dir' : _temp_dir,
'-domain_type' : 'WLS',
'-trailing_arguments': [ _new_model_file, _old_model_file ]
}
try:
model_context = ModelContext('CompareModelTestCase', args_map)
obj = ModelFileDiffer(_new_model_file, _old_model_file, model_context, _temp_dir)
return_code = obj.compare()
self.assertEqual(return_code, 0)
yaml_result = _temp_dir + os.sep + 'diffed_model.yaml'
json_result = _temp_dir + os.sep + 'diffed_model.json'
stdout_result = obj.get_compare_msgs()
model_dictionary = FileToPython(yaml_result).parse()
yaml_exists = os.path.exists(yaml_result)
json_exists = os.path.exists(json_result)
self.assertEqual(yaml_exists, True)
self.assertEqual(json_exists, True)
self.assertEqual(len(stdout_result), 14)
self.assertEqual(model_dictionary.has_key('resources'), True)
self.assertEqual(model_dictionary.has_key('topology'), True)
self.assertEqual(model_dictionary.has_key('appDeployments'), True)
self.assertEqual(model_dictionary['topology'].has_key('ServerTemplate'), True)
self.assertEqual(model_dictionary['topology'].has_key('Cluster'), True)
self.assertEqual(model_dictionary['topology']['ServerTemplate'].has_key('cluster-1-template'), True)
self.assertEqual(model_dictionary['topology']['Cluster'].has_key('cluster-2'), True)
self.assertEqual(model_dictionary['appDeployments'].has_key('Library'), True)
self.assertEqual(model_dictionary['appDeployments'].has_key('Application'), True)
self.assertEqual(model_dictionary['appDeployments']['Application'].has_key('myear'), True)
self.assertEqual(model_dictionary['resources'].has_key('JMSSystemResource'), True)
self.assertEqual(model_dictionary['resources']['JMSSystemResource'].has_key('MyJmsModule'), True)
self.assertEqual(model_dictionary['resources'].has_key('SingletonService'), True)
self.assertEqual(model_dictionary['appDeployments']['Library'].has_key('!jax-rs#[email protected]'), True)
self.assertEqual(model_dictionary['appDeployments']['Library'].has_key('!jsf#[email protected]'), True)
self.assertEqual(model_dictionary['appDeployments']['Application']['myear'].has_key('ModuleType'), False)
except (CompareException, PyWLSTException), te:
return_code = 2
self._logger.severe('WLSDPLY-05709',
te.getLocalizedMessage(), error=te,
class_name=self._program_name, method_name=_method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
self.assertEqual(return_code, 0)
def testCompareModelSecurityConfigurationAttribute(self):
_method_name = 'testCompareModelSecurityConfigurationAttribute'
_new_model_file = self._resources_dir + '/compare/model-sc1-new.yaml'
_old_model_file = self._resources_dir + '/compare/model-sc1-old.yaml'
_temp_dir = os.path.join(tempfile.gettempdir(), _method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
os.mkdir(_temp_dir)
mw_home = os.environ['MW_HOME']
args_map = {
'-oracle_home': mw_home,
'-output_dir' : _temp_dir,
'-domain_type' : 'WLS',
'-trailing_arguments': [ _new_model_file, _old_model_file ]
}
try:
model_context = ModelContext('CompareModelTestCase', args_map)
obj = ModelFileDiffer(_new_model_file, _old_model_file, model_context, _temp_dir)
return_code = obj.compare()
self.assertEqual(return_code, 0)
yaml_result = _temp_dir + os.sep + 'diffed_model.yaml'
json_result = _temp_dir + os.sep + 'diffed_model.json'
stdout_result = obj.get_compare_msgs()
model_dictionary = FileToPython(yaml_result).parse()
self.assertEqual(model_dictionary.has_key('topology'), True)
self.assertEqual(model_dictionary['topology'].has_key('SecurityConfiguration'), True)
self.assertEqual(model_dictionary['topology']['SecurityConfiguration'].has_key('Realm'), True)
self.assertEqual(model_dictionary['topology']['SecurityConfiguration']['Realm'].has_key('myrealm'), True)
self.assertEqual(model_dictionary['topology']['SecurityConfiguration']['Realm']['myrealm'].has_key('Auditor'), False)
self.assertEqual(model_dictionary['topology']['SecurityConfiguration']['Realm']['myrealm'].has_key('AuthenticationProvider'), True)
self.assertEqual(model_dictionary['topology']['SecurityConfiguration']['Realm']['myrealm'].has_key('PasswordValidator'), False)
except (CompareException, PyWLSTException), te:
return_code = 2
self._logger.severe('WLSDPLY-05709',
te.getLocalizedMessage(), error=te,
class_name=self._program_name, method_name=_method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
self.assertEqual(return_code, 0)
def testCompareModelSecurityConfigurationCustomList(self):
_method_name = 'testCompareModelSecurityConfigurationCustomList'
_new_model_file = self._resources_dir + '/compare/model-sc2-new.yaml'
_old_model_file = self._resources_dir + '/compare/model-sc2-old.yaml'
_temp_dir = os.path.join(tempfile.gettempdir(), _method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
os.mkdir(_temp_dir)
mw_home = os.environ['MW_HOME']
args_map = {
'-oracle_home': mw_home,
'-output_dir' : _temp_dir,
'-domain_type' : 'WLS',
'-trailing_arguments': [ _new_model_file, _old_model_file ]
}
try:
model_context = ModelContext('CompareModelTestCase', args_map)
obj = ModelFileDiffer(_new_model_file, _old_model_file, model_context, _temp_dir)
return_code = obj.compare()
self.assertEqual(return_code, 0)
yaml_result = _temp_dir + os.sep + 'diffed_model.yaml'
json_result = _temp_dir + os.sep + 'diffed_model.json'
stdout_result = obj.get_compare_msgs()
model_dictionary = FileToPython(yaml_result).parse()
self.assertEqual(model_dictionary.has_key('topology'), True)
self.assertEqual(model_dictionary['topology'].has_key('SecurityConfiguration'), True)
self.assertEqual(model_dictionary['topology']['SecurityConfiguration'].has_key('Realm'), True)
self.assertEqual(model_dictionary['topology']['SecurityConfiguration']['Realm'].has_key('myrealm'), True)
self.assertEqual(model_dictionary['topology']['SecurityConfiguration']['Realm']['myrealm'].has_key('Auditor'), False)
self.assertEqual(model_dictionary['topology']['SecurityConfiguration']['Realm']['myrealm'].has_key('AuthenticationProvider'), True)
self.assertEqual(model_dictionary['topology']['SecurityConfiguration']['Realm']['myrealm'].has_key('PasswordValidator'), True)
except (CompareException, PyWLSTException), te:
return_code = 2
self._logger.severe('WLSDPLY-05709',
te.getLocalizedMessage(), error=te,
class_name=self._program_name, method_name=_method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
self.assertEqual(return_code, 0)
def testCompareModelInvalidModel(self):
_method_name = 'testCompareModelInvalidModel'
_variables_file = self._resources_dir + '/compare_model_model1.10.properties'
_new_model_file = self._resources_dir + '/compare_model_model3.yaml'
_old_model_file = self._resources_dir + '/compare_model_model1.yaml'
_temp_dir = os.path.join(tempfile.gettempdir(), _method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
os.mkdir(_temp_dir)
mw_home = os.environ['MW_HOME']
args_map = {
'-oracle_home': mw_home,
'-variable_file': _variables_file,
'-output_dir' : _temp_dir,
'-domain_type' : 'WLS',
'-trailing_arguments': [ _new_model_file, _old_model_file ]
}
try:
model_context = ModelContext('CompareModelTestCase', args_map)
obj = ModelFileDiffer(_new_model_file, _old_model_file, model_context, _temp_dir)
return_code = obj.compare()
except (CompareException, PyWLSTException), te:
return_code = 2
# self._logger.severe('WLSDPLY-05709', te.getLocalizedMessage(), error=te,
# class_name=self._program_name, method_name=_method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
self.assertNotEqual(return_code, 0)
def testCompareModelInvalidFile(self):
_method_name = 'testCompareModelInvalidFile'
_variables_file = self._resources_dir + '/compare_model_model1.10.properties'
_new_model_file = self._resources_dir + '/compare_model_model4.yaml'
_old_model_file = self._resources_dir + '/compare_model_model1.yaml'
_temp_dir = os.path.join(tempfile.gettempdir(), _method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
os.mkdir(_temp_dir)
mw_home = os.environ['MW_HOME']
args_map = {
'-oracle_home': mw_home,
'-variable_file': _variables_file,
'-output_dir' : _temp_dir,
'-domain_type' : 'WLS',
'-trailing_arguments': [ _new_model_file, _old_model_file ]
}
try:
model_context = ModelContext('CompareModelTestCase', args_map)
obj = ModelFileDiffer(_new_model_file, _old_model_file, model_context, _temp_dir)
# expected parse error for model4, disable logging
yaml_logger = PlatformLogger('wlsdeploy.yaml')
yaml_level = yaml_logger.get_level()
yaml_logger.set_level(Level.OFF)
compare_logger = PlatformLogger('wlsdeploy.compare_model')
compare_level = compare_logger.get_level()
compare_logger.set_level(Level.OFF)
return_code = obj.compare()
# Restore original log levels
yaml_logger.set_level(yaml_level)
compare_logger.set_level(compare_level)
except (CompareException, PyWLSTException), te:
return_code = 2
# self._logger.severe('WLSDPLY-05709', te.getLocalizedMessage(), error=te,
# class_name=self._program_name, method_name=_method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
self.assertNotEqual(return_code, 0)
def testDeleteModelAppDeployments(self):
        _method_name = 'testDeleteModelAppDeployments'
_variables_file = self._resources_dir + '/compare_model_model1.10.properties'
_new_model_file = self._resources_dir + '/compare_model_model5.yaml'
_old_model_file = self._resources_dir + '/compare_model_model1.yaml'
_temp_dir = os.path.join(tempfile.gettempdir(), _method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
os.mkdir(_temp_dir)
mw_home = os.environ['MW_HOME']
args_map = {
'-oracle_home': mw_home,
'-variable_file': _variables_file,
'-output_dir' : _temp_dir,
'-domain_type' : 'WLS',
'-trailing_arguments': [ _new_model_file, _old_model_file ]
}
try:
model_context = ModelContext('CompareModelTestCase', args_map)
obj = ModelFileDiffer(_new_model_file, _old_model_file, model_context, _temp_dir)
return_code = obj.compare()
self.assertEqual(return_code, 0)
yaml_result = _temp_dir + os.sep + 'diffed_model.yaml'
stdout_result = obj.get_compare_msgs()
model_dictionary = FileToPython(yaml_result).parse()
yaml_exists = os.path.exists(yaml_result)
self.assertEqual(yaml_exists, True)
self.assertEqual(len(stdout_result), 0)
self.assertEqual(model_dictionary.has_key('appDeployments'), True)
self.assertEqual(model_dictionary['appDeployments'].has_key('Library'), True)
self.assertEqual(model_dictionary['appDeployments'].has_key('Application'), True)
self.assertEqual(model_dictionary['appDeployments']['Application'].has_key('!myear'), True)
self.assertEqual(model_dictionary['appDeployments']['Library'].has_key('!jax-rs#[email protected]'), True)
self.assertEqual(model_dictionary['appDeployments']['Library'].has_key('!jsf#[email protected]'), True)
except (CompareException, PyWLSTException), te:
return_code = 2
self._logger.severe('WLSDPLY-05709',
te.getLocalizedMessage(), error=te,
class_name=self._program_name, method_name=_method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
self.assertEqual(return_code, 0)
def testCompareModelFull2(self):
_method_name = 'testCompareModelFull2'
        # This test covers:
        #   1. Changing the WebLogic password
        #   2. Changing the RCU password
        #   3. Deleting an application
_variables_file = self._resources_dir + '/compare_model_model1.10.properties'
_new_model_file = self._resources_dir + '/compare_model_model7.yaml'
_old_model_file = self._resources_dir + '/compare_model_model6.yaml'
_temp_dir = os.path.join(tempfile.gettempdir(), _method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
os.mkdir(_temp_dir)
mw_home = os.environ['MW_HOME']
args_map = {
'-oracle_home': mw_home,
'-variable_file': _variables_file,
'-output_dir' : _temp_dir,
'-domain_type' : 'WLS',
'-trailing_arguments': [ _new_model_file, _old_model_file ]
}
try:
model_context = ModelContext('CompareModelTestCase', args_map)
obj = ModelFileDiffer(_new_model_file, _old_model_file, model_context, _temp_dir)
return_code = obj.compare()
self.assertEqual(return_code, 0)
yaml_result = _temp_dir + os.sep + 'diffed_model.yaml'
json_result = _temp_dir + os.sep + 'diffed_model.json'
stdout_result = obj.get_compare_msgs()
model_dictionary = FileToPython(yaml_result).parse()
yaml_exists = os.path.exists(yaml_result)
json_exists = os.path.exists(json_result)
self.assertEqual(yaml_exists, True)
self.assertEqual(json_exists, True)
self.assertEqual(len(stdout_result), 0)
self.assertEqual(model_dictionary.has_key('domainInfo'), True)
self.assertEqual(model_dictionary['domainInfo'].has_key('AdminPassword'), True)
self.assertEqual(model_dictionary['domainInfo']['AdminPassword'], '<PASSWORD>')
self.assertEqual(model_dictionary['domainInfo'].has_key('AdminUser'), False)
self.assertEqual(model_dictionary['domainInfo'].has_key('RCUDbInfo'), True)
self.assertEqual(model_dictionary['domainInfo']['RCUDbInfo'].has_key('rcu_admin_password'), True)
self.assertEqual(len(model_dictionary['domainInfo']['RCUDbInfo']), 1)
self.assertEqual(len(model_dictionary['domainInfo']), 2)
self.assertEqual(model_dictionary.has_key('appDeployments'), True)
self.assertEqual(model_dictionary['appDeployments'].has_key('Application'), True)
self.assertEqual(model_dictionary['appDeployments']['Application'].has_key('!yourear'), True)
self.assertEqual(len(model_dictionary['appDeployments']['Application']), 1)
except (CompareException, PyWLSTException), te:
return_code = 2
self._logger.severe('WLSDPLY-05709',
te.getLocalizedMessage(), error=te,
class_name=self._program_name, method_name=_method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
self.assertEqual(return_code, 0)
def testCompareModelFull3(self):
_method_name = 'testCompareModelFull3'
        # This test covers:
        #   1. Changing a MailSession property
        #   2. Changing ODL HandlerDefaults
        #   3. Changing an ODL Handler property
        #   4. Changing ODL Logger attributes
_variables_file = self._resources_dir + '/compare_model_model1.10.properties'
_new_model_file = self._resources_dir + '/compare_model_model8.yaml'
_old_model_file = self._resources_dir + '/compare_model_model7.yaml'
_temp_dir = os.path.join(tempfile.gettempdir(), _method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
os.mkdir(_temp_dir)
mw_home = os.environ['MW_HOME']
args_map = {
'-oracle_home': mw_home,
'-variable_file': _variables_file,
'-output_dir' : _temp_dir,
'-domain_type' : 'WLS',
'-trailing_arguments': [ _new_model_file, _old_model_file ]
}
try:
model_context = ModelContext('CompareModelTestCase', args_map)
obj = ModelFileDiffer(_new_model_file, _old_model_file, model_context, _temp_dir)
return_code = obj.compare()
self.assertEqual(return_code, 0)
yaml_result = _temp_dir + os.sep + 'diffed_model.yaml'
json_result = _temp_dir + os.sep + 'diffed_model.json'
stdout_result = obj.get_compare_msgs()
model_dictionary = FileToPython(yaml_result).parse()
yaml_exists = os.path.exists(yaml_result)
json_exists = os.path.exists(json_result)
self.assertEqual(yaml_exists, True)
self.assertEqual(json_exists, True)
self.assertEqual(len(stdout_result), 0)
self.assertEqual(model_dictionary.has_key('resources'), True)
self.assertEqual(model_dictionary['resources'].has_key('MailSession'), True)
self.assertEqual(model_dictionary['resources']['MailSession'].has_key('MyMailSession'), True)
mail_session = model_dictionary['resources']['MailSession']['MyMailSession']
self.assertEqual(mail_session.has_key('Properties'), True)
self.assertEqual(mail_session['Properties'].has_key('mail.imap.port'), True)
self.assertEqual(mail_session['Properties']['mail.imap.port'], 993)
self.assertEqual(model_dictionary['resources'].has_key('ODLConfiguration'), True)
self.assertEqual(model_dictionary['resources']['ODLConfiguration'].has_key('config'), True)
self.assertEqual(model_dictionary['resources']['ODLConfiguration']['config'].has_key('HandlerDefaults'),
True)
self.assertEqual(
model_dictionary['resources']['ODLConfiguration']['config']['HandlerDefaults'].has_key('maxFileSize'),
True)
self.assertEqual(
model_dictionary['resources']['ODLConfiguration']['config']['HandlerDefaults']['maxFileSize'],
14857620)
self.assertEqual(model_dictionary['resources']['ODLConfiguration']['config'].has_key('Handler'),
True)
self.assertEqual(
model_dictionary['resources']['ODLConfiguration']['config']['Handler'].has_key('odl-handler'),
True)
self.assertEqual(
model_dictionary['resources']['ODLConfiguration']['config']['Handler']['odl-handler']
.has_key('Properties'), True)
self.assertEqual(
model_dictionary['resources']['ODLConfiguration']['config']['Handler']['odl-handler']['Properties']
.has_key('maxFileSize'), True)
self.assertEqual(
model_dictionary['resources']['ODLConfiguration']['config']['Handler']['odl-handler']
['Properties']['maxFileSize'], 14857620)
self.assertEqual(model_dictionary['resources']['ODLConfiguration']['config'].has_key('Logger'),
True)
self.assertEqual(
model_dictionary['resources']['ODLConfiguration']['config']['Logger']
.has_key('oracle.communications.ordermanagement.automation.plugin.AutomationPluginManager'),
True)
self.assertEqual(
model_dictionary['resources']['ODLConfiguration']['config']['Logger']
['oracle.communications.ordermanagement.automation.plugin.AutomationPluginManager']
.has_key('Level'), True)
self.assertEqual(
model_dictionary['resources']['ODLConfiguration']['config']['Logger']
['oracle.communications.ordermanagement.automation.plugin.AutomationPluginManager']['Level'],
'TRACE:16')
self.assertEqual(
len(model_dictionary['resources']['ODLConfiguration']['config']), 3)
self.assertEqual(
len(model_dictionary['resources']['ODLConfiguration']['config']['Logger']), 1)
self.assertEqual(
len(model_dictionary['resources']['ODLConfiguration']['config']['HandlerDefaults']), 1)
except (CompareException, PyWLSTException), te:
return_code = 2
self._logger.severe('WLSDPLY-05709',
te.getLocalizedMessage(), error=te,
class_name=self._program_name, method_name=_method_name)
if os.path.exists(_temp_dir):
shutil.rmtree(_temp_dir)
self.assertEqual(return_code, 0)
def testCompareModel4(self):
_method_name = 'testCompareModel4'
_models_dir = self._resources_dir + '/compare'
_new_model_file = _models_dir + '/model-4-new.yaml'
_old_model_file = _models_dir + '/model-4-old.yaml'
_output_dir = os.path.join(self._results_dir, 'model-4')
if not os.path.isdir(_output_dir):
os.mkdir(_output_dir)
args_map = {
'-oracle_home': '/oracle',
'-output_dir': _output_dir,
'-trailing_arguments': [_new_model_file, _old_model_file]
}
try:
model_context = ModelContext('CompareModelTestCase', args_map)
differ = ModelFileDiffer(_new_model_file, _old_model_file, model_context, _output_dir)
return_code = differ.compare()
self.assertEqual(return_code, 0)
yaml_result = _output_dir + os.sep + 'diffed_model.yaml'
self.assertEqual(os.path.exists(yaml_result), True, "YAML result should exist: " + yaml_result)
json_result = _output_dir + os.sep + 'diffed_model.json'
self.assertEqual(os.path.exists(json_result), True, "JSON result should exist: " + json_result)
# see comments in the model for erased | |
<reponame>gmargaryan/hardy<filename>scripts/Cisco_IOS_15_Benchmark_scripts.py<gh_stars>0
import re
import yaml
from netmiko import Netmiko
### Debug output ###
def debug_output(command, pattern, output):
print ("\ncommand:\n" + command)
print ("\npattern:\n" + pattern)
print ("\noutput:\n" + output + "\n")
### Manual ###
def manual(net_connect,host_parameters, debug):
return (False, "MANUAL")
### Dependencies ###
def if_eigrp_configured (net_connect,host_parameters, debug):
result = False
message = "NO EIGRP"
command_1 = "sh run | sec router eigrp"
pattern_1 = ".+"
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# compare output with pattern
match_1 = re.search(pattern_1, output_str_1)
if match_1:
result = True
message = ""
return (result, message)
def if_ospf_configured (net_connect,host_parameters, debug):
result = False
message = "NO OSPF"
command_1 = "sh run | sec router ospf"
pattern_1 = ".+"
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# compare output with pattern
match_1 = re.search(pattern_1, output_str_1)
if match_1:
result = True
message = ""
return (result, message)
def if_bgp_configured (net_connect,host_parameters, debug):
result = False
message = "NO BGP"
command_1 = "sh run | sec router bgp"
pattern_1 = ".+"
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# compare output with pattern
match_1 = re.search(pattern_1, output_str_1)
if match_1:
result = True
message = ""
return (result, message)
def if_rip_configured (net_connect,host_parameters, debug):
result = False
message = "NO RIP"
command_1 = "sh run | sec router rip"
pattern_1 = ".+"
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# compare output with pattern
match_1 = re.search(pattern_1, output_str_1)
if match_1:
result = True
message = ""
return (result, message)
### Scripts ####
def Create_acl_for_use_with_line_vty(net_connect,host_parameters, debug):
result = False
command_1 = "show conf | sec line vty"
pattern_1 = "access-class (\w+) in"
command_2_without_acl_name = "show ip access-list %s"
pattern_2 = ".+"
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# compare output with pattern
match_1 = re.search(pattern_1, output_str_1)
if match_1:
acl_name = match_1.group(1)
command_2 = command_2_without_acl_name % acl_name
output_str_2 = net_connect.send_command(command_2 )
if debug:
debug_output(command_2, pattern_2, output_str_2)
else:
pass
match_2 = re.search(pattern_2, output_str_2)
if match_2:
result = True
return (result, "")
def Create_an_acl_for_use_with_SNMP(net_connect,host_parameters, debug):
result = False
command_1 = "show conf | sec snmp-server community"
pattern_1 = "snmp-server community .* R[OW] (\w+)"
command_2_without_acl_name = "show ip access-list %s"
pattern_2 = ".+"
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# str -> list
output_list_1 = output_str_1.split("\n")
# check each line with pattern
for output_line in output_list_1:
match_1 = re.search(pattern_1, output_line)
if match_1:
acl_name = match_1.group(1)
command_2 = command_2_without_acl_name % acl_name
output_str_2 = net_connect.send_command(command_2)
if debug:
debug_output(command_2, pattern_2, output_str_2)
else:
pass
match_2 = re.search(pattern_2, output_str_2)
if match_2:
result = True
else:
result = False
break
return (result, "")
def Set_no_ip_proxy_arp(net_connect,host_parameters, debug):
result = False
message = "NO INT"
command_1 = "sh ip interface brief | incl Eth|eth"
pattern_1 = "^(\w+[0-9/.]+)\s+[0-9.]+\s+YES.*"
command_2_without_int_name = "sh run int %s | incl proxy-arp"
pattern_2 = "no ip proxy-arp"
interface = ""
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# str -> list
output_list_1 = output_str_1.split("\n")
# check each line with pattern
for output_line in output_list_1:
match_1 = re.search(pattern_1, output_line)
if match_1:
result = False
message = ""
interface = match_1.group(1)
command_2 = command_2_without_int_name % interface
output_str_2 = net_connect.send_command(command_2)
if debug:
debug_output(command_2, pattern_2, output_str_2)
else:
pass
match_2 = re.search(pattern_2, output_str_2)
if match_2:
result = True
else:
break
return (result, message)
def Set_ip_verify_unicast_source_reachable_via(net_connect,host_parameters, debug):
command_without_int = "sh ip int %s | incl verify source"
pattern = ".+"
result = True
for interface in host_parameters['interfaces']['untrust']:
if (interface['name']):
command = command_without_int % interface['name']
output_str = net_connect.send_command(command)
if debug:
debug_output(command, pattern, output_str)
else:
pass
match = re.search(pattern, output_str)
if not(match):
result = False
break
else:
pass
if result:
for interface in host_parameters['interfaces']['internet']:
if (interface['name']):
command = command_without_int % interface['name']
output_str = net_connect.send_command(command)
if debug:
debug_output(command, pattern, output_str)
else:
pass
match = re.search(pattern, output_str)
if not(match):
result = False
break
else:
pass
if result:
for interface in host_parameters['interfaces']['dmz']:
if (interface['name']):
command = command_without_int % interface['name']
output_str = net_connect.send_command(command)
if debug:
debug_output(command, pattern, output_str)
else:
pass
match = re.search(pattern, output_str)
if not(match):
result = False
break
else:
pass
return (result, "")
def Set_ip_acl_extended_to_Forbid_Private_Source_Addresses_from_External_Networks(net_connect,host_parameters, debug):
return (False, "MANUAL")
def Set_inbound_ip_access_group_on_the_External_Interface(net_connect,host_parameters, debug):
return (False, "MANUAL")
def Set_ip_authentication_key_chain_eigrp(net_connect,host_parameters, debug):
result = False
message = "NO INT"
command_1 = "sh ip eigrp interfaces | excl Vi|Lo"
pattern_1 = "^(\w+[0-9/.]+)\s+.+"
command_2_without_int_name = "sh run int %s | incl key-chain"
pattern_2 = "ip authentication key-chain eigrp "
interface = ""
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# str -> list
output_list_1 = output_str_1.split("\n")
# check each line with pattern
for output_line in output_list_1:
match_1 = re.search(pattern_1, output_line)
if (match_1):
result = False
message = ""
interface = match_1.group(1)
command_2 = command_2_without_int_name % interface
output_str_2 = net_connect.send_command(command_2)
if debug:
debug_output(command_2, pattern_2, output_str_2)
else:
pass
match_2 = re.search(pattern_2, output_str_2)
if match_2:
result = True
else:
break
return (result, message)
def Set_ip_authentication_mode_eigrp(net_connect,host_parameters, debug):
result = False
message = "NO INT"
command_1 = "sh ip eigrp interfaces | excl Vi|Lo"
pattern_1 = "^(\w+[0-9/.]+)\s+.+"
command_2_without_int_name = "sh run int %s | incl authentication mode"
pattern_2 = "ip authentication mode eigrp .+ "
interface = ""
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# str -> list
output_list_1 = output_str_1.split("\n")
# check each line with pattern
for output_line in output_list_1:
match_1 = re.search(pattern_1, output_line)
if match_1:
result = False
message = ""
interface = match_1.group(1)
command_2 = command_2_without_int_name % interface
output_str_2 = net_connect.send_command(command_2)
if debug:
debug_output(command_2, pattern_2, output_str_2)
else:
pass
match_2 = re.search(pattern_2, output_str_2)
if match_2:
result = True
else:
break
return (result, message)
def Set_authentication_message_digest_for_OSPF_area(net_connect,host_parameters, debug):
result = False
message = "NO INT"
command_1 = "sh ip ospf interface brief | excl Vi|Lo"
pattern_1 = "^\w+[0-9/.]+\s+\d+\s+(\d+)\s+.+"
command_2 = "sh run | sec router ospf "
pattern_2_without_area_name = "area %s authentication message-digest"
area = ""
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# str -> list
output_list_1 = output_str_1.split("\n")
# check each line with pattern
for output_line in output_list_1:
match_1 = re.search(pattern_1, output_line)
if match_1:
result = False
message = ""
area = match_1.group(1)
pattern_2 = pattern_2_without_area_name % area
output_str_2 = net_connect.send_command(command_2)
if debug:
debug_output(command_2, pattern_2, output_str_2)
else:
pass
match_2 = re.search(pattern_2, output_str_2)
if match_2:
result = True
else:
break
return (result, message)
def Set_ip_ospf_message_digest_key_md5(net_connect,host_parameters, debug):
result = False
message = "NO INT"
command_1 = "sh ip ospf interface brief | excl Vi|Lo"
pattern_1 = "^(\w+[0-9/.]+)\s+\d+\s+\d+\s+.+"
command_2_without_int_name = "sh run int %s | incl ip ospf message-digest-key"
pattern_2 = "ip ospf message-digest-key \d+ md5 .+"
interface = ""
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# str -> list
output_list_1 = output_str_1.split("\n")
# check each line with pattern
for output_line in output_list_1:
match_1 = re.search(pattern_1, output_line)
if match_1:
result = False
message = ""
interface = match_1.group(1)
command_2 = command_2_without_int_name % interface
output_str_2 = net_connect.send_command(command_2)
if debug:
debug_output(command_2, pattern_2, output_str_2)
else:
pass
match_2 = re.search(pattern_2, output_str_2)
if match_2:
result = True
else:
break
return (result, message)
def Set_ip_rip_authentication_key_chain(net_connect,host_parameters, debug):
result = False
message = "NO INT"
command_1 = "sh ip protocols | sec rip"
pattern_1 = "\s+(\w+[0-9/.]+)\s+[12]"
command_2_without_int_name = "sh run int %s | incl ip rip authentication key-chain"
pattern_2 = "ip rip authentication key-chain .+"
interface = ""
output_str_1 = net_connect.send_command(command_1)
if debug:
debug_output(command_1, pattern_1, output_str_1)
else:
pass
# str -> list
output_list_1 = output_str_1.split("\n")
# check each line with pattern
for output_line in output_list_1:
match_1 = re.search(pattern_1, output_line)
if (match_1 and not(re.search('Lo', match_1.group(1))) and not(re.search('Virtual-Access', match_1.group(1)))):
result = False
message = ""
interface = match_1.group(1)
command_2 = command_2_without_int_name % interface
output_str_2 = net_connect.send_command(command_2)
if debug:
debug_output(command_2, pattern_2, output_str_2)
else:
pass
match_2 = re.search(pattern_2, output_str_2)
if match_2:
result = True
else:
break
return (result, message)
def Set_ip_rip_authentication_mode_to_md5(net_connect,host_parameters, debug):
result = False
message = "NO INT"
command_1 = "sh ip protocols | sec rip"
pattern_1 = "\s+([FfGgEe]\w+[0-9/.]+)\s+[12]"
command_2_without_int_name = "show run interface %s | incl ip rip authentication mode"
pattern_2 = "ip rip authentication mode md5"
interface = ""
output_str_1 = net_connect.send_command(command_1)
if debug:
| |
"""Quantum mechanical angular momemtum."""
from sympy import (
Add, binomial, cos, diff, exp, Expr, factorial, I, Integer, Matrix, Mul, N, pi,
Rational, S, sin, simplify, sqrt, Sum, Symbol, symbols, sympify
)
from sympy.matrices.matrices import zeros
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympy.physics.quantum.qexpr import QExpr
from sympy.physics.quantum.operator import (
HermitianOperator, Operator, UnitaryOperator
)
from sympy.physics.quantum.state import Bra, Ket, State
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.physics.quantum.constants import hbar
from sympy.physics.quantum.hilbert import ComplexSpace
from sympy.physics.quantum.tensorproduct import TensorProduct
from sympy.physics.quantum.cg import CG
from sympy.physics.quantum.qapply import qapply
__all__ = [
'm_values',
'Jplus',
'Jminus',
'Jx',
'Jy',
'Jz',
'J2',
'JxKet',
'JxBra',
'JyKet',
'JyBra',
'JzKet',
'JzBra',
'JxKetCoupled',
'JxBraCoupled',
'JyKetCoupled',
'JyBraCoupled',
'JzKetCoupled',
'JzBraCoupled',
'Rotation',
'WignerD',
'couple',
'uncouple'
]
def m_values(j):
j = sympify(j)
size = 2*j + 1
if not size.is_Integer or not size > 0:
raise ValueError(
            'Only integer or half-integer values allowed for j, got: %r' % j
)
return size, [j-i for i in range(int(2*j+1))]
def couple(tp):
""" Couple an uncoupled spin states
This function can be used to couple an uncoupled tensor product of spin
states. All of the eigenstates to be coupled must be of the same class. It
will return a linear combination of eigenstates that are subclasses of
CoupledSpinState.
Parameters
==========
tp: TensorProduct
TensorProduct of spin states to be coupled
Examples
========
Couple a tensor product of numerical states:
>>> from sympy.physics.quantum.spin import JzKet, couple
>>> from sympy.physics.quantum.tensorproduct import TensorProduct
>>> couple(TensorProduct(JzKet(1,0), JzKet(1,1)))
-sqrt(2)*|1,1,1,1>/2 + sqrt(2)*|2,1,1,1>/2
Couple a tensor product of symbolic states:
>>> from sympy import symbols
>>> j1,m1,j2,m2 = symbols('j1 m1 j2 m2')
>>> couple(TensorProduct(JzKet(j1,m1), JzKet(j2,m2)))
Sum(CG(j1, m1, j2, m2, j, m1 + m2)*|j,m1 + m2>, (j, 0, j1 + j2))
"""
states = tp.args
evect = states[0].__class__
if not all([arg.__class__ is evect for arg in states]):
raise TypeError('All operands must be of the same class')
evect = evect.coupled_class()
if all(state.j.is_number for state in states):
# Numerical coupling
vect = TensorProduct(*[state._represent() for state in states])
maxj = states[0].j + states[1].j
j1, j2 = states[0].j, states[1].j
if maxj == int(maxj):
minj = 0
else:
minj = S(1)/2
result = []
for i in range(maxj-minj+1):
j = maxj-i
for k in range(2*j+1):
m = j-k
max_m1 = min(j1, m+j2)
min_m1 = max(-j1, m-j2)
min_m2 = m-max_m1
result.append(Add(*[vect[(j1-(max_m1-l))*(2*j2+1)+(j2-(min_m2+l)),0] * CG(j1,max_m1-l,j2,min_m2+l,j,m) * evect(j,m,j1,j2) for l in range(max_m1-min_m1+1)]))
if all(state.m.is_number for state in states):
return Add(*result).doit()
else:
return Add(*result)
else:
# Symbolic coupling
maxj = Add(*[state.j for state in states])
m = Add(*[state.m for state in states])
j = symbols('j')
if not maxj.is_number or maxj == int(maxj):
minj = 0
else:
minj = S(1)/2
j1 = states[0].j
j2 = states[1].j
m1 = states[0].m
m2 = states[1].m
return Sum(CG(j1,m1,j2,m2,j,m) * evect(j,m), (j,minj,maxj))
def uncouple(*args):
""" Uncouple a coupled spin state
Gives the uncoupled representation of a coupled spin state. Arguments must
be either a spin state that is a subclass of CoupledSpinState or a spin
state that is a subclass of SpinState and an array giving the j values
of the spaces that are to be coupled
Parameters
==========
args: CoupledSpinState or SpinState
        The state that is to be uncoupled. If a subclass of SpinState is used,
the state must be followed by the j values of the spaces that are to
be coupled.
Examples
========
Uncouple a numerical state using a CoupledSpinState state:
>>> from sympy.physics.quantum.spin import JzKetCoupled, uncouple
>>> from sympy import S
>>> uncouple(JzKetCoupled(1, 0, S(1)/2, S(1)/2))
sqrt(2)*|1/2,-1/2>x|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>x|1/2,-1/2>/2
Perform the same calculation using a SpinState state:
>>> from sympy.physics.quantum.spin import JzKet
>>> uncouple(JzKet(1, 0), S(1)/2, S(1)/2)
sqrt(2)*|1/2,-1/2>x|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>x|1/2,-1/2>/2
Uncouple a symbolic state using a CoupledSpinState state:
>>> from sympy import symbols
>>> j,m,j1,j2 = symbols('j m j1 j2')
>>> uncouple(JzKetCoupled(j, m, j1, j2))
Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>x|j2,m2>, (m1, -j1, j1), (m2, -j2, j2))
Perform the same calculation using a SpinState state
>>> uncouple(JzKet(j, m), j1, j2)
Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>x|j2,m2>, (m1, -j1, j1), (m2, -j2, j2))
"""
if len(args) == 3:
state, j1, j2 = args
evect = state.__class__
elif len(args) == 1:
state = args[0]
evect = state.uncoupled_class()
j1, j2 = state.jvals
state = evect(state.j, state.m)
else:
raise TypeError
j = state.j
m = state.m
if state.j.is_number and state.m.is_number:
result = []
for i_m1 in range(2*j1+1):
m1 = j1-i_m1
for i_m2 in range(2*j2+1):
m2 = j2-i_m2
result.append(CG(j1,m1,j2,m2,j,m).doit() * TensorProduct(evect(j1,m1), evect(j2,m2)))
return Add(*result)
else:
m1,m2,mi = symbols('m1 m2 mi')
# Hack to get rotation angles
angles = (evect(0,mi)._represent())[0].args[3:6]
out_state = TensorProduct(evect(j1,m1),evect(j2,m2))
if angles == (0,0,0):
lt = CG(j1,m1,j2,m2,state.j,state.m)
return Sum(lt * out_state, (m1,-j1,j1), (m2,-j2,j2))
else:
lt = CG(j1,m1,j2,m2,state.j,mi) * Rotation.D(state.j,mi,state.m,*angles)
return Sum(lt * out_state, (mi,-state.j,state.j), (m1,-j1,j1), (m2,-j2,j2))
#-----------------------------------------------------------------------------
# SpinOperators
#-----------------------------------------------------------------------------
class SpinOpBase(object):
"""Base class for spin operators."""
@classmethod
def _eval_hilbert_space(cls, label):
# We consider all j values so our space is infinite.
return ComplexSpace(S.Infinity)
@property
def name(self):
return self.args[0]
def _print_contents(self, printer, *args):
return '%s%s' % (unicode(self.name), self._coord)
# def _sympyrepr(self, printer, *args):
# return '%s(%s)' % (
# self.__class__.__name__, printer._print(self.label,*args)
#
def _print_contents_pretty(self, printer, *args):
a = stringPict(unicode(self.name))
b = stringPict(self._coord)
return self._print_subscript_pretty(a, b)
def _print_contents_latex(self, printer, *args):
return r'%s_%s' % ((unicode(self.name), self._coord))
def _represent_base(self, basis, **options):
j = options.get('j', Rational(1,2))
size, mvals = m_values(j)
result = zeros(size, size)
for p in range(size):
for q in range(size):
me = self.matrix_element(j, mvals[p], j, mvals[q])
result[p, q] = me
return result
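    # Quick check (illustrative, not in the original source): for j = 1/2 the matrix built by
    # _represent_base reduces to the familiar Pauli form, e.g. for JzOp:
    #   JzOp('J')._represent_base('Jz', j=Rational(1, 2))  ->  hbar/2 * Matrix([[1, 0], [0, -1]])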
def _apply_op(self, ket, orig_basis, **options):
state = ket.rewrite(self.basis)
# If the state has only one term
if isinstance(state, State):
return self._apply_operator(state, **options)
# state is a linear combination of states
return qapply(self*state).rewrite(orig_basis)
def _apply_operator_JxKet(self, ket, **options):
return self._apply_op(ket, 'Jx', **options)
def _apply_operator_JyKet(self, ket, **options):
return self._apply_op(ket, 'Jy', **options)
def _apply_operator_JzKet(self, ket, **options):
return self._apply_op(ket, 'Jz', **options)
def _apply_operator_TensorProduct(self, tp, **options):
if isinstance(self, J2Op):
raise NotImplementedError
result = []
for n in range(len(tp.args)):
arg = []
arg.extend(tp.args[:n])
arg.append(self._apply_operator(tp.args[n]))
arg.extend(tp.args[n+1:])
result.append(tp.__class__(*arg))
return Add(*result).expand()
class JplusOp(SpinOpBase, Operator):
"""The J+ operator."""
_coord = '+'
basis = 'Jz'
def _eval_commutator_JminusOp(self, other):
return 2*hbar*JzOp(self.name)
def _apply_operator_JzKet(self, ket, **options):
j = ket.j
m = ket.m
if m.is_Number and j.is_Number:
if m >= j:
return S.Zero
return hbar*sqrt(j*(j+S.One)-m*(m+S.One))*JzKet(j, m+S.One)
def matrix_element(self, j, m, jp, mp):
result = hbar*sqrt(j*(j+S.One)-mp*(mp+S.One))
result *= KroneckerDelta(m, mp+1)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
def _eval_rewrite_as_xyz(self, *args):
return JxOp(args[0]) + I*JyOp(args[0])
class JminusOp(SpinOpBase, Operator):
"""The J- operator."""
_coord = '-'
basis = 'Jz'
def _apply_operator_JzKet(self, ket, **options):
j = ket.j
m = ket.m
if m.is_Number and j.is_Number:
if m <= -j:
return S.Zero
return hbar*sqrt(j*(j+S.One)-m*(m-S.One))*JzKet(j, m-S.One)
def matrix_element(self, j, m, jp, mp):
result = hbar*sqrt(j*(j+S.One)-mp*(mp-S.One))
result *= KroneckerDelta(m, mp-1)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
def _eval_rewrite_as_xyz(self, *args):
return JxOp(args[0]) - I*JyOp(args[0])
class JxOp(SpinOpBase, HermitianOperator):
"""The Jx operator."""
_coord = 'x'
basis = 'Jx'
def _eval_commutator_JyOp(self, other):
return I*hbar*JzOp(self.name)
def _eval_commutator_JzOp(self, other):
return -I*hbar*JyOp(self.name)
def _apply_operator_JxKet(self, ket, **options):
return (hbar*ket.m)*ket
def _apply_operator_JzKet(self, ket, **options):
jp = JplusOp(self.name)._apply_operator_JzKet(ket, **options)
jm = JminusOp(self.name)._apply_operator_JzKet(ket, **options)
return (jp + jm)/Integer(2)
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
jp = JplusOp(self.name)._represent_JzOp(basis, **options)
jm = JminusOp(self.name)._represent_JzOp(basis, **options)
return (jp + jm)/Integer(2)
def _eval_rewrite_as_plusminus(self, *args):
return (JplusOp(args[0]) + JminusOp(args[0]))/2
class JyOp(SpinOpBase, HermitianOperator):
"""The Jy operator."""
_coord = 'y'
basis = 'Jy'
def _eval_commutator_JzOp(self, other):
return I*hbar*JxOp(self.name)
def _eval_commutator_JxOp(self, other):
        return -I*hbar*JzOp(self.name)
def _apply_operator_JyKet(self, ket, **options):
return (hbar*ket.m)*ket
def _apply_operator_JzKet(self, ket, **options):
jp = JplusOp(self.name)._apply_operator_JzKet(ket, **options)
jm = JminusOp(self.name)._apply_operator_JzKet(ket, **options)
return (jp - jm)/(Integer(2)*I)
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
jp = JplusOp(self.name)._represent_JzOp(basis, **options)
jm = JminusOp(self.name)._represent_JzOp(basis, **options)
return (jp - jm)/(Integer(2)*I)
def _eval_rewrite_as_plusminus(self, *args):
return (JplusOp(args[0]) - JminusOp(args[0]))/(2*I)
class JzOp(SpinOpBase, HermitianOperator):
"""The Jz operator."""
_coord = 'z'
basis = 'Jz'
def _eval_commutator_JxOp(self, other):
return I*hbar*JyOp(self.name)
def _eval_commutator_JyOp(self, other):
return -I*hbar*JxOp(self.name)
def _eval_commutator_JplusOp(self, other):
return hbar*JplusOp(self.name)
def _eval_commutator_JminusOp(self, other):
return -hbar*JminusOp(self.name)
def _apply_operator_JzKet(self, ket, **options):
return (hbar*ket.m)*ket
def matrix_element(self, j, m, jp, mp):
result = hbar*mp
result *= KroneckerDelta(m, mp)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
        return self._represent_JzOp(None, **options)
# Copyright (c) 2009-2012 <NAME>. See LICENSE for details.
import sys
from gevent.hub import greenlet, getcurrent, get_hub, GreenletExit, Waiter, PY3, iwait, wait
from gevent.timeout import Timeout
from collections import deque
__all__ = ['Greenlet',
'joinall',
'killall']
class SpawnedLink(object):
"""A wrapper around link that calls it in another greenlet.
Can be called only from main loop.
"""
__slots__ = ['callback']
def __init__(self, callback):
if not callable(callback):
raise TypeError("Expected callable: %r" % (callback, ))
self.callback = callback
def __call__(self, source):
g = greenlet(self.callback, get_hub())
g.switch(source)
def __hash__(self):
return hash(self.callback)
def __eq__(self, other):
return self.callback == getattr(other, 'callback', other)
def __str__(self):
return str(self.callback)
def __repr__(self):
return repr(self.callback)
def __getattr__(self, item):
assert item != 'callback'
return getattr(self.callback, item)
class SuccessSpawnedLink(SpawnedLink):
"""A wrapper around link that calls it in another greenlet only if source succeed.
Can be called only from main loop.
"""
__slots__ = []
def __call__(self, source):
if source.successful():
return SpawnedLink.__call__(self, source)
class FailureSpawnedLink(SpawnedLink):
"""A wrapper around link that calls it in another greenlet only if source failed.
Can be called only from main loop.
"""
__slots__ = []
def __call__(self, source):
if not source.successful():
return SpawnedLink.__call__(self, source)
class Greenlet(greenlet):
"""A light-weight cooperatively-scheduled execution unit."""
def __init__(self, run=None, *args, **kwargs):
hub = get_hub()
greenlet.__init__(self, parent=hub)
if run is not None:
self._run = run
self.args = args
self.kwargs = kwargs
self._links = deque()
self.value = None
self._exception = _NONE
self._notifier = None
self._start_event = None
@property
def loop(self):
# needed by killall
return self.parent.loop
if PY3:
def __bool__(self):
return self._start_event is not None and self._exception is _NONE
else:
def __nonzero__(self):
return self._start_event is not None and self._exception is _NONE
@property
def started(self):
# DEPRECATED
return bool(self)
def ready(self):
"""Return true if and only if the greenlet has finished execution."""
return self.dead or self._exception is not _NONE
def successful(self):
"""Return true if and only if the greenlet has finished execution successfully,
that is, without raising an error."""
return self._exception is None
def __repr__(self):
classname = self.__class__.__name__
result = '<%s at %s' % (classname, hex(id(self)))
formatted = self._formatinfo()
if formatted:
result += ': ' + formatted
return result + '>'
def _formatinfo(self):
try:
return self._formatted_info
except AttributeError:
pass
try:
result = getfuncname(self.__dict__['_run'])
except Exception:
pass
else:
args = []
if self.args:
args = [repr(x)[:50] for x in self.args]
if self.kwargs:
args.extend(['%s=%s' % (key, repr(value)[:50]) for (key, value) in self.kwargs.items()])
if args:
result += '(' + ', '.join(args) + ')'
# it is important to save the result here, because once the greenlet exits '_run' attribute will be removed
self._formatted_info = result
return result
return ''
@property
def exception(self):
"""Holds the exception instance raised by the function if the greenlet has finished with an error.
Otherwise ``None``.
"""
if self._exception is not _NONE:
return self._exception
def throw(self, *args):
"""Immediatelly switch into the greenlet and raise an exception in it.
Should only be called from the HUB, otherwise the current greenlet is left unscheduled forever.
To raise an exception in a safely manner from any greenlet, use :meth:`kill`.
If a greenlet was started but never switched to yet, then also
a) cancel the event that will start it
b) fire the notifications as if an exception was raised in a greenlet
"""
if self._start_event is None:
self._start_event = _dummy_event
else:
self._start_event.stop()
try:
greenlet.throw(self, *args)
finally:
if self._exception is _NONE and self.dead:
# the greenlet was never switched to before and it will never be, _report_error was not called
# the result was not set and the links weren't notified. let's do it here.
# checking that self.dead is true is essential, because throw() does not necessarily kill the greenlet
# (if the exception raised by throw() is caught somewhere inside the greenlet).
if len(args) == 1:
arg = args[0]
#if isinstance(arg, type):
if type(arg) is type(Exception):
args = (arg, arg(), None)
else:
args = (type(arg), arg, None)
elif not args:
args = (GreenletExit, GreenletExit(), None)
self._report_error(args)
def start(self):
"""Schedule the greenlet to run in this loop iteration"""
if self._start_event is None:
self._start_event = self.parent.loop.run_callback(self.switch)
def start_later(self, seconds):
"""Schedule the greenlet to run in the future loop iteration *seconds* later"""
if self._start_event is None:
self._start_event = self.parent.loop.timer(seconds)
self._start_event.start(self.switch)
@classmethod
def spawn(cls, *args, **kwargs):
"""Return a new :class:`Greenlet` object, scheduled to start.
The arguments are passed to :meth:`Greenlet.__init__`.
"""
g = cls(*args, **kwargs)
g.start()
return g
@classmethod
def spawn_later(cls, seconds, *args, **kwargs):
"""Return a Greenlet object, scheduled to start *seconds* later.
The arguments are passed to :meth:`Greenlet.__init__`.
"""
g = cls(*args, **kwargs)
g.start_later(seconds)
return g
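    # Minimal usage sketch (illustrative; `work`, `arg1` and `on_done` are hypothetical names):
    #   g = Greenlet.spawn(work, arg1)   # schedule work(arg1) on the next loop iteration
    #   g.link(on_done)                  # on_done(g) runs in its own greenlet once g is dead
    #   g.join(timeout=5)                # block the current greenlet until g finishes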
def kill(self, exception=GreenletExit, block=True, timeout=None):
"""Raise the exception in the greenlet.
If block is ``True`` (the default), wait until the greenlet dies or the optional timeout expires.
If block is ``False``, the current greenlet is not unscheduled.
The function always returns ``None`` and never raises an error.
`Changed in version 0.13.0:` *block* is now ``True`` by default.
"""
# XXX this function should not switch out if greenlet is not started but it does
# XXX fix it (will have to override 'dead' property of greenlet.greenlet)
if self._start_event is None:
self._start_event = _dummy_event
else:
self._start_event.stop()
if not self.dead:
waiter = Waiter()
self.parent.loop.run_callback(_kill, self, exception, waiter)
if block:
waiter.get()
self.join(timeout)
# it should be OK to use kill() in finally or kill a greenlet from more than one place;
# thus it should not raise when the greenlet is already killed (= not started)
def get(self, block=True, timeout=None):
"""Return the result the greenlet has returned or re-raise the exception it has raised.
If block is ``False``, raise :class:`gevent.Timeout` if the greenlet is still alive.
If block is ``True``, unschedule the current greenlet until the result is available
or the timeout expires. In the latter case, :class:`gevent.Timeout` is raised.
"""
if self.ready():
if self.successful():
return self.value
else:
raise self._exception
if block:
switch = getcurrent().switch
self.rawlink(switch)
try:
t = Timeout.start_new(timeout)
try:
result = self.parent.switch()
assert result is self, 'Invalid switch into Greenlet.get(): %r' % (result, )
finally:
t.cancel()
except:
# unlinking in 'except' instead of finally is an optimization:
# if switch occurred normally then link was already removed in _notify_links
# and there's no need to touch the links set.
# Note, however, that if "Invalid switch" assert was removed and invalid switch
# did happen, the link would remain, causing another invalid switch later in this greenlet.
self.unlink(switch)
raise
if self.ready():
if self.successful():
return self.value
else:
raise self._exception
else:
raise Timeout
def join(self, timeout=None):
"""Wait until the greenlet finishes or *timeout* expires.
Return ``None`` regardless.
"""
if self.ready():
return
else:
switch = getcurrent().switch
self.rawlink(switch)
try:
t = Timeout.start_new(timeout)
try:
result = self.parent.switch()
assert result is self, 'Invalid switch into Greenlet.join(): %r' % (result, )
finally:
t.cancel()
except Timeout as ex:
self.unlink(switch)
if ex is not t:
raise
except:
self.unlink(switch)
raise
def _report_result(self, result):
self._exception = None
self.value = result
if self._links and not self._notifier:
self._notifier = self.parent.loop.run_callback(self._notify_links)
def _report_error(self, exc_info):
exception = exc_info[1]
if isinstance(exception, GreenletExit):
self._report_result(exception)
return
self._exception = exception
if self._links and not self._notifier:
self._notifier = self.parent.loop.run_callback(self._notify_links)
self.parent.handle_error(self, *exc_info)
def run(self):
try:
if self._start_event is None:
self._start_event = _dummy_event
else:
self._start_event.stop()
try:
result = self._run(*self.args, **self.kwargs)
except:
self._report_error(sys.exc_info())
return
self._report_result(result)
finally:
self.__dict__.pop('_run', None)
self.__dict__.pop('args', None)
self.__dict__.pop('kwargs', None)
def rawlink(self, callback):
"""Register a callable to be executed when the greenlet finishes the execution.
WARNING: the callable will be called in the HUB greenlet.
"""
if not callable(callback):
raise TypeError('Expected callable: %r' % (callback, ))
self._links.append(callback)
if self.ready() and self._links and not self._notifier:
self._notifier = self.parent.loop.run_callback(self._notify_links)
def link(self, callback, SpawnedLink=SpawnedLink):
"""Link greenlet's completion to a callable.
The *callback* will be called with this instance as an argument
once this greenlet's dead. A callable is called in its own greenlet.
"""
self.rawlink(SpawnedLink(callback))
def unlink(self, callback):
"""Remove the callback set by :meth:`link` or :meth:`rawlink`"""
try:
self._links.remove(callback)
except ValueError:
pass
def link_value(self, callback, SpawnedLink=SuccessSpawnedLink):
"""Like :meth:`link` but *callback* is only notified when the greenlet has completed successfully"""
self.link(callback, SpawnedLink=SpawnedLink)
# coding: utf-8
import plfit
import os
from pylab import *
if not os.path.isdir('figures'):
os.mkdir('figures')
blackouts = np.loadtxt('blackouts.txt', dtype='int')
cities = np.loadtxt('cities.txt', dtype='int')
earthquakes = np.loadtxt('earthquakes.txt')
melville = np.loadtxt('melville.txt')
solarflares = np.loadtxt('solarflares.txt', dtype='int')
terrorism = np.loadtxt('terrorism.txt')
fires = np.loadtxt('fires.txt')
## Solarflares are discrete but well-approximated (I hope...) by continuous...
#plf = plfit.plfit(solarflares.ravel(), discrete=False, usefortran=True, verbose=True, quiet=False)
#plc = plfit.plfit(solarflares.ravel(), discrete=False, usecy=True, verbose=True, quiet=False)
#pl = plfit.plfit(solarflares.ravel(), discrete=False, usefortran=False, verbose=True, quiet=False)
#print "Solarflares (Clauset): n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (19447,9.00,77.83,8009,52.46,2.37,0.08,580,0.76)
#for ppp in (pl,plf,plc):
# print "Solarflares (me) : n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (ppp.data.shape[0], ppp.data.mean(), ppp.data.std(), ppp.data.max(), ppp._xmin, ppp._alpha, ppp._alphaerr, ppp._ngtx, ppp._ks_prob)
# np.testing.assert_almost_equal(ppp._xmin, 323, 2)
# np.testing.assert_almost_equal(ppp._alpha, 1.79, 2)
# np.testing.assert_almost_equal(ppp._alphaerr, 0.02, 2)
# assert ppp._ngtx == 1711
# Earthquakes are a BAD FIT in the original manuscript
#plf = plfit.plfit(earthquakes.ravel(), nosmall=True, usefortran=True, verbose=True, quiet=False)
#plc = plfit.plfit(earthquakes.ravel(), nosmall=True, usecy=True, verbose=True, quiet=False)
#pl = plfit.plfit(earthquakes.ravel(), nosmall=True, usefortran=False, verbose=True, quiet=False)
#print "Earthquakes (Clauset): n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (19447,9.00,77.83,8009,52.46,2.37,0.08,580,0.76)
#for ppp in (pl,plf,plc):
# print "Earthquakes (me) : n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (ppp.data.shape[0], ppp.data.mean(), ppp.data.std(), ppp.data.max(), ppp._xmin, ppp._alpha, ppp._alphaerr, ppp._ngtx, ppp._ks_prob)
# np.testing.assert_almost_equal(ppp._xmin, 0.794, 2)
# np.testing.assert_almost_equal(ppp._alpha, 1.64, 2)
# np.testing.assert_almost_equal(ppp._alphaerr, 0.04, 2)
# assert ppp._ngtx == 11697
plf = plfit.plfit(cities.ravel() / 1e3, usefortran=True, verbose=True, quiet=False)
plc = plfit.plfit(cities.ravel() / 1e3, usecy=True, verbose=True, quiet=False)
pl = plfit.plfit(cities.ravel() / 1e3, usefortran=False, verbose=True, quiet=False)
print("Cities (Clauset): n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (19447,9.00,77.83,8009,52.46,2.37,0.08,580,0.76))
for ppp in (pl,plf,plc):
print("Cities (me) : n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (ppp.data.shape[0], ppp.data.mean(), ppp.data.std(), ppp.data.max(), ppp._xmin, ppp._alpha, ppp._alphaerr, ppp._ngtx, ppp._ks_prob))
np.testing.assert_almost_equal(ppp._xmin, 52.46, 2)
np.testing.assert_almost_equal(ppp._alpha, 2.37, 2)
np.testing.assert_almost_equal(ppp._alphaerr, 0.08, 2)
assert ppp._ngtx == 580
figure(1)
clf()
title("Cities")
subplot(131)
pl.plotpdf()
subplot(132)
title("Cities")
pl.xminvsks()
subplot(133)
pl.alphavsks()
savefig("figures/cities_kstests.png")
figure(2)
title("Cities")
pl.plotcdf()
savefig("figures/cities_cdf.png")
pl = plfit.plfit(melville.ravel(), verbose=True, quiet=False)
p,sims = pl.test_pl(usefortran=True, niter=100)
print("Melville (me) : n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (pl.data.shape[0], pl.data.mean(), pl.data.std(), pl.data.max(), pl._xmin, pl._alpha, pl._alphaerr, pl._ngtx, p))
print("Melville (Clauset): n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (18855,11.14,148.33,14086,7,1.95,0.02,2958,0.49))
# count of word use 18 855 11.14 148.33 14 086 7 ± 2 1.95(2) 2958 ± 987 0.49
# words 0.49 4.43 0.00 0.395 0.69 9.09 0.00 4.13 0.00 −0.899 0.18 good
figure(3)
clf()
title("Melville")
subplot(131)
pl.plotpdf()
subplot(132)
title("Melville")
pl.xminvsks()
subplot(133)
pl.alphavsks()
savefig("figures/Melville_kstests.png")
figure(4)
title("Melville")
pl.plotcdf()
savefig("figures/Melville_cdf.png")
pl = plfit.plfit(solarflares.ravel(), verbose=True, quiet=False)
plf = plfit.plfit(solarflares.ravel(), verbose=True, quiet=False, usefortran=True)
plc = plfit.plfit(solarflares.ravel(), verbose=True, quiet=False, usecy=True)
p,sims = pl.test_pl(usefortran=True, niter=100)
print("Solarflares (me) : n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (pl.data.shape[0], pl.data.mean(), pl.data.std(), pl.data.max(), pl._xmin, pl._alpha, pl._alphaerr, pl._ngtx, p))
print("Solarflares (Clauset): n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (12773, 689.41, 6520.59, 231300, 323, 1.79, 0.02, 1711, 1.00))
for ppp in (pl,plf,plc):
np.testing.assert_almost_equal(ppp._xmin, 323, 1)
np.testing.assert_almost_equal(ppp._alpha, 1.79, 2)
np.testing.assert_almost_equal(ppp._alphaerr, 0.02, 2)
assert ppp._ngtx == 1711
figure(5)
clf()
title("Solar Flares")
subplot(131)
pl.plotpdf()
subplot(132)
title("Solar Flares")
pl.xminvsks()
subplot(133)
pl.alphavsks()
savefig("figures/SolarFlares_kstests.png")
figure(6)
title("SolarFlares")
pl.plotcdf()
savefig("figures/SolarFlares_cdf.png")
pl = plfit.plfit(terrorism.ravel(), verbose=True, quiet=False)
plf = plfit.plfit(terrorism.ravel(), verbose=True, quiet=False, usefortran=True)
plc = plfit.plfit(terrorism.ravel(), verbose=True, quiet=False, usecy=True)
p,sims = pl.test_pl(usefortran=True, niter=100, nosmall=False)
print("Terrorism (me) : n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (pl.data.shape[0], pl.data.mean(), pl.data.std(), pl.data.max(), pl._xmin, pl._alpha, pl._alphaerr, pl._ngtx, p))
print("Terrorism (Clauset): n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (9101, 4.35, 31.58, 2749, 12, 2.4, 0.2, 547, 0.68))
figure(6)
clf()
title("Terrorism")
subplot(131)
pl.plotpdf()
subplot(132)
title("Terrorism")
pl.xminvsks()
subplot(133)
pl.alphavsks()
savefig("figures/Terorrism_kstests.png")
figure(7)
title("Terorrism")
pl.plotcdf()
savefig("figures/Terorrism_cdf.png")
# """
# power law log-normal exponential stretched exp. power law + cut-off support for
# data set p LR p LR p LR p LR p power law
# birds 0.55 -0.850 0.40 1.87 0.06 -0.882 0.38 -1.24 0.12 moderate
# blackouts 0.62 -0.412 0.68 1.21 0.23 -0.417 0.68 -0.382 0.38 moderate
# book sales 0.66 -0.267 0.79 2.70 0.01 3.885 0.00 -0.140 0.60 moderate
# cities 0.76 -0.090 0.93 3.65 0.00 0.204 0.84 -0.123 0.62 moderate
# fires 0.05 -1.78 0.08 4.00 0.00 -1.82 0.07 -5.02 0.00 with cut-off
# flares 1.00 -0.803 0.42 13.7 0.00 -0.546 0.59 -4.52 0.00 with cut-off
# HTTP 0.00 1.77 0.08 11.8 0.00 2.65 0.01 0.000 1.00 none
# quakes 0.00 -7.14 0.00 11.6 0.00 -7.09 0.00 -24.4 0.00 with cut-off
# religions 0.42 -0.073 0.94 1.59 0.11 1.75 0.08 -0.167 0.56 moderate
# surnames 0.20 -0.836 0.40 2.89 0.00 -0.844 0.40 -1.36 0.10 with cut-off
# wars 0.20 -0.737 0.46 3.68 0.00 -0.767 0.44 -0.847 0.19 moderate
# wealth 0.00 0.249 0.80 6.20 0.00 8.05 0.00 -0.142 0.59 none
# web hits 0.00 -10.21 0.00 8.55 0.00 10.94 0.00 -74.66 0.00 with cut-off
# web links 0.00 -2.24 0.03 25.3 0.00 -1.08 0.28 -21.2 0.00 with cut-off
#
#
#
# Poisson log-normal exponential stretched exp. power law + cut-off support for
# data set p LR p LR p LR p LR p LR p power law
# Internet 0.29 5.31 0.00 −0.807 0.42 6.49 0.00 0.493 0.62 −1.97 0.05 with cut-off
# calls 0.63 17.9 0.00 −2.03 0.04 35.0 0.00 14.3 0.00 −30.2 0.00 with cut-off
# citations 0.20 6.54 0.00 −0.141 0.89 5.91 0.00 1.72 0.09 −0.007 0.91 moderate
# email 0.16 4.65 0.00 −1.10 0.27 0.639 0.52 −1.13 0.26 −1.89 0.05 with cut-off
# metabolic 0.00 3.53 0.00 −1.05 0.29 5.59 0.00 3.66 0.00 0.000 1.00 none
# papers 0.90 5.71 0.00 −0.091 0.93 3.08 0.00 0.709 0.48 −0.016 0.86 moderate
# proteins 0.31 3.05 0.00 −0.456 0.65 2.21 0.03 0.055 0.96 −0.414 0.36 moderate
# species 0.10 5.04 0.00 −1.63 0.10 2.39 0.02 −1.59 0.11 −3.80 0.01 with cut-off
# terrorism 0.68 1.81 0.07 −0.278 0.78 2.457 0.01 0.772 0.44 −0.077 0.70 moderate
# words 0.49 4.43 0.00 0.395 0.69 9.09 0.00 4.13 0.00 −0.899 0.18 good
#
#
#
#
# quantity n hxi σ xmax xˆmin α n ˆ tail p
# count of word use 18 855 11.14 148.33 14 086 7 ± 2 1.95(2) 2958 ± 987 0.49
# protein interaction degree 1846 2.34 3.05 56 5 ± 2 3.1(3) 204 ± 263 0.31
# metabolic degree 1641 5.68 17.81 468 4 ± 1 2.8(1) 748 ± 136 0.00
# Internet degree 22 688 5.63 37.83 2583 21 ± 9 2.12(9) 770 ± 1124 0.29
# telephone calls received 51 360 423 3.88 179.09 375 746 120 ± 49 2.09(1) 102 592 ± 210 147 0.63
# intensity of wars 115 15.70 49.97 382 2.1 ± 3.5 1.7(2) 70 ± 14 0.20
# terrorist attack severity 9101 4.35 31.58 2749 12 ± 4 2.4(2) 547 ± 1663 0.68
# HTTP size (kilobytes) 226 386 7.36 57.94 10 971 36.25 ± 22.74 2.48(5) 6794 ± 2232 0.00
# species per genus 509 5.59 6.94 56 4 ± 2 2.4(2) 233 ± 138 0.10
# bird species sightings 591 3384.36 10 952.34 138 705 6679 ± 2463 2.1(2) 66 ± 41 0.55
# blackouts (×10^3) 211 253.87 610.31 7500 230 ± 90 2.3(3) 59 ± 35 0.62
# sales of books (×10^3) 633 1986.67 1396.60 19 077 2400 ± 430 3.7(3) 139 ± 115 0.66
# population of cities (×10^3) 19 447 9.00 77.83 8 009 52.46 ± 11.88 2.37(8) 580 ± 177 0.76
# email address books size 4581 12.45 21.49 333 57 ± 21 3.5(6) 196 ± 449 0.16
# forest fire size (acres) 203 785 0.90 20.99 4121 6324 ± 3487 2.2(3) 521 ± 6801 0.05
# solar flare intensity 12 773 689.41 6520.59 231 300 323 ± 89 1.79(2) 1711 ± 384 1.00
# quake intensity (×10^3) 19 302 24.54 563.83 63 096 0.794 ± 80.198 1.64(4) 11 697 ± 2159 0.00
# religious followers (×10^6) 103 27.36 136.64 1050 3.85 ± 1.60 1.8(1) 39 ± 26 0.42
# freq. of surnames (×10^3) 2753 50.59 113.99 2502 111.92 ± 40.67 2.5(2) 239 ± 215 0.20
# net worth (mil. USD) 400 2388.69 4 167.35 46 000 900 ± 364 2.3(1) 302 ± 77 0.00
# citations to papers 415 229 16.17 44.02 8904 160 ± 35 3.16(6) 3455 ± 1859 0.20
# papers authored 401 445 7.21 16.52 1416 133 ± 13 4.3(1) 988 ± 377 0.90
# hits to web sites 119 724 9.83 392.52 129 641 2 ± 13 1.81(8) 50 981 ± 16 898 0.00
# links to web sites 241 428 853 9.15 106 871.65 1 199 466 3684 ± 151 2.336(9) 28 986 ± 1560 0.00
#
#
# Poisson log-normal exponential stretched | |
"""Tokenization utilties for exrepssions."""
import re
from typing import Dict, List, Tuple
import torch
from selfies import decoder, split_selfies
from transformers import BertTokenizer, XLNetTokenizer
SMILES_TOKENIZER_PATTERN = r"(\%\([0-9]{3}\)|\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\||\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"
class RegexTokenizer:
"""Run regex tokenization"""
def __init__(self, regex_pattern: str) -> None:
"""Constructs a RegexTokenizer.
Args:
regex_pattern: regex pattern used for tokenization
"""
self.regex_pattern = regex_pattern
self.regex = re.compile(self.regex_pattern)
def tokenize(self, text: str) -> List[str]:
"""Regex tokenization.
Args:
text: text to tokenize.
Returns:
extracted tokens.
"""
tokens = [token for token in self.regex.findall(text)]
return tokens
class PropertyTokenizer:
"""Run a property tokenization."""
def __init__(self) -> None:
"""Constructs a PropertyTokenizer."""
self.regex = re.compile(r"\s*(<\w+>)\s*?(\+|-)?(\d+)(\.)?(\d+)?\s*")
def tokenize(self, text: str) -> List[str]:
"""Tokenization of a property.
Args:
text: text to tokenize.
Returns:
extracted tokens.
"""
tokens = []
matched = self.regex.match(text)
if matched:
property_name, sign, units, dot, decimals = matched.groups()
tokens = [property_name]
if sign:
tokens += [f"_{sign}_"]
tokens += [
f"_{number}_{position}_" for position, number in enumerate(units[::-1])
][::-1]
if dot:
tokens += [f"_{dot}_"]
if decimals:
tokens += [
f"_{number}_-{position}_"
for position, number in enumerate(decimals, 1)
]
return tokens
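# Example of the property tokenization scheme (illustrative, traced from the regex above):
#   PropertyTokenizer().tokenize("<qed>0.93")
#   -> ['<qed>', '_0_0_', '_._', '_9_-1_', '_3_-2_']
# i.e. each digit is encoded together with its decimal position relative to the dot.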
class PropertyTokenizerSquare(PropertyTokenizer):
"""Run a property tokenization."""
def __init__(self) -> None:
"""Constructs a PropertyTokenizer."""
self.regex = re.compile(r"\s*(\[\w+\])\s*?(\+|-)?(\d+)(\.)?(\d+)?\s*")
class CharacterTokenizer:
def __init__(self) -> None:
"""Constructs a tokenizer that simply splits each character"""
self.tokenizer = lambda x: list(x)
def tokenize(self, text: str) -> List[str]:
"""Tokenize an expression.
Args:
text: text to tokenize.
Returns:
extracted tokens.
"""
return self.tokenizer(text)
class SelfiesTokenizer(CharacterTokenizer):
def __init__(self) -> None:
"""Constructs an expression tokenizer for SELFIES
Args:
expression_tokenizer: Separator token for properties and molecule.
Defaults to '|'.
"""
self.tokenizer = lambda x: list(split_selfies(x))
class ExpressionTokenizer:
def __init__(
self, expression_tokenizer: str = "|", language: str = "SMILES"
) -> None:
"""Constructs an expression tokenizer.
Args:
expression_tokenizer (str): Token separating the property. Defaults to '|'.
Must not occur in the language itself.
language (str): Identifier for the (chemical) language. Should be either
'SMILES', 'SELFIES' or 'AAS'.
"""
self.language = language
if language == "SMILES":
self.text_tokenizer = RegexTokenizer(regex_pattern=SMILES_TOKENIZER_PATTERN)
elif language == "SELFIES":
self.text_tokenizer = SelfiesTokenizer()
elif language == "AAS":
self.text_tokenizer = CharacterTokenizer()
else:
raise ValueError(
f"Unsupported language {language}, choose 'SMILES', 'SELFIES' or 'AAS'."
)
self.property_tokenizer = PropertyTokenizer()
self.expression_separator = expression_tokenizer
def tokenize(self, text: str) -> List[str]:
"""Tokenize an expression.
Args:
text: text to tokenize.
Returns:
extracted tokens.
"""
splitted_expression = text.split(self.expression_separator)
tokens = []
for property_expression in splitted_expression[:-1]:
tokens.extend(self.property_tokenizer.tokenize(property_expression))
tokens.append(self.expression_separator)
tokens.extend(self.text_tokenizer.tokenize(splitted_expression[-1]))
return tokens
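# Illustrative usage (the '<logp>' property tag and the SMILES string are made-up examples):
#   ExpressionTokenizer(language="SMILES").tokenize("<logp>0.25|CCO")
#   -> ['<logp>', '_0_0_', '_._', '_2_-1_', '_5_-2_', '|', 'C', 'C', 'O']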
class ExpressionBertTokenizer(BertTokenizer):
"""
Constructs a bert-based tokenizer used for the Regression Transformer.
Args:
vocab_file: path to a token per line vocabulary file.
"""
def __init__(
self,
vocab_file,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
pad_even: bool = True,
language: str = "SMILES",
**kwargs,
) -> None:
"""Constructs an ExpressionTokenizer.
Args:
vocab_file: vocabulary file containing tokens.
unk_token: unknown token. Defaults to "[UNK]".
sep_token: separator token. Defaults to "[SEP]".
pad_token: pad token. Defaults to "[PAD]".
cls_token: cls token. Defaults to "[CLS]".
mask_token: mask token. Defaults to "[MASK]".
pad_even (bool): Boolean indicating whether sequences of odd length should
                be padded to have an even length. Needed for PLM in XLNet. Defaults to
True.
language (str): Identifier for the (chemical) language. Should be either
'SMILES', 'SELFIES' or 'AAS'.
"""
super().__init__(
vocab_file=vocab_file,
do_lower_case=False,
do_basic_tokenize=True,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs,
)
# define tokenization utilities
self.language = language
if language == "SMILES":
self.text_tokenizer = RegexTokenizer(regex_pattern=SMILES_TOKENIZER_PATTERN)
elif self.language == "SELFIES":
self.text_tokenizer = SelfiesTokenizer()
elif language == "AAS":
self.text_tokenizer = CharacterTokenizer()
else:
raise ValueError(
f"Unsupported language {language}, choose 'SMILES', 'SELFIES' or 'AAS'."
)
self.property_tokenizer = PropertyTokenizer()
self.expression_separator = "|"
self.separator_idx = self.vocab[self.expression_separator]
self.pad_even = pad_even
# DEPRECATED
if pad_even:
self.pad_even_fn = lambda x: x if len(x) % 2 == 0 else x + [self.pad_token]
else:
self.pad_even_fn = lambda x: x
@property
def vocab_list(self) -> List[str]:
"""List vocabulary tokens.
Returns:
a list of vocabulary tokens.
"""
return list(self.vocab.keys())
def _tokenize(self, text: str) -> List[str]:
"""Tokenize a text representing an expression.
Args:
text: text to tokenize.
Returns:
extracted tokens.
"""
splitted_expression = text.split(self.expression_separator)
tokens = []
for property_expression in splitted_expression[:-1]:
tokens.extend(self.property_tokenizer.tokenize(property_expression))
tokens.append(self.expression_separator)
tokens.extend(self.text_tokenizer.tokenize(splitted_expression[-1]))
# TODO: remove this hack
# This is a hack to get around DataCollatorForLanguageModeling requiring even
# length sequences
return self.pad_even_fn(tokens)
def add_padding_tokens(
self, token_ids: List[int], max_length: int, padding_right: bool = True
) -> List[int]:
"""Adds padding tokens to return a sequence of length max_length.
By default padding tokens are added to the right of the sequence.
Args:
token_ids: token indexes.
max_length: maximum length of the sequence.
padding_right: whether the sequence is padded on the right. Defaults to True.
Returns:
padded sequence of token indexes.
"""
padding_ids = [self.pad_token_id] * (max_length - len(token_ids))
if padding_right:
return token_ids + padding_ids
else:
return padding_ids + token_ids
@staticmethod
def get_sample_label(mlm_label: List[str], mlm_input: List[str]) -> List[str]:
"""MLM case: Retrieve true sample sequence from mlm label and mlm input.
NOTE: Also works for PLM.
Args:
mlm_label (List[str]): Target sample used in MLM.
mlm_input (List[str]): MLM input sample.
Returns:
List[str]: Sample sequence as part of the dataset
"""
return [i if el == "[UNK]" else el for el, i in zip(mlm_label, mlm_input)]
@staticmethod
def get_sample_prediction(
mlm_prediction: List[str], mlm_input: List[str]
) -> List[str]:
"""MLM case: Retrieve predicted sequence from mlm prediction and mlm input
NOTE: Also works for PLM.
Args:
            mlm_prediction (List[str]): Predicted sample from MLM.
mlm_input (List[str]): MLM input sample.
Returns:
List[str]: Sample sequence as part of the dataset
"""
return [
i if i not in ["[MASK]"] else o for o, i in zip(mlm_prediction, mlm_input)
]
@staticmethod
def floating_tokens_to_float(token_ids: List[str]) -> float:
"""Converts tokens representing a float value into a float.
        NOTE: Expects that non-floating tokens are stripped off
Args:
token_ids: List of tokens, each representing a float.
E.g.: ['_0_0_', '_._', '_9_-1_', '_3_-2_', '_1_-3_']
Returns:
float: Float representation for the list of tokens.
"""
try:
float_string = "".join([token.split("_")[1] for token in token_ids])
float_value = float(float_string)
except ValueError:
float_value = -1
return float_value
def aggregate_tokens(
self, token_ids: List[str], label_mode: bool, cls_first: bool = True
) -> Tuple[str, Dict]:
"""Receives tokens of one sample and returns sequence (e.g. SMILES) and
a dict of properties.
Args:
token_ids (List[str]): List of tokens.
label_mode (bool): Whether the token_ids are labels or predictions.
            cls_first (bool, optional): Whether the CLS token occurs first. Defaults to True.
Returns:
Tuple[str, Dict]:
str: SMILES/SELFIES sequence of sample.
Dict: A dictionary with property names (e.g. 'qed') as key and
properties as values.
"""
edx = min(
token_ids.index("[SEP]") if "[SEP]" in token_ids else 1000,
token_ids.index("[PAD]") if "[PAD]" in token_ids else 1000,
)
edx = -1 if edx == 1000 else edx
seq = (
"".join(token_ids[token_ids.index("|") + 1 : edx])
if "|" in token_ids
else "".join(token_ids)
)
property_dict = {}
for idx, t in enumerate(token_ids):
if t.startswith("<") and t.endswith(">"):
key = t[1:-1]
# Convert float
end_floating_idx = idx + 1
while token_ids[end_floating_idx].startswith("_"):
end_floating_idx += 1
prop = self.floating_tokens_to_float(
token_ids[idx + 1 : end_floating_idx]
)
property_dict[key] = prop
return seq, property_dict
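    # Illustrative round trip (hypothetical token sequence): for
    #   ['<qed>', '_0_0_', '_._', '_9_-1_', '_3_-2_', '|', 'C', 'C', 'O', '[SEP]']
    # aggregate_tokens returns ('CCO', {'qed': 0.93}); the sequence is everything between the
    # '|' separator and the first '[SEP]'/'[PAD]', and each '<...>' tag starts a float readout.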
def to_readable(self, sequence: str) -> str:
"""Safely returns a readable string irrespective of whether the language is
SMILES, SELFIES or AAS.
Args:
sequence (str): A string representing a molecule (either SMILES or SELFIES)
or amino acid sequence.
Returns:
str: A SMILES representing the same molecule.
"""
if self.language == "SMILES":
return sequence
elif self.language == "SELFIES":
return decoder(sequence)
elif self.language == "AAS":
return sequence
else:
raise AttributeError(f"Unknown language {self.language}")
class XLNetRTTokenizer(XLNetTokenizer):
"""
    An XLNet-based tokenizer for the Regression Transformer, built for the
    humicroedit dataset.
"""
def set_property_tokenizer(
self,
tokenizer: PropertyTokenizer,
expression_separator: str = '{',
expression_end: str = '}',
property_token: str = '[<PASSWORD>]',
):
"""
Set the property tokenizer to be used by the main tokenizer.
Args:
tokenizer: a property tokenizer.
expression_separator: a token that separates the property from the rest.
expression_end: a token that ends the joke-token sequence.
property_token: the property token.
"""
self.property_tokenizer = tokenizer
# The start token indicating the joke tokens
self.expression_separator = expression_separator
self.expressiond_end = expression_end
self.property_token = property_token
def set_vocab(self):
self.vocab = self.get_vocab()
self.idx_to_token = dict(zip(self.vocab.values(), self.vocab.keys()))
def _tokenize(self, text: str) -> List[str]:
"""
Core tokenization function.
Args:
text: A string to be tokenized.
Returns:
            A list of tokens.
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataset.config import widerface_640 as cfg
# from layers.DCNv2 import DCN
RELU_FIRST = True
OPS = {
"none": lambda C, stride, affine: Zero(stride),
"avg_pool_3x3": lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
"max_pool_3x3": lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
"skip_connect": lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
"sep_conv_3x3": lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
"sep_conv_5x5": lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
"sep_conv_7x7": lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
"dil_conv_3x3": lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),
"dil_conv_3x3_3": lambda C, stride, affine: DilConv(C, C, 3, stride, 3, 3, affine=affine),
"dil_conv_5x5": lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),
"conv_1x3_3x1": lambda C, stride, affine: RfeConv(C, C, 3, stride, 1, affine=affine),
"conv_1x5_5x1": lambda C, stride, affine: RfeConv(C, C, 5, stride, 2, affine=affine),
# "dconv_3x3": lambda C, stride, affine: D_Conv(C, C, 3, 1, affine=affine, bn=False),
"conv_1x3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,3), stride=stride, padding=(0,1), bn=False),
"conv_3x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(3,1), stride=stride, padding=(1,0), bn=False),
"conv_1x5": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,5), stride=stride, padding=(0,2), bn=False),
"conv_5x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(5,1), stride=stride, padding=(2,0), bn=False),
"conv_1x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=1, stride=1, padding=0, bn=False),
} # black: disable
BN_OPS = {
"none": lambda C, stride, affine: Zero(stride),
"avg_pool_3x3": lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
"max_pool_3x3": lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
"skip_connect": lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
"sep_conv_3x3": lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine, bn=True),
"sep_conv_5x5": lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine, bn=True),
"sep_conv_7x7": lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine, bn=True),
"dil_conv_3x3": lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine, bn=True),
"dil_conv_3x3_3": lambda C, stride, affine: DilConv(C, C, 3, stride, 3, 3, affine=affine, bn=True),
"dil_conv_5x5": lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine, bn=True),
"conv_1x3_3x1": lambda C, stride, affine: RfeConv(C, C, 3, stride, 1, affine=affine, bn=True),
"conv_1x5_5x1": lambda C, stride, affine: RfeConv(C, C, 5, stride, 2, affine=affine, bn=True),
# "dconv_3x3": lambda C, stride, affine: D_Conv(C, C, 3, 1, affine=affine, bn=True),
"conv_1x3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,3), stride=stride, padding=(0,1), bn=True),
"conv_3x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(3,1), stride=stride, padding=(1,0), bn=True),
"conv_1x5": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,5), stride=stride, padding=(0,2), bn=True),
"conv_5x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(5,1), stride=stride, padding=(2,0), bn=True),
"conv_1x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=1, stride=1, padding=0, bn=True),
}
NORMAL_OPS = {
"none": lambda C, stride, affine: Zero(stride),
"avg_pool_3x3": lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
"max_pool_3x3": lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
"skip_connect": lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
"sep_conv_3x3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=3, stride=stride, padding=1, bn=True),
"sep_conv_5x5": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=5, stride=stride, padding=1, bn=True),
"sep_conv_7x7": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=7, stride=stride, padding=1, bn=True),
"dil_conv_3x3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=3, stride=stride, padding=2, bn=True, dilation=2),
"dil_conv_3x3_3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=3, stride=stride, padding=3, bn=True, dilation=3),
"dil_conv_5x5": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=5, stride=stride, padding=4, bn=True, dilation=2),
"conv_1x3_3x1": lambda C, stride, affine: RfeConv(C, C, 3, stride, 1, affine=affine, bn=True),
"conv_1x5_5x1": lambda C, stride, affine: RfeConv(C, C, 5, stride, 2, affine=affine, bn=True),
# "dconv_3x3": lambda C, stride, affine: D_Conv(C, C, 3, 1, affine=affine, bn=True),
"conv_1x3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,3), stride=stride, padding=(0,1), bn=True),
"conv_3x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(3,1), stride=stride, padding=(1,0), bn=True),
"conv_1x5": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,5), stride=stride, padding=(0,2), bn=True),
"conv_5x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(5,1), stride=stride, padding=(2,0), bn=True),
}
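# Illustrative usage sketch (the channel count and input shape are made-up): each entry maps a
# primitive name to a factory taking (channels, stride, affine), so a candidate op can be built
# and applied like:
#   op = OPS["sep_conv_3x3"](64, 1, True)
#   y = op(torch.randn(2, 64, 32, 32))   # same spatial size, since padding matches the kernel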
class Normal_Relu_Conv(nn.Module):
def __init__(self, C_in, C_out, affine=True, bn=False, **kwargs):
super(Normal_Relu_Conv, self).__init__()
if not bn:
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(C_in, C_in, bias=True, **kwargs),
)
else:
if cfg['GN']:
bn_layer = nn.GroupNorm(32, C_out)
elif cfg["syncBN"]:
bn_layer = nn.SyncBatchNorm(C_out)
else:
bn_layer = nn.BatchNorm2d(C_out)
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(C_in, C_in, bias=False, **kwargs),
bn_layer,
)
if RELU_FIRST:
self.op = nn.Sequential()
self.op.add_module('0', nn.ReLU())
for i in range(1, len(op)+1):
self.op.add_module(str(i), op[i-1])
else:
self.op = op
self.op.add_module(str(len(op)), nn.ReLU())
# self.op = op
def forward(self, x):
return self.op(x)
class _GumbelSoftMax(torch.autograd.Function):
"""
    Implements the MixedOp, but carries out the selection differently than DARTS.
    DARTS adds all operations together and then selects the maximal one to construct the final network;
    however, late in the search more and more weight is assigned to the None operation, which is
    unreasonable when each operation has an uncertain number of inputs.
    Thus, we modify the original DARTS by applying the selection scheme used in GDAS.
    This class computes the gradients ourselves.
"""
@staticmethod
def forward(ctx, weights):
weights_norm = F.softmax(weights, dim=-1)
ctx.saved_for_backward = weights_norm
# select the max one
mask = torch.zeros_like(weights_norm).to(weights.device)
_, idx = weights_norm.topk(dim=-1, k=1, largest=True)
mask[idx] = 1.0
return mask
@staticmethod
def backward(ctx, grad_output):
gumbel_norm = ctx.saved_for_backward
return gumbel_norm * (1 - gumbel_norm) * grad_output * gumbel_norm.shape[0]
class GumbelSoftMax(nn.Module):
def __init__(self):
super(GumbelSoftMax, self).__init__()
def forward(self, weights, temp_coeff=1.0):
gumbel = -1e-3 * torch.log(-torch.log(torch.rand_like(weights))).to(weights.device)
weights = _GumbelSoftMax.apply((weights + gumbel) / temp_coeff)
return weights
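# Illustrative usage sketch (`alphas` and `candidate_ops` are hypothetical names): GumbelSoftMax
# returns a one-hot mask over the candidate operations while the custom backward still passes
# gradients to all of them.
#   alphas = torch.randn(len(OPS), requires_grad=True)
#   mask = GumbelSoftMax()(alphas, temp_coeff=5.0)            # one-hot vector, differentiable
#   out = sum(w * op(x) for w, op in zip(mask, candidate_ops))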
# class D_Conv(nn.Module):
# """ Deformable Conv V2 """
# def __init__(self, C_in, C_out, kernel_size, padding, affine=True, bn=False):
# super(D_Conv, self).__init__()
# if bn:
# if cfg["syncBN"]:
# bn_layer = nn.SyncBatchNorm(C_out)
# else:
# bn_layer = nn.BatchNorm2d(C_out)
# self.op = nn.Sequential(
# nn.ReLU(inplace=False),
# DCN(
# C_in, C_in, kernel_size=kernel_size, padding=padding, stride=1, deformable_groups=C_in, groups=C_in
# ),
# nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
# bn_layer,
# )
# else:
# self.op = nn.Sequential(
# nn.ReLU(inplace=False),
# DCN(
# C_in, C_in, kernel_size=kernel_size, padding=padding, stride=1, deformable_groups=C_in, groups=C_in
# ),
# nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=True),
# )
# def forward(self, x):
# return self.op(x)
class RfeConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True, bn=False):
super(RfeConv, self).__init__()
if not bn:
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(
C_in,
C_in,
kernel_size=(1, kernel_size),
stride=(1, stride),
padding=(0, padding),
groups=C_in,
bias=True,
),
# nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=True),
nn.Conv2d(
C_in,
C_in,
kernel_size=(kernel_size, 1),
stride=(stride, 1),
padding=(padding, 0),
groups=C_in,
bias=True,
),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=True),
)
else:
if cfg['GN']:
bn_layer = nn.GroupNorm(32, C_out)
elif cfg["syncBN"]:
bn_layer = nn.SyncBatchNorm(C_out)
else:
bn_layer = nn.BatchNorm2d(C_out)
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(
C_in,
C_in,
kernel_size=(1, kernel_size),
stride=(1, stride),
padding=(0, padding),
groups=C_in,
bias=True,
),
nn.Conv2d(
C_in,
C_in,
kernel_size=(kernel_size, 1),
stride=(stride, 1),
padding=(padding, 0),
groups=C_in,
bias=True,
),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=True),
bn_layer,
)
if RELU_FIRST:
self.op = nn.Sequential()
self.op.add_module('0', nn.ReLU())
for i in range(1, len(op)+1):
self.op.add_module(str(i), op[i-1])
else:
self.op = op
self.op.add_module(str(len(op)), nn.ReLU())
# self.op = op
def forward(self, x):
return self.op(x)
class DilConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True, bn=False):
super(DilConv, self).__init__()
if not bn:
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(
C_in,
C_in,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=C_in,
bias=True,
),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=True),
)
else:
if cfg['GN']:
bn_layer = nn.GroupNorm(32, C_out)
elif cfg["syncBN"]:
bn_layer = nn.SyncBatchNorm(C_out)
else:
bn_layer = nn.BatchNorm2d(C_out)
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(
C_in,
C_in,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=C_in,
bias=False,
),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
bn_layer,
)
if RELU_FIRST:
self.op = nn.Sequential()
self.op.add_module('0', nn.ReLU())
for i in range(1, len(op)+1):
self.op.add_module(str(i), op[i-1])
else:
self.op = op
self.op.add_module(str(len(op)), nn.ReLU())
# self.op = op
def forward(self, x):
return self.op(x)
class SepConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True, bn=False):
super(SepConv, self).__init__()
if not bn:
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=True,),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=True),
)
else:
if cfg['GN']:
bn_layer = nn.GroupNorm(32, C_out)
elif cfg["syncBN"]:
bn_layer = nn.SyncBatchNorm(C_out)
else:
bn_layer = nn.BatchNorm2d(C_out)
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(
C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False,
),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
bn_layer,
)
if RELU_FIRST:
self.op = nn.Sequential(nn.ReLU())
# self.op.add_module('0', nn.ReLU())
for i in range(1, len(op)+1):
self.op.add_module(str(i), op[i-1])
else:
self.op = op
self.op.add_module(str(len(op)), nn.ReLU())
# self.op = op
def forward(self, x):
return self.op(x)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Zero(nn.Module):
def __init__(self, stride):
super(Zero, self).__init__()
        self.stride = stride
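    # The original definition is cut off here; a standard DARTS-style forward (sketch) would be:
    def forward(self, x):
        if self.stride == 1:
            return x.mul(0.0)
        return x[:, :, ::self.stride, ::self.stride].mul(0.0)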
<reponame>Deltares/geolib<gh_stars>1-10
import os
import pathlib
import shutil
from pathlib import Path
import pytest
from teamcity import is_running_under_teamcity
from geolib.geometry.one import Point
from geolib.models import BaseModel, BaseModelStructure
from geolib.models.dstability import DStabilityModel
from geolib.models.dstability.analysis import (
DStabilityBishopAnalysisMethod,
DStabilityBishopBruteForceAnalysisMethod,
DStabilityCircle,
DStabilitySearchArea,
DStabilitySearchGrid,
DStabilitySpencerAnalysisMethod,
DStabilitySpencerGeneticAnalysisMethod,
DStabilityUpliftVanAnalysisMethod,
DStabilityUpliftVanParticleSwarmAnalysisMethod,
)
from geolib.models.dstability.dstability_model import DStabilityModel
from geolib.models.dstability.internal import (
AnalysisTypeEnum,
CalculationTypeEnum,
DStabilityStructure,
)
from geolib.models.dstability.loads import Consolidation, LineLoad, UniformLoad
from geolib.models.dstability.reinforcements import ForbiddenLine, Geotextile, Nail
from geolib.models.dstability.states import (
DStabilityStateLinePoint,
DStabilityStatePoint,
DStabilityStress,
)
from geolib.soils import ShearStrengthModelTypePhreaticLevel, Soil, SuTablePoint
from tests.utils import TestUtils, only_teamcity
class TestDStabilityModel:
@pytest.mark.unittest
def test_instantiate_DStabilityModel(self):
assert isinstance(DStabilityModel(filename=None), BaseModel), (
"" + "DStabilityModel does not instanciate BaseModel"
)
@pytest.mark.systemtest
@pytest.mark.parametrize(
"filepath",
[
pytest.param("dstability/example_1", id="Input Structure"),
pytest.param(
"dstability/example_1/Tutorial.stix", id="Input Structure for zip"
),
pytest.param("dstability/Tutorial_v20_2_1", id="Tutorial DStability 20.2.1"),
],
)
def test_given_datadir_when_parse_then_datastructure_of_expected_type(
self, filepath: str
):
# 1. Set up test data.
test_input_filepath = Path(TestUtils.get_local_test_data_dir(filepath))
dstability_model = DStabilityModel(filename=None)
# 2. Verify initial expectations.
assert os.path.exists(test_input_filepath)
assert dstability_model is not None
# 3. Run test.
dstability_model.parse(test_input_filepath)
# 4. Verify final expectations.
assert dstability_model.is_valid
assert isinstance(dstability_model.datastructure, DStabilityStructure)
@pytest.mark.systemtest
@pytest.mark.parametrize(
"dir_path",
[
pytest.param("dstability/example_1", id="Input Structure"),
pytest.param("dstability/Tutorial_v20_2_1", id="Tutorial DStability 20.2.1"),
],
)
def test_given_data_when_parseandserialize_then_doesnotraise(self, dir_path: str):
# 1. Set up test data.
test_input_filepath = Path(TestUtils.get_local_test_data_dir(dir_path))
dstability_model = DStabilityModel(filename=None)
test_output_filepath = Path(
TestUtils.get_output_test_data_dir("dstability/parseandserialize")
)
# 2. Verify initial expectations.
assert os.path.exists(test_input_filepath)
if len(os.listdir(test_output_filepath)) > 0:
shutil.rmtree(test_output_filepath)
os.mkdir(test_output_filepath)
assert dstability_model is not None
# 3. Run test.
dstability_model.parse(test_input_filepath)
dstability_model.serialize(test_output_filepath)
# 4. Verify final expectations.
assert dstability_model.is_valid
assert len(os.listdir(test_output_filepath)) > 0, (
"" + "No data was generated while serializing."
)
@pytest.mark.systemtest
@pytest.mark.skipif(
not is_running_under_teamcity(), reason="Console test only installed on TC."
)
@pytest.mark.parametrize(
"dir_path",
[
pytest.param("dstability/example_1", id="Input Structure"),
pytest.param("dstability/Tutorial_v20_2_1", id="Tutorial DStability 20.2.1"),
],
)
def test_execute_model_succesfully(self, dir_path: str):
# 1. Set up test data.
dm = DStabilityModel()
test_filepath = Path(TestUtils.get_local_test_data_dir(dir_path))
dm.parse(test_filepath)
test_output_filepath = (
Path(TestUtils.get_output_test_data_dir("dstability")) / "test.stix"
)
dm.serialize(test_output_filepath)
# 2. Verify initial expectations.
assert os.path.exists(test_output_filepath)
# 3. Run test.
dm.filename = test_output_filepath
model = dm.execute()
# 3. Verify model output has been parsed
assert model
@pytest.mark.integrationtest
def test_add_default_stage(self):
# Setup
dm = DStabilityModel()
dm.add_layer(
[
Point(x=-50, z=-10),
Point(x=50, z=-10),
Point(x=50, z=-20),
Point(x=-50, z=-20),
],
"Sand",
)
# Test
new_stage_id = dm.add_stage("new stage", "")
# Assert new stage has default (empty geometry)
assert new_stage_id == 1
assert len(dm.stages) == 2
assert len(dm.datastructure.geometries[-1].Layers) == 0
@pytest.mark.integrationtest
def test_copy_stage(self):
# Setup
dm = DStabilityModel()
dm.add_layer(
[
Point(x=-50, z=-10),
Point(x=50, z=-10),
Point(x=50, z=-20),
Point(x=-50, z=-20),
],
"Sand",
)
# Test
new_stage_id = dm.copy_stage("new stage", "")
        # Assert the new stage is a copy of the first stage (geometry with one layer)
assert new_stage_id == 1
assert len(dm.stages) == 2
assert len(dm.datastructure.geometries[-1].Layers) == 1
@pytest.mark.unittest
def test_gen_unique_id(self):
"""This test will fail when we've added new default
ids to the internal datastructure. Please update accordingly."""
max_id_after_initialization_of_dstability_structure = 21
dm = DStabilityModel()
assert dm.datastructure.waternets[0].Id == "14"
new_id = dm.datastructure.get_unique_id()
assert new_id == max_id_after_initialization_of_dstability_structure
@pytest.mark.acceptance
@only_teamcity
def test_generate_simple_model(self):
dm = DStabilityModel()
layer_1 = [
Point(x=-50, z=-10),
Point(x=50, z=-10),
Point(x=50, z=-20),
Point(x=-50, z=-20),
]
layer_2 = [
Point(x=-50, z=-5),
Point(x=50, z=-5),
Point(x=50, z=-10),
Point(x=-50, z=-10),
]
layer_3 = [
Point(x=-50, z=0),
Point(x=50, z=0),
Point(x=50, z=-5),
Point(x=-50, z=-5),
]
embankment = [
Point(x=-10, z=0),
Point(x=0, z=2),
Point(x=10, z=2),
Point(x=30, z=0),
]
layers_and_soils = [
(layer_1, "Sand"),
(layer_2, "H_Ro_z&k"),
(layer_3, "H_Rk_k_shallow"),
(embankment, "H_Aa_ht_old"),
]
layer_ids = [dm.add_layer(points, soil) for points, soil in layers_and_soils]
for layer_id in layer_ids:
            # Has to be done in a separate loop since all layers first need to be defined.
dm.add_soil_layer_consolidations(soil_layer_id=layer_id)
assert len(dm.datastructure.loads[0].LayerLoads) == 4
assert dm.is_valid
# Serialize model to input file.
path = pathlib.Path.cwd() / "test.stix"
dm.serialize(path)
        # Check for successful execution
dm.execute()
assert dm.datastructure
@pytest.mark.systemtest
def test_get_stabfactor(self):
test_filepath = Path(
TestUtils.get_local_test_data_dir("dstability/test_dstab_full.stix")
)
dm = DStabilityModel()
dm.parse(test_filepath)
        assert dm.output.FactorOfSafety == pytest.approx(0.56, abs=0.01)
def test_get_slipeplane(self):
test_filepath = Path(
TestUtils.get_local_test_data_dir("dstability/test_dstab_full.stix")
)
dm = DStabilityModel()
dm.parse(test_filepath)
assert len(dm.output.SlipPlane) == 5
@pytest.mark.acceptance
@only_teamcity
def test_generate_model_from_scratch(self):
dm = DStabilityModel()
bishop_analysis_method = DStabilityBishopAnalysisMethod(
circle=DStabilityCircle(center=Point(x=20, z=3), radius=15)
)
dm.set_model(bishop_analysis_method)
assert (
dm.datastructure.calculationsettings[0].CalculationType
== CalculationTypeEnum.DETERMINISTIC
)
assert (
dm.datastructure.calculationsettings[0].AnalysisType
== AnalysisTypeEnum.BISHOP
)
# add soil
soil_peat_id = Soil()
soil_peat_id.name = "Peat (weak)"
soil_peat_id.code = "HV"
soil_peat_id.soil_weight_parameters.unsaturated_weight.mean = 10.2
soil_peat_id.soil_weight_parameters.saturated_weight.mean = 10.2
soil_peat_id.mohr_coulomb_parameters.friction_angle.mean = 15
soil_peat_id.mohr_coulomb_parameters.cohesion.mean = 0.5
soil_peat_id = dm.add_soil(soil_peat_id)
# add layers
layer_1 = [
Point(x=-50, z=-10),
Point(x=50, z=-10),
Point(x=50, z=-20),
Point(x=-50, z=-20),
]
layer_2 = [
Point(x=-50, z=-5),
Point(x=50, z=-5),
Point(x=50, z=-10),
Point(x=-50, z=-10),
]
layer_3 = [
Point(x=-50, z=0),
Point(x=-10, z=0),
Point(x=30, z=0),
Point(x=50, z=0),
Point(x=50, z=-5),
Point(x=-50, z=-5),
]
embankment = [
Point(x=-10, z=0),
Point(x=0, z=2),
Point(x=10, z=2),
Point(x=30, z=0),
]
layers_and_soils = [
(layer_1, "Sand"),
(layer_2, "H_Ro_z&k"),
(layer_3, "HV"),
(embankment, "H_Aa_ht_old"),
]
layer_ids = []
for layer, soil in layers_and_soils:
layer_id = dm.add_layer(layer, soil)
layer_ids.append(layer_id)
outputdir = Path(TestUtils.get_output_test_data_dir("dstability/acceptancetest/"))
path = outputdir / "test_layers.stix"
dm.serialize(path)
# add phreatic line
phreatic_line_id = dm.add_head_line(
points=[
Point(x=-50, z=1.0),
Point(x=0, z=1),
Point(x=30, z=-1),
Point(x=50, z=-1),
],
label="Phreatic Line",
is_phreatic_line=True,
)
path = outputdir / "test_phreatic_line.stix"
dm.serialize(path)
# add headline for deep sand
sand_head_line_id = dm.add_head_line(
points=[Point(x=-50, z=5.0), Point(x=50, z=5.0)],
label="Hydraulic head in sandlayer",
)
dm.add_reference_line(
points=[Point(x=-50, z=-3), Point(x=50, z=-3)],
bottom_headline_id=phreatic_line_id,
top_head_line_id=phreatic_line_id,
)
dm.add_reference_line(
points=[Point(x=-50, z=-10), Point(x=50, z=-10)],
bottom_headline_id=sand_head_line_id,
top_head_line_id=sand_head_line_id,
)
path = outputdir / "test_reference_line.stix"
dm.serialize(path)
# change some parameters
dm.edit_soil("HV", cohesion=2.0, friction_angle=17.5)
path = outputdir / "test_edited_soil.stix"
dm.serialize(path)
# add uniform load
dm.add_load(
UniformLoad(
label="trafficload",
start=6.5,
end=9.0,
magnitude=13,
angle_of_distribution=45,
)
)
path = outputdir / "test_uniformload.stix"
dm.serialize(path)
# add line load
dm.add_load(
LineLoad(
location=Point(x=2.0, z=2.0),
angle=0.0,
magnitude=10.0,
angle_of_distribution=45.0,
)
)
path = outputdir / "test_lineload.stix"
dm.serialize(path)
# create reinforcements NAIL
dm.add_reinforcement(
Nail(
location=Point(x=20.0, z=1.0),
direction=15.0,
horizontal_spacing=1.0,
length=3.0,
grout_diameter=0.1,
max_pull_force=10.0,
plastic_moment=5.0,
bending_stiffness=100.0,
)
)
path = outputdir / "test_nail.stix"
dm.serialize(path)
# create reinforcements GEOTEXTILE
dm.add_reinforcement(
Geotextile(
start=Point(x=20.0, z=0.0),
end=Point(x=30.0, z=0.0),
effective_tensile_strength=10.0,
reduction_area=0.5,
)
)
path = outputdir / "test_geotextile.stix"
dm.serialize(path)
# create reinforcements FORBIDDEN LINE
dm.add_reinforcement(
ForbiddenLine(start=Point(x=30.0, z=0.0), end=Point(x=30.0, z=-4.0))
)
path = outputdir / "test_forbidden_line.stix"
dm.serialize(path)
# add bishop brute force
dm.set_model(
DStabilityBishopBruteForceAnalysisMethod(
search_grid=DStabilitySearchGrid(
bottom_left=Point(x=15, z=2),
number_of_points_in_x=10,
number_of_points_in_z=10,
space=0.5,
),
bottom_tangent_line_z=-6.0,
number_of_tangent_lines=5,
space_tangent_lines=0.5,
)
)
path = outputdir / "test_bishop_brute_force.stix"
dm.serialize(path)
# add spencer
dm.set_model(
DStabilitySpencerAnalysisMethod(
slipplane=[
Point(x=7, z=2.0),
Point(x=15, z=-3),
Point(x=30, z=-4.5),
Point(x=40, z=0.0),
]
)
)
path = outputdir / "test_spencer.stix"
dm.serialize(path)
# add spencer genetic
dm.set_model(
DStabilitySpencerGeneticAnalysisMethod(
slip_plane_a=[
Point(x=10, z=2.0),
Point(x=15, z=0),
Point(x=30, z=-4),
Point(x=35, z=0.0),
],
slip_plane_b=[
Point(x=5, z=2.0),
Point(x=15, z=-3),
Point(x=30, z=-6),
Point(x=40, z=0.0),
],
)
)
path = outputdir / "test_spencer_genetic.stix"
dm.serialize(path)
# uplift
dm.set_model(
DStabilityUpliftVanAnalysisMethod(
first_circle=DStabilityCircle(center=Point(x=5, z=5), radius=9.5),
second_circle_center=Point(x=40, z=2),
)
)
path = outputdir / "test_uplift.stix"
dm.serialize(path)
# uplift particle swarm
dm.set_model(
DStabilityUpliftVanParticleSwarmAnalysisMethod(
search_area_a=DStabilitySearchArea(
height=5.0, top_left=Point(x=0.0, z=10.0), width=5.0
),
search_area_b=DStabilitySearchArea(
height=5.0, top_left=Point(x=35.0, z=5.0), width=5.0
),
tangent_area_height=2.0,
tangent_area_top_z=-4.5,
)
)
path = outputdir / "test_uplift_particle_swarm.stix"
dm.serialize(path)
# state point
dm.add_state_point(
DStabilityStatePoint(
layer_id=layer_ids[2], # HV layer
point=Point(x=0, z=-2.5),
stress=DStabilityStress(pop=10.0),
)
)
path = outputdir / "test_state_point.stix"
dm.serialize(path)
# state line
dm.add_state_line(
points=[Point(x=-50, z=-2), Point(x=50, z=-2)],
state_points=[
DStabilityStateLinePoint(
above=DStabilityStress(pop=5), below=DStabilityStress(pop=10), x=20
)
],
)
path = outputdir / "test_state_line.stix"
dm.serialize(path)
# 3. Verify model output has been parsed
model = dm.execute()
assert model
@pytest.mark.integrationtest
def test_su_table_version_parsing(self):
# initialize model
dm = DStabilityModel()
# stix input file path
test_filepath = Path(TestUtils.get_local_test_data_dir("dstability/Example.stix"))
# stix output file path
test_output_filepath = Path(
TestUtils.get_local_test_data_dir("dstability/Tutorial_serialized_new.stix")
)
# parse existing files
dm.parse(test_filepath)
# test that the file was read correctly
soil_su_table = dm.input.soils.get_soil("H_Aa_ht_old")
assert soil_su_table.shear_strength_model_below_phreatic_level.value == "SuTable"
assert (
soil_su_table.shear_strength_model_above_phreatic_level.value
== "Mohr_Coulomb"
)
assert soil_su_table.undrained_parameters.su_table
@pytest.mark.integrationtest
def test_su_table_version_input(self):
# initialize model
dm = DStabilityModel()
# stix input file path
test_filepath = Path(TestUtils.get_local_test_data_dir("dstability/Example.stix"))
# stix output file path
test_output_filepath = Path(
TestUtils.get_local_test_data_dir("dstability/Tutorial_serialized_new.stix")
)
# parse existing files
dm.parse(test_filepath)
# change waterlevels
# add su table values for soil
# add soil
soil = Soil()
soil.name = "Soil test"
soil.code = "su soil"
soil.soil_weight_parameters.saturated_weight.mean = 10.2
soil.soil_weight_parameters.unsaturated_weight.mean = 10.2
soil.undrained_parameters.strength_increase_exponent = 1.1
soil.undrained_parameters.su_table = [
SuTablePoint(su=0, stress=0),
SuTablePoint(su=100, stress=200),
SuTablePoint(su=200, stress=300),
]
soil.shear_strength_model_below_phreatic_level = (
ShearStrengthModelTypePhreaticLevel.SUTABLE
)
new_layer = [
Point(x=66, z=0),
Point(x=89.95, z=-0.06),
Point(x=90, z=-8.7),
Point(x=88.6, z=-5.9),
Point(x=85.9, z=-4.7),
Point(x=83.6, z=-3.6),
Point(x=81, z=-3),
Point(x=79.2, z=-2),
Point(x=77.1, z=-1.7),
Point(x=74.2, z=-1),
Point(x=71, z=-0.4),
]
soil_undrained_id = dm.add_soil(soil)
layer_id = dm.add_layer(points=new_layer, soil_code=soil.code)
# output changed file
dm.serialize(test_output_filepath)
# test that the file was written correctly
soil_su_table = dm.input.soils.get_soil("su soil")
assert soil_su_table.shear_strength_model_below_phreatic_level.value == | |
ranks of each process.")
group.add('--world_size', '-world_size', default=1, type=int,
help="total number of distributed processes.")
group.add('--gpu_backend', '-gpu_backend',
default="nccl", type=str,
help="Type of torch distributed backend")
group.add('--gpu_verbose_level', '-gpu_verbose_level', default=0, type=int,
help="Gives more info on each process per GPU.")
group.add('--master_ip', '-master_ip', default="localhost", type=str,
help="IP of master for torch.distributed training.")
group.add('--master_port', '-master_port', default=10000, type=int,
help="Port of master for torch.distributed training.")
group.add('--queue_size', '-queue_size', default=40, type=int,
help="Size of queue for each process in producer/consumer")
_add_reproducibility_opts(parser)
# Init options
group = parser.add_argument_group('Initialization')
group.add('--param_init', '-param_init', type=float, default=0.1,
help="Parameters are initialized over uniform distribution "
"with support (-param_init, param_init). "
"Use 0 to not use initialization")
group.add('--param_init_glorot', '-param_init_glorot', action='store_true',
help="Init parameters with xavier_uniform. "
"Required for transformer.")
group.add('--train_from', '-train_from', default='', type=str,
help="If training from a checkpoint then this is the "
"path to the pretrained model's state_dict.")
group.add('--reset_optim', '-reset_optim', default='none',
choices=['none', 'all', 'states', 'keep_states'],
help="Optimization resetter when train_from.")
# Pretrained word vectors
group.add('--pre_word_vecs_enc', '-pre_word_vecs_enc',
help="If a valid path is specified, then this will load "
"pretrained word embeddings on the encoder side. "
"See README for specific formatting instructions.")
group.add('--pre_word_vecs_dec', '-pre_word_vecs_dec',
help="If a valid path is specified, then this will load "
"pretrained word embeddings on the decoder side. "
"See README for specific formatting instructions.")
# Freeze word vectors
group.add('--freeze_word_vecs_enc', '-freeze_word_vecs_enc',
action='store_true',
help="Freeze word embeddings on the encoder side.")
group.add('--freeze_word_vecs_dec', '-freeze_word_vecs_dec',
action='store_true',
help="Freeze word embeddings on the decoder side.")
# Optimization options
group = parser.add_argument_group('Optimization- Type')
group.add('--batch_size', '-batch_size', type=int, default=64,
help='Maximum batch size for training')
group.add('--batch_size_multiple', '-batch_size_multiple',
type=int, default=None,
help='Batch size multiple for token batches.')
group.add('--batch_type', '-batch_type', default='sents',
choices=["sents", "tokens"],
help="Batch grouping for batch_size. Standard "
"is sents. Tokens will do dynamic batching")
group.add('--pool_factor', '-pool_factor', type=int, default=8192,
help="""Factor used in data loading and batch creations.
It will load the equivalent of `pool_factor` batches,
sort them by the according `sort_key` to produce
homogeneous batches and reduce padding, and yield
the produced batches in a shuffled way.
Inspired by torchtext's pool mechanism.""")
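    # Illustrative example (not prescriptive): with batch_type="tokens", batch_size=4096
    # and pool_factor=8192, the iterator buffers roughly 8192 * 4096 tokens, sorts the
    # buffered examples by sort_key, cuts homogeneous batches to minimize padding, and
    # yields those batches in shuffled order.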
group.add('--normalization', '-normalization', default='sents',
choices=["sents", "tokens"],
help='Normalization method of the gradient.')
group.add('--accum_count', '-accum_count', type=int, nargs='+',
default=[1],
help="Accumulate gradient this many times. "
"Approximately equivalent to updating "
"batch_size * accum_count batches at once. "
"Recommended for Transformer.")
group.add('--accum_steps', '-accum_steps', type=int, nargs='+',
default=[0], help="Steps at which accum_count values change")
group.add('--valid_steps', '-valid_steps', type=int, default=10000,
              help='Perform validation every X steps')
group.add('--valid_batch_size', '-valid_batch_size', type=int, default=32,
help='Maximum batch size for validation')
group.add('--max_generator_batches', '-max_generator_batches',
type=int, default=32,
help="Maximum batches of words in a sequence to run "
"the generator on in parallel. Higher is faster, but "
"uses more memory. Set to 0 to disable.")
group.add('--train_steps', '-train_steps', type=int, default=100000,
help='Number of training steps')
group.add('--single_pass', '-single_pass', action='store_true',
help="Make a single pass over the training dataset.")
group.add('--epochs', '-epochs', type=int, default=0,
              help='Deprecated; see train_steps.')
group.add('--early_stopping', '-early_stopping', type=int, default=0,
help='Number of validation steps without improving.')
group.add('--early_stopping_criteria', '-early_stopping_criteria',
nargs="*", default=None,
help='Criteria to use for early stopping.')
group.add('--optim', '-optim', default='sgd',
choices=['sgd', 'adagrad', 'adadelta', 'adam',
'sparseadam', 'adafactor', 'fusedadam'],
help="Optimization method.")
group.add('--adagrad_accumulator_init', '-adagrad_accumulator_init',
type=float, default=0,
help="Initializes the accumulator values in adagrad. "
"Mirrors the initial_accumulator_value option "
"in the tensorflow adagrad (use 0.1 for their default).")
group.add('--max_grad_norm', '-max_grad_norm', type=float, default=5,
help="If the norm of the gradient vector exceeds this, "
"renormalize it to have the norm equal to "
"max_grad_norm")
group.add('--dropout', '-dropout', type=float, default=[0.3], nargs='+',
help="Dropout probability; applied in LSTM stacks.")
group.add('--attention_dropout', '-attention_dropout', type=float,
default=[0.1], nargs='+',
help="Attention Dropout probability.")
group.add('--dropout_steps', '-dropout_steps', type=int, nargs='+',
default=[0], help="Steps at which dropout changes.")
group.add('--truncated_decoder', '-truncated_decoder', type=int, default=0,
help="""Truncated bptt.""")
group.add('--adam_beta1', '-adam_beta1', type=float, default=0.9,
help="The beta1 parameter used by Adam. "
"Almost without exception a value of 0.9 is used in "
"the literature, seemingly giving good results, "
"so we would discourage changing this value from "
"the default without due consideration.")
group.add('--adam_beta2', '-adam_beta2', type=float, default=0.999,
help='The beta2 parameter used by Adam. '
'Typically a value of 0.999 is recommended, as this is '
'the value suggested by the original paper describing '
'Adam, and is also the value adopted in other frameworks '
'such as Tensorflow and Keras, i.e. see: '
'https://www.tensorflow.org/api_docs/python/tf/train/Adam'
'Optimizer or https://keras.io/optimizers/ . '
'Whereas recently the paper "Attention is All You Need" '
'suggested a value of 0.98 for beta2, this parameter may '
'not work well for normal models / default '
'baselines.')
group.add('--label_smoothing', '-label_smoothing', type=float, default=0.0,
help="Label smoothing value epsilon. "
"Probabilities of all non-true labels "
"will be smoothed by epsilon / (vocab_size - 1). "
"Set to zero to turn off label smoothing. "
"For more detailed information, see: "
"https://arxiv.org/abs/1512.00567")
group.add('--average_decay', '-average_decay', type=float, default=0,
help="Moving average decay. "
"Set to other than 0 (e.g. 1e-4) to activate. "
"Similar to Marian NMT implementation: "
"http://www.aclweb.org/anthology/P18-4020 "
"For more detail on Exponential Moving Average: "
"https://en.wikipedia.org/wiki/Moving_average")
group.add('--average_every', '-average_every', type=int, default=1,
help="Step for moving average. "
"Default is every update, "
"if -average_decay is set.")
# learning rate
group = parser.add_argument_group('Optimization- Rate')
group.add('--learning_rate', '-learning_rate', type=float, default=1.0,
help="Starting learning rate. "
"Recommended settings: sgd = 1, adagrad = 0.1, "
"adadelta = 1, adam = 0.001")
group.add('--learning_rate_decay', '-learning_rate_decay',
type=float, default=0.5,
help="If update_learning_rate, decay learning rate by "
"this much if steps have gone past "
"start_decay_steps")
group.add('--start_decay_steps', '-start_decay_steps',
type=int, default=50000,
help="Start decaying every decay_steps after "
"start_decay_steps")
group.add('--decay_steps', '-decay_steps', type=int, default=10000,
help="Decay every decay_steps")
group.add('--decay_method', '-decay_method', type=str, default="none",
choices=['noam', 'noamwd', 'rsqrt', 'none'],
help="Use a custom decay rate.")
group.add('--warmup_steps', '-warmup_steps', type=int, default=4000,
help="Number of warmup steps for custom decay.")
_add_logging_opts(parser, is_train=True)
def _add_train_dynamic_data(parser):
group = parser.add_argument_group("Dynamic data")
group.add("-bucket_size", "--bucket_size", type=int, default=2048,
help="Examples per dynamically generated torchtext Dataset.")
def train_opts(parser):
""" Training and saving options """
group = parser.add_argument_group('General')
group.add('--data', '-data', required=True,
help='Path prefix to the ".train.pt" and '
'".valid.pt" file path from preprocess.py')
group.add('--data_ids', '-data_ids', nargs='+', default=[None],
help="In case there are several corpora.")
group.add('--data_weights', '-data_weights', type=int, nargs='+',
default=[1], help="""Weights of different corpora,
should follow the same order as in -data_ids.""")
group.add('--data_to_noise', '-data_to_noise', nargs='+', default=[],
help="IDs of datasets on which to apply noise.")
group.add('--save_model', '-save_model', default='model',
help="Model filename (the model will be saved as "
"<save_model>_N.pt where N is the number "
"of steps")
group.add('--save_checkpoint_steps', '-save_checkpoint_steps',
type=int, default=5000,
help="""Save a checkpoint every X steps""")
group.add('--keep_checkpoint', '-keep_checkpoint', type=int, default=-1,
help="Keep X checkpoints (negative: keep all)")
# GPU
group.add('--gpuid', '-gpuid', default=[], nargs='*', type=int,
help="Deprecated see world_size and gpu_ranks.")
group.add('--gpu_ranks', '-gpu_ranks', default=[], nargs='*', type=int,
help="list of ranks of each process.")
group.add('--world_size', '-world_size', default=1, type=int,
help="total number of distributed processes.")
group.add('--gpu_backend', '-gpu_backend',
default="nccl", type=str,
help="Type of torch distributed backend")
group.add('--gpu_verbose_level', '-gpu_verbose_level', default=0, type=int,
help="Gives more info on each process per GPU.")
group.add('--master_ip', '-master_ip', default="localhost", type=str,
help="IP of master for torch.distributed training.")
group.add('--master_port', '-master_port', default=10000, type=int,
help="Port of master for torch.distributed training.")
group.add('--queue_size', '-queue_size', default=40, type=int,
help="Size of queue for each process in producer/consumer")
group.add('--seed', '-seed', type=int, default=-1,
help="Random seed used for the experiments "
"reproducibility.")
# Init options
group = parser.add_argument_group('Initialization')
group.add('--param_init', '-param_init', type=float, default=0.1,
help="Parameters are initialized over uniform distribution "
"with support (-param_init, param_init). "
"Use 0 to not use initialization")
group.add('--param_init_glorot', '-param_init_glorot', action='store_true',
help="Init parameters with xavier_uniform. "
"Required for transformer.")
group.add('--train_from', '-train_from', default='', type=str,
help="If training from a checkpoint then this is the "
"path to the pretrained model's state_dict.")
group.add('--reset_optim', '-reset_optim', default='none',
choices=['none', 'all', 'states', 'keep_states'],
help="Optimization resetter when train_from.")
# Pretrained word vectors
group.add('--pre_word_vecs_enc', '-pre_word_vecs_enc',
help="If a valid path is specified, then this will load "
"pretrained word embeddings on the encoder side. "
"See README for specific formatting instructions.")
group.add('--pre_word_vecs_dec', '-pre_word_vecs_dec',
help="If a valid path is specified, then this will load "
"pretrained word embeddings on the decoder side. "
"See README for specific formatting instructions.")
# Fixed word vectors
group.add('--fix_word_vecs_enc', '-fix_word_vecs_enc',
action='store_true',
help="Fix word embeddings on the encoder side.")
group.add('--fix_word_vecs_dec', '-fix_word_vecs_dec',
action='store_true',
help="Fix word embeddings on the decoder side.")
# Optimization options
group = parser.add_argument_group('Optimization- Type')
group.add('--batch_size', '-batch_size', type=int, default=64,
help='Maximum batch size for training')
group.add('--batch_type', '-batch_type', default='sents',
choices=["sents", "tokens"],
help="Batch grouping for batch_size. Standard "
"is sents. Tokens will do dynamic batching")
group.add('--pool_factor', '-pool_factor', type=int, default=8192,
help="""Factor used in data loading | |
# exopy_qcircuits/instruments/drivers/ZI/UHFLI.py
# -*- coding: utf-8 -*-
"""This module defines drivers for UHFLI using Zhinst Library.
:Contains:
UHFLI
The Python package zhinst from Zurich Instruments needs to be installed.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from ..driver_tools import (InstrIOError, instrument_property,
secure_communication)
import sys
from subprocess import call
import ctypes
import os
from inspect import cleandoc
import math
import numpy as np
import time
import logging
logger = logging.getLogger(__name__)
try:
from ..ZI_tools import ZIInstrument
import zhinst.utils
class UHFLI(ZIInstrument):
def __init__(self,connection_info, caching_allowed=True,
caching_permissions={}):
super(UHFLI, self).__init__(connection_info, caching_allowed,
caching_permissions)
self.awgModule = None
self.required_devtype='.*LI'
self.required_options=['AWG','DIG']
def close_connection(self):
if self.awgModule:
if self.daq:
self.daq.setInt('/%s/awgs/0/enable' %self.device, 0)
self.awgModule.finish()
self.awgModule.clear()
def set_general_setting(self):
general_setting = [['/%s/demods/*/enable' % self.device, 0],
['/%s/scopes/*/enable' % self.device, 0]]
self.daq.set(general_setting)
self.daq.sync()
def TransfertSequence(self,awg_program):
# Transfer the AWG sequence program. Compilation starts automatically.
self.awgModule.set('awgModule/compiler/sourcestring', awg_program)
while self.awgModule.getInt('awgModule/compiler/status') == -1:
time.sleep(0.1)
if self.awgModule.getInt('awgModule/compiler/status') == 1:
# compilation failed, raise an exception
raise Exception(self.awgModule.getString('awgModule/compiler/statusstring'))
else:
if self.awgModule.getInt('awgModule/compiler/status') == 2:
print("Compilation successful with warnings, will upload the program to the instrument.")
print("Compiler warning: ",
self.awgModule.getString('awgModule/compiler/statusstring'))
# wait for waveform upload to finish
i = 0
while self.awgModule.getDouble('awgModule/progress') < 1.0:
time.sleep(0.1)
i += 1
def get_scope_demod(self,samplingRate, duration, delay, recordsPerCapture,
freq, average,demod,trace, Npoints,customDemod,demodCosinus,scopeModule):
if 0 in duration[0]:
channel = [1]
elif 0 in duration[1]:
channel=[0]
else :
channel = [0,1]
#erase the memory of the scope
scopeModule.set('scopeModule/clearhistory', 1)
# Tell the module to be ready to acquire data; reset the module's progress to 0.0.
scopeModule.execute()
# Enable the scope: Now the scope is ready to record data upon receiving triggers.
self.daq.setInt('/%s/scopes/0/single' % self.device, 0)
self.daq.setInt('/%s/scopes/0/enable' % self.device, 1)
self.daq.sync()
start = time.time()
timeout = recordsPerCapture/100 #[s]
records = 0
dataRecorded = []
# Wait until the Scope Module has received and processed the desired number of records.
while (records < recordsPerCapture):
time.sleep(0.01)
records = scopeModule.getInt("scopeModule/records")
                # Advanced use: data can be read out before all records (or all segments of a
                # multi-segment record) have been acquired. Note that complete records are
                # removed from the Scope Module and cannot be read out again; the read-out data
                # must be managed by the client code. If a multi-segment record is read out
                # early, the wave data already has its full size and the data points of
                # segments not yet acquired are equal to 0.
data=scopeModule.read(True)
if '/%s/scopes/0/wave'%self.device in data.keys():
dataRecorded.extend(data['/%s/scopes/0/wave' % self.device])
if (time.time() - start) > timeout:
# Break out of the loop if for some reason we're no longer receiving scope data from the device.
print("\nScope Module did not return {} records after {} s - forcing stop.".format(1, timeout))
break
self.daq.setInt('/%s/scopes/0/enable' % self.device, 0)
# Read out the scope data from the module.
data = scopeModule.read(True)
if '/%s/scopes/0/wave' %self.device in data.keys():
dataRecorded.extend(data['/%s/scopes/0/wave' %self.device])
# Stop the module
scopeModule.set('scopeModule/clearhistory', 1)
scopeModule.finish()
scopeModule.clear()
#check that no problems occur
dataUnusable=[]
num_records = len(dataRecorded)
for i in range(len(dataRecorded)):
if dataRecorded[i][0]['flags'] & 1:
dataUnusable.append(i)
if dataRecorded[i][0]['flags'] & 2:
dataUnusable.append(i)
if dataRecorded[i][0]['flags'] & 3:
dataUnusable.append(i)
            # maximum number of whole periods contained in each trace at frequency freq
nb =np.array([np.int_(duration[i]*freq[i]) for i in range(2)])
            # number of samples kept for the demodulation (an integer number of periods)
nbSample= [np.int_(nb[i]*1/freq[i]*samplingRate) for i in range(2)]
delaySample= np.int_(np.array(delay)*samplingRate)
length = [np.int_(i) for i in np.array(duration)*samplingRate]
            # keep only records that were not flagged as problematic
tracedata = [[],[]]
for i in range(num_records):
if not i in dataUnusable:
for c in range(len(channel)):
tracedata[channel[c]].append(dataRecorded[i][0]['wave'][c][:delaySample[channel[c]]+np.sum(length[channel[c]])])
if max(len(tracedata[0]),len(tracedata[1]))>=recordsPerCapture:
for c in channel:
tracedata[c] = np.array(tracedata[c])[:recordsPerCapture,delaySample[c]:delaySample[c]+np.sum(length[c])]
else:
raise Exception("Error: To many data not workable")
datatype = str(dataRecorded[0][0]['wave'].dtype)
del(dataRecorded)
#demodulation function
coses=[]
sines=[]
for c in range(2):
if demodCosinus:
coses.append(np.cos(np.arange(np.sum(length[c]))*2*np.pi*freq[c]/samplingRate))
sines.append(np.sin(np.arange(np.sum(length[c]))*2*np.pi*freq[c]/samplingRate))
else:
coses.append(customDemod[0])
sines.append(customDemod[1])
if demod:
answerTypeDemod = []
for c in channel:
for i in range(len(duration[c])):
answerTypeDemod =answerTypeDemod+ [(str(c+1)+'I_'+str(i),datatype),
(str(c+1)+'Q_'+str(i),datatype)]
else:
answerTypeDemod= 'f'
if trace:
answerTypeTrace=[]
for c in channel:
for i in range(len(duration[c])):
answerTypeTrace = answerTypeTrace+ [(str(c+1)+'_'+str(i),datatype)]
else:
answerTypeTrace = 'f'
if average:
if Npoints == 1 or Npoints == 0:
answerDemod = np.zeros(1, dtype=answerTypeDemod)
answerTrace = np.zeros(np.max([np.max(length[0]),np.max(length[1])]), dtype=answerTypeTrace)
else:
answerDemod = np.zeros((1, Npoints), dtype=answerTypeDemod)
answerTrace = np.zeros((Npoints,np.max([np.max(length[0]),np.max(length[1])])), dtype=answerTypeTrace)
else:
answerDemod = np.zeros(recordsPerCapture, dtype=answerTypeDemod)
answerTrace = np.zeros((recordsPerCapture, np.max([np.max(length[0]),np.max(length[1])])), dtype=answerTypeTrace)
for c in channel:
if demod[c]:
start =0
for i in range(len(duration[c])):
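                        # Digital IQ demodulation (what the two lines below compute): project
                        # the recorded trace onto the reference cosine/sine over an integer
                        # number of periods; the factor 2 recovers the quadrature amplitudes
                        # I and Q from the time averages.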
ansI= 2*np.mean(tracedata[c][:,start:start+nbSample[c][i]]*coses[c][start:start+nbSample[c][i]],axis=1)
ansQ= 2*np.mean(tracedata[c][:,start:start+nbSample[c][i]]*sines[c][start:start+nbSample[c][i]],axis=1)
if Npoints!=1 and Npoints!=0 and average:
ansI = ansI.reshape((int(recordsPerCapture/Npoints),Npoints))
ansQ = ansQ.reshape((int(recordsPerCapture/Npoints),Npoints))
ansI = ansI.mean(axis=0)
ansQ = ansQ.mean(axis=0)
answerDemod[str(c+1)+'I_'+str(i)]= ansI
answerDemod[str(c+1)+'Q_'+str(i)]= ansQ
elif average and (Npoints==1 or Npoints ==0):
ansI = np.mean(ansI,axis=0)
ansQ = np.mean(ansQ,axis=0)
answerDemod[str(c+1)+'I_'+str(i)]= ansI
answerDemod[str(c+1)+'Q_'+str(i)]= ansQ
else:
answerDemod[str(c+1)+'I_'+str(i)]= ansI
answerDemod[str(c+1)+'Q_'+str(i)]= ansQ
start = start+length[c][i]
for c in channel:
if trace[c]:
start =0
for i in range(len(duration[c])):
                        # use a distinct local name to avoid shadowing the `trace` flag argument
                        trace_i = tracedata[c][:,start:start+length[c][i]]
                        if Npoints!=1 and Npoints!=0 and average:
                            trace_i = (trace_i.reshape((-1,Npoints,length[c][i]))).mean(axis=0)
                            answerTrace[str(c+1)+'_'+str(i)][:,:length[c][i]] = trace_i
                        elif average and (Npoints==1 or Npoints ==0):
                            trace_i = np.mean(trace_i,axis=0)
                            answerTrace[str(c+1)+'_'+str(i)][:length[c][i]] = trace_i
                        else:
                            answerTrace[str(c+1)+'_'+str(i)][:,:length[c][i]] = trace_i
start = start+length[c][i]
return answerDemod, answerTrace
@secure_communication()
def get_demodLI(self,recordsPerCapture,average,Npoints,channel,demod,powerBool,AWGcontrol):
if ['1'] == channel:
self.daq.setInt('/%s/demods/%d/enable' % (self.device,demod[0]), 1) # enable the stream data of input1
self.daq.sync()
elif ['2'] == channel:
self.daq.setInt('/%s/demods/%d/enable' % (self.device,demod[1]), 1) # enable the stream data of input2
self.daq.sync()
else :
# enable the stream data of the demodulators 3 and 4
self.daq.set([['/%s/demods/%d/enable' % (self.device,demod[0]), 1],['/%s/demods/%d/enable' % (self.device,demod[1]), 1]])
self.daq.sync()
time.sleep(0.1)
if AWGcontrol:
self.daq.setInt('/%s/awgs/0/enable' %self.device, 1)
data1x=[];
data1y=[];
data2x=[];
data2y=[];
time1=[]
time2=[]
path1 = '/%s/demods/%d/sample' % (self.device,demod[0])
path2 = '/%s/demods/%d/sample' % (self.device,demod[1])
data=self.daq.poll(0.1,500,0x0004,True)
if '1' in channel:
if path1 in data.keys():
data1x= data[path1]['x']
data1y = data[path1]['y']
time1 = data[path1]['timestamp']
if '2' in channel:
if path2 in data.keys():
data2x= data[path2]['x']
data2y = data[path2 ]['y']
time2 = data[path2]['timestamp']
# if math.isnan(np.mean(data1x)) or math.isnan(np.mean(data1y)):
# print(str(data))
while(len(data1x)<recordsPerCapture*('1' in channel) or len(data2x)<recordsPerCapture*('2' in channel)):
data=self.daq.poll(0.1,500,0x0004,True)
if '1' in channel:
if path1 in data.keys():
data1x = np.concatenate((data1x,data[path1]['x']))
data1y = np.concatenate((data1y,data[path1]['y']))
time1 = np.concatenate((time1,data[path1]['timestamp']))
if '2' in channel:
if path2 in data.keys():
data2x = np.concatenate((data2x,data[path2]['x']))
data2y = np.concatenate((data2y,data[path2]['y']))
time2 = np.concatenate((time2,data[path2]['timestamp']))
self.daq.setInt('/%s/demods/%s/enable'% (self.device,demod[0]), 0); # close the stream data of input1
self.daq.setInt('/%s/demods/%s/enable'% (self.device,demod[1]), 0); # close the stream data of input2
if AWGcontrol:
self.daq.setInt('/%s/awgs/0/enable' %self.device, 0)
if ['1','2'] == channel:
n=0;
for i in range(min(len(time1),len(time2))):
if time1[i]!=time2[i]:
n+=1;
print(str(n) + ' errors in demodulation')
print(str(len(time1)-len(time2)))
diff = np.diff(time1)
mini = np.min(diff)
number=0;
for i in range(len(diff)):
if 1.5*mini <= diff[i]:
number = number+1
if number!=0:
print("%d trigger miss" %number)
if '1' in channel:
if math.isnan(np.mean(data1x)):
listNan = [math.isnan(i) for i in data1x]
data1x=np.delete(data1x,np.where(listNan))
data1y=np.delete(data1y,np.where(listNan))
                    print('Warning: NaN values detected in data1: %d NaN values, %d valid values' % (np.sum(listNan), len(data1x)))
if len(data1x)<recordsPerCapture:
recordsPerCapture = len(data1x)
if '2' in channel:
if math.isnan(np.mean(data2x)):
listNan = [math.isnan(i) for i in data2x]
data2x=np.delete(data2x,np.where(listNan))
data2y=np.delete(data2y,np.where(listNan))
                    print('Warning: NaN values detected in data2: %d NaN values, %d valid values' % (np.sum(listNan), len(data2x)))
if len(data2x)<recordsPerCapture:
recordsPerCapture = len(data2x)
if '1' in channel:
data1x= data1x[:recordsPerCapture]
data1y= data1y[:recordsPerCapture]
if '2' in channel:
data2x= data2x[:recordsPerCapture]
data2y= data2y[:recordsPerCapture]
answerTypeDemod=[];
if '1' in channel:
answerTypeDemod = answerTypeDemod+ [('1I',str(data1x.dtype)),
('1Q',str(data1y.dtype))]
if powerBool :
answerTypeDemod = answerTypeDemod+ [('1P',str(data1x.dtype))]
if '2' in channel:
answerTypeDemod = answerTypeDemod+ [('2I',str(data2x.dtype)),
('2Q',str(data2y.dtype))]
if powerBool :
answerTypeDemod = answerTypeDemod+ [('2P',str(data2x.dtype))]
if average:
if Npoints==0 or Npoints ==1:
answerDemod = np.zeros(1, dtype=answerTypeDemod)
else :
answerDemod = np.zeros((1, Npoints), dtype=answerTypeDemod)
else:
answerDemod = np.zeros(recordsPerCapture, dtype=answerTypeDemod)
if average:
if Npoints==0 or Npoints ==1:
if '1' in channel:
ans1I = np.mean(data1x)
ans1Q = np.mean(data1y)
answerDemod['1I']= ans1I
answerDemod['1Q']= ans1Q
if powerBool:
ansR = np.mean(data1x**2+data1y**2)
answerDemod['1P']= ansR
if '2' in channel:
ans2I = np.mean(data2x)
ans2Q = np.mean(data2y)
answerDemod['2I']= ans2I
answerDemod['2Q']= ans2Q
if powerBool:
ansR = np.mean(data2x**2+data2y**2)
answerDemod['2P']= ansR
else :
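                    # Averaging with Npoints > 1 assumes successive triggered samples cycle
                    # through the Npoints sweep values, so the demodulated samples are
                    # reshaped to (repetitions, Npoints) and averaged over the repetitions.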
if '1' in channel:
data1x = data1x.reshape((-1,Npoints))
data1y = data1y.reshape((-1,Npoints))
ans1I = data1x.mean(axis=0)
ans1Q = data1y.mean(axis=0)
answerDemod['1I']= ans1I
answerDemod['1Q']= ans1Q
if powerBool:
                            ansR = np.mean((data1x**2+data1y**2).reshape((-1,Npoints)),axis=0)
answerDemod['1P']= ansR
if '2' in channel:
data2x = data2x.reshape((-1,Npoints))
data2y = data2y.reshape((-1,Npoints))
ans2I = data2x.mean(axis=0)
| |
" to method portals_id_image_folders_rel_fk_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_image_folders_rel_fk_delete`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_image_folders_rel_fk_delete`")
collection_formats = {}
resource_path = '/Portals/{id}/imageFolders/rel/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_image_folders_rel_fk_head(self, id, fk, **kwargs):
"""
Check the existence of imageFolders relation to an item by id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_image_folders_rel_fk_head(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str fk: Foreign key for imageFolders (required)
:return: bool
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_image_folders_rel_fk_head_with_http_info(id, fk, **kwargs)
else:
(data) = self.portals_id_image_folders_rel_fk_head_with_http_info(id, fk, **kwargs)
return data
def portals_id_image_folders_rel_fk_head_with_http_info(self, id, fk, **kwargs):
"""
Check the existence of imageFolders relation to an item by id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_image_folders_rel_fk_head_with_http_info(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str fk: Foreign key for imageFolders (required)
:return: bool
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_image_folders_rel_fk_head" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_image_folders_rel_fk_head`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_image_folders_rel_fk_head`")
collection_formats = {}
resource_path = '/Portals/{id}/imageFolders/rel/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'HEAD',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='bool',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_image_folders_rel_fk_put(self, id, fk, **kwargs):
"""
Add a related item by id for imageFolders.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_image_folders_rel_fk_put(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str fk: Foreign key for imageFolders (required)
:param PortalImageFolder data:
:return: PortalImageFolder
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_image_folders_rel_fk_put_with_http_info(id, fk, **kwargs)
else:
(data) = self.portals_id_image_folders_rel_fk_put_with_http_info(id, fk, **kwargs)
return data
def portals_id_image_folders_rel_fk_put_with_http_info(self, id, fk, **kwargs):
"""
Add a related item by id for imageFolders.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_image_folders_rel_fk_put_with_http_info(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str fk: Foreign key for imageFolders (required)
:param PortalImageFolder data:
:return: PortalImageFolder
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'fk', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_image_folders_rel_fk_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_image_folders_rel_fk_put`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_image_folders_rel_fk_put`")
collection_formats = {}
resource_path = '/Portals/{id}/imageFolders/rel/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortalImageFolder',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_invitation_tickets_fk_delete(self, id, id2, fk, **kwargs):
"""
Delete InvitationTickets for this Portal
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_invitation_tickets_fk_delete(id, id2, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str id2: Portal id (required)
:param str fk: InvitationTicket id (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_invitation_tickets_fk_delete_with_http_info(id, id2, fk, **kwargs)
else:
(data) = self.portals_id_invitation_tickets_fk_delete_with_http_info(id, id2, fk, **kwargs)
return data
def portals_id_invitation_tickets_fk_delete_with_http_info(self, id, id2, fk, **kwargs):
"""
Delete InvitationTickets for this Portal
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_invitation_tickets_fk_delete_with_http_info(id, id2, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str id2: Portal id (required)
:param str fk: InvitationTicket id (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'id2', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_invitation_tickets_fk_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_invitation_tickets_fk_delete`")
# verify the required parameter 'id2' is set
if ('id2' not in params) or (params['id2'] is None):
raise ValueError("Missing the required parameter `id2` when calling `portals_id_invitation_tickets_fk_delete`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_invitation_tickets_fk_delete`")
collection_formats = {}
resource_path = '/Portals/{id}/invitationTickets/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'id2' in params:
path_params['id'] = params['id2']
if 'fk' in params:
| |
import code # For development: code.interact(local = dict(globals(), **locals()))
import numpy as np
import xml.etree.ElementTree as et
from scipy.io import netcdf
import matplotlib.dates as mdates
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import math
# =============================================================================
#
# This module processes and compares model output to census benchmarks.
# Currently, this module assumes that FATES size structured outputs are
# available. Benchmarks are prepared in another script, one such script
# is the NGEET/tools_benchmarking_evaluation set.
#
# Here is the list of model output needed:
#
# Basal Area: BA_SCPF (SIZE x PFT)
# Diameter Increment: DDBH_SCPF (SIZE x PFT) / NPLANT_SCPF
# Mortality Rate: (M1_SCPF + M2_SCPF + M3_SCPF + M4_SCPF + M5_SCPF +
# M6_SCPF + M7_SCPF + M8_SCPF) / NPLANT_SCPF
# Recruitment Rate: RECRUITMENT (PFT)
#
#
#
#
# =============================================================================
# The CTFS processed files use an invalid flag of -9e+30
# We will consider anything very large negative invalid
invalid_flag = -9.9e10
# Anything that is a rate, needs to be normalized by the number of plants
# This is a restriction on
nplant_scpf_name = 'NPLANT_SCPF'
# BA_SCPF (SIZE x PFT)
# DDBH_SCPF (SIZE x PFT)
# (M1_SCPF + M2_SCPF + M3_SCPF + M4_SCPF + M5_SCPF + M6_SCPF + M7_SCPF + M8_SCPF) / NPLANT_SCPF
# RECRUITMENT (PFT)
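
# Illustrative sketch only (not called anywhere in this module): how an SCPF rate
# variable is normalized by plant counts. `hist` is assumed to be a mapping from
# history symbol to a numpy array of shape (time, fates_levscpf, lndgrid), as read
# from a FATES history file.
def _example_scpf_mortality_rate(hist):
    mort_symbols = ['M1_SCPF', 'M2_SCPF', 'M3_SCPF', 'M4_SCPF',
                    'M5_SCPF', 'M6_SCPF', 'M7_SCPF', 'M8_SCPF']
    # Mask entries with no plants so the division below stays defined
    nplant = np.ma.masked_less_equal(hist[nplant_scpf_name], 0.0)
    total_mort = sum(hist[sym] for sym in mort_symbols)
    # Per-capita mortality rate [/yr] on the size x PFT (SCPF) grid
    return total_mort / nplant
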
# This object is bound to each site
# It should contain a list of viable benchmarks
class benchmark_obj:
def __init__(self,census_filename):
self.bvarlist = []
self.census_filename = census_filename
# Lets check through the census file and see if any of these variables
# are in the file. We will later look through the model output
# and pop off list entries that are not there.
if(census_filename.strip() != ''):
print("Loading census file: {}".format(census_filename))
fp = netcdf.netcdf_file(census_filename, 'r', mmap=False)
cens_var_name = 'basal_area_by_size_census'
if (fp.variables.has_key(cens_var_name)):
self.bvarlist.append(benchmark_vars( name = 'Basal Area', \
mod_symbols = 'BA_SCPF', \
obs_symbol = cens_var_name, \
mod_dimclass = 'scpf', \
obs_dimclass = 'size-class', \
unit = 'm2/ha', \
vartype = 'quantity'))
self.bvarlist[-1].load_census(fp)
else:
print('Census variable: '+cens_var_name+', was not found in the census file')
cens_var_name = 'growth_increment_by_size_census'
if (fp.variables.has_key(cens_var_name)):
self.bvarlist.append(benchmark_vars( name = 'Growth Increment', \
mod_symbols = 'DDBH_SCPF', \
obs_symbol = cens_var_name, \
mod_dimclass = 'scpf', \
obs_dimclass = 'size-class', \
unit = 'cm/yr', \
vartype = 'rate'))
self.bvarlist[-1].load_census(fp)
else:
print('Census variable: '+cens_var_name+', was not found in the census file')
cens_var_name = 'mortality_rate_by_size_census'
if (fp.variables.has_key(cens_var_name)):
self.bvarlist.append(benchmark_vars( name = 'Mortality Rate', \
mod_symbols = 'M1_SCPF,M2_SCPF,M3_SCPF,M4_SCPF,M5_SCPF,M6_SCPF,M7_SCPF,M8_SCPF', \
obs_symbol = cens_var_name, \
mod_dimclass = 'scpf', \
obs_dimclass = 'size-class', \
unit = '/yr', \
vartype = 'rate'))
self.bvarlist[-1].load_census(fp)
cens_var_name = 'new_recruits_by_census'
if (fp.variables.has_key(cens_var_name)):
self.bvarlist.append(benchmark_vars( name = 'Recruitment Rate', \
mod_symbols = 'RECRUITMENT', \
obs_symbol = cens_var_name, \
mod_dimclass = 'pft', \
obs_dimclass = 'scalar', \
unit = 'indv ha-1 yr-1', \
vartype = 'quantity'))
self.bvarlist[-1].load_census(fp)
else:
print('Census variable: '+cens_var_name+', was not found in the census file')
fp.close()
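    # Typical flow (sketch): bm = benchmark_obj(census_filename), then
    # bm.init_history(first_history_file, n_htypes), and finally one call to
    # bm.load_history(fname, h_index, site_index) per history file to accumulate
    # the running means stored in each benchmark variable.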
# ===================================================================================
## Check the first history file in the list to see which benchmarking variables
# are available.
def init_history(self,hist_file0,n_htypes):
fp = netcdf.netcdf_file(hist_file0, 'r', mmap=False)
for bvar in self.bvarlist:
all_symbols_found = True
for mod_symbol in bvar.mod_symbols:
if( not fp.variables.has_key(mod_symbol) ):
all_symbols_found = False
if( all_symbols_found ):
bvar.active = True
if( bvar.mod_dimclass == 'scpf'):
dims = fp.variables[bvar.mod_symbols[0]].dimensions
if(dims[1] != 'fates_levscpf'):
print('An SCPF benchmark variable: {} does not actually'.format(bvar.mod_symbols[0]))
print(' have the correct dimensions: {}... exiting'.format(dims))
exit(2)
fates_levscls = fp.variables['fates_levscls'].data
if (fates_levscls[0] == 0.0):
bvar.offset0 = True
if(fates_levscls.size-1 != bvar.scv_obs_ar.shape[0]):
                            print("Dimensions of model output size-classes don't match observations")
for isc,scvar in enumerate(fates_levscls[1:]):
if( np.abs(scvar- bvar.scv_x[isc])>1.0e-10 ):
                                print("Dimensions of model output size-classes don't match observations")
                                print('Observed classes: {}'.format(bvar.scv_x))
                                print('Modeled (0 is ignored): {}'.format(fates_levscls))
else:
bvar.offset0 = False
if(fates_levscls.size != bvar.scv_obs_ar.shape[0]):
                            print("Dimensions of model output size-classes don't match observations")
for isc,scvar in enumerate(fates_levscls[:]):
if( np.abs(scvar- bvar.scv_x[isc])>1.0e-10 ):
                                print("Dimensions of model output size-classes don't match observations")
                                print('Observed classes: {}'.format(bvar.scv_x))
                                print('Modeled: {}'.format(fates_levscls))
d_sizes = bvar.scv_obs_ar.shape
bvar.modlist = []
for imod in range(n_htypes):
bvar.modlist.append(mod_scv_array(d_sizes[0]))
elif( bvar.mod_dimclass == 'pft' ):
dims = fp.variables[bvar.mod_symbols[0]].dimensions
if(dims[1] != 'fates_levpft'):
print('A PFT benchmark variable: {} does not actually'.format(bvar.mod_symbols[0]))
print(' have the correct dimensions: {}... exiting'.format(dims))
exit(2)
fates_levpft = fp.variables['fates_levpft'].data
scalar_size = 1
bvar.modlist = []
for imod in range(n_htypes):
bvar.modlist.append(mod_scv_array(scalar_size))
fp.close()
# ===================================================================================
def load_history(self,filename,h_index,site_index):
# Objective is to push new estimates of the benchmark variables
fp = netcdf.netcdf_file(filename, 'r', mmap=False)
#code.interact(local = dict(globals(), **locals()))
for bvar in self.bvarlist:
if(bvar.active):
d_sizes = fp.variables[bvar.mod_symbols[0]].shape
hist_arr = np.ma.zeros(fp.variables[bvar.mod_symbols[0]].shape)
if(bvar.vartype == 'quantity'):
for mod_symbol in bvar.mod_symbols:
hist_arr = hist_arr + fp.variables[mod_symbol].data
elif( (bvar.vartype == 'rate') and (bvar.mod_dimclass=='scpf') ):
# Mask out when there are no cohort counts
hist_arr[fp.variables[nplant_scpf_name].data <= 0.] = np.ma.masked
for mod_symbol in bvar.mod_symbols:
hist_arr = hist_arr + \
fp.variables[mod_symbol].data / fp.variables[nplant_scpf_name].data
else:
print("Unhandled variable type submitted to registry: {}".format(bvar.vartype))
print("Must be one of: quantity or rate. Exiting")
exit(2)
# Mask if the variable has a no data flag
hist_arr[hist_arr.data<invalid_flag] = np.ma.masked
if( (bvar.obs_dimclass=='scalar') and (bvar.mod_dimclass == 'pft') ):
# These are the expected dimensions
# (time, fates_levpft, lndgrid) ;
for itime in range(d_sizes[0]):
# Loop PFTs
local_vars = hist_arr[itime,:,site_index]
if ( np.ma.count(local_vars)>0 ):
if(bvar.vartype == 'quantity'):
local_var = local_vars.sum()
elif(bvar.vartype == 'rate'):
local_var = local_vars.mean()
else:
print('Unknown vartype')
exit(2)
bvar.modlist[h_index].var_ar[0] = ( bvar.modlist[h_index].var_ar[0] \
* bvar.modlist[h_index].var_n[0] \
+ local_var) / (bvar.modlist[h_index].var_n[0] + 1.0)
bvar.modlist[h_index].var_n[0] = bvar.modlist[h_index].var_n[0] + 1.0
elif( (bvar.obs_dimclass=='size-class') and (bvar.mod_dimclass=='scpf') ):
# Create a mapping between FATES size-classes and the SCPF map
# ------------------------------------------------------------
fates_levscls = fp.variables['fates_levscls'].data
fates_scmap_levscpf = fp.variables['fates_scmap_levscpf'].data
# fates_scmap_levscpf
# These are the expected dimensions
# ('time', 'fates_levscpf', 'lndgrid')
# Mask out when there are no cohort counts
hist_arr[fp.variables[nplant_scpf_name].data <= 0.] = np.ma.masked
for itime in range(d_sizes[0]):
# Loop Sizes and then PFTs
# For quantities, add them
# For rates, take the mean
# code.interact(local = dict(globals(), **locals()))
# for isc,isc_map0 in enumerate(fates_scmap_levscpf):
for isc,isc_val in enumerate(fates_levscls):
if ( (bvar.offset0 == False) or ((bvar.offset0 == True)and(isc != 0 )) ):
if(bvar.offset0==True):
isc0 = isc-1
else:
isc0 = isc
sc_maps = [i for i, x in enumerate(fates_scmap_levscpf) if x == isc+1 ]
local_vars = hist_arr[itime,sc_maps,site_index]
if ( np.ma.count(local_vars)>0 ):
if(bvar.vartype == 'quantity'):
local_var = local_vars.sum()
elif(bvar.vartype == 'rate'):
local_var = local_vars.mean()
else:
print('Unknown vartype')
exit(2)
bvar.modlist[h_index].var_ar[isc0] = ( bvar.modlist[h_index].var_ar[isc0] \
* bvar.modlist[h_index].var_n[isc0] \
+ local_var) / (bvar.modlist[h_index].var_n[isc0] + 1.0)
bvar.modlist[h_index].var_n[isc0] = bvar.modlist[h_index].var_n[isc0] + 1.0
else:
print('Only scpf to sc is available now. Exiting.')
exit(2)
fp.close()
# =======================================================================================
class benchmark_vars:
def __init__(self,name,mod_symbols,obs_symbol,mod_dimclass,obs_dimclass,unit,vartype):
self.active = False
self.obs_symbol = obs_symbol
self.name = name
self.scv = False # Size-class variable
self.sv = False # Scalar variable
self.unit = unit
self.vartype = vartype # Is this a simple quantity, or a rate?
self.hfile_id = -9
# This will convert mod_symbols into a list
self.mod_symbols = mod_symbols.split(',')
self.mod_dimclass = mod_dimclass
self.obs_dimclass = obs_dimclass
# This is a size-dimensioned variable
if(self.mod_dimclass == 'scpf'):
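# 'scpf' is assumed here to be the FATES size-class x PFT output dimension;
# load_history() later collapses it onto size classes via fates_scmap_levscpf.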
# Initialize size-class array
self.scv = True
# Initialize scalar array
self.sv = True
if(self.mod_dimclass == 'scalar'):
# Initialize scalar array
self.sv = True
def load_census(self,fp):
# The 'size-class' type census variable is actually a
# fp.variables[self.obs_symbol].shape
# (7, 10, 3)
# fp.variables[self.obs_symbol].dimensions
# ('cens', 'dclass', 'confidence')
# fp.variables[self.obs_symbol].data
d_sizes = fp.variables[self.obs_symbol].shape
dim_names = fp.variables[self.obs_symbol].dimensions
if(self.obs_dimclass == 'size-class'):
if(dim_names[0] != 'cens'):
print('expected census data to have cens as first dimension: {}'.format(dim_names))
print('exiting')
exit(2)
# Condense the census dimension into 1 size
self.scv_obs_ar = np.zeros((d_sizes[1],d_sizes[2]))
# This is the mean across census intervals
# AND... the lowest lower bound CI across census,
# and... the highest upper bound CI across census
# Mask out bad data (for rates of change, probably missing
# first census, or perhaps lowest
masked_data = np.ma.array(fp.variables[self.obs_symbol].data, \
mask=fp.variables[self.obs_symbol].data<invalid_flag)
self.scv_obs_ar[:,0] = masked_data[:,:,0].min(axis=0).data
self.scv_obs_ar[:,1] = masked_data[:,:,1].mean(axis=0).data
self.scv_obs_ar[:,2] = masked_data[:,:,2].max(axis=0).data
# Note that the dimensions in the census dictate the output dimension
self.scv_x = np.zeros((d_sizes[1],1))
fp.variables['dclass'].data.resize(self.scv_x.shape)
self.scv_x[:] = fp.variables['dclass'].data
self.scv_x_unit = 'DBH [cm]'
elif(self.obs_dimclass == 'scalar' ):
if(dim_names[0] != 'cens'):
print('expected census data to have cens as first dimension: {}'.format(dim_names))
print('exiting')
exit(2)
# Condense the census dimension into 1 size (confidence interval)
self.scv_obs_ar = np.zeros((d_sizes[1]))
# This
x2, y1: y2] = 255
return
'''
# for the full equation - m and the whole thing
# USED in "get rect side scales"
def math_m(alpha,beta):
m = np.tan(0.5*(alpha-beta)) / np.tan(0.5*(alpha+beta))
return m
def math_scale_a(d,m):
a = d*((1+m) / (1-m))
#keep in mind this is HALF a scale. This is also for Vertical measuring
return a
# Retrieve the height and width of a rectangle
# USE THIS IN A FUNCTION
def get_rect_side_scales(aspect_ratio, distance, alpha, beta):
global math_m
global math_scale_a
V, H = aspect_ratio
vertical_scale_angle_parameter = math_m(alpha, beta)
vertical_scale = 2 * math_scale_a(distance, vertical_scale_angle_parameter)
horizontal_scale = (vertical_scale / V) * H
return (vertical_scale, horizontal_scale)
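# Illustrative sketch (added note, not part of the original pipeline) of how the
# helpers above chain together; alpha and beta go straight into np.tan, so they
# are taken as radians here, and the aspect-ratio tuple is (vertical, horizontal):
#   m = math_m(alpha, beta)                         # tilt-dependent factor
#   half_height = math_scale_a(distance, m)         # half the vertical extent
#   v, h = get_rect_side_scales((9, 16), distance, alpha, beta)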
"""
# Checks whether we have an overlapped rectangle
def check_overlap(h_offset, v_offset, rect_cam, rect_proj):
h_c, v_c = rect_cam
h_p, v_p = rect_proj
scalar_h = (h_c / 2) + (h_p / 2)
scalar_v = (v_c / 2) + (v_p / 2)
if np.abs(h_offset) >= scalar_h or np.abs(v_offset) >= scalar_v:
return False
else: return True
"""
# This function takes the raw pixel coordinates of the face-detection rectangle in the camera image,
# together with the image resolution in pixels, and converts them to ratios
def detection_rect_get_ratio(a,b,c,d,resolution):
m = a[0] / resolution[0]
k = a[1] / resolution[1]
face_width = np.abs(a[0] - b[0])
face_height = np.abs(a[1] - d[1])
n = (a[0] + face_width) / resolution[0]
l = (a[1] + face_height) / resolution[1]
return m, k, n, l
# this function is intended to use the above given ratios in order to convert the coordinates into our plane
# it will need to be fed the cam rectangle dimensions as they are in the plane
def detection_rect_in_plane(m, k, n, l, cam_rect_width, cam_rect_height):
ad = (m * cam_rect_width, k * cam_rect_height)
bd = (n * cam_rect_width, k * cam_rect_height)
cd = (n * cam_rect_width, l * cam_rect_height)
dd = (m * cam_rect_width, l * cam_rect_height)
rect_width = np.abs(ad[0] - bd[0])
rect_height = np.abs(ad[1] - dd[1])
return rect_width, rect_height, ad, bd, cd, dd
#This is our logic for finding the overlap coordinates
# y,y means completely outside, n,n means completely inside
# REVISION - THIS FUNCTION SHOULD ONLY BE USED TO DETERMINE THE OVERLAP OF THE DETECTION RECTANGLE OVER THE
# PROJECTION RECTANGLE. FEED IN THIS ORDER: AP, AD, BP, BD, CP, CD, DP, DD
def get_overlap_rect(ac, ap, bc, bp, cc, cp, dc, dp):
a = [0,0]
b = [0,0]
c = [0,0]
d = [0,0]
overlap_a = [0,0]
overlap_b = [0,0]
overlap_c = [0,0]
overlap_d = [0,0]
if ap[0] < ac[0]:
a[0] = True
else: a[0] = False
if ap[1] < ac[1]:
a[1] = True
else: a[1] = False
if a == [True,True]:
overlap_a = ac
elif a== [False,False]:
overlap_a = ap
elif a==[True,False]:
overlap_a = (ac[0], ap[1])
elif a==[False,True]:
overlap_a = (ap[0], ac[1])
if bp[0] > bc[0]:
b[0] = True
else: b[0] = False
if bp[1] < bc[1]:
b[1] = True
else: b[1] = False
if b==[True,True]:
overlap_b=bc
elif b==[False,False]:
overlap_b=bp
elif b==[True,False]:
overlap_b=(bc[0],bp[1])
elif b==[False,True]:
overlap_b = (bp[0],bc[1])
if cp[0] > cc[0]:
c[0] = True
else: c[0] = False
if cp[1] > cc[1]:
c[1] = True
else: c[1] = False
if c==[True,True]:
overlap_c=cc
elif c==[False,False]:
overlap_c=cp
elif c==[True,False]:
overlap_c=(cc[0],cp[1])
elif c==[False,True]:
overlap_c=(cp[0],cc[1])
if dp[0] < dc[0]:
d[0] = True
else: d[0] = False
if dp[1] > dc[1]:
d[1] = True
else: d[1] = False
if d==[True,True]:
overlap_d = dc
elif d==[False,False]:
overlap_d = dp
elif d==[True,False]:
overlap_d = (dc[0], dp[1])
elif d==[False,True]:
overlap_d = (dp[0], dc[1])
#print(overlap_a, overlap_b, overlap_c, overlap_d)
scale_width = np.abs(overlap_d[0] - overlap_c[0])
scale_height = np.abs(overlap_a[1] - overlap_d[1])
overlap_a = tuple(overlap_a)
overlap_b = tuple(overlap_b)
overlap_c = tuple(overlap_c)
overlap_d = tuple(overlap_d)
return scale_width, scale_height, overlap_a, overlap_b, overlap_c, overlap_d
# generates NEW coordinates of overlapping rectangle, based on a new axis in which ap is (0,0)
# feed the "a" point of the projection rectangle, feed all points of the overlapping rectangle & dimensions
def get_overlap_coords_based_on_projector_rect(ap, overlap_a, overlap_width, overlap_height):
new_overlap_a = (overlap_a[0] - ap[0], overlap_a[1] - ap[1])
new_overlap_b = (new_overlap_a[0] + overlap_width, new_overlap_a[1])
new_overlap_c = (new_overlap_b[0], new_overlap_b[1] + overlap_height)
new_overlap_d = (new_overlap_a[0], new_overlap_c[1])
return new_overlap_a, new_overlap_b, new_overlap_c, new_overlap_d
# Feed into this the overlap coordinates from the "get_overlap_coords_based_on_projector_rect" function
# also feed the width and height of the projector rectangle
def get_trimmed_detect_ratio_by_projection(a, b, c, d, proj_width, proj_height):
trimmed_overlap_width = np.abs(a[0] - b[0])
trimmed_overlap_height = np.abs(a[1] - d[1])
m = a[0] / proj_width
k = a[1] / proj_height
n = (a[0] + trimmed_overlap_width) / proj_width
l = (a[1] + trimmed_overlap_height) / proj_height
return m, k, n, l
# feed ratios received from "get_trimmed_detect_ratio_by_projection" and the resolution of the projected image
# This function will give us the final coordinates IN THE PROJECTED IMAGE in which there is a face :)
def get_detected_rectangle_in_projected_image(m, k, n, l, resolution_proj):
a_detect = (np.int(m * resolution_proj[0]), np.int(k * resolution_proj[1]))
b_detect = (np.int(n * resolution_proj[0]), np.int(k * resolution_proj[1]))
c_detect = (np.int(n * resolution_proj[0]), np.int(l * resolution_proj[1]))
d_detect = (np.int(m * resolution_proj[0]), np.int(l * resolution_proj[1]))
detect_width = np.abs(a_detect[0] - b_detect[0])
detect_height = np.abs(a_detect[1] - d_detect[1])
return a_detect, b_detect, c_detect, d_detect, detect_width, detect_height
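# Summary (added note) of how the helpers are chained inside the main loop below:
#   detection_rect_get_ratio -> detection_rect_in_plane -> get_overlap_rect
#   -> get_overlap_coords_based_on_projector_rect
#   -> get_trimmed_detect_ratio_by_projection
#   -> get_detected_rectangle_in_projected_image
# i.e. camera-pixel coordinates are mapped onto the wall plane, clipped to the
# projector rectangle, and finally mapped back into projector-pixel coordinates.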
# BEFORE WE GO INTO THE LOOP - WE SET UP ALL THE PERMANENCE SCALES
# just the sizes of each rectangle's sides based on the projection spread angle and distance
cam_scales = get_rect_side_scales(aspect_ratio_cam, camera_distance, camera_vertical_fov, beta_cam)
proj_scales = get_rect_side_scales(aspect_ratio_proj, projector_distance, projector_vertical_fov, beta_proj)
# DEFINE BOTH RECTANGLES IN AXIS
# Width, height and center of camera rectangle
rect_cam = get_rect_side_scales(aspect_ratio_cam, camera_distance, camera_vertical_fov, beta_cam)
width_cam, height_cam = rect_cam
center_cam = (width_cam / 2, height_cam / 2)
# Width, height and center of projector rectangle in relation to Cam
rect_proj = get_rect_side_scales(aspect_ratio_proj, projector_distance, projector_vertical_fov, beta_proj)
width_proj, height_proj = rect_proj
center_proj = ((width_cam / 2) + horizontal_offset, (height_cam / 2) + vertical_offset)
ProjC_x, ProjC_y = center_proj
# points of cam rectangle, and of projector rectangle. THEY ARE ALL DEFINED FOR RECTANGLES ONLY. No angle vs the wall.
# CAM POINTS
ac = (0,0)
bc = (width_cam, 0)
cc = (width_cam, height_cam)
dc = (0, height_cam)
# PROJ POINTS
ap = (ProjC_x - (width_proj / 2), ProjC_y - (height_proj / 2))
bp = (ProjC_x + (width_proj / 2), ProjC_y - (height_proj / 2))
cp = (ProjC_x + (width_proj / 2), ProjC_y + (height_proj / 2))
dp = (ProjC_x - (width_proj / 2), ProjC_y + (height_proj / 2))
cv2.namedWindow("projection", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("projection", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
# THE WHILE LOOP WHERE EVERYTHING HAPPENS
while True:
_, frame = cap.read()
cam_feed=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
# I want to get the two Cam and Proj rectangles now in the RL axis I defined by Cam
# detect faces & select a random one
faces = face_cascade.detectMultiScale(cam_feed, 1.1, 12)
if len(faces) > 0:
color=color1
if len(faces) > 1:
color=color2
x, y, w, h = faces[np.random.randint(0, len(faces), dtype=np.uint8)]
a1 = (x,y)
b1 = (x+w,y)
c1 = (x+w, y+h)
d1 = (x, y+h)
#print(a1,b1,c1,d1)
# m k n and l ratios-by-cam of the detected face, converted into ratios-by-cam for use in our global axis
m,k,n,l = detection_rect_get_ratio(a1,b1,c1,d1, img_resolution_cam)
# get all the infos about the detected face in our global plain
d_width, d_height, ad,bd,cd,dd = detection_rect_in_plane(m, k, n, l, cam_scales[0], cam_scales[1])
'''this part checked and is okay'''
""" WE NEED TO CHECK FOR OVERLAP AT ***THIS*** STAGE """
# ADJUSTMENT ONLY FOR OVERLAP OVER PROJECTION RECTANGLE
# PROJECTION RECTANGLE. FEED IN THIS ORDER: AP, AD, BP, BD, CP, CD, DP, DD
d_width, d_height, ad,bd,cd,dd = get_overlap_rect(ap, ad, bp, bd, cp, cd, dp, dd)
#print(ad,bd,cd,dd)
'''at this point we're getting points that are all 0'''
# convert into axis which is defined by projector rect
ad, bd, cd, dd = get_overlap_coords_based_on_projector_rect(ap, ad, d_width, d_height)
""" BROKEN OVERLAP COORDS - NEED FIX """
#print(ad,bd,cd,dd)
md, kd, nd, ld = get_trimmed_detect_ratio_by_projection(ad, bd, cd, dd, proj_scales[0], proj_scales[1])
# get the final set of coordinates or scales in the projected image of where the face is
a_detect, b_detect, c_detect, d_detect, detect_width, detect_height = get_detected_rectangle_in_projected_image(md, kd, nd, ld, img_resolution_proj)
#print(a_detect,b_detect,c_detect,d_detect)
# Finally - we get the picture the
Prop._class_property_relation:
storids = [ancestor.storid for ancestor in entity.ancestors()]
prop_storids = []
values = set()
for P in Props:
if issubclass_python(P, TransitiveProperty):
if P._inverse_storid: prop_storids.append((P.storid, P._inverse_storid))
else: prop_storids.append((P.storid, None))
else:
if P._inverse_storid:
values.update(onto._to_python(o) for storid in storids
for g in (world._get_obj_triples_sp_o(storid, P.storid),
world._get_obj_triples_po_s(P._inverse_storid, storid))
for o in g )
else:
values.update(onto._to_python(o) for storid in storids
for o in world._get_obj_triples_sp_o(storid, P.storid) )
if prop_storids:
values.update(onto._to_python(o) for storid in storids
for o in world._get_obj_triples_transitive_sp_indirect(storid, prop_storids))
if transitive_exclude_self: values.discard(entity)
elif Prop._class_property_some:
if issubclass_python(Prop, TransitiveProperty):
values = set()
def walk(o):
values.add(o)
for r in _inherited_properties_value_restrictions(o, Props, set()):
if r.type == VALUE:
if not r.value in values:
for o2 in r.value.equivalent_to.self_and_indirect_equivalent():
if not o2 in values:
values.add(o2)
values.update(Prop._get_indirect_values_for_individual(o2))
elif (r.type == SOME) or ((r.type == EXACTLY) and r.cardinality >= 1) or ((r.type == MIN) and r.cardinality >= 1):
if not r.value in values: walk(r.value)
if isinstance(o, ThingClass):
for e in o.equivalent_to.indirect():
if not e in values: walk(e)
walk(entity)
if transitive_exclude_self: values.discard(entity)
else:
values = set(r.value for r in _inherited_properties_value_restrictions(entity, Props, set())
if (r.type == VALUE) or (r.type == SOME) or ((r.type == EXACTLY) and r.cardinality >= 1) or ((r.type == MIN) and r.cardinality >= 1) )
elif Prop._class_property_only: # Effect of transitivity on ONLY restrictions is unclear -- probably no effect?
or_valuess = [set(_flatten_only(r)) for r in _inherited_properties_value_restrictions(entity, Props, set())
if (r.type == ONLY)]
values = or_valuess[0]
for or_values in or_valuess[1:]:
new_values = values & or_values
for vs1, vs2 in ((values, or_values), (or_values, values)):
vs2_classes = tuple(o for o in vs2 if isinstance(o, EntityClass))
for v in vs1 - vs2:
if isinstance(v, EntityClass):
if issubclass(v, vs2_classes): new_values.add(v)
else:
if isinstance(v, vs2_classes): new_values.add(v)
values = new_values
return list(values)
def _set_value_for_individual(Prop, entity, value):
if value is None: entity.namespace.ontology._del_obj_triple_spo(entity.storid, Prop.storid, None)
else: entity.namespace.ontology._set_obj_triple_spo(entity.storid, Prop.storid, value.storid)
if (not isinstance(entity, EntityClass)) and (Prop is entity.namespace.world._props.get(Prop._python_name)):
entity.__dict__[Prop.python_name] = value
def _set_value_for_class (Prop, entity, value ): Prop._get_values_for_class(entity).reinit([value])
class ObjectProperty(Property, metaclass = ObjectPropertyClass):
namespace = owl
class DataPropertyClass(ReasoningPropertyClass):
_owl_type = owl_data_property
inverse_property = None
def _get_value_for_individual(Prop, entity):
value = entity.namespace.world._get_data_triple_sp_od(entity.storid, Prop.storid)
if not value is None: return entity.namespace.ontology._to_python(*value)
def _get_value_for_class(Prop, entity):
if Prop._class_property_relation: return Prop._get_value_for_individual(entity)
elif Prop._class_property_some:
for r in _property_value_restrictions(entity, Prop):
if (r.type == VALUE) or (r.type == SOME) or ((r.type == EXACTLY) and r.cardinality >= 1) or ((r.type == MIN) and r.cardinality >= 1):
return r.value
elif Prop._class_property_only:
for r in _property_value_restrictions(entity, Prop):
if (r.type == ONLY):
for value in _flatten_only(r): return value
def _get_values_for_individual(Prop, entity):
return IndividualValueList((entity.namespace.ontology._to_python(o, d)
for o, d in entity.namespace.world._get_data_triples_sp_od(entity.storid, Prop.storid)),
entity, Prop)
def _get_values_for_class(Prop, entity):
if Prop._class_property_relation:
return Prop._get_values_for_individual(entity)
elif Prop._class_property_some:
return ClassValueList(set(r.value for r in _property_value_restrictions(entity, Prop)
if (r.type == VALUE) or (r.type == SOME) or ((r.type == EXACTLY) and r.cardinality >= 1) or ((r.type == MIN) and r.cardinality >= 1) ),
entity, Prop)
elif Prop._class_property_only:
return ClassValueList(set(x for r in _property_value_restrictions(entity, Prop)
if (r.type == ONLY)
for x in _flatten_only(r) ),
entity, Prop)
def _get_indirect_value_for_individual(Prop, entity):
values = Prop._get_indirect_values_for_individual(entity)
if len(values) == 0: return None
elif len(values) == 1: return values[0]
# XXX datatype
return _most_specific(values)
def _get_indirect_value_for_class(Prop, entity):
values = Prop._get_indirect_values_for_class(entity)
if len(values) == 0: return None
elif len(values) == 1: return values[0]
# XXX datatype
return _most_specific(values)
def _get_indirect_values_for_individual(Prop, entity):
eqs = list(entity.equivalent_to.self_and_indirect_equivalent())
values = [entity.namespace.ontology._to_python(o, d)
for P in Prop.descendants()
for eq in eqs
for o, d in entity.namespace.world._get_data_triples_sp_od(eq.storid, P.storid)]
values.extend(Prop._get_indirect_values_for_class(entity.__class__))
return values
def _get_indirect_values_for_class(Prop, entity):
Props = Prop.descendants()
if Prop._class_property_relation:
storids = [ancestor.storid for ancestor in entity.ancestors()]
return [ entity.namespace.ontology._to_python(o, d)
for storid in storids
for P in Props
for o, d in entity.namespace.world._get_data_triples_sp_od(storid, P.storid) ]
elif Prop._class_property_some:
return list(set(r.value for r in _inherited_properties_value_restrictions(entity, Props, set())
if (r.type == VALUE) or (r.type == SOME) or ((r.type == EXACTLY) and r.cardinality >= 1) or ((r.type == MIN) and r.cardinality >= 1) ))
elif Prop._class_property_only:
return list(set(x for r in _inherited_properties_value_restrictions(entity, Props, set())
if (r.type == ONLY)
for x in _flatten_only(r) ))
def _set_value_for_individual(Prop, entity, value):
if value is None: entity.namespace.ontology._del_data_triple_spod(entity.storid, Prop.storid, None, None)
else: entity.namespace.ontology._set_data_triple_spod(entity.storid, Prop.storid, *entity.namespace.ontology._to_rdf(value))
if (not isinstance(entity, EntityClass)) and (Prop is entity.namespace.world._props.get(Prop._python_name)):
entity.__dict__[Prop.python_name] = value
def _set_value_for_class (Prop, entity, value ): Prop._get_values_for_class(entity).reinit([value])
class DatatypeProperty(Property, metaclass = DataPropertyClass):
namespace = owl
DataProperty = DatatypeProperty
class FunctionalProperty(Property):
namespace = owl
@classmethod
def is_functional_for(Prop, o): return True
class InverseFunctionalProperty(Property): namespace = owl
class TransitiveProperty (Property): namespace = owl
class SymmetricProperty (Property): namespace = owl
class AsymmetricProperty (Property): namespace = owl
class ReflexiveProperty (Property): namespace = owl
class IrreflexiveProperty (Property): namespace = owl
_CLASS_PROPS = { DataProperty, ObjectProperty }
_TYPE_PROPS = { FunctionalProperty, InverseFunctionalProperty, TransitiveProperty, SymmetricProperty, AsymmetricProperty, ReflexiveProperty, IrreflexiveProperty }
def destroy_entity(e, undoable = False):
if undoable: undoer_objs = []; undoer_datas = []; undoer_bnodes = []; undoer_relations = []
else: undoer_objs = undoer_datas = None; undoer_bnodes = None; undoer_relations = None
if hasattr(e, "__destroy__"): e.__destroy__(undoer_objs, undoer_datas)
elif isinstance(e, PropertyClass):
modified_entities = set()
if e._owl_type == owl_object_property:
for s,p,o in e.namespace.world._get_obj_triples_spo_spo(None, e.storid, None):
modified_entities.add(s)
e.namespace.world._del_obj_triple_spo(None, e.storid, None)
# XXX inverse ?
elif e._owl_type == owl_data_property:
for s,p,o,d in e.namespace.world._get_data_triples_spod_spod(None, e.storid, None, None):
modified_entities.add(s)
e.namespace.world._del_data_triple_spod(None, e.storid, None, None)
else: #e._owl_type == owl_annotation_property:
for s,p,o,d in e.namespace.world._get_triples_spod_spod(None, e.storid, None, None):
modified_entities.add(s)
e.namespace.world._del_obj_triple_spo (None, e.storid, None)
e.namespace.world._del_data_triple_spod(None, e.storid, None, None)
for s in modified_entities:
s = e.namespace.world._entities.get(s)
if s:
delattr(s, e._python_name)
e.namespace.world._props .pop(e._python_name, None)
e.namespace.world._reasoning_props.pop(e._python_name, None)
def destroyer(bnode):
if bnode == e.storid: return
if undoer_bnodes is not None: undoer_bnodes.append(bnode)
class_construct = e.namespace.ontology._bnodes.pop(bnode, None)
if class_construct and class_construct.ontology: # No ontology => already removed
for subclass in class_construct.subclasses(True):
if isinstance(subclass, EntityClass) or isinstance(subclass, Thing):
if class_construct in subclass.is_a: subclass.is_a .remove(class_construct)
else: subclass.equivalent_to.remove(class_construct)
def relation_updater(destroyed_storids, storid, relations):
if undoer_relations is not None: undoer_relations.append((destroyed_storids, storid, relations))
update_relation(destroyed_storids, storid, relations)
def update_relation(destroyed_storids, storid, relations):
o = e.namespace.world._entities.get(storid)
if o:
for r in relations:
if (r == rdf_type) or (r == rdfs_subpropertyof) or (r == rdfs_subclassof):
#o.is_a.reinit([i for i in o.is_a if not i.storid in destroyed_storids])
parents = [e.namespace.world._to_python(i) for i in e.namespace.world._get_obj_triples_sp_o(storid, r)]
o.is_a.reinit([i for i in parents if not i is None])
if r == rdfs_subclassof:
for Subclass in o.descendants(True, True): _FUNCTIONAL_FOR_CACHE.pop(Subclass, None)
elif (r == owl_equivalentproperty) or (r == owl_equivalentindividual):
if o._equivalent_to._indirect:
for o2 in o.equivalent_to._indirect: o2._equivalent_to._indirect = None
o._equivalent_to._indirect = None
elif r == owl_equivalentclass:
if o._equivalent_to._indirect:
for o2 in o.equivalent_to._indirect: o2._equivalent_to._indirect = None
o._equivalent_to._indirect = None
for Subclass in o.descendants(True, True): _FUNCTIONAL_FOR_CACHE.pop(Subclass, None)
elif r == rdf_domain:
o._domain = None
elif r == rdf_range:
o._range = None
else:
r = e.namespace.world._entities.get(r)
if r:
try: del o.__dict__[r.python_name]
except: pass
e.namespace.world.graph.destroy_entity(e.storid, destroyer, relation_updater, undoer_objs, undoer_datas)
e.namespace.world._entities.pop(e.storid, None)
e.namespace.ontology._entity_destroyed(e)
if undoable:
def undestroy():
e.namespace.world.graph.restore_iri(e.storid, e.iri)
c_2_onto = e.namespace.world.graph.c_2_onto
for c,s,p,o in undoer_objs:
c_2_onto[c]._add_obj_triple_spo(s,p,o)
for c,s,p,o,d in undoer_datas:
c_2_onto[c]._add_data_triple_spod(s,p,o,d)
#e.namespace.world.graph.db.executemany("INSERT INTO objs VALUES (?,?,?,?)", undoer_objs)
#e.namespace.world.graph.db.executemany("INSERT INTO datas VALUES (?,?,?,?,?)", undoer_datas)
e.namespace.world._entities[e.storid] = e
for bnode in undoer_bnodes:
class_construct = e.namespace.world._parse_bnode(bnode)
for subclass in class_construct.subclasses(True):
if isinstance(subclass, EntityClass) or isinstance(subclass, Thing):
subclass.is_a._append(class_construct)
for destroyed_storids, storid, relations in undoer_relations:
update_relation(destroyed_storids, storid, relations)
return undestroy
class bottomObjectProperty(ObjectProperty): pass
class bottomDataProperty(DataProperty): pass
def _property_value_restrictions(x, Prop):
for parents in (x.is_a, x.equivalent_to.indirect()):
for r in parents:
if isinstance(r, Restriction):
if (Prop is None) or (r.property is Prop): yield r
elif isinstance(r, And):
for r2 in r.Classes:
if isinstance(r2, Restriction):
if (Prop is None) or (r2.property is Prop): yield r2
def _inherited_properties_value_restrictions(x, Props, already):
if isinstance(x, Restriction):
if (Props is None) or (x.property in Props): yield x
elif isinstance(x, And):
for x2 in x.Classes:
yield from _inherited_properties_value_restrictions(x2, Props, already)
elif isinstance(x, EntityClass) or isinstance(x, Thing):
already.add(x)
parents = [ parent
for parents in (x.is_a, list(x.equivalent_to.indirect()))
for parent in parents
if not parent in already ]
# Need two passes in order to favor restriction on the initial class rather than those on the ancestor classes
for parent in parents:
if isinstance(parent,
def get_remote_client(self, server_or_ip, username=None, private_key=None):
if isinstance(server_or_ip, six.string_types):
ip = server_or_ip
else:
network_name_for_ssh = CONF.compute.network_for_ssh
ip = server_or_ip.networks[network_name_for_ssh][0]
if username is None:
username = CONF.scenario.ssh_user
if private_key is None:
private_key = self.keypair.private_key
return remote_client.RemoteClient(ip, username, pkey=private_key)
def _log_console_output(self, servers=None):
if not servers:
servers = self.compute_client.servers.list()
for server in servers:
LOG.debug('Console output for %s', server.id)
LOG.debug(server.get_console_output())
def wait_for_volume_status(self, status):
volume_id = self.volume.id
self.status_timeout(
self.volume_client.volumes, volume_id, status)
def _image_create(self, name, fmt, path, properties={}):
name = data_utils.rand_name('%s-' % name)
image_file = open(path, 'rb')
self.addCleanup(image_file.close)
params = {
'name': name,
'container_format': fmt,
'disk_format': fmt,
'is_public': 'True',
}
params.update(properties)
image = self.image_client.images.create(**params)
self.addCleanup(self.image_client.images.delete, image)
self.assertEqual("queued", image.status)
image.update(data=image_file)
return image.id
def glance_image_create(self):
qcow2_img_path = (CONF.scenario.img_dir + "/" +
CONF.scenario.qcow2_img_file)
aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
LOG.debug("paths: img: %s, ami: %s, ari: %s, aki: %s"
% (qcow2_img_path, ami_img_path, ari_img_path, aki_img_path))
try:
self.image = self._image_create('scenario-img',
'bare',
qcow2_img_path,
properties={'disk_format':
'qcow2'})
except IOError:
LOG.debug("A qcow2 image was not found. Try to get a uec image.")
kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
properties = {
'properties': {'kernel_id': kernel, 'ramdisk_id': ramdisk}
}
self.image = self._image_create('scenario-ami', 'ami',
path=ami_img_path,
properties=properties)
LOG.debug("image:%s" % self.image)
class BaremetalScenarioTest(OfficialClientTest):
@classmethod
def setUpClass(cls):
super(BaremetalScenarioTest, cls).setUpClass()
if (not CONF.service_available.ironic or
not CONF.baremetal.driver_enabled):
msg = 'Ironic not available or Ironic compute driver not enabled'
raise cls.skipException(msg)
# use an admin client manager for baremetal client
username, password, tenant = cls.admin_credentials()
manager = clients.OfficialClientManager(username, password, tenant)
cls.baremetal_client = manager.baremetal_client
# allow any issues obtaining the node list to raise early
cls.baremetal_client.node.list()
def _node_state_timeout(self, node_id, state_attr,
target_states, timeout=10, interval=1):
if not isinstance(target_states, list):
target_states = [target_states]
def check_state():
node = self.get_node(node_id=node_id)
if getattr(node, state_attr) in target_states:
return True
return False
if not tempest.test.call_until_true(
check_state, timeout, interval):
msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
(node_id, state_attr, target_states))
raise exceptions.TimeoutException(msg)
def wait_provisioning_state(self, node_id, state, timeout):
self._node_state_timeout(
node_id=node_id, state_attr='provision_state',
target_states=state, timeout=timeout)
def wait_power_state(self, node_id, state):
self._node_state_timeout(
node_id=node_id, state_attr='power_state',
target_states=state, timeout=CONF.baremetal.power_timeout)
def wait_node(self, instance_id):
"""Waits for a node to be associated with instance_id."""
def _get_node():
node = None
try:
node = self.get_node(instance_id=instance_id)
except ironic_exceptions.HTTPNotFound:
pass
return node is not None
if not tempest.test.call_until_true(
_get_node, CONF.baremetal.association_timeout, 1):
msg = ('Timed out waiting to get Ironic node by instance id %s'
% instance_id)
raise exceptions.TimeoutException(msg)
def get_node(self, node_id=None, instance_id=None):
if node_id:
return self.baremetal_client.node.get(node_id)
elif instance_id:
return self.baremetal_client.node.get_by_instance_uuid(instance_id)
def get_ports(self, node_id):
ports = []
for port in self.baremetal_client.node.list_ports(node_id):
ports.append(self.baremetal_client.port.get(port.uuid))
return ports
class NetworkScenarioTest(OfficialClientTest):
"""
Base class for network scenario tests
"""
@classmethod
def check_preconditions(cls):
if (CONF.service_available.neutron):
cls.enabled = True
# verify that neutron_available is telling the truth
try:
cls.network_client.list_networks()
except exc.EndpointNotFound:
cls.enabled = False
raise
else:
cls.enabled = False
msg = 'Neutron not available'
raise cls.skipException(msg)
@classmethod
def setUpClass(cls):
super(NetworkScenarioTest, cls).setUpClass()
if CONF.compute.allow_tenant_isolation:
cls.tenant_id = cls.isolated_creds.get_primary_tenant().id
else:
cls.tenant_id = cls.manager._get_identity_client(
CONF.identity.username,
CONF.identity.password,
CONF.identity.tenant_name).tenant_id
def _create_network(self, tenant_id, namestart='network-smoke-'):
name = data_utils.rand_name(namestart)
body = dict(
network=dict(
name=name,
tenant_id=tenant_id,
),
)
result = self.network_client.create_network(body=body)
network = net_common.DeletableNetwork(client=self.network_client,
**result['network'])
self.assertEqual(network.name, name)
self.set_resource(name, network)
return network
def _list_networks(self, **kwargs):
nets = self.network_client.list_networks(**kwargs)
return nets['networks']
def _list_subnets(self, **kwargs):
subnets = self.network_client.list_subnets(**kwargs)
return subnets['subnets']
def _list_routers(self, **kwargs):
routers = self.network_client.list_routers(**kwargs)
return routers['routers']
def _list_ports(self, **kwargs):
ports = self.network_client.list_ports(**kwargs)
return ports['ports']
def _get_tenant_own_network_num(self, tenant_id):
nets = self._list_networks(tenant_id=tenant_id)
return len(nets)
def _get_tenant_own_subnet_num(self, tenant_id):
subnets = self._list_subnets(tenant_id=tenant_id)
return len(subnets)
def _get_tenant_own_port_num(self, tenant_id):
ports = self._list_ports(tenant_id=tenant_id)
return len(ports)
def _create_subnet(self, network, namestart='subnet-smoke-', **kwargs):
"""
Create a subnet for the given network within the cidr block
configured for tenant networks.
"""
def cidr_in_use(cidr, tenant_id):
"""
:return True if subnet with cidr already exist in tenant
False else
"""
cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
return len(cidr_in_use) != 0
tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
result = None
# Repeatedly attempt subnet creation with sequential cidr
# blocks until an unallocated block is found.
for subnet_cidr in tenant_cidr.subnet(
CONF.network.tenant_network_mask_bits):
str_cidr = str(subnet_cidr)
if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
continue
body = dict(
subnet=dict(
name=data_utils.rand_name(namestart),
ip_version=4,
network_id=network.id,
tenant_id=network.tenant_id,
cidr=str_cidr,
),
)
body['subnet'].update(kwargs)
try:
result = self.network_client.create_subnet(body=body)
break
except exc.NeutronClientException as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
self.assertIsNotNone(result, 'Unable to allocate tenant network')
subnet = net_common.DeletableSubnet(client=self.network_client,
**result['subnet'])
self.assertEqual(subnet.cidr, str_cidr)
self.set_resource(data_utils.rand_name(namestart), subnet)
return subnet
def _create_port(self, network, namestart='port-quotatest-'):
name = data_utils.rand_name(namestart)
body = dict(
port=dict(name=name,
network_id=network.id,
tenant_id=network.tenant_id))
result = self.network_client.create_port(body=body)
self.assertIsNotNone(result, 'Unable to allocate port')
port = net_common.DeletablePort(client=self.network_client,
**result['port'])
self.set_resource(name, port)
return port
def _get_server_port_id(self, server, ip_addr=None):
ports = self._list_ports(device_id=server.id, fixed_ip=ip_addr)
self.assertEqual(len(ports), 1,
"Unable to determine which port to target.")
return ports[0]['id']
def _create_floating_ip(self, thing, external_network_id, port_id=None):
if not port_id:
port_id = self._get_server_port_id(thing)
body = dict(
floatingip=dict(
floating_network_id=external_network_id,
port_id=port_id,
tenant_id=thing.tenant_id,
)
)
result = self.network_client.create_floatingip(body=body)
floating_ip = net_common.DeletableFloatingIp(
client=self.network_client,
**result['floatingip'])
self.set_resource(data_utils.rand_name('floatingip-'), floating_ip)
return floating_ip
def _associate_floating_ip(self, floating_ip, server):
port_id = self._get_server_port_id(server)
floating_ip.update(port_id=port_id)
self.assertEqual(port_id, floating_ip.port_id)
return floating_ip
def _disassociate_floating_ip(self, floating_ip):
"""
:param floating_ip: type DeletableFloatingIp
"""
floating_ip.update(port_id=None)
self.assertIsNone(floating_ip.port_id)
return floating_ip
def _ping_ip_address(self, ip_address, should_succeed=True):
cmd = ['ping', '-c1', '-w1', ip_address]
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.wait()
return (proc.returncode == 0) == should_succeed
return tempest.test.call_until_true(
ping, CONF.compute.ping_timeout, 1)
def _create_pool(self, lb_method, protocol, subnet_id):
"""Wrapper utility that returns a test pool."""
name = data_utils.rand_name('pool-')
body = {
"pool": {
"protocol": protocol,
"name": name,
"subnet_id": subnet_id,
"lb_method": lb_method
}
}
resp = self.network_client.create_pool(body=body)
pool = net_common.DeletablePool(client=self.network_client,
**resp['pool'])
self.assertEqual(pool['name'], name)
self.set_resource(name, pool)
return pool
def _create_member(self, address, protocol_port, pool_id):
"""Wrapper utility that returns a test member."""
body = {
"member": {
"protocol_port": protocol_port,
"pool_id": pool_id,
"address": address
}
}
resp = self.network_client.create_member(body)
member = net_common.DeletableMember(client=self.network_client,
**resp['member'])
self.set_resource(data_utils.rand_name('member-'), member)
return member
def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
"""Wrapper utility that returns a test vip."""
name = data_utils.rand_name('vip-')
body = {
"vip": {
"protocol": protocol,
"name": name,
"subnet_id": subnet_id,
"pool_id": pool_id,
"protocol_port": protocol_port
}
}
resp = self.network_client.create_vip(body)
vip = net_common.DeletableVip(client=self.network_client,
**resp['vip'])
self.assertEqual(vip['name'], name)
self.set_resource(name, vip)
return vip
def _check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
should_connect=True):
"""
:param ip_address: server to test against
:param username: server's ssh username
:param private_key: server's ssh private key to be used
:param should_connect: True/False indicates positive/negative test
positive - attempt ping and ssh
negative - attempt ping and fail if succeed
:raises: AssertionError if the result of the connectivity check does
not match the value of the should_connect param
"""
if should_connect:
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
self.assertTrue(self._ping_ip_address(ip_address,
should_succeed=should_connect),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
linux_client = self.get_remote_client(ip_address, username,
private_key)
linux_client.validate_authentication()
def _check_remote_connectivity(self, source, dest, should_succeed=True):
"""
check ping server via source ssh connection
:param source: RemoteClient: an ssh connection from which to ping
:param dest: an IP to ping against
:param should_succeed: boolean, whether the ping is expected to succeed
:returns: boolean -- True if the ping outcome matches should_succeed,
False otherwise
"""
def ping_remote():
try:
source.ping_host(dest)
except exceptions.SSHExecCommandFailed:
LOG.exception('Failed to ping host via ssh connection')
return not should_succeed
return should_succeed
return tempest.test.call_until_true(ping_remote,
CONF.compute.ping_timeout,
1)
def _create_security_group_nova(self, client=None,
namestart='secgroup-smoke-',
tenant_id=None):
if client is None:
client = self.compute_client
# Create security group
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
secgroup = client.security_groups.create(sg_name, sg_desc)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(secgroup.description, sg_desc)
self.set_resource(sg_name, secgroup)
# Add rules to the security group
self._create_loginable_secgroup_rule_nova(client, secgroup.id)
return secgroup
def _create_security_group_neutron(self, tenant_id, client=None,
namestart='secgroup-smoke-'):
if client is None:
client = self.network_client
secgroup = self._create_empty_security_group(namestart=namestart,
client=client,
tenant_id=tenant_id)
# Add rules to the security group
rules = self._create_loginable_secgroup_rule_neutron(secgroup=secgroup)
for rule in rules:
self.assertEqual(tenant_id, rule.tenant_id)
self.assertEqual(secgroup.id, rule.security_group_id)
return secgroup
def _create_empty_security_group(self, tenant_id, client=None,
namestart='secgroup-smoke-'):
"""Create a security group without rules.
Default rules will be created:
- IPv4 egress to any
- IPv6 egress to any
:param tenant_id: secgroup will be created in this tenant
:returns: DeletableSecurityGroup -- containing the secgroup created
"""
if client is None:
client = self.network_client
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
sg_dict = dict(name=sg_name,
description=sg_desc)
sg_dict['tenant_id'] = tenant_id
body = dict(security_group=sg_dict)
result = client.create_security_group(body=body)
secgroup
= ir*sigmax
this_sigmay = ir*sigmay
# print iz, ir, a, b
tmpG = (1/(2.*this_sigmax*this_sigmay))*np.exp(-0.5*(((x-x0)**2)/this_sigmax**2+((y-y0)**2)/this_sigmay**2))
tmpdens[:,:,ii] = tmpG*scale_factor/tmpG.max()
# print tmpG.shape, tmpG.max()
# exit()
ii=ii+1
iz=iz+1
if verbose==True:
print x
print x.shape, y.shape, z.shape
sys.stdout.flush()
return tmpdens
def make3DBox(sizex, sizey, sizez, verbose=False, scale_factor=1.0):
""" Make homogeneous 3D 3x3x3 cube.
"""
if verbose==True:
print "make3DBox size", sizex, sizey, sizez
sys.stdout.flush()
tmpdens = np.ones((sizex, sizey, sizez))
tmpdens = tmpdens*scale_factor
if verbose==True:
print "make3DBox", tmpdens.shape
print "make3DBox", tmpdens
sys.stdout.flush()
return tmpdens
def make3DCell(sizex, sizey, sizez, verbose=False, scale_factor=1.0):
""" Calculate 3D distribution for an SO2 cell. Here a 3x3x3 cube
with a hole in the middle for the camera.
"""
if verbose==True:
print "make3DCell size", sizex, sizey, sizez
sys.stdout.flush()
tmpdens = np.ones((sizex, sizey, sizez))
tmpdens = tmpdens*scale_factor
x0 = sizex // 2
y0 = sizey // 2
z0 = sizez // 2
tmpdens[x0,y0,z0] = 0.0
if verbose==True:
print "make3DCell", tmpdens.shape
print "make3DCell", tmpdens
sys.stdout.flush()
return tmpdens
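# Illustrative usage of the two helpers above (values are arbitrary examples):
#   box = make3DBox(3, 3, 3, scale_factor=2.0)   # homogeneous 3x3x3 cube filled with 2.0
#   cell = make3DCell(3, 3, 3, scale_factor=2.0) # same cube with the centre voxel zeroed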
class ConversionFactors:
def __init__(self):
self.nm2cm = 1.e-07 # Convert from nm to cm
self.nm2um = 1.e-03 # Convert from nm to cm
self.um2m = 1.e-06 # Convert from um to m
self.cm2km = 1.e-05 # Factor from cm to km
self.m2km = 1.e-03 # Factor from m to km
self.m2cm = 1.e+02 # Factor from m to cm
self.km2m = 1000. # Factor from km to m
self.km2cm = 100000. # Factor from km to m
self.gm3togcm3=1e-06 # Convert from g/m**3 to g/cm**3
self.kgtog=1.0e+03 # Convert from kg to g
self.m3tocm3=1.0e+06 # Convert from m**3 to cm**3
class Camera:
def __init__(self,RandString=''):
self.Type='Camera'
self.verbose=False
self.name='TestCamera'
self.savename='Camera'
# Localisation of camera in m
self.x = 0.0
self.y = 0.0
self.z = 0.0
# Viewing direction
self.umu = 1.0 # Viewing the horizon
self.phi = 0.0 # Looking north
# Number of pixels in horizontal and vertical
self.h_pixels=0
self.v_pixels=0
# Field of view of camera: phi1 phi2 theta1 theta2
# phi = 0 is looking to the south
# phi = 180 is with the sun in the back if phi0=0
self.phi1 = 0.0
self.phi2 = 0.0
self.theta1 = 0.0
self.theta2 = 0.0
self.wavelength_grid_file='../Data/XSections/uvspec_SO2_wavelength_grid_file'
self.wavelength=-9999
self.wavelength1=-9999
self.wavelength2=-9999
if RandString == '':
self.RandString= 'Cam'+''.join((random.sample(string.ascii_lowercase, 5)))
else:
self.RandString= 'Cam'+RandString
return
def info(self,latex=False):
print
print self.savename+' name:', self.name
print '(All dimensions are in units of m)'
print 'Localisation x={:f}, y={:f}, z={:f}'.format(self.x, self.y, self.z)
print 'Pixels h_pixels={:d}, v_pixels={:f}'.format(self.h_pixels, self.v_pixels)
print 'FOV phi1={:f}, phi2={:f}, theta1={:f}, theta2={:f}'.format(self.phi1, self.phi2, self.theta1, self.theta2)
sys.stdout.flush()
if latex:
print '& {:5.1f} & {:5.1f} & {:5.1f} & {:5.1f} & {:6.1f} & {:6.1f} & {:6.1f} & {:6.1f} & {:d}& {:d}\\\\'.format(self.wavelength1, self.x, self.y, self.z, self.phi1, self.phi2, self.theta1, self.theta2, self.h_pixels, self.v_pixels)
sys.stdout.flush()
print
return
def save(self,folder):
pkl_file = open(folder+self.savename+self.name+'.pkl', 'wb')
pickle.dump(self,pkl_file )
pkl_file.close()
return
def SetRTInput(self, UVS):
print "Cam SetRTInput"
sys.stdout.flush()
UVS.inp['mc_sensorposition'] = '{:8.1f} {:8.1f} {:8.1f}'.format(self.x, self.y, self.z)
UVS.inp['mc_sample_grid'] = '{:d} {:d}'.format(self.h_pixels, self.v_pixels)
UVS.inp['mc_panorama_view'] = '{:f} {:f} {:f} {:f}'.format(self.phi1, self.phi2, self.theta1, self.theta2)
UVS.inp['mc_panorama_alignment'] = 'mu'
UVS.inp['umu'] = '{:f}'.format(self.umu)
UVS.inp['phi'] = '{:f}'.format(self.phi)
# UVS.inp['mc_panorama_alignment'] = 'sun'
# UVS.inp['mc_panorama'] = 'weight_with_cos'
# UVS.inp['mc_panorama'] = 'with_direct_rad'
# UVS.inp['umu'] = '{:f}'.format((np.cos(np.deg2rad(0.5*(self.theta1+self.theta2)))))
# UVS.inp['phi'] = '{:f}'.format(0.5*(self.phi1+self.phi2)-UVS.inp['phi0'])
if self.wavelength1 != self.wavelength2:
UVS.inp['wavelength_grid_file'] = self.wavelength_grid_file
if not 'mol_abs_param' in UVS.inp:
UVS.inp['mol_abs_param'] = 'crs'
try:
self.wavelength
UVS.inp['wavelength'] = self.wavelength
except:
pass
try:
self.wavelength1
self.wavelength2
UVS.inp['wavelength'] = str(self.wavelength1)+' '+str(self.wavelength2)
except:
print "Both wavelength1 and wavelength2 must be given"
exit()
# try:
# self.filterfunction
# UVS.inp['filter_function_file'] = self.filterfunction
# UVS.inp['output_process'] = 'integrate'
# except:
# pass
return
class Spectrometer(Camera):
def __init__(self, RunName=''):
self.Type='Spectrometer'
self.verbose=False
self.name='Spectrometer'
self.savename='Spectrometer'
# Localisation of camera in m
self.x = 0.0
self.y = 0.0
self.z = 0.0
# Number of pixels in horizontal and vertical
self.h_pixels=1
self.v_pixels=1
# Field of view of camera: phi1 phi2 theta1 theta2
# phi = 0 is looking to the south
# phi = 180 is with the sun in the back if phi0=0
self.phi1 = 0.0
self.phi2 = 0.0
self.theta1 = 0.0
self.theta2 = 0.0
self.mol_modify_o3=-9999
self.crs_o3= '../Data/XSections/O3_Serdyuchenko_2014_223K_213-1100nm2013version.txt'
self.crs_o4= '../Data/XSections/o4_thalman_volkamer_293K.dat'
self.slitfunction = ''
self.wavelength_grid_file='../Data/XSections/uvspec_SO2_wavelength_grid_file'
self.RandString= 'Spec'+RunName+'_'+''.join(random.sample(string.ascii_lowercase, 5))
return
def SetRTInput(self, UVS):
print self.savename+" SetRTInput"
sys.stdout.flush()
UVS.inp['mc_sensorposition'] = '{:8.1f} {:8.1f} {:8.1f}'.format(self.x, self.y, self.z)
UVS.inp['mc_sample_grid'] = '{:d} {:d}'.format(self.h_pixels, self.v_pixels)
UVS.inp['mc_panorama_view'] = '{:f} {:f} {:f} {:f}'.format(self.phi1, self.phi2, self.theta1, self.theta2)
UVS.inp['wavelength'] = str(self.wavelength1)+' '+str(self.wavelength2)
UVS.inp['mol_abs_param'] = 'crs'
UVS.inp['crs_file O4'] = self.crs_o4
UVS.inp['crs_file O3'] = self.crs_o3
if self.mol_modify_o3>0.0:
UVS.inp['mol_modify O3'] = self.mol_modify_o3+' DU'
# Do this in a separate call to conv and spline after running uvspec
# if self.slitfunction != '':
# UVS.inp['slit_function_file'] = self.slitfunction
UVS.inp['wavelength_grid_file'] = self.wavelength_grid_file
return
def CalculateColumnDensity(self):
"""
NOTE: It is assumed that integration is along x-axis for the
center pixels in the y-direction.
"""
for Plu in self.PlumeList:
if 'SO2' in Plu.name:
fact = 100. # Convert from m to cm to column in cm-2
else:
fact = 1.0
# print "CalculateCol", Plu.name, fact
# Calculate line integral for Spectrometer using Tomography software
nx=Plu.nx
dx=Plu.dx*1000 # # In meters
x0=0
nz=Plu.nz
z0=Plu.z[0]*1000.0
dz=Plu.dz*1000.0 #100. # In meters
# RR=ReconstructionRegion
RR = TC.Area(nx=nx,dx=dx, x0=x0, nz=nz, dz=dz, z0=z0 )
RR.zmin = RR.z0
RR.zmax = RR.z0 + RR.dz*RR.nz
RR.Image = np.zeros([nz,nx])
# print RR.Image.shape, Plu.dens.shape
ycenter = int(Plu.dens.shape[1]/2) # Do this for center slice in y-direction
RR.Image=Plu.dens[:,ycenter,1:] # Plu is one pixel larger in z-direction, inconsistent.
RR.Image=RR.Image.T # Sigh, why do I use different conventions for x and y......
indices=np.argmax(RR.Image)
#maxind=np.unravel_index(indices, RR.Image.shape)
#print "RR", indices, maxind
indices=np.argmax(Plu.dens)
maxind=np.unravel_index(indices, Plu.dens.shape)
# print "Plu dens", indices, maxind
# print "RR.Image.max()", RR.Image.min(), RR.Image.max(), Plu.dens[maxind]
theta1 = self.theta1-90 # And different conventions for the angles.....
theta2 = self.theta2-90
Nrays=9
Camera1=TC.TomoCamera(x=-Plu.x_start*1000-Plu.x_length*1000/2, z=self.z, theta1=theta1, theta2=theta2, Nrays=Nrays,Name='Cam 1')
Camera1.Rays()
iRay=0
sumtmpRq=0
while iRay<Camera1.Nrays:
tmpRq, tmpTotalLength, tmpind, tmpN =Camera1.CalculateLineIntegral(RR, iRay)
Camera1.Sinogram[iRay]=tmpRq
# print '{:e}'.format(tmpRq)
sumtmpRq=sumtmpRq+tmpRq
iRay=iRay+1
Plu.ColumnDensity = fact*sumtmpRq/Nrays
# fnTestLineIntegral='tmpgabba_'
# tmpfn = fnTestLineIntegral+'Cam1.dat'
# print 'tmpfn', tmpfn
# Camera1.WriteLineIntegralToFile(tmpfn)
return
class Domain:
def __init__(self):
self.verbose=False
self.name='TestDomain'
# Domain size, all in km
self.x_start = 0
self.x_end = 0.4
self.dx = 0.001
self.nx=0
self.x =None
self.y_start = 0
self.y_end = 0.8
self.dy = 0.001
self.ny=0
self.y =None
self.z_start = 0.150
self.z_end = 0.350
self.dz = 0.001
self.nz=0
self.z =None
self.x_centre = 0.5*(self.x_start+self.x_end)
self.y_centre = 0.5*(self.y_start+self.y_end)
self.z_centre = 0.5*(self.z_start+self.z_end)
return
def finalize(self):
self.nx = int(np.rint((self.x_end-self.x_start)/self.dx))
self.x = np.linspace(self.x_start,self.x_end,self.nx+1)
self.x_size = self.x_end-self.x_start
self.ny = int(np.rint((self.y_end-self.y_start)/self.dy))
self.y = np.linspace(self.y_start,self.y_end,self.ny+1)
self.y_size = self.y_end-self.y_start
self.nz = int(np.rint((self.z_end-self.z_start)/self.dz))
self.z = np.linspace(self.z_start,self.z_end,self.nz+1)
self.z_size = self.z_end-self.z_start
self.x_centre = 0.5*(self.x_start+self.x_end)
self.y_centre = 0.5*(self.y_start+self.y_end)
self.z_centre = 0.5*(self.z_start+self.z_end)
return
def info(self):
print
print 'Domain name:', self.name
print '(All dimensions are in units of km)'
print 'x_start {:f}, x_end {:f}, dx {:f}, nx {:d}'.format(self.x_start, self.x_end, self.dx, self.nx)
print 'y_start {:f}, y_end {:f}, dy {:f}, ny {:d}'.format(self.y_start, self.y_end, self.dy, self.ny)
print 'z_start {:f}, z_end {:f}, dz {:f}, nz {:d}'.format(self.z_start, self.z_end, self.dz, self.nz)
print 'x_centre {:f}, y_centre {:f}, z_centre {:f}'.format(self.x_centre, self.y_centre, self.z_centre)
print
sys.stdout.flush()
return
def save(self,folder):
pkl_file = open(folder+'Domain.pkl', 'wb')
pickle.dump(self,pkl_file )
pkl_file.close()
return
class Plume:
def __init__(self):
self.verbose=False
self.name='TestPlume'
self.randname = 'Plume'+''.join((random.sample(string.ascii_lowercase, 5)))
self.shape= '' # Either set by input to set_density or inside ReadLESNetCDF
self.x_start = None
self.x_end = None
self.x_length = None
self.y_start = None
self.y_end = None
self.y_length = None
self.z_start = None
self.z_end = None
self.z_length = None
self.set_density_flag=False
self.ext = 0.0
self.gg = 0.0
self.ssa = 0.0
self.cdf = ''
self.MYSTIC_profile_file_flag=0
return
def revise_domain(self, Domain, verbose=False):
if verbose:
print "Inside revise_domain"
sys.stdout.flush()
# Only need to revise z-direction as x- and y-direction should be ok
# For z-direction avoid first value as it has a half step.
tmpdz = self.z[2]-self.z[1]
tmpz_start=self.z[1]-tmpdz#*self.z.shape[0]
# Add 1 as we want altitude at levels.
tmpz = np.arange(tmpz_start, tmpz_start+tmpdz*float(self.z.shape[0]+1), tmpdz)
Domain.z = tmpz
Domain.dz = tmpdz
Domain.nz = Domain.z.shape[0]-1 # -1 as this should by number of layers, not levels
Domain.z_start=Domain.z[0]
Domain.z_end=Domain.z[Domain.nz]
Domain.z_size = Domain.z_end-Domain.z_start
return Domain
def finalize_size(self, Domain, verbose=False):
if verbose:
print "Inside finalize_size"
sys.stdout.flush()
self.x_end = self.x_start+self.x_length
self.y_end = self.y_start+self.y_length
self.z_end = self.z_start+self.z_length
self.x_centre = self.x_start+0.5*self.x_length
self.y_centre = self.y_start+0.5*self.y_length
self.z_centre = self.z_start+0.5*self.z_length
self.dx = Domain.dx
self.dy = Domain.dy
self.dz = Domain.dz
self.nx = int(np.rint((self.x_end-self.x_start)/self.dx))
self.x = np.linspace(self.x_start,self.x_end,self.nx+1)
self.ny = int(np.rint((self.y_end-self.y_start)/self.dy))
self.y = np.linspace(self.y_start,self.y_end,self.ny+1)
self.nz = int(np.ceil((self.z_end-self.z_start)/self.dz))
self.z = np.linspace(self.z_start,self.z_end,self.nz+1)
# Check that plume
<filename>src/bot.py
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.messages.flat.QuickChatSelection import QuickChatSelection
from rlbot.utils.structures.game_data_struct import GameTickPacket
from util.ball_prediction_analysis import find_slice_at_time, find_matching_slice
from util.boost_pad_tracker import BoostPadTracker
from util.drive import steer_toward_target, limit_to_safe_range
from util.sequence import Sequence, ControlStep
from util.vec import Vec3
from util.orientation import Orientation, relative_location
import math
class MyBot(BaseAgent):
def __init__(self, name, team, index):
super().__init__(name, team, index)
self.active_sequence: Sequence = None
self.boost_pad_tracker = BoostPadTracker()
self.bot_state = 0
def initialize_agent(self):
# Set up information about the boost pads now that the game is active and the info is available
self.boost_pad_tracker.initialize_boosts(self.get_field_info())
def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
"""
This function will be called by the framework many times per second. This is where you can
see the motion of the ball, etc. and return controls to drive your car.
"""
# Keep our boost pad info updated with which pads are currently active
self.boost_pad_tracker.update_boost_status(packet)
# This is good to keep at the beginning of get_output. It will allow you to continue
# any sequences that you may have started during a previous call to get_output.
if self.active_sequence is not None and not self.active_sequence.done:
controls = self.active_sequence.tick(packet)
if controls is not None:
return controls
# Gather some information about our car and the ball
my_car = packet.game_cars[self.index]
car_location = Vec3(my_car.physics.location)
car_velocity = Vec3(my_car.physics.velocity)
ball_location = Vec3(packet.game_ball.physics.location)
ball_velocity = Vec3(packet.game_ball.physics.velocity)
# By default we will chase the ball, but target_location can be changed later
target_location = ball_location
ball_on_floor = target_location
ball_prediction = self.get_ball_prediction_struct() # This can predict bounces, etc
# dynamic time to the ball depending on the car's speed
time_in_future = self.time_to_ball(car_location, car_velocity.length(), ball_location)
seconds_in_future = packet.game_info.seconds_elapsed + time_in_future
ball_in_future = find_slice_at_time(ball_prediction, seconds_in_future)
ball_on_floor = find_matching_slice(ball_prediction, 0, lambda s: s.physics.location.z < 150 and s.game_seconds >= packet.game_info.seconds_elapsed + time_in_future, search_increment=1)
time_to_floor = 0
# ball_in_future might be None if we don't have an adequate ball prediction right now, like during
# replays, so check it to avoid errors.
if ball_in_future is not None:
target_location = Vec3(ball_in_future.physics.location)
time_in_future = self.time_to_ball(car_location, car_velocity.length(), target_location)
self.renderer.draw_line_3d(ball_location, target_location, self.renderer.cyan())
# gets the next time when the ball is on the floor
if ball_on_floor is not None:
floor_location = Vec3(ball_on_floor.physics.location)
time_to_floor = ball_on_floor.game_seconds - packet.game_info.seconds_elapsed
self.renderer.draw_line_3d(ball_location, floor_location, self.renderer.orange())
self.renderer.draw_rect_3d(floor_location, 8, 8, True, self.renderer.orange(), centered=True)
# Draw some things to help understand what the bot is thinking
self.renderer.draw_line_3d(car_location, target_location, self.renderer.white())
self.renderer.draw_rect_3d(target_location, 8, 8, True, self.renderer.cyan(), centered=True)
orientation = Orientation(my_car.physics.rotation)
relative = relative_location(car_location, orientation, ball_location)
controls = SimpleControllerState()
self.renderer.draw_string_2d(10, 10 + self.team * 100, 5, 5, f"{controls.throttle}", self.renderer.team_color())
# makes the car rotate to be more straight
if not my_car.has_wheel_contact:
# roll to land on all four wheels
if orientation.roll < -0.1:
controls.roll = 1
elif orientation.roll > 0.1:
controls.roll = -1
# pitch to land on all four wheels
if orientation.pitch < -0.1:
controls.pitch = 1
elif orientation.pitch > 0.1:
controls.pitch = -1
deg = math.degrees(car_location.ang_to(ball_location))
# yaw to correct angle towards ball
if deg < 85:
controls.yaw = 1
elif deg > 95:
controls.yaw = -1
# jump if another car is close to not get stuck
if self.location_to_nearest_car(car_location, my_car.team, packet).dist(car_location) < 200 and car_velocity.length() < 50:
controls.jump = True
self.set_kickoff_state(car_velocity, ball_location, ball_velocity)
self.decide_state(controls, packet, my_car, car_location, car_velocity, ball_location, ball_velocity, target_location, ball_prediction, orientation, relative, time_in_future, time_to_floor)
return controls
def decide_state(self, controls, packet, my_car, car_location, car_velocity, ball_location, ball_velocity, target_location, ball_prediction, orientation, relative, time_to_target, time_to_floor):
if self.bot_state == 0:
self.ball_chase(controls, packet, my_car, car_velocity, car_location, target_location, ball_location, relative, time_to_target, time_to_floor, orientation)
elif self.bot_state == 1:
self.retreat_to_goal(controls, packet, my_car, car_location, car_velocity)
elif self.bot_state == 2:
self.go_towards_own_goal(controls, my_car, car_location, ball_location)
elif self.bot_state == 3:
self.kickoff()
def set_kickoff_state(self, car_velocity, ball_location, ball_velocity):
if car_velocity.length() == 0 and ball_location.x == 0 and ball_location.y == 0 and ball_velocity.length() == 0:
self.bot_state = 3
def kickoff(self):
self.bot_state = 0
def ball_chase(self, controls, packet, my_car, car_velocity, car_location, target_location, ball_location, relative, time_to_target, time_to_floor, orientation):
"""
Makes the bot chase the ball unless certain conditions are met
"""
# retreat to own goal if the ball is a lot closer to our goal than we are
info = self.get_field_info()
own_goal_vec = info.goals[self.team].location
own_goal_location = Vec3(own_goal_vec)
if ball_location.dist(own_goal_location) + 1000 < car_location.dist(own_goal_location) and car_location.dist(own_goal_location) > 4000:
self.bot_state = 1
elif own_goal_vec.y > 5000 and car_location.y + 100 < target_location.y: # BLUE
self.bot_state = 2
elif own_goal_vec.y < -5000 and car_location.y > target_location.y + 100: # ORANGE
self.bot_state = 2
self.renderer.draw_string_3d(car_location, 1, 1, "\nBall chasing", self.renderer.red())
# makes the bots shoot towards the goal
target_location = self.ball_towards_goal_location(target_location, own_goal_location, car_location, ball_location)
self.renderer.draw_rect_3d(target_location, 8, 8, True, self.renderer.red(), centered=True)
self.renderer.draw_line_3d(car_location, target_location, self.renderer.red())
controls.steer = steer_toward_target(my_car, target_location)
controls.throttle = 1.0
# You can set more controls if you want, like controls.boost.
# angle to ball
car_to_ball = Vec3(ball_location.x - car_location.x, ball_location.y - car_location.y, ball_location.z - car_location.z)
angle = math.degrees(orientation.forward.ang_to(car_to_ball))
# boost
if angle < 20 and not my_car.is_super_sonic:
controls.boost = True
# try to turn around quickly
if angle > 160 and relative.x < -2000:
self.begin_half_flip(packet)
elif angle > 40:
controls.handbrake = True
elif 1000 < car_velocity.length() and angle < 90 and car_to_ball.length() < 400 and relative.z < 200:
# We'll do a front flip if the car is moving at a certain speed.
return self.begin_front_flip(packet, angle, orientation.right.length())
def drive_to_ball_bounce(self, my_car, car_location, floor_location):
"""
Slowly drives to where the ball will bounce
"""
pass
def retreat_to_goal(self, controls, packet, my_car, car_location, car_velocity):
"""
Makes the bot retreat to its own goal, switching back to another state only once it is close to the goal
"""
self.renderer.draw_string_3d(car_location, 1, 1, "\nRetreating to goal", self.renderer.red())
info = self.get_field_info()
own_goal_vec = info.goals[self.team].location
own_goal_location = Vec3(own_goal_vec)
controls.steer = steer_toward_target(my_car, own_goal_location)
controls.throttle = 1.0
if not my_car.is_super_sonic and car_velocity.length() > 200 and car_location.dist(own_goal_location) > 4500:
controls.boost = True
# change back to ball chasing if distance to goal is small
self.renderer.draw_string_3d(car_location, 1, 1, f"\n\nDist to goal {car_location.dist(own_goal_location)}", self.renderer.white())
if car_location.dist(own_goal_location) < 4000:
self.bot_state = 0
def go_towards_own_goal(self, controls, my_car, car_location, ball_location):
"""
Drives towards its own goal and switches back to ball chasing once it is far enough from the ball
"""
self.renderer.draw_string_3d(car_location, 1, 1, "\nGoing towards own goal", self.renderer.red())
info = self.get_field_info()
own_goal_vec = info.goals[self.team].location
own_goal_location = Vec3(own_goal_vec)
controls.steer = steer_toward_target(my_car, own_goal_location)
controls.throttle = 1.0
# goes back to ball chase state if far enough away from the ball
if car_location.dist(ball_location) > 1000 or car_location.dist(own_goal_location) < 4000:
self.bot_state = 0
def jump_shot(self, controls, car_location, ball_location):
pass
def ball_towards_goal_location(self, target_location, goal_location, car_location, ball_location):
"""
Modifies the target location so that the ball is hit a bit more towards the enemy goal
"""
enemy_goal_location = Vec3(0, -goal_location.y, goal_location.z)
if target_location.x - enemy_goal_location.x == 0:
target_location.x = 1
if goal_location.y < 0: # usually for blue side
target_location.x = -1
slope = (target_location.y - enemy_goal_location.y) / (target_location.x - enemy_goal_location.x)
dist = car_location.dist(target_location)
if dist > 3000:
correction = 1000
elif 500 < dist <= 3000:
correction = 0.36 * dist - 80
else:
correction = 100
x_value = 1
if target_location.x < 0:
x_value = -1
new_target_location = Vec3(1, slope, 0).normalized()
return target_location + x_value * correction * new_target_location
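# Illustrative note for ball_towards_goal_location above (numbers are made up): with the car
# 2000 uu from the target, the correction distance is 0.36 * 2000 - 80 = 640 uu, applied along
# the (1, slope, 0) direction so that driving through the shifted point tends to push the ball
# toward the enemy goal rather than straight through it.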
def location_to_nearest_car(self, car_location, team, packet, enemy=False):
"""
Gets the closest car on the other team to a target location
"""
# Note: the 'enemy' parameter is currently unused; cars on the bot's own team are always skipped
nearest_distance = 999999
nearest_car = None
for car in packet.game_cars:
if car.team == team:
continue
other_car = Vec3(car.physics.location)
distance_to = car_location.dist(other_car)
if distance_to < nearest_distance:
nearest_distance = distance_to
nearest_car = other_car
return nearest_car
def time_to_ball(self, car_location, car_speed, ball_location):
# Estimates the time it takes for the bot to reach the ball, so it can better predict where to drive to hit it
distance = car_location.dist(ball_location)
return distance/(car_speed+100)
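# Rough sanity check of the estimate above (illustrative numbers): a ball 3000 uu away with the
# car moving at 1400 uu/s gives 3000 / (1400 + 100) = 2.0 seconds; the +100 also avoids a
# division by zero when the car is stationary.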
def begin_half_flip(self, packet):
self.active_sequence = Sequence([
ControlStep(duration=1.0, controls=SimpleControllerState(throttle=-1, boost=False)),
ControlStep(duration=0.1, controls=SimpleControllerState(jump=True)),
ControlStep(duration=0.05, controls=SimpleControllerState(jump=False)),
ControlStep(duration=0.2, controls=SimpleControllerState(jump=True, pitch=1)),
ControlStep(duration=0.15, controls=SimpleControllerState(pitch=-1, boost=False)),
ControlStep(duration=0.5, controls=SimpleControllerState(pitch=-1, boost=True, roll=1, throttle=1)),
ControlStep(duration=0.5, controls=SimpleControllerState()),
])
return self.active_sequence.tick(packet)
def begin_front_flip(self, packet, angle=0.0, right=1):
# Do a flip. We will be committed to this for a few seconds and the bot will ignore other
# logic during that time because | |
self.vz = vz
self.xacc = xacc
self.yacc = yacc
self.zacc = zacc
def pack(self, mav):
return MAVLink_message.pack(self, mav, 183, struct.pack('<Qffffffiiihhhhhh', self.time_usec, self.roll, self.pitch, self.yaw, self.rollspeed, self.pitchspeed, self.yawspeed, self.lat, self.lon, self.alt, self.vx, self.vy, self.vz, self.xacc, self.yacc, self.zacc))
class MAVLink_hil_controls_message(MAVLink_message):
'''
Sent from autopilot to simulation. Hardware in the loop
control outputs
'''
def __init__(self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_HIL_CONTROLS, 'HIL_CONTROLS')
self._fieldnames = ['time_usec', 'roll_ailerons', 'pitch_elevator', 'yaw_rudder', 'throttle', 'aux1', 'aux2', 'aux3', 'aux4', 'mode', 'nav_mode']
self.time_usec = time_usec
self.roll_ailerons = roll_ailerons
self.pitch_elevator = pitch_elevator
self.yaw_rudder = yaw_rudder
self.throttle = throttle
self.aux1 = aux1
self.aux2 = aux2
self.aux3 = aux3
self.aux4 = aux4
self.mode = mode
self.nav_mode = nav_mode
def pack(self, mav):
return MAVLink_message.pack(self, mav, 63, struct.pack('<QffffffffBB', self.time_usec, self.roll_ailerons, self.pitch_elevator, self.yaw_rudder, self.throttle, self.aux1, self.aux2, self.aux3, self.aux4, self.mode, self.nav_mode))
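# Note on the generated pack() methods (my reading of the pymavlink generator output, not part of
# the original file): the integer literal passed to MAVLink_message.pack() is the message's
# crc_extra byte, and the struct format packs fields in MAVLink wire order (largest types first),
# which is why the argument order differs from the constructor's declaration order.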
class MAVLink_hil_rc_inputs_raw_message(MAVLink_message):
'''
Sent from simulation to autopilot. The RAW values of the RC
channels received. The standard PPM modulation is as follows:
1000 microseconds: 0%, 2000 microseconds: 100%. Individual
receivers/transmitters might violate this specification.
'''
def __init__(self, time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_HIL_RC_INPUTS_RAW, 'HIL_RC_INPUTS_RAW')
self._fieldnames = ['time_usec', 'chan1_raw', 'chan2_raw', 'chan3_raw', 'chan4_raw', 'chan5_raw', 'chan6_raw', 'chan7_raw', 'chan8_raw', 'chan9_raw', 'chan10_raw', 'chan11_raw', 'chan12_raw', 'rssi']
self.time_usec = time_usec
self.chan1_raw = chan1_raw
self.chan2_raw = chan2_raw
self.chan3_raw = chan3_raw
self.chan4_raw = chan4_raw
self.chan5_raw = chan5_raw
self.chan6_raw = chan6_raw
self.chan7_raw = chan7_raw
self.chan8_raw = chan8_raw
self.chan9_raw = chan9_raw
self.chan10_raw = chan10_raw
self.chan11_raw = chan11_raw
self.chan12_raw = chan12_raw
self.rssi = rssi
def pack(self, mav):
return MAVLink_message.pack(self, mav, 54, struct.pack('<QHHHHHHHHHHHHB', self.time_usec, self.chan1_raw, self.chan2_raw, self.chan3_raw, self.chan4_raw, self.chan5_raw, self.chan6_raw, self.chan7_raw, self.chan8_raw, self.chan9_raw, self.chan10_raw, self.chan11_raw, self.chan12_raw, self.rssi))
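# A minimal sketch (not part of the generated bindings) of how the PPM convention described in
# the HIL_RC_INPUTS_RAW docstring maps a raw channel value to a percentage, assuming the
# standard 1000-2000 microsecond range:
def ppm_to_percent(chan_raw):
    """Map 1000 us -> 0% and 2000 us -> 100%, clamped to that range."""
    return max(0.0, min(100.0, (chan_raw - 1000) / 10.0))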
class MAVLink_optical_flow_message(MAVLink_message):
'''
Optical flow from a flow sensor (e.g. optical mouse sensor)
'''
def __init__(self, time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_OPTICAL_FLOW, 'OPTICAL_FLOW')
self._fieldnames = ['time_usec', 'sensor_id', 'flow_x', 'flow_y', 'flow_comp_m_x', 'flow_comp_m_y', 'quality', 'ground_distance']
self.time_usec = time_usec
self.sensor_id = sensor_id
self.flow_x = flow_x
self.flow_y = flow_y
self.flow_comp_m_x = flow_comp_m_x
self.flow_comp_m_y = flow_comp_m_y
self.quality = quality
self.ground_distance = ground_distance
def pack(self, mav):
return MAVLink_message.pack(self, mav, 175, struct.pack('<QfffhhBB', self.time_usec, self.flow_comp_m_x, self.flow_comp_m_y, self.ground_distance, self.flow_x, self.flow_y, self.sensor_id, self.quality))
class MAVLink_global_vision_position_estimate_message(MAVLink_message):
'''
'''
def __init__(self, usec, x, y, z, roll, pitch, yaw):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_GLOBAL_VISION_POSITION_ESTIMATE, 'GLOBAL_VISION_POSITION_ESTIMATE')
self._fieldnames = ['usec', 'x', 'y', 'z', 'roll', 'pitch', 'yaw']
self.usec = usec
self.x = x
self.y = y
self.z = z
self.roll = roll
self.pitch = pitch
self.yaw = yaw
def pack(self, mav):
return MAVLink_message.pack(self, mav, 102, struct.pack('<Qffffff', self.usec, self.x, self.y, self.z, self.roll, self.pitch, self.yaw))
class MAVLink_vision_position_estimate_message(MAVLink_message):
'''
'''
def __init__(self, usec, x, y, z, roll, pitch, yaw):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_VISION_POSITION_ESTIMATE, 'VISION_POSITION_ESTIMATE')
self._fieldnames = ['usec', 'x', 'y', 'z', 'roll', 'pitch', 'yaw']
self.usec = usec
self.x = x
self.y = y
self.z = z
self.roll = roll
self.pitch = pitch
self.yaw = yaw
def pack(self, mav):
return MAVLink_message.pack(self, mav, 158, struct.pack('<Qffffff', self.usec, self.x, self.y, self.z, self.roll, self.pitch, self.yaw))
class MAVLink_vision_speed_estimate_message(MAVLink_message):
'''
'''
def __init__(self, usec, x, y, z):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_VISION_SPEED_ESTIMATE, 'VISION_SPEED_ESTIMATE')
self._fieldnames = ['usec', 'x', 'y', 'z']
self.usec = usec
self.x = x
self.y = y
self.z = z
def pack(self, mav):
return MAVLink_message.pack(self, mav, 208, struct.pack('<Qfff', self.usec, self.x, self.y, self.z))
class MAVLink_vicon_position_estimate_message(MAVLink_message):
'''
'''
def __init__(self, usec, x, y, z, roll, pitch, yaw):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_VICON_POSITION_ESTIMATE, 'VICON_POSITION_ESTIMATE')
self._fieldnames = ['usec', 'x', 'y', 'z', 'roll', 'pitch', 'yaw']
self.usec = usec
self.x = x
self.y = y
self.z = z
self.roll = roll
self.pitch = pitch
self.yaw = yaw
def pack(self, mav):
return MAVLink_message.pack(self, mav, 56, struct.pack('<Qffffff', self.usec, self.x, self.y, self.z, self.roll, self.pitch, self.yaw))
class MAVLink_highres_imu_message(MAVLink_message):
'''
The IMU readings in SI units in NED body frame
'''
def __init__(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_HIGHRES_IMU, 'HIGHRES_IMU')
self._fieldnames = ['time_usec', 'xacc', 'yacc', 'zacc', 'xgyro', 'ygyro', 'zgyro', 'xmag', 'ymag', 'zmag', 'abs_pressure', 'diff_pressure', 'pressure_alt', 'temperature', 'fields_updated']
self.time_usec = time_usec
self.xacc = xacc
self.yacc = yacc
self.zacc = zacc
self.xgyro = xgyro
self.ygyro = ygyro
self.zgyro = zgyro
self.xmag = xmag
self.ymag = ymag
self.zmag = zmag
self.abs_pressure = abs_pressure
self.diff_pressure = diff_pressure
self.pressure_alt = pressure_alt
self.temperature = temperature
self.fields_updated = fields_updated
def pack(self, mav):
return MAVLink_message.pack(self, mav, 93, struct.pack('<QfffffffffffffH', self.time_usec, self.xacc, self.yacc, self.zacc, self.xgyro, self.ygyro, self.zgyro, self.xmag, self.ymag, self.zmag, self.abs_pressure, self.diff_pressure, self.pressure_alt, self.temperature, self.fields_updated))
class MAVLink_file_transfer_start_message(MAVLink_message):
'''
Begin file transfer
'''
def __init__(self, transfer_uid, dest_path, direction, file_size, flags):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_FILE_TRANSFER_START, 'FILE_TRANSFER_START')
self._fieldnames = ['transfer_uid', 'dest_path', 'direction', 'file_size', 'flags']
self.transfer_uid = transfer_uid
self.dest_path = dest_path
self.direction = direction
self.file_size = file_size
self.flags = flags
def pack(self, mav):
return MAVLink_message.pack(self, mav, 235, struct.pack('<QI240sBB', self.transfer_uid, self.file_size, self.dest_path, self.direction, self.flags))
class MAVLink_file_transfer_dir_list_message(MAVLink_message):
'''
Get directory listing
'''
def __init__(self, transfer_uid, dir_path, flags):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_FILE_TRANSFER_DIR_LIST, 'FILE_TRANSFER_DIR_LIST')
self._fieldnames = ['transfer_uid', 'dir_path', 'flags']
self.transfer_uid = transfer_uid
self.dir_path = dir_path
self.flags = flags
def pack(self, mav):
return MAVLink_message.pack(self, mav, 93, struct.pack('<Q240sB', self.transfer_uid, self.dir_path, self.flags))
class MAVLink_file_transfer_res_message(MAVLink_message):
'''
File transfer result
'''
def __init__(self, transfer_uid, result):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_FILE_TRANSFER_RES, 'FILE_TRANSFER_RES')
self._fieldnames = ['transfer_uid', 'result']
self.transfer_uid = transfer_uid
self.result = result
def pack(self, mav):
return MAVLink_message.pack(self, mav, 124, struct.pack('<QB', self.transfer_uid, self.result))
class MAVLink_battery_status_message(MAVLink_message):
'''
Transmit battery information for an accu pack.
'''
def __init__(self, accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_BATTERY_STATUS, 'BATTERY_STATUS')
self._fieldnames = ['accu_id', 'voltage_cell_1', 'voltage_cell_2', 'voltage_cell_3', 'voltage_cell_4', 'voltage_cell_5', 'voltage_cell_6', 'current_battery', 'battery_remaining']
self.accu_id = accu_id
self.voltage_cell_1 = voltage_cell_1
self.voltage_cell_2 = voltage_cell_2
self.voltage_cell_3 = voltage_cell_3
self.voltage_cell_4 = voltage_cell_4
self.voltage_cell_5 = voltage_cell_5
self.voltage_cell_6 = voltage_cell_6
self.current_battery = current_battery
self.battery_remaining = battery_remaining
def pack(self, mav):
return MAVLink_message.pack(self, mav, 42, struct.pack('<HHHHHHhBb', self.voltage_cell_1, self.voltage_cell_2, self.voltage_cell_3, self.voltage_cell_4, self.voltage_cell_5, self.voltage_cell_6, self.current_battery, self.accu_id, self.battery_remaining))
class MAVLink_setpoint_8dof_message(MAVLink_message):
'''
Set the 8 DOF setpoint for a controller.
'''
def __init__(self, target_system, val1, val2, val3, val4, val5, val6, val7, val8):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_SETPOINT_8DOF, 'SETPOINT_8DOF')
self._fieldnames = ['target_system', 'val1', 'val2', 'val3', 'val4', 'val5', 'val6', 'val7', 'val8']
self.target_system = target_system
self.val1 = val1
self.val2 = val2
self.val3 = val3
self.val4 = val4
self.val5 = val5
self.val6 = val6
self.val7 = val7
self.val8 = val8
def pack(self, mav):
return MAVLink_message.pack(self, mav, 241, struct.pack('<ffffffffB', self.val1, self.val2, self.val3, self.val4, self.val5, self.val6, self.val7, self.val8, self.target_system))
class MAVLink_setpoint_6dof_message(MAVLink_message):
'''
Set the 6 DOF setpoint for an attitude and position controller.
'''
def __init__(self, target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_SETPOINT_6DOF, 'SETPOINT_6DOF')
self._fieldnames = ['target_system', 'trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z']
self.target_system = target_system
self.trans_x = trans_x
self.trans_y = trans_y
self.trans_z = trans_z
self.rot_x = rot_x
self.rot_y = rot_y
self.rot_z = rot_z
def pack(self, mav):
return MAVLink_message.pack(self, mav, 15, struct.pack('<ffffffB', self.trans_x, self.trans_y, self.trans_z, self.rot_x, self.rot_y, self.rot_z, self.target_system))
class MAVLink_memory_vect_message(MAVLink_message):
'''
Send raw controller memory. The use of this message is
discouraged for normal packets, but it is quite an efficient way for
testing new messages and getting experimental debug output.
'''
def __init__(self, address, ver, type, value):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_MEMORY_VECT, 'MEMORY_VECT')
self._fieldnames = ['address', 'ver', 'type', 'value']
self.address = address
self.ver = ver
self.type = type
self.value = value
def pack(self, mav):
return MAVLink_message.pack(self, mav, 204, struct.pack('<HBB32s', self.address, self.ver, self.type, self.value))
class MAVLink_debug_vect_message(MAVLink_message):
'''
'''
def __init__(self, name, time_usec, x, y, z):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_DEBUG_VECT, 'DEBUG_VECT')
self._fieldnames = ['name', 'time_usec', 'x', 'y', 'z']
self.name = name
self.time_usec = time_usec
self.x = x
self.y = y
self.z = z
def pack(self, mav):
return MAVLink_message.pack(self, mav, 49, struct.pack('<Qfff10s', self.time_usec, self.x, self.y, self.z, self.name))
class MAVLink_named_value_float_message(MAVLink_message):
'''
Send a key-value pair as float. The use of this message is
discouraged for normal packets, but it is quite an efficient way for
testing new messages and getting experimental debug output.
'''
def __init__(self, time_boot_ms, name, value):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_NAMED_VALUE_FLOAT, 'NAMED_VALUE_FLOAT')
self._fieldnames = ['time_boot_ms', 'name', 'value']
self.time_boot_ms = time_boot_ms
self.name = name
self.value = value
def pack(self, mav):
return MAVLink_message.pack(self, mav, 170, struct.pack('<If10s', self.time_boot_ms, self.value, self.name))
class MAVLink_named_value_int_message(MAVLink_message):
'''
Send a key-value pair as integer. The use of this message is
discouraged for normal packets, but it is quite an efficient way for
testing new messages and getting experimental debug output.
'''
def __init__(self, time_boot_ms, name, value):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_NAMED_VALUE_INT, 'NAMED_VALUE_INT')
self._fieldnames = ['time_boot_ms', 'name', 'value']
self.time_boot_ms = time_boot_ms
self.name = name
self.value = value
def pack(self, mav):
return MAVLink_message.pack(self, mav, 44, | |
<filename>generated_python_code/ball_collector/ballcollector/scripts/subsystem_cs.py
#!/usr/bin/env python
'''
Copyright (c) 2019, Robot Control and Pattern Recognition Group, Warsaw University of Technology
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Warsaw University of Technology nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: <NAME>
'''
# Import other scripts #
from auxiliary_functions import *
from auxiliary_agent_ballcollector import *
from auxiliary_subsystem_cs import *
# Temporary definitions #
IS_LOG = False # Flag determining if logs are shown in the terminal #
IS_PRINT = True # Flag indicating if debug information for the developer is shown in the terminal #
class cs:
##### Subsystem cs constructor #####
def __init__(self):
self.log("__init__ function")
rospy.init_node("cs")
self._subsystemName="cs"
self._subsystemFrequency=10;
self._currentSubsystemBehaviour="Behaviour_initBehaviour";
self._subsystemIterations=0
self._behaviourIterations=0
self.initialiseCommunicationModel()
self.auxiliaryFunctions = AuxiliaryFunctions(self._subsystemFrequency)
# initialize all input flags
self._in_flag_desiredRobotCommandTele=False
self._in_flag_desiredRobotCommandAuto=False
self._in_flag_cameraInfo=False
self._in_flag_sensorInfo=False
self._in_flag_detectedBalls=False
self._in_flag_rpiCamera=False
self._in_flag_desiredRobotSpeedAuto=False
self._in_flag_desiredVaccumSpeedAuto=False
# initialize all output flags
self._out_flag_obstacleDetectedAuto=False
self._out_flag_obstacleDetectedTele=False
self._out_flag_ballInfoAuto=False
self._out_flag_ballInfoTele=False
self._out_flag_ballCollectedTele=False
self._out_flag_ballCollectedAuto=False
self._out_flag_desiredVacuumCommand=False
self._out_flag_desiredMoveCommand=False
pass
##### Start subsystem #####
def startSubsystem(self):
self.log("startSubsystem")
try:
while self.auxiliaryFunctions.isSubsystemOK():
''' Execute behaviour associated with _currentSubsystemBehaviour -- choose appropriate state based on _currentSubsystemBehaviour '''
if self._currentSubsystemBehaviour=="Behaviour_initBehaviour":
self.log("_currentSubsystemBehaviour==Behaviour_initBehaviour")
self.subsystemBehaviour_initBehaviour()
continue
if self._currentSubsystemBehaviour=="Behaviour_idleBehaviour":
self.log("_currentSubsystemBehaviour==Behaviour_idleBehaviour")
self.subsystemBehaviour_idleBehaviour()
continue
except Exception as e:
print(e)
self.error("Error found in function startSubsystem -- file subsystem_cs.py!")
pass
##### Update data for input buffer: desiredRobotCommandTele #####
def update_desiredRobotCommandTele(self, data):
self.log("update_desiredRobotCommandTele")
self.desiredRobotCommandTele=data
self._in_flag_desiredRobotCommandTele=True
pass
##### Update data for input buffer: desiredRobotCommandAuto #####
def update_desiredRobotCommandAuto(self, data):
self.log("update_desiredRobotCommandAuto")
self.desiredRobotCommandAuto=data
self._in_flag_desiredRobotCommandAuto=True
pass
##### Update data for input buffer: cameraInfo #####
def update_cameraInfo(self, data):
self.log("update_cameraInfo")
self.cameraInfo=data
self._in_flag_cameraInfo=True
pass
##### Update data for input buffer: sensorInfo #####
def update_sensorInfo(self, data):
self.log("update_sensorInfo")
self.sensorInfo=data
self._in_flag_sensorInfo=True
pass
##### Update data for input buffer: detectedBalls #####
def update_detectedBalls(self, data):
self.log("update_detectedBalls")
self.detectedBalls=data
self._in_flag_detectedBalls=True
pass
##### Update data for input buffer: rpiCamera #####
def update_rpiCamera(self, data):
self.log("update_rpiCamera")
self.rpiCamera=data
self._in_flag_rpiCamera=True
pass
##### Update data for input buffer: desiredRobotSpeedAuto #####
def update_desiredRobotSpeedAuto(self, data):
self.log("update_desiredRobotSpeedAuto")
self.desiredRobotSpeedAuto=data
self._in_flag_desiredRobotSpeedAuto=True
pass
##### Update data for input buffer: desiredVaccumSpeedAuto #####
def update_desiredVaccumSpeedAuto(self, data):
self.log("update_desiredVaccumSpeedAuto")
self.desiredVaccumSpeedAuto=data
self._in_flag_desiredVaccumSpeedAuto=True
pass
##### Initialise communication model #####
def initialiseCommunicationModel(self):
self.log("initialiseCommunicationModel")
self.initialiseSendChannel()
self.initialiseSendChannelForDiagnostics()
self.initialiseReceiveChannel()
pass
##### Initialise send channel #####
def initialiseSendChannel(self):
self.log("initialiseSendChannel")
# Buffer name=obstacleDetectedAuto - Sender using NON-BLOCKING mode, receiver using NON-BLOCKING mode
self._sender_obstacleDetectedAuto=rospy.Publisher("obstacleDetectedChannelAuto", ObstacleDetected, queue_size=CHANNEL_SIZE)
# Buffer name=obstacleDetectedTele - Sender using NON-BLOCKING mode, receiver using NON-BLOCKING mode
self._sender_obstacleDetectedTele=rospy.Publisher("obstacleDetectedChannelTele", ObstacleDetected, queue_size=CHANNEL_SIZE)
# Buffer name=ballInfoAuto - Sender using NON-BLOCKING mode, receiver using NON-BLOCKING mode
self._sender_ballInfoAuto=rospy.Publisher("ballInfoRpiChannelAuto", CameraMessage, queue_size=CHANNEL_SIZE)
# Buffer name=ballInfoTele - Sender using NON-BLOCKING mode, receiver using NON-BLOCKING mode
self._sender_ballInfoTele=rospy.Publisher("ballInfoChannelTele", CameraMessage, queue_size=CHANNEL_SIZE)
# Buffer name=ballCollectedTele - Sender using NON-BLOCKING mode, receiver using NON-BLOCKING mode
self._sender_ballCollectedTele=rospy.Publisher("ballCollectedChannelTele", Bool, queue_size=CHANNEL_SIZE)
# Buffer name=ballCollectedAuto - Sender using NON-BLOCKING mode, receiver using NON-BLOCKING mode
self._sender_ballCollectedAuto=rospy.Publisher("ballCollectedChannelAuto", Bool, queue_size=CHANNEL_SIZE)
# Buffer name=desiredVacuumCommand - Sender using NON-BLOCKING mode, receiver using NON-BLOCKING mode
self._sender_desiredVacuumCommand=rospy.Publisher("desiredVacuumCommandChannel", Int64, queue_size=CHANNEL_SIZE)
# Buffer name=desiredMoveCommand - Sender using NON-BLOCKING mode, receiver using NON-BLOCKING mode
self._sender_desiredMoveCommand=rospy.Publisher("desiredMoveCommandChannel", MotorMessage, queue_size=CHANNEL_SIZE)
pass
##### Initialise send channel for diagnostics #####
def initialiseSendChannelForDiagnostics(self):
self.log("initialiseSendChannelForDiagnostics")
self._vectorOfSenderDiagnostics=[]
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_currentSubsystemBehaviour', String, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_subsystemFrequency', Float64, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_subsystemName', String, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_subsystemIterations', Int64, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_behaviourIterations', Int64, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/currentRobotSpeed', Int64, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_in_flag_desiredRobotCommandTele', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_in_flag_desiredRobotCommandAuto', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_in_flag_cameraInfo', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_in_flag_sensorInfo', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_in_flag_detectedBalls', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_in_flag_rpiCamera', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_in_flag_desiredRobotSpeedAuto', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_in_flag_desiredVaccumSpeedAuto', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_out_flag_obstacleDetectedAuto', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_out_flag_obstacleDetectedTele', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_out_flag_ballInfoAuto', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_out_flag_ballInfoTele', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_out_flag_ballCollectedTele', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_out_flag_ballCollectedAuto', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_out_flag_desiredVacuumCommand', Bool, queue_size=CHANNEL_SIZE))
self._vectorOfSenderDiagnostics.append(rospy.Publisher('cs/_out_flag_desiredMoveCommand', Bool, queue_size=CHANNEL_SIZE))
pass
##### Initialise receive channel based on input buffers #####
def initialiseReceiveChannel(self):
self.log("initialiseReceiveChannel")
# Buffer name=desiredRobotCommandTele sender NON-BLOCKING mode - receiver NON-BLOCKING mode
self._subscriber_desiredRobotCommandTele=rospy.Subscriber("desiredRobotCommandChannelTele", String, self.update_desiredRobotCommandTele)
# Buffer name=desiredRobotCommandAuto sender NON-BLOCKING mode - receiver NON-BLOCKING mode
self._subscriber_desiredRobotCommandAuto=rospy.Subscriber("desiredRobotCommandChannelAuto", String, self.update_desiredRobotCommandAuto)
# Buffer name=cameraInfo sender NON-BLOCKING mode - receiver NON-BLOCKING mode
self._subscriber_cameraInfo=rospy.Subscriber("cameraInfoChannel", CameraMessage, self.update_cameraInfo)
# Buffer name=sensorInfo sender NON-BLOCKING mode - receiver NON-BLOCKING mode
self._subscriber_sensorInfo=rospy.Subscriber("sensorInfoChannel", SensorMessage, self.update_sensorInfo)
# Buffer name=detectedBalls sender NON-BLOCKING mode - receiver NON-BLOCKING mode
self._subscriber_detectedBalls=rospy.Subscriber("detectedBallsChannel", Image, self.update_detectedBalls)
# Buffer name=rpiCamera sender NON-BLOCKING mode - receiver NON-BLOCKING mode
self._subscriber_rpiCamera=rospy.Subscriber("rpiCameraChannel", Image, self.update_rpiCamera)
# Buffer name=desiredRobotSpeedAuto sender NON-BLOCKING mode - receiver NON-BLOCKING mode
self._subscriber_desiredRobotSpeedAuto=rospy.Subscriber("desiredRobotSpeedChannelAuto", Int64, self.update_desiredRobotSpeedAuto)
# Buffer name=desiredVaccumSpeedAuto sender NON-BLOCKING mode - receiver NON-BLOCKING mode
self._subscriber_desiredVaccumSpeedAuto=rospy.Subscriber("desiredVaccumSpeedChannelAuto", Int64, self.update_desiredVaccumSpeedAuto)
pass
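# Note (added commentary): every receive buffer follows the same non-blocking pattern - the
# subscriber callback stores the latest message in self.<bufferName> and raises the matching
# _in_flag_<bufferName>, which the behaviours can poll without ever blocking on a topic.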
##### Wait for all messages #####
def waitForAllMessages(self):
self.log("waitForAllMessages")
#rospy.wait_for_message("", String, timeout=TOPIC_TIMEOUT)
#rospy.wait_for_message("", String, timeout=TOPIC_TIMEOUT)
#rospy.wait_for_message("", CameraMessage, timeout=TOPIC_TIMEOUT)
#rospy.wait_for_message("", SensorMessage, timeout=TOPIC_TIMEOUT)
#rospy.wait_for_message("", Image, timeout=TOPIC_TIMEOUT)
#rospy.wait_for_message("", Image, timeout=TOPIC_TIMEOUT)
#rospy.wait_for_message("", Int64, timeout=TOPIC_TIMEOUT)
#rospy.wait_for_message("", Int64, timeout=TOPIC_TIMEOUT)
pass
##### Publish on topics diagnostic data concerning the subsystem state #####
def sendDataForDiagnostics(self):
self._vectorOfSenderDiagnostics[0].publish(self._currentSubsystemBehaviour)
self._vectorOfSenderDiagnostics[1].publish(self._subsystemFrequency)
self._vectorOfSenderDiagnostics[2].publish(self._subsystemName)
self._vectorOfSenderDiagnostics[3].publish(self._subsystemIterations)
self._vectorOfSenderDiagnostics[4].publish(self._behaviourIterations)
###### internal state #####
if(21 < len(self._vectorOfSenderDiagnostics) ):
self._vectorOfSenderDiagnostics[5].publish(self.currentRobotSpeed)
self._vectorOfSenderDiagnostics[6].publish(self._in_flag_desiredRobotCommandTele)
self._vectorOfSenderDiagnostics[7].publish(self._in_flag_desiredRobotCommandAuto)
self._vectorOfSenderDiagnostics[8].publish(self._in_flag_cameraInfo)
self._vectorOfSenderDiagnostics[9].publish(self._in_flag_sensorInfo)
self._vectorOfSenderDiagnostics[10].publish(self._in_flag_detectedBalls)
self._vectorOfSenderDiagnostics[11].publish(self._in_flag_rpiCamera)
self._vectorOfSenderDiagnostics[12].publish(self._in_flag_desiredRobotSpeedAuto)
self._vectorOfSenderDiagnostics[13].publish(self._in_flag_desiredVaccumSpeedAuto)
self._vectorOfSenderDiagnostics[14].publish(self._out_flag_obstacleDetectedAuto)
self._vectorOfSenderDiagnostics[15].publish(self._out_flag_obstacleDetectedTele)
self._vectorOfSenderDiagnostics[16].publish(self._out_flag_ballInfoAuto)
self._vectorOfSenderDiagnostics[17].publish(self._out_flag_ballInfoTele)
self._vectorOfSenderDiagnostics[18].publish(self._out_flag_ballCollectedTele)
self._vectorOfSenderDiagnostics[19].publish(self._out_flag_ballCollectedAuto)
self._vectorOfSenderDiagnostics[20].publish(self._out_flag_desiredVacuumCommand)
self._vectorOfSenderDiagnostics[21].publish(self._out_flag_desiredMoveCommand)
pass
##### Behaviour definitions #####
##### Behaviour initBehaviour #####
##### Terminal condition #####
def terminalCondition_initBehaviour(self): # String desiredRobotCommandTele, String desiredRobotCommandAuto, CameraMessage cameraInfo, SensorMessage sensorInfo, Image detectedBalls, Image rpiCamera, Int64 desiredRobotSpeedAuto, Int64 desiredVaccumSpeedAuto, Int64 currentRobotSpeed, std_msgs::Bool _in_flag_desiredRobotCommandTele, std_msgs::Bool _in_flag_desiredRobotCommandAuto, std_msgs::Bool _in_flag_cameraInfo, std_msgs::Bool _in_flag_sensorInfo, std_msgs::Bool _in_flag_detectedBalls, std_msgs::Bool _in_flag_rpiCamera, std_msgs::Bool _in_flag_desiredRobotSpeedAuto, std_msgs::Bool _in_flag_desiredVaccumSpeedAuto, std_msgs::Bool _out_flag_obstacleDetectedAuto, std_msgs::Bool _out_flag_obstacleDetectedTele, std_msgs::Bool _out_flag_ballInfoAuto, std_msgs::Bool _out_flag_ballInfoTele, std_msgs::Bool _out_flag_ballCollectedTele, std_msgs::Bool _out_flag_ballCollectedAuto, std_msgs::Bool _out_flag_desiredVacuumCommand, std_msgs::Bool _out_flag_desiredMoveCommand #
self.log("[Behaviour initBehaviour] -- Checking Terminal Condition")
return True
pass
##### Error condition #####
def errorCondition_initBehaviour(self): # String desiredRobotCommandTele, String desiredRobotCommandAuto, CameraMessage cameraInfo, SensorMessage sensorInfo, Image detectedBalls, Image rpiCamera, Int64 desiredRobotSpeedAuto, Int64 desiredVaccumSpeedAuto, Int64 currentRobotSpeed, std_msgs::Bool _in_flag_desiredRobotCommandTele, std_msgs::Bool _in_flag_desiredRobotCommandAuto, std_msgs::Bool _in_flag_cameraInfo, std_msgs::Bool _in_flag_sensorInfo, std_msgs::Bool _in_flag_detectedBalls, std_msgs::Bool _in_flag_rpiCamera, std_msgs::Bool _in_flag_desiredRobotSpeedAuto, std_msgs::Bool _in_flag_desiredVaccumSpeedAuto, std_msgs::Bool _out_flag_obstacleDetectedAuto, std_msgs::Bool _out_flag_obstacleDetectedTele, std_msgs::Bool _out_flag_ballInfoAuto, std_msgs::Bool _out_flag_ballInfoTele, std_msgs::Bool _out_flag_ballCollectedTele, std_msgs::Bool _out_flag_ballCollectedAuto, std_msgs::Bool _out_flag_desiredVacuumCommand, std_msgs::Bool _out_flag_desiredMoveCommand #
self.log("[Behaviour initBehaviour] -- Checking Error Condition")
return False
pass
##### Transition function #####
def transitionFunction_initBehaviour(self):
self.log("[Behaviour initBehaviour] -- Calculating Transition Function")
# Transition function #
self.log("TRANSITION FUNCTION - initBehaviour consists of the following partial transition functions (decomposition based on output buffers)")
# Partial transition function call: fun1
self.transitionFunction_initBehaviour_fun1()
# Partial transition function call: set_buffer_flags_function
self.transitionFunction_initBehaviour_set_buffer_flags_function()
pass
##### Decomposition of partial transition function based on input buffers #####
def transitionFunction_initBehaviour_fun1(self):
self.log("[Behaviour initBehaviour] -- Calculating Partial Transition Function fun1")
# Partial Transition Function - the first layer #
self.log("PARTIAL TRANSITION FUNCTION - FIRST LAYER - initBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
if True:
self.transitionFunction_initBehaviour_fun1_0()
pass
##### Partial transition function: fun1_0 based on input buffers True #####
def transitionFunction_initBehaviour_fun1_0(self):
self.log("[Behaviour initBehaviour] -- Calculating Partial Transition Function fun1_0")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - initBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
print("[CS - ballCollector] -- initBehaviour")
self.desiredMoveCommand=MotorMessage()
print("ALA")
self.desiredMoveCommand.desiredSpeed=Int64(10)
print("ALA1")
self.desiredMoveCommand.direction=Int64(0)
print("ALA2")
self.desiredMoveCommand.cmd=String("empty")
self.desiredRobotCommandTele=String("empty")
self.desiredRobotCommandAuto=String("empty")
self.ballCollectedAuto=Bool(False)
self.ballCollectedTele=Bool(False)
self.obstacleDetectedTele=ObstacleDetected()
self.obstacleDetectedTele.sonar_1=Int64(0)
self.obstacleDetectedTele.sonar_2=Int64(0)
self.obstacleDetectedTele.sonar_3=Int64(0)
self.obstacleDetectedTele.sonar_4=Int64(0)
self.obstacleDetectedAuto=ObstacleDetected()
self.obstacleDetectedAuto.sonar_1=Int64(0)
self.obstacleDetectedAuto.sonar_2=Int64(0)
self.obstacleDetectedAuto.sonar_3=Int64(0)
self.obstacleDetectedAuto.sonar_4=Int64(0)
# vacuum
self.desiredVacuumCommand=Int64(0)
# output camera info
self.ballInfoAuto=CameraMessage()
self.ballInfoTele=CameraMessage()
# internal memory
self.currentRobotSpeed=Int64(0)
# input buffer
self.desiredRobotSpeedAuto=Int64(0)
# End - Partial Transition Function Code
pass
##### Decomposition of partial transition function based on input buffers #####
def transitionFunction_initBehaviour_set_buffer_flags_function(self):
| |
None: # If the window area is blank, compute it from the window dimensions.
inputdata["WindowConfigure"][window_id]["windowArea"] = \
inputdata["WindowConfigure"][window_id]["windowWidth"] * inputdata["WindowConfigure"][window_id]["windowHeight"]
# Calculate the exterior wall areas
for room_zone_name in inputdata["EnvelopeSet"]:
for (wall_id, wall_configure) in enumerate( inputdata["EnvelopeSet"][room_zone_name]["WallList"] ):
window_total = 0 # accumulator for the total window area
if "WindowList" in wall_configure: # 窓がある場合
# Sum up the window areas (Σ{window area × number of windows})
for (window_id, window_configure) in enumerate(wall_configure["WindowList"]):
if window_configure["WindowID"] != "無":
window_total += \
inputdata["WindowConfigure"][ window_configure["WindowID"] ]["windowArea"] * window_configure["WindowNumber"]
# Wall-only area (if there are no windows, window_total = 0)
if wall_configure["EnvelopeArea"] >= window_total:
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WallArea"] = wall_configure["EnvelopeArea"] - window_total
else:
print(room_zone_name)
print(wall_configure)
raise Exception('窓面積が外皮面積よりも大きくなっています')
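# Illustrative numbers only for the check above: an envelope of 20 m2 with two 2 m2 windows
# (window_total = 2 * 2 = 4 m2) leaves WallArea = 20 - 4 = 16 m2; a window total larger than
# the envelope area is treated as an input error.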
##----------------------------------------------------------------------------------
## Steady-state heat gain calculation for rooms (Commentary 2.4.2.2 - 2.4.2.7)
##----------------------------------------------------------------------------------
## Attach the WallConfigure and WindowConfigure information to EnvelopeSet.
for room_zone_name in inputdata["EnvelopeSet"]:
# Loop over each wall
for (wall_id, wall_configure) in enumerate( inputdata["EnvelopeSet"][room_zone_name]["WallList"]):
if inputdata["WallConfigure"][ wall_configure["WallSpec"] ]["inputMethod"] == "断熱材種類を入力":
if wall_configure["Direction"] == "水平(上)": # 天井と見なす。
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["U_wall"] = \
inputdata["WallConfigure"][ wall_configure["WallSpec"] ]["Uvalue_roof"]
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WallArea"] = wall_configure["WallArea"]
elif wall_configure["Direction"] == "水平(下)": # 床と見なす。
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["U_wall"] = \
inputdata["WallConfigure"][ wall_configure["WallSpec"] ]["Uvalue_floor"]
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WallArea"] = wall_configure["WallArea"]
else:
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["U_wall"] = \
inputdata["WallConfigure"][ wall_configure["WallSpec"] ]["Uvalue_wall"]
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WallArea"] = wall_configure["WallArea"]
else:
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["U_wall"] = \
inputdata["WallConfigure"][ wall_configure["WallSpec"] ]["Uvalue"]
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WallArea"] = wall_configure["WallArea"]
for (window_id, window_configure) in enumerate( inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"]):
if window_configure["WindowID"] != "無":
# Compute the shading (eaves) effect coefficients
if window_configure["EavesID"] == "無":
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["shadingEffect_C"] = 1
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["shadingEffect_H"] = 1
else:
if inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["shadingEffect_C"] != None and \
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["shadingEffect_H"] != None :
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["shadingEffect_C"] = \
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["shadingEffect_C"]
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["shadingEffect_H"] = \
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["shadingEffect_H"]
else:
# Compute the shading effect coefficients with the function shading.calc_shadingCoefficient.
(inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["shadingEffect_C"], \
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["shadingEffect_H"] ) = \
shading.calc_shadingCoefficient(inputdata["Building"]["Region"],\
wall_configure["Direction"], \
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["x1"],\
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["x2"],\
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["x3"],\
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["y1"],\
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["y2"],\
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["y3"],\
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["zxPlus"],\
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["zxMinus"],\
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["zyPlus"],\
inputdata["ShadingConfigure"][ window_configure["EavesID"] ]["zyMinus"])
# Compute the window UA value (U-value × area)
if window_configure["isBlind"] == "無": # ブラインドがない場合
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["U_window"] = \
inputdata["WindowConfigure"][ window_configure["WindowID"] ]["Uvalue"]
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["I_window"] = \
inputdata["WindowConfigure"][ window_configure["WindowID"] ]["Ivalue"]
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["windowArea"] = \
window_configure["WindowNumber"] * inputdata["WindowConfigure"][ window_configure["WindowID"] ]["windowArea"]
elif window_configure["isBlind"] == "有": # ブラインドがある場合
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["U_window"] = \
inputdata["WindowConfigure"][ window_configure["WindowID"] ]["Uvalue_blind"]
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["I_window"] = \
inputdata["WindowConfigure"][ window_configure["WindowID"] ]["Ivalue_blind"]
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["windowArea"] = \
window_configure["WindowNumber"] * inputdata["WindowConfigure"][ window_configure["WindowID"] ]["windowArea"]
# Optional input SP-8
if "window_Ivalue" in inputdata["SpecialInputData"]:
if window_configure["WindowID"] in inputdata["SpecialInputData"]["window_Ivalue"]:
inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["IA_window"] = \
window_configure["WindowNumber"] * inputdata["WindowConfigure"][ window_configure["WindowID"] ]["windowArea"] * \
np.array(inputdata["SpecialInputData"]["window_Ivalue"][ window_configure["WindowID"] ])
##----------------------------------------------------------------------------------
## Room load calculation (Commentary 2.4.3, 2.4.4)
##----------------------------------------------------------------------------------
Heat_light_hourly = {}
Num_of_Person_hourly = {}
Heat_OAapp_hourly = {}
for room_zone_name in inputdata["AirConditioningZone"]:
# Whether the room is in use = HVAC operating schedule (365 days)
btype = inputdata["AirConditioningZone"][room_zone_name]["buildingType"]
rtype = inputdata["AirConditioningZone"][room_zone_name]["roomType"]
# Function that reads the reference heat gain values [W/m2] (HVAC) SP-9
if "room_usage_condition" in inputdata["SpecialInputData"]:
(roomHeatGain_Light, roomHeatGain_Person, roomHeatGain_OAapp, roomNumOfPerson) = \
bc.get_roomHeatGain(btype, rtype, inputdata["SpecialInputData"]["room_usage_condition"])
else:
(roomHeatGain_Light, roomHeatGain_Person, roomHeatGain_OAapp, roomNumOfPerson) = \
bc.get_roomHeatGain(btype, rtype)
# Read the lighting heat gain from Form 4
if BUILELIB_MODE:
if room_zone_name in inputdata["LightingSystems"]:
lighting_power = 0
for unit_name in inputdata["LightingSystems"][room_zone_name]["lightingUnit"]:
lighting_power += inputdata["LightingSystems"][room_zone_name]["lightingUnit"][unit_name]["RatedPower"] * \
inputdata["LightingSystems"][room_zone_name]["lightingUnit"][unit_name]["Number"]
roomHeatGain_Light = lighting_power / inputdata["AirConditioningZone"][room_zone_name]["zoneArea"]
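# Illustrative numbers only: 500 W of rated lighting installed in a 50 m2 zone gives
# roomHeatGain_Light = 500 / 50 = 10 W/m2.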
# For the hourly calculation (this really should not be in this loop; it belongs in the hourly calculation instead)
Heat_light_hourly[room_zone_name] = roomScheduleLight[room_zone_name] * roomHeatGain_Light # heat gain from lighting (365 days)
Num_of_Person_hourly[room_zone_name] = roomSchedulePerson[room_zone_name] * roomNumOfPerson # occupant density (365 days)
Heat_OAapp_hourly[room_zone_name] = roomScheduleOAapp[room_zone_name] * roomHeatGain_OAapp # heat gain from equipment (365 days)
##----------------------------------------------------------------------------------
## Dynamic room load calculation
##----------------------------------------------------------------------------------
# Import the load calculation module
from .heat_load_calculation import Main
import copy
# Load the template file
with open('./builelib/heat_load_calculation/heatload_calculation_template.json', 'r', encoding='utf-8') as js:
# with open('input_non_residential.json', 'r', encoding='utf-8') as js:
input_heatcalc_template = json.load(js)
## Build the heat load calculation input (common settings)
# Region
input_heatcalc_template["common"]["region"] = inputdata["Building"]["Region"]
input_heatcalc_template["common"]["is_residential"] = False
# Upper and lower room temperature limits
input_heatcalc_template["rooms"][0]["schedule"]["temperature_upper_limit"] = bc.trans_36524to8760(resultJson["schedule"]["room_temperature_setpoint"])
input_heatcalc_template["rooms"][0]["schedule"]["temperature_lower_limit"] = bc.trans_36524to8760(resultJson["schedule"]["room_temperature_setpoint"])
# Upper and lower relative humidity limits
input_heatcalc_template["rooms"][0]["schedule"]["relative_humidity_upper_limit"] = bc.trans_36524to8760(resultJson["schedule"]["room_humidity_setpoint"])
input_heatcalc_template["rooms"][0]["schedule"]["relative_humidity_lower_limit"] = bc.trans_36524to8760(resultJson["schedule"]["room_humidity_setpoint"])
# Not used for non-residential buildings
input_heatcalc_template["rooms"][0]["vent"] = 0
input_heatcalc_template["rooms"][0]["schedule"]["heat_generation_cooking"] = np.zeros(8760)
input_heatcalc_template["rooms"][0]["schedule"]["vapor_generation_cooking"] = np.zeros(8760)
input_heatcalc_template["rooms"][0]["schedule"]["local_vent_amount"] = np.zeros(8760)
# Compute the load for each air-conditioning zone
for room_zone_name in inputdata["AirConditioningZone"]:
# Load the input file
input_heatcalc = copy.deepcopy(input_heatcalc_template)
## Build the heat load calculation input (per room)
# Room name
input_heatcalc["rooms"][0]["name"] = room_zone_name
# Air volume [m3]
input_heatcalc["rooms"][0]["volume"] = inputdata["AirConditioningZone"][room_zone_name]["zoneArea"] * inputdata["AirConditioningZone"][room_zone_name]["ceilingHeight"]
# Upper/lower limits of room temperature and humidity
input_heatcalc["rooms"][0]["schedule"]["is_upper_temp_limit_set"] = np.reshape(np.array(roomScheduleRoom[room_zone_name], dtype="bool"), 8760)
input_heatcalc["rooms"][0]["schedule"]["is_lower_temp_limit_set"] = np.reshape(np.array(roomScheduleRoom[room_zone_name], dtype="bool"), 8760)
input_heatcalc["rooms"][0]["schedule"]["is_upper_humidity_limit_set"] = np.reshape(np.array(roomScheduleRoom[room_zone_name], dtype="bool"), 8760)
input_heatcalc["rooms"][0]["schedule"]["is_lower_humidity_limit_set"] = np.reshape(np.array(roomScheduleRoom[room_zone_name], dtype="bool"), 8760)
# Heat gains
# Lighting heat gain schedule [W]
input_heatcalc["rooms"][0]["schedule"]["heat_generation_lighting"] = np.reshape(Heat_light_hourly[room_zone_name],8760) * inputdata["AirConditioningZone"][room_zone_name]["zoneArea"]
# Equipment heat gain schedule [W]
input_heatcalc["rooms"][0]["schedule"]["heat_generation_appliances"] = np.reshape(Heat_OAapp_hourly[room_zone_name],8760) * inputdata["AirConditioningZone"][room_zone_name]["zoneArea"]
# Number of occupants [persons]
input_heatcalc["rooms"][0]["schedule"]["number_of_people"] = np.reshape(Num_of_Person_hourly[room_zone_name],8760) * inputdata["AirConditioningZone"][room_zone_name]["zoneArea"]
# Floor area (enter the floor area subject to calculation)
input_heatcalc["rooms"][0]["boundaries"][0]["area"] = \
inputdata["AirConditioningZone"][room_zone_name]["zoneArea"]
# Ceiling area (assumed equal to the floor area)
input_heatcalc["rooms"][0]["boundaries"][1]["area"] = \
inputdata["AirConditioningZone"][room_zone_name]["zoneArea"]
# If the room has envelope elements
if room_zone_name in inputdata["EnvelopeSet"]:
# Exterior walls
for (wall_id, wall_configure) in enumerate( inputdata["EnvelopeSet"][room_zone_name]["WallList"]):
# Equivalent R-value
if inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["U_wall"] > 4:
equivalent_Rvalue = 0.001
else:
equivalent_Rvalue = (1/inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["U_wall"] - 0.25)
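# Illustrative numbers only: U_wall = 0.5 W/(m2 K) gives an equivalent insulation resistance of
# 1/0.5 - 0.25 = 1.75 m2 K/W, while walls with U_wall > 4 are treated above as practically
# uninsulated (0.001 m2 K/W).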
direction = ""
if inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["Direction"] == "北":
direction = "n"
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["Direction"] == "北東":
direction = "ne"
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["Direction"] == "東":
direction = "e"
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["Direction"] == "南東":
direction = "se"
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["Direction"] == "南":
direction = "s"
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["Direction"] == "南西":
direction = "sw"
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["Direction"] == "西":
direction = "w"
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["Direction"] == "北西":
direction = "nw"
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["Direction"] == "水平(上)":
direction = "top"
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["Direction"] == "水平(下)":
direction = "bottom"
else:
raise Exception("方位が不正です")
boundary_type = ""
is_sun_striked_outside = ""
if inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WallType"] == "日の当たる外壁":
boundary_type = "external_general_part"
is_sun_striked_outside = True
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WallType"] == "日の当たらない外壁":
boundary_type = "external_general_part"
is_sun_striked_outside = False
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WallType"] == "地盤に接する外壁":
boundary_type = "ground"
is_sun_striked_outside = False
elif inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WallType"] == "地盤に接する外壁_Ver2":
boundary_type = "ground"
is_sun_striked_outside = False
if boundary_type == "external_general_part":
input_heatcalc["rooms"][0]["boundaries"].append(
{
"name": "wall",
"boundary_type": boundary_type,
"area": inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WallArea"],
"is_sun_striked_outside": is_sun_striked_outside,
"temp_dif_coef": 0,
"direction": direction,
"is_solar_absorbed_inside": False,
"general_part_spec" :
{
"outside_emissivity": 0.9,
"outside_solar_absorption": 0.8,
"inside_heat_transfer_resistance": 0.11,
"outside_heat_transfer_resistance": 0.04,
"layers": [
{
"name": "コンクリート",
"thermal_resistance": 0.10,
"thermal_capacity": 300
},
{
"name": "吹付け硬質ウレタンフォーム",
"thermal_resistance": equivalent_Rvalue,
"thermal_capacity": 1.00
}
],
},
"solar_shading_part": {
"existence" : False
},
}
)
elif boundary_type == "ground":
input_heatcalc["rooms"][0]["boundaries"].append(
{
"name": "wall",
"boundary_type": boundary_type,
"area": inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WallArea"],
"is_sun_striked_outside": is_sun_striked_outside,
"temp_dif_coef": 0,
"direction": direction,
"is_solar_absorbed_inside": False,
"ground_spec" :
{
"inside_heat_transfer_resistance": 0.11,
"layers": [
{
"name": "コンクリート",
"thermal_resistance": 0.10,
"thermal_capacity": 300
},
{
"name": "吹付け硬質ウレタンフォーム",
"thermal_resistance": equivalent_Rvalue,
"thermal_capacity": 1.00
}
],
}
}
)
# Windows
for (window_id, window_configure) in enumerate( inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"]):
if window_configure["WindowID"] != "無":
input_heatcalc["rooms"][0]["boundaries"].append(
{
"name": "window",
"boundary_type": "external_transparent_part",
"area": inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["windowArea"] ,
"is_sun_striked_outside": True,
"temp_dif_coef": 0,
"direction": direction,
"is_solar_absorbed_inside": False,
"transparent_opening_part_spec": {
"eta_value": inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["I_window"],
"u_value": inputdata["EnvelopeSet"][room_zone_name]["WallList"][wall_id]["WindowList"][window_id]["U_window"],
"outside_emissivity": 0.8,
"inside_heat_transfer_resistance": 0.11,
"outside_heat_transfer_resistance": 0.04,
"incident_angle_characteristics": "1"
},
"solar_shading_part": {
"existence": False
}
}
)
            # For debugging
# with open("heatloadcalc_input.json",'w', encoding='utf-8') as fw:
# json.dump(input_heatcalc, fw, indent=4, ensure_ascii=False, cls = bc.MyEncoder)
            # Run the heat load calculation
room_air_temperature, mean_radiant_temperature, heatload_sensible_convection, heatload_sensible_radiation, heatload_latent \
= Main.run(input_heatcalc)
            # Room temperature
resultJson["Qroom"][room_zone_name]["Troom"] = bc.trans_8760to36524(room_air_temperature)
resultJson["Qroom"][room_zone_name]["MRTroom"] = bc.trans_8760to36524(mean_radiant_temperature)
            # Accumulate the load (total heat load) [W] (365×24)
heatload = np.array(
bc.trans_8760to36524(heatload_sensible_convection) + \
bc.trans_8760to36524(heatload_sensible_radiation) + \
bc.trans_8760to36524(heatload_latent)
)
for dd in range(0,365):
for hh in range(0,24):
            # Hourly room load [W] → [MJ/hour]
resultJson["Qroom"][room_zone_name]["Qroom_hourly"][dd][hh] = (-1) * heatload[dd][hh] * 3600/1000000
if DEBUG: # pragma: no cover
        # Plot the heat loads
for room_zone_name in inputdata["AirConditioningZone"]:
mf.hourlyplot(resultJson["Qroom"][room_zone_name]["Troom"], "室内空気温度: "+room_zone_name, "b", "室内空気温度")
mf.hourlyplot(resultJson["Qroom"][room_zone_name]["MRTroom"], "室内平均放射温度 "+room_zone_name, "b", "室内平均放射温度")
mf.hourlyplot(resultJson["Qroom"][room_zone_name]["Qroom_hourly"], "室負荷: "+room_zone_name, "b", "時刻別室負荷")
print('室負荷計算完了')
##----------------------------------------------------------------------------------
    ## Primary energy consumption of the air handling unit (AHU) groups (manual section 2.5)
##----------------------------------------------------------------------------------
    # Variable for storing the results: resultJson (AHU groups)
for ahu_name in inputdata["AirHandlingSystem"]:
resultJson["AHU"][ahu_name] = {
"schedule": np.zeros((365,24)), # 時刻別の運転スケジュール(365×24)
"Hoa_hourly": np.zeros((365,24)), # 空調運転時間帯の外気エンタルピー
"Qoa_hourly": np.zeros((365,24)), # 日平均外気負荷 [kW]
"Qroom_hourly" : np.zeros((365,24)), # 時刻別室負荷の積算値 [MJ/h]
"Qahu_hourly" : np.zeros((365,24)), # 時刻別空調負荷 [MJ/day]
"Qahu_unprocessed": np.zeros((365,24)), # 空調機群の未処理負荷(冷房)[MJ/h]
"E_fan_hourly" : np.zeros((365,24)), # 送風機の時刻別エネルギー消費量 [MWh]
"E_aex_hourly" : np.zeros((365,24)), # 全熱交換器の時刻別エネルギー消費量 [MWh]
"Economizer":{
"AHUVovc" : np.zeros((365,24)), # 外気冷房運転時の外気風量 [kg/s]
"Qahu_oac": np.zeros((365,24)), # 外気冷房による負荷削減効果 [MJ/day]
},
"load_ratio": np.zeros((365,24)), # 時刻別の負荷率
"Eahu_total" : 0, # 消費電力の合計 [h]
"Tahu_total" : 0 # 運転時間の合計 [h]
}
##----------------------------------------------------------------------------------
    ## Organize the overall specifications of each AHU group
##----------------------------------------------------------------------------------
for ahu_name in inputdata["AirHandlingSystem"]:
        # AHU type (if at least one unit is of type "空調機" (air handling unit), the whole group is treated as "空調機")
inputdata["AirHandlingSystem"][ahu_name]["AHU_type"] = "空調機以外"
for unit_id, unit_configure in enumerate(inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"]):
if unit_configure["Type"] == "空調機":
inputdata["AirHandlingSystem"][ahu_name]["AHU_type"] = "空調機"
break
        # Rated capacity of the AHU group
inputdata["AirHandlingSystem"][ahu_name]["RatedCapacityCooling"] = 0
inputdata["AirHandlingSystem"][ahu_name]["RatedCapacityHeating"] = 0
for unit_id, unit_configure in enumerate(inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"]):
if unit_configure["RatedCapacityCooling"] != None:
inputdata["AirHandlingSystem"][ahu_name]["RatedCapacityCooling"] += \
unit_configure["RatedCapacityCooling"] * unit_configure["Number"]
if unit_configure["RatedCapacityHeating"] != None:
inputdata["AirHandlingSystem"][ahu_name]["RatedCapacityHeating"] += \
unit_configure["RatedCapacityHeating"] * unit_configure["Number"]
        # Rated power consumption of each supply fan unit (manual section 2.5.8) [kW]
for unit_id, unit_configure in enumerate(inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"]):
inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"][unit_id]["FanPowerConsumption_total"] = 0
if unit_configure["FanPowerConsumption"] != None:
                # Rated fan power consumption [kW] = power consumption per unit [kW] × number of units
inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"][unit_id]["FanPowerConsumption_total"] = \
unit_configure["FanPowerConsumption"] * unit_configure["Number"]
        # Air volume of the AHU group [m3/h]
inputdata["AirHandlingSystem"][ahu_name]["FanAirVolume"] = 0
for unit_id, unit_configure in enumerate(inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"]):
if unit_configure["FanAirVolume"] != None:
inputdata["AirHandlingSystem"][ahu_name]["FanAirVolume"] += \
unit_configure["FanAirVolume"] * unit_configure["Number"]
        # Efficiency of the total heat exchangers (the lowest value is adopted)
inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioCooling"] = None
inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioHeating"] = None
for unit_id, unit_configure in enumerate(inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"]):
            # Cooling efficiency
if (unit_configure["AirHeatExchangeRatioCooling"] != None):
if inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioCooling"] == None:
inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioCooling"] = unit_configure["AirHeatExchangeRatioCooling"]
elif inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioCooling"] > unit_configure["AirHeatExchangeRatioCooling"]:
inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioCooling"] = unit_configure["AirHeatExchangeRatioCooling"]
            # Heating efficiency
if (unit_configure["AirHeatExchangeRatioHeating"] != None):
if inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioHeating"] == None:
inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioHeating"] = unit_configure["AirHeatExchangeRatioHeating"]
elif inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioHeating"] > unit_configure["AirHeatExchangeRatioHeating"]:
inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioHeating"] = unit_configure["AirHeatExchangeRatioHeating"]
        # Whether the total heat exchangers have bypass control (if at least one unit has it, the group is treated as "有")
inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangerControl"] = "無"
for unit_id, unit_configure in enumerate(inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"]):
if (unit_configure["AirHeatExchangeRatioCooling"] != None) and (unit_configure["AirHeatExchangeRatioHeating"] != None):
if unit_configure["AirHeatExchangerControl"] == "有":
inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangerControl"] = "有"
        # Power consumption of the total heat exchangers [kW]
| |
counts: dict - lookup table for SVDM
    min_max: pd.DataFrame - contains min/max values per numeric feature
classes: list of str - class labels in the dataset.
examples_covered_by_rule: dict - which rule covers which examples, i.e. {rule ri: set(example ei, example ej)}
label_type: str - consider only examples of the specified type as neighbors. Valid values:
scripts.vars.ALL_LABELS - ignore the label and choose the k-nearest examples across all class labels
    scripts.vars.SAME_LABEL_AS_RULE - consider only examples as k-nearest examples if they have the same label as
<rule>
    scripts.vars.OPPOSITE_LABEL_TO_RULE - consider only examples as k-nearest examples if they have the opposite
label of <rule>
only_uncovered_neighbors: bool - True if only examples should be considered that aren't covered by <rule> yet.
Otherwise, all neighbors are considered. An example is covered by a rule if the example satisfies all conditions
imposed by <rule>.
Returns
-------
pd.Series, float, bool OR None, None, None.
Rule that is closest to an example, distance of that rule to the example, True if a rule became the closest rule.
    None, None, None if only 1 rule and 1 example are provided and that example is the seed for the rule, where the latter
doesn't cover multiple examples.
"""
k = 5
min_dist = math.inf
min_rule_id = None
if example.name in my_vars.closest_rule_per_example:
min_rule_id, min_dist = my_vars.closest_rule_per_example[example.name]
# print("entry exists for example {}: {}".format(example.name, my_vars.closest_rule_per_example[example.name]))
# hvdm() expects a dataFrame of examples, not a Series
# Plus, data type is "object", but then numeric columns won't be detected in di(), so we need to infer them
example_df = example.to_frame().T.infer_objects()
try:
was_updated = False
for rule in rules:
rule_id = rule.name
# print("Now checking rule with ID {}:\n{}".format(rule_id, rule))
examples = len(examples_covered_by_rule.get(rule.name, set()))
# > 0 (instead of 1) because seeds aren't stored in this dict, so we implicitly add 1
covers_multiple_examples = True if examples > 0 else False
# Ignore rule because current example was seed for it and the rule doesn't cover multiple examples
# if not covers_multiple_examples and my_vars.seed_example_rule[example.name] == rule_id:
if not covers_multiple_examples and rule_id in my_vars.seed_example_rule.get(example.name, set()):
# Ignore rule as it's the seed for the example
# print("rule {} is seed for example {}, so ignore it".format(rule_id, example.name))
continue
neighbors, dists, is_closest = \
find_nearest_examples(example_df, k, rule, class_col_name, counts, min_max, classes,
label_type=label_type, only_uncovered_neighbors=only_uncovered_neighbors)
if neighbors is not None:
dist = dists.iloc[0][my_vars.DIST]
if min_dist is not None:
if is_closest:
was_updated = True
min_dist = dist
min_rule_id = rule_id
else:
min_dist = dist
min_rule_id = rule_id
was_updated = True
else:
raise MyException("No neighbors for rule:\n{}".format(rule))
if min_rule_id is not None:
print("nearest rule for example {}:rule {} with dist={}".format(example.name, min_rule_id, min_dist))
return my_vars.all_rules[min_rule_id], min_dist, was_updated
return None, None, None
except MyException:
return None, None, None
def most_specific_generalization(example, rule, class_col_name, dtypes):
"""
Implements MostSpecificGeneralization() from the paper, i.e. Algorithm 2.
Parameters
----------
example: pd.Series - row from the dataset.
rule: pd.Series - rule that will be potentially generalized.
    class_col_name: str - name of the column holding the class labels.
dtypes: pd.Series - data types of the respective columns in the dataset.
Returns
-------
pd.Series.
Generalized rule
"""
    # Without a deep copy, any changes to the returned rule will also affect <rule>, i.e. the original rule
rule = copy.deepcopy(rule)
for (col_name, example_val), dtype in zip(example.iteritems(), dtypes):
if col_name == class_col_name:
continue
example_dtype = dtype
if col_name in rule:
# Cast object to tuple datatype -> this is only automatically done if it's not a string
rule_val = (rule[col_name])
# print("rule_val", rule_val, "\nrule type:", type(rule_val))
if is_string_dtype(example_dtype) and example_val != rule_val:
rule = rule.drop(labels=[col_name])
elif is_numeric_dtype(example_dtype):
if example_val > rule_val[1]:
# print("new upper limit", (rule_val[0], example_val))
rule[col_name] = Bounds(lower=rule_val[0], upper=example_val)
elif example_val < rule_val[0]:
# print("new lower limit", (example_val, rule_val[1]))
rule[col_name] = Bounds(lower=example_val, upper=rule_val[1])
# print("updated:", rule)
return rule
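# A minimal usage sketch of most_specific_generalization() (hypothetical column names and values,
# shown only to illustrate the generalization behaviour implemented above):
#
#     rule:    {"temp": Bounds(lower=10, upper=20), "outlook": "sunny", "class": "+"}
#     example: {"temp": 25, "outlook": "rainy", "class": "+"}
#     result:  the numeric bound widens to Bounds(lower=10, upper=25) and the mismatching
#              nominal condition on "outlook" is dropped from the rule.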
def hvdm(examples, rule, counts, classes, min_max, class_col_name):
"""
    Computes the distance (Heterogeneous Value Difference Metric) between a rule/example and another example.
Assumes that there's at least 1 feature shared between <rule> and <examples>.
Parameters
----------
examples: pd.DataFrame - examples
rule: pd.Series - (m x n) rule
    counts: dict of Counters - contains for nominal features how often each value co-occurs with each class label
classes: list of str - class labels in the dataset.
    min_max: pd.DataFrame - contains min/max values per numeric feature
    class_col_name: str - name of the class label column
Returns
-------
pd.DataFrame - distances.
"""
# Select only those columns that exist in examples and rule
# https://stackoverflow.com/questions/46228574/pandas-select-dataframe-columns-based-on-another-dataframes-columns
examples = examples[rule.index.intersection(examples.columns)]
dists = []
# Compute distance for j-th feature (=column)
for col_name in examples:
if col_name == class_col_name or col_name == my_vars.TAG or col_name == my_vars.COVERED:
continue
# Extract column from both dataframes into numpy array
example_feature_col = examples[col_name]
# Compute nominal/numeric distance
if pd.api.types.is_numeric_dtype(example_feature_col):
dist_squared = di(example_feature_col, rule, min_max)
else:
dist_squared = svdm(example_feature_col, rule, counts, classes)
dists.append(dist_squared)
# Note: this line assumes that there's at least 1 feature
distances = pd.DataFrame(list(zip(*dists)), columns=[s.name for s in dists], index=dists[0].index)
# Sum up rows to compute HVDM - no need to square the distances as the order won't change
distances[my_vars.DIST] = distances.select_dtypes(float).sum(1)
distances = distances.sort_values(my_vars.DIST, ascending=True)
return distances
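# A short note on the aggregation above (added for clarity): the per-feature values returned by
# di()/svdm() are already squared (see their docstrings), and hvdm() sums them without taking the
# square root of the total. For example, squared partial distances 0.25 and 0.04 are stored as
# 0.29 rather than sqrt(0.29) ~= 0.539; the ranking of neighbours is identical either way, which
# is why the square root can be skipped.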
def svdm(example_feat, rule_feat, counts, classes):
"""
Computes the (squared) Value difference metric for nominal values.
Parameters
----------
example_feat: pd.Series - column (=feature) containing all examples.
rule_feat: pd.Series - column (=feature) of the rule.
    counts: dict of Counters - contains for nominal features how often each value co-occurs with each class label
classes: list of str - class labels in the dataset.
Returns
-------
pd.Series.
(squared) distance of each example.
"""
col_name = example_feat.name
rule_val = rule_feat[col_name]
dists = []
# Feature is NaN in rule -> all distances will become 1 automatically by definition
if pd.isnull(rule_val):
print("column {} is NaN in rule:\n{}".format(col_name, rule_feat))
dists = [(idx, 1.0) for idx, _ in example_feat.iteritems()]
zlst = list(zip(*dists))
out = pd.Series(zlst[1], index=zlst[0], name=col_name)
return out
n_rule = counts[col_name][rule_val]
# For every row/example
for idx, example_val in example_feat.iteritems():
if pd.isnull(example_val):
print("NaN(s) in svdm() in column '{}' in row {}".format(col_name, idx))
dist = 1.0
else:
# print("compute example", idx)
# print("------------------")
# print(example_val)
dist = 0.
if example_val != rule_val:
for k in classes:
# print("processing class", k)
n_example = counts[col_name][example_val]
nk_example = counts[col_name][my_vars.CONDITIONAL][example_val][k]
nk_rule = counts[col_name][my_vars.CONDITIONAL][rule_val][k]
# print("n_example", n_example)
# print("nk_example", nk_example)
# print("n_rule", n_rule)
# print("nk_rule", nk_rule)
res = abs(nk_example/n_example - nk_rule/n_rule)
dist += res
# print("|{}/{}-{}/{}| = {}".format(nk_example, n_example, nk_rule, n_rule, res))
# print("d={}".format(dist))
# else:
# print("same val ({}) in row {}".format(example_val, idx))
dists.append((idx, dist*dist))
# Split tuples into 2 separate lists, one containing the indices and the other one containing the values
zlst = list(zip(*dists))
out = pd.Series(zlst[1], index=zlst[0], name=col_name)
return out
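# A worked example of the SVDM computation above (hypothetical counts, for illustration only):
# with two classes and a nominal feature where the example's value 'a' occurs 4 times
# (3 with class '+', 1 with '-') and the rule's value 'b' occurs 2 times (1 with '+', 1 with '-'),
# the distance is |3/4 - 1/2| + |1/4 - 1/2| = 0.5, which is stored squared as 0.25.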
def di(example_feat, rule_feat, min_max):
"""
Computes the (squared) partial distance for numeric values between an example and a rule.
Parameters
----------
example_feat: pd.Series - column (=feature) containing all examples.
rule_feat: pd.Series - column (=feature) of the rule.
min_max: pd.DataFrame - min and max value per numeric feature.
Returns
-------
pd.Series
(squared) distance of each example.
"""
col_name = example_feat.name
lower_rule_val, upper_rule_val = rule_feat[col_name]
dists = []
# Feature is NaN in rule -> all distances will become 1 automatically by definition
if pd.isnull(lower_rule_val) or pd.isnull(upper_rule_val):
print("column {} is NaN in rule:\n{}".format(col_name, rule_feat))
dists = [(idx, 1.0) for idx, _ in example_feat.iteritems()]
zlst = list(zip(*dists))
out = pd.Series(zlst[1], index=zlst[0], name=col_name)
return out
# For every row/example
for idx, example_val in example_feat.iteritems():
# print("processing", example_val)
if pd.isnull(example_val):
print("NaN(s) in svdm() in column '{}' in row {}".format(col_name, idx))
dist = 1.0
else:
min_rule_val = min_max.at["min", col_name]
max_rule_val = min_max.at["max", col_name]
# print("min({})={}".format(col_name, min_rule_val))
# print("max({})={}".format(col_name, max_rule_val))
if example_val > upper_rule_val:
# print("example > upper")
# print("({} - {}) / ({} - {})".format(example_val, upper_rule_val, max_rule_val, min_rule_val))
dist = (example_val - upper_rule_val) / (max_rule_val - min_rule_val)
elif example_val < lower_rule_val:
# print("example < lower")
# print("({} - | |
,
u'㓲' : [u'p'] ,
u'䩹' : [u'e'] ,
u'腿' : [u't'] ,
u'邋' : [u'l'] ,
u'漊' : [u'l'] ,
u'厍' : [u's'] ,
u'抗' : [u'k'] ,
u'萘' : [u'n'] ,
u'䜚' : [u't'] ,
u'鰨' : [u't', u'd'] ,
u'弪' : [u'j'] ,
u'辵' : [u'c'] ,
u'渴' : [u'h', u'k', u'j'] ,
u'劷' : [u'y'] ,
u'㴾' : [u'b'] ,
u'懁' : [u'h', u'x'] ,
u'荂' : [u'k'] ,
u'䙄' : [u'q', u'z'] ,
u'科' : [u'k'] ,
u'魒' : [u'p'] ,
u'幔' : [u'm'] ,
u'軟' : [u'r'] ,
u'浞' : [u'z'] ,
u'凡' : [u'f'] ,
u'㱨' : [u'l'] ,
u'惫' : [u'b'] ,
u'艬' : [u'c'] ,
u'䕮' : [u'j'] ,
u'磻' : [u'p', u'b'] ,
u'驼' : [u't'] ,
u'嵾' : [u'c'] ,
u'沈' : [u'c', u's', u't'] ,
u'錍' : [u'p'] ,
u'嘏' : [u'j', u'g'] ,
u'膖' : [u'p'] ,
u'攙' : [u'c'] ,
u'馦' : [u'x'] ,
u'紩' : [u'z'] ,
u'岨' : [u'j'] ,
u'殲' : [u'j'] ,
u'鈷' : [u'h', u'g'] ,
u'唹' : [u'y'] ,
u'㪼' : [u'k'] ,
u'胀' : [u'z'] ,
u'摃' : [u'k', u'g'] ,
u'䏂' : [u's'] ,
u'飐' : [u'z'] ,
u'籓' : [u'f'] ,
u'寒' : [u'h'] ,
u'櫜' : [u'g'] ,
u'酡' : [u't'] ,
u'㧦' : [u'x'] ,
u'捭' : [u'b'] ,
u'韺' : [u'y'] ,
u'嫼' : [u'm'] ,
u'䂁' : [u'c'] ,
u'讇' : [u'c'] ,
u'琎' : [u'j'] ,
u'墑' : [u'd'] ,
u'晴' : [u'q'] ,
u'䰞' : [u'z'] ,
u'疣' : [u'y'] ,
u'㚥' : [u'y'] ,
u'霤' : [u'l'] ,
u'椰' : [u'y'] ,
u'䶳' : [u'y'] ,
u'钹' : [u'b'] ,
u'䅀' : [u'l'] ,
u'曅' : [u'y'] ,
u'衆' : [u'z'] ,
u'奐' : [u'h'] ,
u'绕' : [u'r'] ,
u'觛' : [u'd'] ,
u'牢' : [u'l'] ,
u'囥' : [u'k'] ,
u'㝤' : [u'g'] ,
u'珷' : [u'w'] ,
u'㓹' : [u'r'] ,
u'镸' : [u'c'] ,
u'老' : [u'l'] ,
u'构' : [u'g'] ,
u'儋' : [u'd'] ,
u'頑' : [u'k', u'w'] ,
u'羔' : [u'g'] ,
u'蚚' : [u'q'] ,
u'樝' : [u'z'] ,
u'垤' : [u'd'] ,
u'麪' : [u'm'] ,
u'䈭' : [u'h'] ,
u'贳' : [u's'] ,
u'炶' : [u'h'] ,
u'㖸' : [u'x'] ,
u'娽' : [u'l'] ,
u'䣆' : [u'x'] ,
u'鏌' : [u'm'] ,
u'睏' : [u'k'] ,
u'㡑' : [u'q'] ,
u'旘' : [u'z'] ,
u'佟' : [u't'] ,
u'陥' : [u'x'] ,
u'編' : [u'b'] ,
u'㻪' : [u's'] ,
u'蓮' : [u'l'] ,
u'桱' : [u'j'] ,
u'嗸' : [u'a'] ,
u'鳾' : [u's'] ,
u'蚃' : [u'x'] ,
u'愂' : [u'b'] ,
u'䖅' : [u'l'] ,
u'麓' : [u'l'] ,
u'礒' : [u'y'] ,
u'嶕' : [u'j'] ,
u'沟' : [u'g'] ,
u'鈠' : [u'y'] ,
u'儢' : [u'l'] ,
u'膭' : [u'g'] ,
u'怬' : [u'x'] ,
u'馽' : [u'z'] ,
u'砼' : [u't'] ,
u'岿' : [u'k'] ,
u'濉' : [u's'] ,
u'赊' : [u's'] ,
u'偌' : [u'r'] ,
u'胗' : [u'z'] ,
u'捖' : [u'h', u'w', u'g'] ,
u'䟙' : [u'q'] ,
u'飧' : [u's'] ,
u'筦' : [u'g'] ,
u'忩' : [u'c'] ,
u'滳' : [u's'] ,
u'豴' : [u'd'] ,
u'卶' : [u's'] ,
u'㧽' : [u'h', u'g'] ,
u'技' : [u'j'] ,
u'蔅' : [u'y'] ,
u'䠇' : [u'j'] ,
u'窐' : [u'w', u'g'] ,
u'鴕' : [u't'] ,
u'辞' : [u'c'] ,
u'猡' : [u'l'] ,
u'加' : [u'j'] ,
u'萯' : [u'f'] ,
u'䬱' : [u'b'] ,
u'疺' : [u'f'] ,
u'鰿' : [u'j'] ,
u'牋' : [u'j'] ,
u'㵕' : [u's'] ,
u'蝙' : [u'p', u'b'] ,
u'䩛' : [u'b'] ,
u'瓤' : [u'r'] ,
u'齩' : [u'y', u'j'] ,
u'觲' : [u'x'] ,
u'浵' : [u't'] ,
u'䳴' : [u'd', u't'] ,
u'㱿' : [u'q'] ,
u'樆' : [u'l'] ,
u'争' : [u'z'] ,
u'閏' : [u'r'] ,
u'殛' : [u'j'] ,
u'娦' : [u'p'] ,
u'䎫' : [u'c', u'z'] ,
u'誱' : [u'j'] ,
u'眸' : [u'm'] ,
u'宻' : [u'm'] ,
u'㐺' : [u'z'] ,
u'佈' : [u'b'] ,
u'烍' : [u'x'] ,
u'陎' : [u's'] ,
u'摚' : [u't'] ,
u'䣝' : [u't'] ,
u'韣' : [u'd'] ,
u'籪' : [u'd'] ,
u'旯' : [u'x', u'l'] ,
u'議' : [u'y'] ,
u'呺' : [u'x', u'h'] ,
u'緿' : [u'd'] ,
u'踉' : [u'l'] ,
u'熌' : [u's'] ,
u'弓' : [u'g'] ,
u'䦜' : [u'w'] ,
u'邢' : [u'x', u'g'] ,
u'琥' : [u'h'] ,
u'㤧' : [u'h'] ,
u'茫' : [u'h', u'm'] ,
u'暮' : [u'm'] ,
u'鬻' : [u'y', u'z', u'j'] ,
u'纾' : [u's'] ,
u'藄' : [u'q'] ,
u'楇' : [u'h'] ,
u'鷔' : [u'a'] ,
u'䅗' : [u's'] ,
u'衝' : [u'c'] ,
u'珠' : [u'z'] ,
u'奧' : [u'a', u'y'] ,
u'䯰' : [u'j'] ,
u'鋶' : [u'l'] ,
u'癹' : [u'p', u'b'] ,
u'㭻' : [u't'] ,
u'钋' : [u'p'] ,
u'權' : [u'q'] ,
u'垍' : [u'j'] ,
u'㨔' : [u'h'] ,
u'暗' : [u'a'] ,
u'耘' : [u'y'] ,
u'䌚' : [u's'] ,
u'㖡' : [u'y'] ,
u'级' : [u'j'] ,
u'頨' : [u'p'] ,
u'嬪' : [u'p'] ,
u'讵' : [u'j'] ,
u'樴' : [u'z'] ,
u'嚷' : [u'r'] ,
u'㤾' : [u's'] ,
u'旁' : [u'p', u'b'] ,
u'蝂' : [u'b'] ,
u'䉄' : [u'l'] ,
u'㓋' : [u'g'] ,
u'緑' : [u'l'] ,
u'齒' : [u'c'] ,
u'諟' : [u's'] ,
u'楞' : [u'l'] ,
u'嗡' : [u'w'] ,
u'㡨' : [u'q'] ,
u'擫' : [u'y'] ,
u'虬' : [u'q'] ,
u'䅮' : [u'c'] ,
u'系' : [u'x', u'j'] ,
u'鹼' : [u'j'] ,
u'奾' : [u'x'] ,
u'梈' : [u'p'] ,
u'霍' : [u'h', u's'] ,
u'刏' : [u'j'] ,
u'㾒' : [u'r', u'j'] ,
u'薖' : [u'k'] ,
u'愙' : [u'k'] ,
u'鶦' : [u'h'] ,
u'礩' : [u'z'] ,
u'墨' : [u'm'] ,
u'澲' : [u'y'] ,
u'阷' : [u'c'] ,
u'儹' : [u'z'] ,
u'蓀' : [u's'] ,
u'恃' : [u's'] ,
u'䟂' : [u'm'] ,
u'㝍' : [u'x'] ,
u'鳐' : [u'y'] ,
u'忒' : [u't'] ,
u'滜' : [u'h', u'z', u'g'] ,
u'镡' : [u'x', u't'] ,
u'偣' : [u'y'] ,
u'㷦' : [u'x'] ,
u'杭' : [u'h', u'k'] ,
u'䛬' : [u'c', u't'] ,
u'㙷' : [u'z'] ,
u'鏺' : [u'p'] ,
u'罽' : [u'j'] ,
u'廼' : [u'n'] ,
u'䒁' : [u'y', u'x', u'j'] ,
u'辇' : [u'n'] ,
u'瀎' : [u'm'] ,
u'岑' : [u'c'] ,
u'㴐' : [u'm'] ,
u'䠞' : [u'q', u'c'] ,
u'熣' : [u's'] ,
u'錤' : [u'q', u'j'] ,
u'洰' : [u'j'] ,
u'䦳' : [u'h', u'x'] ,
u'邹' : [u'z'] ,
u'䕀' : [u'k'] ,
u'担' : [u'q', u'd'] ,
u'豆' : [u'd'] ,
u'嵐' : [u'l'] ,
u'跛' : [u'p', u'b'] ,
u'癢' : [u'y'] ,
u'勥' : [u'q', u'j'] ,
u'矷' : [u'z'] ,
u'酸' : [u's'] ,
u'萁' : [u'q', u'j'] ,
u'掄' : [u'l'] ,
u'唋' : [u't'] ,
u'鰑' : [u'y'] ,
u'箔' : [u'b'] ,
u'芚' : [u'c', u't'] ,
u'渝' : [u'y'] ,
u'厤' : [u'l'] ,
u'骪' : [u'w'] ,
u'䘭' : [u'z', u'd'] ,
u'褳' : [u'l'] ,
u'璶' : [u'j'] ,
u'帽' : [u'm'] ,
u'䳆' : [u'b'] ,
u'韌' : [u'r'] ,
u'獏' : [u'm'] ,
u'懘' : [u'c'] ,
u'䭟' : [u'y'] ,
u'鉥' : [u's'] ,
u'秨' : [u'z'] ,
u'胮' : [u'p'] ,
u'汱' : [u'q'] ,
u'凸' : [u't'] ,
u'飾' : [u's', u'c'] ,
u'芃' : [u'p'] ,
u'攂' : [u'l'] ,
u'䆅' : [u'c', u'j'] ,
u'㐌' : [u'y', u's'] ,
u'骓' : [u'z'] ,
u'紒' : [u'j'] ,
u'妕' : [u'z'] ,
u'梟' : [u'x'] ,
u'阠' : [u'x'] ,
u'唢' : [u's'] ,
u'搬' : [u'b'] ,
u'䂯' : [u'g'] ,
u'㜶' : [u'w'] ,
u'鶽' : [u's', u'z'] ,
u'簼' : [u'l'] ,
u'墿' : [u'y'] ,
u'毉' : [u'y'] ,
u'襊' : [u'z'] ,
u'呌' : [u'j'] ,
u'㫓' : [u'q'] ,
u'蓗' : [u'z'] ,
u'杖' : [u'z'] ,
u'䏙' : [u't'] ,
u'㙠' : [u'y'] ,
u'鳧' : [u'f'] ,
u'罦' : [u'f'] ,
u'審' : [u's'] ,
u'櫳' : [u'l'] ,
u'衴' : [u'd'] ,
u'坶' : [u'm'] ,
u'暀' : [u'w'] ,
u'脅' : [u'x'] ,
u'䰇' : [u'q'] ,
u'餕' : [u'j'] ,
u'讞' : [u'y'] ,
u'眡' : [u'd'] ,
u'嚠' : [u'l'] ,
u'耯' : [u'h'] ,
u'伱' : [u'n'] ,
u'熺' : [u'x'] ,
u'頿' : [u'z'] ,
u'諈' : [u'z'] ,
u'癋' : [u'h'] ,
u'㥕' : [u'l'] ,
u'荙' : [u'd'] ,
u'烤' : [u'k'] ,
u'魩' : [u'm'] ,
u'跲' : [u'j'] ,
u'極' : [u'j'] ,
u'䣴' : [u'x'] ,
u'㡿' : [u'c'] ,
u'渆' : [u'y'] ,
u'䪉' : [u'l'] ,
u'醏' : [u'd'] ,
u'贜' : [u'z'] ,
u'带' : [u'd'] ,
u'䞫' : [u'y'] ,
u'躱' : [u'd'] ,
u'猸' : [u'm'] ,
u'徻' : [u'w'] ,
u'䭈' : [u'j'] ,
u'瓍' : [u's'] ,
u'鉎' : [u's'] ,
u'恚' : [u'h'] ,
u'䳝' : [u'p', u'b', u'd', u'f'] ,
u'鏣' : [u's'] ,
u'硪' : [u'y', u'e', u'w'] ,
u'懯' : [u'f'] ,
u'轰' : [u'h'] ,
u'偺' : [u'z'] ,
u'秿' : [u'f'] ,
u'䌃' : [u's'] ,
u'訉' : [u'f'] ,
u'疌' : [u'n'] ,
u'嬓' : [u'j'] ,
u'䶜' : [u'g'] ,
u'钢' : [u'g'] ,
u'瀥' : [u'h'] ,
u'蜫' : [u'k'] ,
u'抮' : [u'z'] ,
u'䠵' : [u'f'] ,
u'鼻' : [u'b'] ,
u'窾' : [u'k'] ,
u'臄' : [u'j'] ,
u'浇' : [u'a', u'j'] ,
u'勎' : [u'l'] ,
u'駔' : [u'z'] ,
u'䕗' : [u'p', u'b'] ,
u'豝' : [u'b'] ,
u'矠' : [u'c'] ,
u'㓢' : [u'l'] ,
u'嵧' : [u'l'] ,
u'俰' : [u'h'] ,
u'零' : [u'l'] ,
u'特' : [u't'] ,
u'颋' : [u't'] ,
u'朊' : [u'r'] ,
u'宍' : [u'r'] ,
u'㘔' : [u'c', u's'] ,
u'檗' : [u'b'] ,
u'谘' : [u'z'] ,
u'会' : [u'h', u'k', u'g'] ,
u'㦡' : [u'l'] ,
u'犧' : [u'x', u's'] ,
u'鐨' : | |
Mikrotik product; for future use)
elif "tbps" in rate_string:
# Yes
            # Then the speed is saved in mbps
speed = int(
float(rate_string.split("tbps")[0]) * 1000000
)
# Display info message
log.info(
f"get_interfaces: get_speed: rate found: {rate_string}, rate: {speed} mbps"
)
# " full-duplex: yes" field found in the block of strings? (full_duplex)
if " full-duplex: yes" in data_block:
# Yes
# Display info message
log.info(
f"get_interfaces: get_duplex: {interface_name} is in full duplex mode"
)
            # Then the interface is in full duplex mode
full_duplex = True
# Remove current interface information from the block of data
# (to speed up the research of data)
del block_of_strings_bitrate[index]
# Leave the loop
break
# Get interface mode (access, trunk or hybrid)
# Check if the interface is one of the trunk interface
if interface_name in dict_trunk_interface:
# Yes
# Set interface mode
mode = dict_trunk_interface[interface_name]
# Display info message
log.info(f"get_interfaces: mode: {mode}")
# # Check if the interface is one of the trunk interface
# if interface_name in dict_trunk_interface:
# # Yes
# # Set trunk mode
# mode = "trunk"
# # Display info message
# log.info(f"get_interfaces: mode: {mode}")
# # Get input erros, FCS errors, input packets anf output packets
# for index, data_stats in enumerate(block_of_strings_stats):
# # Display info message
# log.info(
# f"get_interfaces: get_stats: index: {index} [{len(block_of_strings_stats)}]"
# )
# # Is the name of interface found in the block of strings?
# if f"name: {interface_name}" in data_stats:
# # Yes, so this block of strings has information on the interface
# # Display info message
# log.info(f"get_interfaces: get_stats: index found: {index}")
# # " rx-fcs-error=" filed found in the block of strings? (speed)
# if " rx-fcs-error=" in data_stats:
# # Yes
# # Save the line with the data of FCS errors
# line_split = data_stats.split("rx-fcs-error=")[-1].split("=")[0]
# # By default no string gathered
# fcs_string = ""
# # Check each character till a non-numeric character
# for character in line_split:
# # Display info message
# log.info(
# f"get_interfaces: get_stats: fcs errors: char = {character}"
# )
# # Is it a numeric characer ("0" to "9")?
# if character >= "0" and character <= "9":
# # Yes
# # So the character is added to a string
# fcs_string += character
# # Is the character different than " " (which can be used for separator)?
# elif character != " ":
# # Yes, this is not a space
# # Leave the loop then since this is the beginning of another word
# break
# log.info(
# f"get_interfaces: get_stats: fcs errors: fcs_string: {fcs_string}"
# )
# # String not empty?
# if fcs_string:
# # Yes
# # Then save the result in integer
# fcs_error = int(fcs_string)
# Get description
if " comment=" in line:
description = (
line.split(" comment=")[-1].split("=")[0].rsplit(" ", 1)[0]
)
# Display info message
log.info(f"get_interfaces: comment: {description}")
# Create a dictionary
returned_dict = {
"operational": operational,
"admin_state": admin_state,
"maximum_frame_size": maximum_frame_size,
"full_duplex": full_duplex,
"speed": speed,
"mode": mode,
"description": description,
}
# Add the information to the dict
if interface_name:
returned_output[interface_name] = returned_dict
# Return data
return returned_output
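    # A minimal sketch of what one entry of the dict returned by get_interfaces() can look like
    # (illustrative interface name and values only, not taken from a real device):
    #
    #     returned_output["ether1"] == {
    #         "operational": True, "admin_state": True,
    #         "maximum_frame_size": 1500, "full_duplex": True,
    #         "speed": 1000, "mode": "access", "description": "uplink",
    #     }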
async def get_vlans(self):
"""
        Async method used to get the VLAN information from the device
:return: VLANs of the device
:rtype: dict
"""
# Display info message
log.info("get_vlans")
# By default nothing is returned
returned_output = {}
# Send a command
output = await self.send_command(self.cmd_get_vlans)
# Display info message
log.info(f"get_vlans:\n'{output}'")
# Convert a string into a list of strings
lines = output.splitlines()
# Read each line
for line in lines:
# Initialize data with default values
name = ""
vlan_id = 0
extra = None
# extra = {
# "bridge": "",
# }
# Get VLAN name
if " comment=" in line:
name = line.split(" comment=")[-1].split("=")[0].rsplit(" ", 1)[0]
# Display info message
log.info(f"get_vlans: name: {name}")
# Get VLAN ID
if " vlan-ids=" in line:
vlan_id = int(line.split(" vlan-ids=")[-1].split()[0])
# Display info message
log.info(f"get_vlans: vlan_id: {vlan_id}")
# Get bridge (special Mikrotik)
if " bridge=" in line:
bridge = line.split(" bridge=")[-1].split("=")[0].rsplit(" ", 1)[0]
# Display info message
log.info(f"get_vlans: bridge: {bridge}")
                # Save bridge information into the extra field
extra = {
"bridge": bridge,
}
# Create a dictionary
returned_dict = {
"name": name,
"extra": extra,
}
            # Is the VLAN ID non-zero?
if vlan_id:
# Yes
# Add the information to the dict
returned_output[vlan_id] = returned_dict
# Return data
return returned_output
async def get_routing_table(self):
"""
        Async method used to get the routing table of the device
:return: Routing table of the device
:rtype: dict
"""
# Display info message
log.info("get_routing_table")
# By default nothing is returned
returned_output = {}
# Send a command
output = await self.send_command(self.cmd_get_routing_table)
# Display info message
log.info(f"get_routing_table:\n'{output}'")
# Convert a string into a list of strings
lines = output.splitlines()
# Read each line
for line in lines:
# Initialize data with default values
network = ""
address = ""
prefix = 0
protocol = "unknown"
administrative_distance = 0
gateway = ""
active = False
protocol_attributes = None
# Get network, address and prefix
if " dst-address=" in line:
network = line.split(" dst-address=")[-1].split()[0]
address = network.split("/")[0]
prefix = int(network.split("/")[1])
# Get protocol
# Save char with protocol letter
if len(line) > 5:
protocol_char = line[5]
if protocol_char == "C":
# Connected
protocol = "connected"
elif protocol_char == "S":
# Static
protocol = "static"
elif protocol_char == "r":
# RIP
protocol = "rip"
elif protocol_char == "b":
# BGP
protocol = "bgp"
elif protocol_char == "o":
# OSPF
protocol = "ospf"
elif protocol_char == "m":
# MME
protocol = "mme"
# Get administrative distance
if " distance=" in line:
administrative_distance = int(line.split(" distance=")[-1].split()[0])
# Get gateway
if " gateway=" in line:
gateway = line.split(" gateway=")[-1].split()[0]
# Get active status
if len(line) > 3:
if line[3] == "A":
active = True
# Create a dictionary
returned_dict = {
"address": address,
"prefix": prefix,
"protocol": protocol,
"administrative_distance": administrative_distance,
"gateway": gateway,
"active": active,
"protocol_attributes": protocol_attributes,
}
# Is a network found?
if network:
# Yes
# Add the information to the dict
returned_output[network] = returned_dict
# Return data
return returned_output
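    # A minimal usage sketch (assumption: `device` is an instance of this class with an
    # already-established session, created elsewhere in the library):
    #
    #     import asyncio
    #
    #     async def dump_routes(device):
    #         routes = await device.get_routing_table()
    #         for network, info in routes.items():
    #             print(network, info["protocol"], info["gateway"], info["active"])
    #
    #     # asyncio.run(dump_routes(device))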
async def get_bridges(self):
"""
        Async method used to get the bridges from the device
:return: A dictionary with the bridge information
:rtype: dict of dict
"""
# Display info message
log.info("get_bridges")
# By default nothing is returned
returned_output = {}
# Send a command
output = await self.send_command(self.cmd_get_bridges)
# Display info message
log.info(f"get_bridges:\n'{output}'")
# Convert a string into a list of strings
lines = output.splitlines()
# Read each line
for line in lines:
# Initialize data with default values
index = None
name = ""
status = False
mac_address = None
spanning_tree = None
igmp_snooping = False
vlan_filtering = False
multicast_querier = False
# Get index
# Line has enough characters?
if len(line) > 1:
# Yes
                # Get the first 2 characters (up to 100 bridges should be ok)
index_string = line[:2]
# Convert characters into a integer
try:
index = int(index_string)
# Display info message
log.info(f"get_bridges: index: {index}")
except:
                    # Conversion failed
pass
# Get name
if " name=" in line:
name = line.split(" name=")[-1].split("=")[0].rsplit(" ", 1)[0]
# Display info message
log.info(f"get_bridges: name: {name}")
# Get status
line_words = line.split()
# Enough words?
if len(line_words) > 1:
# Running?
if line_words[1] == "R":
# Yes
# So the bridge is enabled
status = True
# Display info message
log.info(f"get_bridges: status: {status}")
# Get MAC ADDRESS
if " mac-address=" in line:
mac_address = (
line.split(" mac-address=")[-1].split("=")[0].rsplit(" ", 1)[0]
)
# Display info message
log.info(f"get_bridges: mac_address: {mac_address}")
# Get Spanning Tree mode
if " protocol-mode=" in line:
spanning_tree = (
line.split(" protocol-mode=")[-1].split("=")[0].rsplit(" ", 1)[0]
)
# Display info message
log.info(f"get_bridges: spanning_tree: {spanning_tree}")
# Get IGMP SNOOPING status
if " igmp-snooping=" in line:
# Value "yes" for IGMP SNOOPING?
if | |
#!/usr/bin/python
import os, sys
import json
import numpy as np
import re
import copy
"""
Name: <NAME>
ID: 16342613
Github:
I have included manual implementations of 5 different problems. The copy library was used to create a deep copy of the
input grid.
"""
### YOUR CODE HERE: write at least three functions which solve
### specific tasks by transforming the input x and returning the
### result. Name them according to the task ID as in the three
### examples below. Delete the three examples. The tasks you choose
### must be in the data/training directory, not data/evaluation.
# This is an exception that will allow us to break from nested loops
class BreakLoop(Exception): pass
def solve_0a938d79(inputGrid):
"""
- Transformation Process:
The input matrix has 2 coloured cells on an edge/s of the matrix. In the output matrix, these cells are extended
up to the opposite edge, forming a procedural pattern up to the end of the matrix.
- Analysis:
All training and testing grids were solved correctly.
"""
# Note: This was the first one that I did, and I can admit that this can be programmed more efficiently. I wanted
# to get back to this one and fix the inefficiency but I ran out of time. There is too much repetition in this
# function and that could be removed easily!
    # The grids are laid out using (y,x,colour) as the coordinates.
seeds = []
# Create a deep copy of the input grid
output = copy.deepcopy(inputGrid)
# Store the size of the input grid
gridXSize = len(inputGrid[0])
gridYSize = len(inputGrid)
coloursOnYAxis = True
# Loop through the edges of the grid and find the 'seeds' which are the coloured cells
try:
for xIndex in range(gridXSize):
# Represents the opposite edges on the x axis
possibleColourIndexes = [0, gridYSize - 1]
# Find the seeds on each edge (if they exist on both edges)
for yIndex in range(len(possibleColourIndexes)):
colourValue = inputGrid[yIndex][xIndex]
# If we find a seed
if colourValue != 0:
seeds.append([0, xIndex, colourValue])
# Find the remaining seed
for yIndex2 in possibleColourIndexes:
for xIndex2 in range(gridXSize):
colourValue = inputGrid[yIndex2][xIndex2]
if colourValue != 0 and xIndex2 != xIndex:
seeds.append([gridYSize - 1, xIndex2, colourValue])
# Once we find both seeds, specify which axis the coloured bars will be (perpendicular
# to the x axis here), and break from the nested loop
coloursOnYAxis = True
raise BreakLoop
for yIndex in range(gridYSize):
# Represents the opposite edges on the y axis
possibleColourIndexes = [0, gridXSize - 1]
# Find the seeds on each edge (if they exist on both edges)
colourValue = inputGrid[yIndex][0]
for xIndex in possibleColourIndexes:
# If we find a seed
if colourValue != 0:
seeds.append([yIndex, xIndex, colourValue])
# Find the remaining seed
for xIndex2 in possibleColourIndexes:
for yIndex2 in range(gridYSize):
colourValue = inputGrid[yIndex2][xIndex2]
if colourValue != 0 and yIndex2 != yIndex:
seeds.append([yIndex2, gridYSize - 1, colourValue])
# Once we find both seeds, specify which axis the coloured bars will be (perpendicular
# to the y axis here), and break from the nested loop
coloursOnYAxis = False
raise BreakLoop
except BreakLoop:
pass
# Find out how far to extend the lines
gridConstraint = gridXSize
if coloursOnYAxis is False:
seeds[0] = [seeds[0][1], seeds[0][0], seeds[0][2]]
seeds[1] = [seeds[1][1], seeds[1][0], seeds[1][2]]
gridConstraint = gridYSize
# The gap between the lines
barGap = seeds[0][1] - seeds[1][1]
# The details of the current line we are painting
currentPaintAxisValueDetails = [0, 0, 0]
# Find out which line to draw first
if barGap < 0:
currentPaintAxisValueDetails = [seeds[0][1], seeds[0][2], seeds[1][2]]
elif barGap > 0:
currentPaintAxisValueDetails = [seeds[1][1], seeds[1][2], seeds[0][2]]
# A count of how many lines we have drawn so far
count = 0
while currentPaintAxisValueDetails[0] < gridConstraint:
if coloursOnYAxis is True:
# Draw the line for the first colour
if count % 2 == 0:
output[:, currentPaintAxisValueDetails[0]] = currentPaintAxisValueDetails[1]
# Draw the line for the second colour
else:
output[:, currentPaintAxisValueDetails[0]] = currentPaintAxisValueDetails[2]
else:
# Draw the line for the first colour
if count % 2 == 0:
output[currentPaintAxisValueDetails[0], :] = currentPaintAxisValueDetails[1]
else:
# Draw the line for the second colour
output[currentPaintAxisValueDetails[0], :] = currentPaintAxisValueDetails[2]
# Set up the 'painter' to draw the next line, taking the gap between the lines into account
currentPaintAxisValueDetails[0] += abs(barGap)
count += 1
return output
def solve_5c0a986e(inputGrid):
"""
- Transformation Process:
The input grid has a 2x2 red square and a 2x2 blue square. A bottom-right facing diagonal line trails off from
        the red square, while a top-left facing diagonal line trails off from the blue square. Adding these trails
gives us the output matrix.
- Analysis:
All training and testing grids were solved correctly.
"""
# Create a deep copy of the input grid
output = copy.deepcopy(inputGrid)
# Store the size of the input grid
gridXSize = len(inputGrid[0])
gridYSize = len(inputGrid)
# The important corner of the blue and red squares, and the colours of the diagonal lines which emanate from them
blueSeed = []
redSeed = []
colours = [1, 2]
try:
# Loop through the grid
for yIndex in range(gridYSize):
for xIndex in range(gridXSize):
# Finding the top left blue block in the 2x2 blue square
if inputGrid[yIndex][xIndex] == 1 and blueSeed == []:
blueSeed = [yIndex, xIndex]
                # Finding the bottom right red block in the 2x2 red square
if inputGrid[yIndex][xIndex] == 2 and redSeed == []:
redSeed = [yIndex + 1, xIndex + 1]
# If we have found both the red and blue squares, we can stop our search
if redSeed != [] and blueSeed != []:
raise BreakLoop
except BreakLoop:
pass
nextIndexes = [[point - 1 for point in blueSeed], [point + 1 for point in redSeed]]
# Repeat this for both squares
for i in range(2):
# Repeat until the diagonal line hits an edge
while (nextIndexes[i][0] < gridYSize) and (nextIndexes[i][0] >= 0) and \
(nextIndexes[i][1] < gridXSize) and (nextIndexes[i][1] >= 0):
output[nextIndexes[i][0]][nextIndexes[i][1]] = colours[i]
if i == 0:
nextIndexes[0] = [point - 1 for point in nextIndexes[0]]
if i == 1:
nextIndexes[1] = [point + 1 for point in nextIndexes[1]]
return output
def solve_363442ee(inputGrid):
"""
- Transformation Process:
        The input grid is separated by a grey line. The top 3x3 square to the left of the grey line has a specific
        pattern of colours, and we store this pattern. To the right of the grey line, there are a number of blue cells.
Each blue cell represents a centre point on which the pattern should be applied, therefore we paste the pattern
onto the blue cells such that a blue cell is in the middle of the 3x3 pattern. This gives us the output matrix.
- Analysis:
All training and testing grids were solved correctly.
"""
# Create a deep copy of the input grid
output = copy.deepcopy(inputGrid)
# Store the pattern and the input grid size
pattern = inputGrid[0:3, 0:3]
gridXSize = len(inputGrid[0])
gridYSize = len(inputGrid)
# Locate the grey line which separates the input grid
seperatorXIndex = 0
for xIndex in range(gridXSize):
if inputGrid[0][xIndex] == 5:
seperatorXIndex = xIndex
break
# Only keep the right hand side of the grey line
gridXSize = gridXSize - seperatorXIndex
# Paste the pattern on top of the blue cells, centred on the blue cells
for yIndex in range(gridYSize):
for xIndex in range(gridXSize):
if inputGrid[yIndex][xIndex + seperatorXIndex] == 1:
output[(yIndex - 1):(yIndex + 2), (xIndex + seperatorXIndex- 1):(xIndex + seperatorXIndex + 2)] = pattern
return output
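# A minimal sketch of how one of these solvers could be checked against an ARC task file.
# Assumptions: the JSON follows the public ARC format with "train"/"test" lists of
# {"input", "output"} grids, and the task files live under data/training as noted above.
def check_arc_task(task_path, solver):
    with open(task_path) as f:
        task = json.load(f)
    # Every demonstration and test pair must be reproduced exactly by the solver
    for pair in task["train"] + task["test"]:
        got = solver(np.array(pair["input"]))
        assert np.array_equal(got, np.array(pair["output"]))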
def solve_868de0fa(inputGrid):
"""
- Transformation Process:
In the input grid, there are a set of 'hollow' squares. In the output, these same 'hollow' squares are filled
with either a red or orange colour. If the number of cells in one side of the square is even, then the square
        is filled with a red colour. If the number of cells in one side of the square is odd, then the square is filled
with an orange | |
function at a given address with a given type.
:param int addr: Address to get tags at
:param TagType tag_type: TagType object to match in searching
:param Architecture arch: Architecture for the block in which the Tags are located (optional)
:return: A list of data Tags
:rtype: list(Tag)
"""
if arch is None:
assert self.arch is not None, "Can't get_user_address_tags_of_type for function with no architecture specified"
arch = self.arch
count = ctypes.c_ulonglong()
tags = core.BNGetUserAddressTagsOfType(self.handle, arch.handle, addr, tag_type.handle, count)
assert tags is not None, "core.BNGetUserAddressTagsOfType returned None"
try:
result = []
for i in range(0, count.value):
tag_ref = core.BNNewTagReference(tags[i])
assert tag_ref is not None, "core.BNNewTagReference returned None"
result.append(binaryview.Tag(tag_ref))
return result
finally:
core.BNFreeTagList(tags, count.value)
def get_address_tags_in_range(
self, address_range: 'variable.AddressRange', arch: 'architecture.Architecture' = None
) -> List[Tuple['architecture.Architecture', int, 'binaryview.Tag']]:
"""
		``get_address_tags_in_range`` gets a list of all Tags in the function within a given address range.
Range is inclusive at the start, exclusive at the end.
:param AddressRange address_range: Address range from which to get tags
:param Architecture arch: Architecture for the block in which the Tag is located (optional)
:return: A list of (arch, address, Tag) tuples
:rtype: list((Architecture, int, Tag))
"""
if arch is None:
assert self.arch is not None, "Can't call get_address_tags_in_range for function with no architecture specified"
arch = self.arch
count = ctypes.c_ulonglong()
refs = core.BNGetAddressTagsInRange(self.handle, arch.handle, address_range.start, address_range.end, count)
assert refs is not None, "core.BNGetAddressTagsInRange returned None"
try:
result = []
for i in range(0, count.value):
tag_ref = core.BNNewTagReference(refs[i].tag)
assert tag_ref is not None, "core.BNNewTagReference returned None"
tag = binaryview.Tag(tag_ref)
result.append((arch, refs[i].addr, tag))
return result
finally:
core.BNFreeTagReferences(refs, count.value)
def get_auto_address_tags_in_range(
self, address_range: 'variable.AddressRange', arch: 'architecture.Architecture' = None
) -> List[Tuple['architecture.Architecture', int, 'binaryview.Tag']]:
"""
		``get_auto_address_tags_in_range`` gets a list of all auto-defined Tags in the function within a given address range.
Range is inclusive at the start, exclusive at the end.
:param AddressRange address_range: Address range from which to get tags
:param Architecture arch: Architecture for the block in which the Tag is located (optional)
:return: A list of (arch, address, Tag) tuples
:rtype: list((Architecture, int, Tag))
"""
if arch is None:
assert self.arch is not None, "Can't call get_auto_address_tags_in_range for function with no architecture specified"
arch = self.arch
count = ctypes.c_ulonglong()
refs = core.BNGetAutoAddressTagsInRange(self.handle, arch.handle, address_range.start, address_range.end, count)
assert refs is not None, "core.BNGetAutoAddressTagsInRange returned None"
try:
result = []
for i in range(0, count.value):
tag_ref = core.BNNewTagReference(refs[i].tag)
assert tag_ref is not None, "core.BNNewTagReference returned None"
tag = binaryview.Tag(tag_ref)
result.append((arch, refs[i].addr, tag))
return result
finally:
core.BNFreeTagReferences(refs, count.value)
def get_user_address_tags_in_range(
self, address_range: 'variable.AddressRange', arch: 'architecture.Architecture' = None
) -> List[Tuple['architecture.Architecture', int, 'binaryview.Tag']]:
"""
		``get_user_address_tags_in_range`` gets a list of all user Tags in the function within a given address range.
Range is inclusive at the start, exclusive at the end.
:param AddressRange address_range: Address range from which to get tags
:param Architecture arch: Architecture for the block in which the Tag is located (optional)
:return: A list of (arch, address, Tag) tuples
:rtype: list((Architecture, int, Tag))
"""
if arch is None:
assert self.arch is not None, "Can't call get_user_address_tags_in_range for function with no architecture specified"
arch = self.arch
count = ctypes.c_ulonglong()
refs = core.BNGetUserAddressTagsInRange(self.handle, arch.handle, address_range.start, address_range.end, count)
assert refs is not None, "core.BNGetUserAddressTagsInRange returned None"
try:
result = []
for i in range(0, count.value):
tag_ref = core.BNNewTagReference(refs[i].tag)
assert tag_ref is not None, "core.BNNewTagReference returned None"
tag = binaryview.Tag(tag_ref)
result.append((arch, refs[i].addr, tag))
return result
finally:
core.BNFreeTagReferences(refs, count.value)
def get_stack_contents_at(
self, addr: int, offset: int, size: int, arch: Optional['architecture.Architecture'] = None
) -> 'variable.RegisterValue':
"""
``get_stack_contents_at`` returns the RegisterValue for the item on the stack in the current function at the
given virtual address ``addr``, stack offset ``offset`` and size of ``size``. Optionally specifying the architecture.
:param int addr: virtual address of the instruction to query
:param int offset: stack offset base of stack
:param int size: size of memory to query
:param Architecture arch: (optional) Architecture for the given function
:rtype: variable.RegisterValue
.. note:: Stack base is zero on entry into the function unless the architecture places the return address on the \
stack as in (x86/x86_64) where the stack base will start at address_size
:Example:
>>> current_function.get_stack_contents_at(0x400fad, -16, 4)
<range: 0x8 to 0xffffffff>
"""
if arch is None:
arch = self.arch
value = core.BNGetStackContentsAtInstruction(self.handle, arch.handle, addr, offset, size)
result = variable.RegisterValue.from_BNRegisterValue(value, arch)
return result
def get_stack_contents_after(
self, addr: int, offset: int, size: int, arch: Optional['architecture.Architecture'] = None
) -> 'variable.RegisterValue':
if arch is None:
arch = self.arch
value = core.BNGetStackContentsAfterInstruction(self.handle, arch.handle, addr, offset, size)
result = variable.RegisterValue.from_BNRegisterValue(value, arch)
return result
def get_parameter_at(
self, addr: int, func_type: Optional['types.Type'], i: int, arch: Optional['architecture.Architecture'] = None
) -> 'variable.RegisterValue':
if arch is None:
arch = self.arch
_func_type = None
if func_type is not None:
_func_type = func_type.handle
value = core.BNGetParameterValueAtInstruction(self.handle, arch.handle, addr, _func_type, i)
result = variable.RegisterValue.from_BNRegisterValue(value, arch)
return result
def remove_user_address_tags_of_type(self, addr, tag_type, arch=None):
"""
``remove_user_address_tags_of_type`` removes all tags at the given address of the given type.
Since this removes user tags, it will be added to the current undo buffer.
:param int addr: Address at which to remove the tag
:param Tag tag_type: TagType object to match for removing
:param Architecture arch: Architecture for the block in which the Tags is located (optional)
:rtype: None
"""
if arch is None:
arch = self.arch
core.BNRemoveUserAddressTagsOfType(self.handle, arch.handle, addr, tag_type.handle)
def get_parameter_at_low_level_il_instruction(
self, instr: 'lowlevelil.InstructionIndex', func_type: 'types.Type', i: int
) -> 'variable.RegisterValue':
_func_type = None
if func_type is not None:
_func_type = func_type.handle
value = core.BNGetParameterValueAtLowLevelILInstruction(self.handle, instr, _func_type, i)
result = variable.RegisterValue.from_BNRegisterValue(value, self.arch)
return result
def get_regs_read_by(self, addr: int,
arch: Optional['architecture.Architecture'] = None) -> List['architecture.RegisterName']:
if arch is None:
arch = self.arch
count = ctypes.c_ulonglong()
regs = core.BNGetRegistersReadByInstruction(self.handle, arch.handle, addr, count)
assert regs is not None, "core.BNGetRegistersReadByInstruction returned None"
result = []
for i in range(0, count.value):
result.append(arch.get_reg_name(regs[i]))
core.BNFreeRegisterList(regs)
return result
def get_regs_written_by(self, addr: int,
arch: Optional['architecture.Architecture'] = None) -> List['architecture.RegisterName']:
if arch is None:
arch = self.arch
count = ctypes.c_ulonglong()
regs = core.BNGetRegistersWrittenByInstruction(self.handle, arch.handle, addr, count)
assert regs is not None, "core.BNGetRegistersWrittenByInstruction returned None"
result = []
for i in range(0, count.value):
result.append(arch.get_reg_name(regs[i]))
core.BNFreeRegisterList(regs)
return result
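	# A small usage sketch (assumption: run in the Binary Ninja scripting console, where
	# `current_function` and `here` are provided by the UI; the register names shown are
	# illustrative, not guaranteed output):
	#
	#     current_function.get_regs_read_by(here)      # e.g. ['rsp', 'rbp']
	#     current_function.get_regs_written_by(here)   # e.g. ['rsp']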
def remove_auto_address_tag(
self, addr: int, tag: 'binaryview.TagType', arch: Optional['architecture.Architecture'] = None
) -> None:
"""
``remove_auto_address_tag`` removes a Tag object at a given address.
		:param int addr: Address at which to remove the tag
		:param Tag tag: Tag object to be removed
		:param Architecture arch: Architecture for the block in which the Tag is removed (optional)
:rtype: None
"""
if arch is None:
arch = self.arch
core.BNRemoveAutoAddressTag(self.handle, arch.handle, addr, tag.handle)
def remove_auto_address_tags_of_type(self, addr, tag_type, arch=None):
"""
``remove_auto_address_tags_of_type`` removes all tags at the given address of the given type.
:param int addr: Address at which to remove the tags
:param Tag tag_type: TagType object to match for removing
:param Architecture arch: Architecture for the block in which the Tags is located (optional)
:rtype: None
"""
if arch is None:
arch = self.arch
core.BNRemoveAutoAddressTagsOfType(self.handle, arch.handle, addr, tag_type.handle)
def get_stack_vars_referenced_by(
self, addr: int, arch: Optional['architecture.Architecture'] = None
) -> List['variable.StackVariableReference']:
if arch is None:
arch = self.arch
count = ctypes.c_ulonglong()
refs = core.BNGetStackVariablesReferencedByInstruction(self.handle, arch.handle, addr, count)
assert refs is not None, "core.BNGetStackVariablesReferencedByInstruction returned None"
result = []
for i in range(0, count.value):
var_type = types.Type.create(
core.BNNewTypeReference(refs[i].type), platform=self.platform, confidence=refs[i].typeConfidence
)
var = variable.Variable.from_identifier(self, refs[i].varIdentifier)
result.append(
variable.StackVariableReference(
refs[i].sourceOperand, var_type, refs[i].name, var, refs[i].referencedOffset, refs[i].size
)
)
core.BNFreeStackVariableReferenceList(refs, count.value)
return result
def get_stack_vars_referenced_by_address_if_available(
self, addr: int, arch: Optional['architecture.Architecture'] = None
) -> List['variable.StackVariableReference']:
if arch is None:
arch = self.arch
count = ctypes.c_ulonglong()
refs = core.BNGetStackVariablesReferencedByInstructionIfAvailable(self.handle, arch.handle, addr, count)
assert refs is not None, "core.BNGetStackVariablesReferencedByInstructionIfAvailable returned None"
result = []
for i in range(0, count.value):
var_type = types.Type.create(
core.BNNewTypeReference(refs[i].type), platform=self.platform, confidence=refs[i].typeConfidence
)
var = variable.Variable.from_identifier(self, refs[i].varIdentifier)
result.append(
variable.StackVariableReference(
refs[i].sourceOperand, var_type, refs[i].name, var, refs[i].referencedOffset, refs[i].size
)
)
core.BNFreeStackVariableReferenceList(refs, count.value)
return result
@property
def auto_function_tags(self):
"""
``auto_function_tags`` gets a list of all auto-defined function Tags for the function.
:rtype: list(Tag)
"""
count = ctypes.c_ulonglong()
tags = core.BNGetAutoFunctionTags(self.handle, count)
assert tags is not None, "core.BNGetAutoFunctionTags returned None"
result = []
for i in range(0, count.value):
tag_ref = core.BNNewTagReference(tags[i])
assert tag_ref is not None, "core.BNNewTagReference returned None"
result.append(binaryview.Tag(tag_ref))
core.BNFreeTagList(tags, count.value)
return result
@property
def user_function_tags(self):
"""
``user_function_tags`` gets a list of all user function Tags for the function.
:rtype: list(Tag)
"""
count = ctypes.c_ulonglong()
tags = core.BNGetUserFunctionTags(self.handle, count)
assert tags is not None, "core.BNGetUserFunctionTags returned None"
result = []
for i in range(0, count.value):
tag_ref = core.BNNewTagReference(tags[i])
assert tag_ref is not None, "core.BNNewTagReference returned None"
result.append(binaryview.Tag(tag_ref))
core.BNFreeTagList(tags, count.value)
return result
def get_lifted_il_at(
self, addr: int, arch: Optional['architecture.Architecture'] = None
) -> Optional['lowlevelil.LowLevelILInstruction']:
if arch is None:
arch = self.arch
idx = core.BNGetLiftedILForInstruction(self.handle, arch.handle, addr)
if idx == len(self.lifted_il):
return None
return self.lifted_il[idx]
def get_lifted_ils_at(
self, addr: int, arch: Optional['architecture.Architecture'] = None
) -> List['lowlevelil.LowLevelILInstruction']:
"""
``get_lifted_ils_at`` gets the Lifted IL Instruction(s) corresponding to the given virtual address
:param int addr: virtual address of the function to be queried
:param Architecture arch: (optional) Architecture for the given function
:rtype: list(LowLevelILInstruction)
:Example:
>>> func = next(bv.functions)
>>> func.get_lifted_ils_at(func.start)
[<il: push(rbp)>]
"""
if arch is None:
arch = self.arch
count = ctypes.c_ulonglong()
instrs = core.BNGetLiftedILInstructionsForAddress(self.handle, arch.handle, addr, count)
assert instrs is not None, "core.BNGetLiftedILInstructionsForAddress returned None"
result = []
for i in range(0, count.value):
result.append(self.lifted_il[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def get_function_tags_of_type(self, tag_type):
"""
``get_function_tags_of_type`` gets a list of all function Tags with a given type.
:param TagType tag_type: TagType object to match in searching
:return: A list of data Tags
:rtype: list(Tag)
"""
count = ctypes.c_ulonglong()
tags = core.BNGetFunctionTagsOfType(self.handle, tag_type.handle, count)
assert tags is not None, "core.BNGetFunctionTagsOfType returned None"
result = []
for i in range(0, count.value):
tag_ref = core.BNNewTagReference(tags[i])
assert tag_ref is not None, "core.BNNewTagReference returned None"
result.append(binaryview.Tag(tag_ref))
core.BNFreeTagList(tags, count.value)
return result
def get_auto_function_tags_of_type(self, tag_type):
"""
``get_auto_function_tags_of_type`` gets a list of all auto-defined function Tags with a given type.
:param TagType | |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2022 The NiPreps Developers <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""A lightweight NiPype MultiProc execution plugin."""
# Import packages
import os
import sys
from copy import deepcopy
from time import sleep, time
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
from traceback import format_exception
import gc
# Run node
def run_node(node, updatehash, taskid):
"""
Execute node.run(), catch and log any errors and get a result.
Parameters
----------
node : nipype Node instance
the node to run
updatehash : boolean
flag for updating hash
taskid : int
an identifier for this task
Returns
-------
result : dictionary
dictionary containing the node runtime results and stats
"""
# Init variables
result = dict(result=None, traceback=None, taskid=taskid)
# Try and execute the node via node.run()
try:
result["result"] = node.run(updatehash=updatehash)
    except: # noqa: E722, intentionally catch all here
result["traceback"] = format_exception(*sys.exc_info())
result["result"] = node.result
# Return the result dictionary
return result
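# A minimal sketch of dispatching run_node through the ProcessPoolExecutor imported above
# (assumption: `node` is a configured nipype Node; this snippet is not part of the original file):
#
#     pool = ProcessPoolExecutor(max_workers=2)
#     future = pool.submit(run_node, node, updatehash=False, taskid=0)
#     outcome = future.result()   # dict with "result", "traceback" and "taskid" keys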
class PluginBase:
"""Base class for plugins."""
def __init__(self, plugin_args=None):
"""Initialize plugin."""
if plugin_args is None:
plugin_args = {}
self.plugin_args = plugin_args
self._config = None
self._status_callback = plugin_args.get("status_callback")
def run(self, graph, config, updatehash=False):
"""
Instruct the plugin to execute the workflow graph.
The core plugin member that should be implemented by
all plugins.
Parameters
----------
graph :
a networkx, flattened :abbr:`DAG (Directed Acyclic Graph)`
to be executed
config : :obj:`~nipype.config`
a nipype.config object
updatehash : :obj:`bool`
whether cached nodes with stale hash should be just updated.
"""
raise NotImplementedError
class DistributedPluginBase(PluginBase):
"""
Execute workflow with a distribution engine.
Combinations of ``proc_done`` and ``proc_pending``:
+------------+---------------+--------------------------------+
| proc_done | proc_pending | outcome |
+============+===============+================================+
| True | False | Process is finished |
+------------+---------------+--------------------------------+
| True | True | Process is currently being run |
+------------+---------------+--------------------------------+
| False | False | Process is queued |
+------------+---------------+--------------------------------+
| False | True | INVALID COMBINATION |
+------------+---------------+--------------------------------+
Attributes
----------
procs : :obj:`list`
list (N) of underlying interface elements to be processed
proc_done : :obj:`numpy.ndarray`
a boolean numpy array (N,) signifying whether a process has been
submitted for execution
proc_pending : :obj:`numpy.ndarray`
a boolean numpy array (N,) signifying whether a
process is currently running.
depidx : :obj:`numpy.matrix`
        a boolean matrix (NxN) storing the dependency structure across
processes. Process dependencies are derived from each column.
"""
def __init__(self, plugin_args=None):
"""Initialize runtime attributes to none."""
super(DistributedPluginBase, self).__init__(plugin_args=plugin_args)
self.procs = None
self.depidx = None
self.refidx = None
self.mapnodes = None
self.mapnodesubids = None
self.proc_done = None
self.proc_pending = None
self.pending_tasks = []
self.max_jobs = self.plugin_args.get("max_jobs", None)
def _prerun_check(self, graph):
"""Stub method to validate/massage graph and nodes before running."""
def _postrun_check(self):
"""Stub method to close any open resources."""
def run(self, graph, config, updatehash=False):
"""Execute a pre-defined pipeline using distributed approaches."""
import numpy as np
self._config = config
poll_sleep_secs = float(config["execution"]["poll_sleep_duration"])
self._prerun_check(graph)
# Generate appropriate structures for worker-manager model
self._generate_dependency_list(graph)
self.mapnodes = []
self.mapnodesubids = {}
# setup polling - TODO: change to threaded model
notrun = []
errors = []
while not np.all(self.proc_done) or np.any(self.proc_pending):
loop_start = time()
toappend = []
# trigger callbacks for any pending results
while self.pending_tasks:
taskid, jobid = self.pending_tasks.pop()
try:
result = self._get_result(taskid)
except Exception as exc:
notrun.append(self._clean_queue(jobid, graph))
errors.append(exc)
else:
if result:
if result["traceback"]:
notrun.append(
self._clean_queue(jobid, graph, result=result)
)
errors.append("".join(result["traceback"]))
else:
self._task_finished_cb(jobid)
self._remove_node_dirs()
self._clear_task(taskid)
else:
assert self.proc_done[jobid] and self.proc_pending[jobid]
toappend.insert(0, (taskid, jobid))
if toappend:
self.pending_tasks.extend(toappend)
num_jobs = len(self.pending_tasks)
if self.max_jobs is None or num_jobs < self.max_jobs:
self._send_procs_to_workers(updatehash=updatehash, graph=graph)
sleep_til = loop_start + poll_sleep_secs
sleep(max(0, sleep_til - time()))
self._remove_node_dirs()
# close any open resources
self._postrun_check()
if errors:
            # If one or more nodes failed, re-raise the first of them
error, cause = errors[0], None
if isinstance(error, str):
error = RuntimeError(error)
if len(errors) > 1:
error, cause = (
RuntimeError(f"{len(errors)} raised. Re-raising first."),
error,
)
raise error from cause
def _get_result(self, taskid):
raise NotImplementedError
def _submit_job(self, node, updatehash=False):
raise NotImplementedError
def _report_crash(self, node, result=None):
from nipype.pipeline.plugins.tools import report_crash
tb = None
if result is not None:
node._result = result["result"]
tb = result["traceback"]
node._traceback = tb
return report_crash(node, traceback=tb)
def _clear_task(self, taskid):
raise NotImplementedError
def _clean_queue(self, jobid, graph, result=None):
from mriqc import config
if self._status_callback:
self._status_callback(self.procs[jobid], "exception")
if result is None:
result = {
"result": None,
"traceback": "\n".join(format_exception(*sys.exc_info())),
}
crashfile = self._report_crash(self.procs[jobid], result=result)
if config.nipype.stop_on_first_crash:
raise RuntimeError("".join(result["traceback"]))
if jobid in self.mapnodesubids:
# remove current jobid
self.proc_pending[jobid] = False
self.proc_done[jobid] = True
# remove parent mapnode
jobid = self.mapnodesubids[jobid]
self.proc_pending[jobid] = False
self.proc_done[jobid] = True
# remove dependencies from queue
return self._remove_node_deps(jobid, crashfile, graph)
def _send_procs_to_workers(self, updatehash=False, graph=None):
"""Submit tasks to workers when system resources are available."""
def _submit_mapnode(self, jobid):
import numpy as np
import scipy.sparse as ssp
if jobid in self.mapnodes:
return True
self.mapnodes.append(jobid)
mapnodesubids = self.procs[jobid].get_subnodes()
numnodes = len(mapnodesubids)
for i in range(numnodes):
self.mapnodesubids[self.depidx.shape[0] + i] = jobid
self.procs.extend(mapnodesubids)
self.depidx = ssp.vstack(
(self.depidx, ssp.lil_matrix(np.zeros((numnodes, self.depidx.shape[1])))),
"lil",
)
self.depidx = ssp.hstack(
(self.depidx, ssp.lil_matrix(np.zeros((self.depidx.shape[0], numnodes)))),
"lil",
)
self.depidx[-numnodes:, jobid] = 1
self.proc_done = np.concatenate(
(self.proc_done, np.zeros(numnodes, dtype=bool))
)
self.proc_pending = np.concatenate(
(self.proc_pending, np.zeros(numnodes, dtype=bool))
)
return False
def _local_hash_check(self, jobid, graph):
from mriqc import config
if not config.nipype.local_hash_check:
return False
try:
cached, updated = self.procs[jobid].is_cached()
except Exception:
return False
overwrite = self.procs[jobid].overwrite
always_run = self.procs[jobid].interface.always_run
if (
cached
and updated
and (overwrite is False or overwrite is None and not always_run)
):
try:
self._task_finished_cb(jobid, cached=True)
self._remove_node_dirs()
except Exception:
self._clean_queue(jobid, graph)
self.proc_pending[jobid] = False
return True
return False
def _task_finished_cb(self, jobid, cached=False):
"""
Extract outputs and assign to inputs of dependent tasks.
This is called when a job is completed.
"""
if self._status_callback:
self._status_callback(self.procs[jobid], "end")
# Update job and worker queues
self.proc_pending[jobid] = False
# update the job dependency structure
rowview = self.depidx.getrowview(jobid)
rowview[rowview.nonzero()] = 0
if jobid not in self.mapnodesubids:
self.refidx[self.refidx[:, jobid].nonzero()[0], jobid] = 0
def _generate_dependency_list(self, graph):
"""Generate a dependency list for a list of graphs."""
import numpy as np
import networkx as nx
from nipype.pipeline.engine.utils import topological_sort
self.procs, _ = topological_sort(graph)
self.depidx = nx.to_scipy_sparse_matrix(
graph, nodelist=self.procs, format="lil"
)
self.refidx = self.depidx.astype(int)
self.proc_done = np.zeros(len(self.procs), dtype=bool)
self.proc_pending = np.zeros(len(self.procs), dtype=bool)
def _remove_node_deps(self, jobid, crashfile, graph):
import networkx as nx
try:
dfs_preorder = nx.dfs_preorder
except AttributeError:
dfs_preorder = nx.dfs_preorder_nodes
subnodes = [s for s in dfs_preorder(graph, self.procs[jobid])]
for node in subnodes:
idx = self.procs.index(node)
self.proc_done[idx] = True
self.proc_pending[idx] = False
return dict(node=self.procs[jobid], dependents=subnodes, crashfile=crashfile)
def _remove_node_dirs(self):
"""Remove directories whose outputs have already been used up."""
import numpy as np
from shutil import rmtree
from mriqc import config
if config.nipype.remove_node_directories:
indices = np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0]
for idx in indices:
if idx in self.mapnodesubids:
continue
if self.proc_done[idx] and (not self.proc_pending[idx]):
self.refidx[idx, idx] = -1
outdir = self.procs[idx].output_dir()
rmtree(outdir)
class MultiProcPlugin(DistributedPluginBase):
"""
    A lightweight re-implementation of NiPype's MultiProc plugin.
Execute workflow with multiprocessing, not sending more jobs at once
than the system can support.
The plugin_args input to run can be used to control the multiprocessing
execution and defining the maximum amount of memory and threads that
should be used. When those parameters are not specified,
the number of threads and memory of the system is used.
System consuming nodes should be tagged::
memory_consuming_node.mem_gb = 8
thread_consuming_node.n_procs = 16
The default number of threads and memory are set at node
creation, and are 1 and 0.25GB respectively.
Currently supported options are:
- non_daemon: boolean flag to execute as non-daemon processes
- n_procs: maximum number of threads to be executed in parallel
- memory_gb: maximum | |
#TODO: Consider adding more checks whether entities exist.
class Geometry:
'''
Instances of GeoData can hold geometric data and be passed to
GmshMesher in pycalfem_Mesh to mesh the geometry.
'''
def __init__(self):
self.points = {} #dict of [[x, y, z], elSize, marker]
self.curves = {} #dict of [curvTypestring, [p1, p2, ... pn], marker, elementsOnCurve, distributionString, distributionVal]
self.surfaces = {} #dict of [SurfaceTypeString, [c1, c2 ... cn], [[c1, c2 ... cm], ... [c1, ... ck]], ID, marker, isStructured]. c means curve-ID.
self.volumes = {} #dict of [[s1, s2 ..], [[s1,s2...],[s1,s2..],..], ID, marker, isStructured]
self.is3D = False #This is automatically set to True if a 3D point is added.
self._pointIDspecified = False
self._nextPointID = 0
self._curveIDspecified = False
self._nextcurveID = 0
self._surfaceIDspecified = False
self._nextsurfaceID = 0
self._volumeIDspecified = False
self._nextvolumeID = 0
def removePoint(self, ID):
'''Removes the point with this ID'''
self.points.pop(ID)
def removeCurve(self, ID):
'''Removes the curve with this ID'''
        self.curves.pop(ID)
def removeSurface(self, ID):
'''Removes the surface with this ID'''
self.surfaces.pop(ID)
def removeVolume(self, ID):
'''Removes the volume with this ID'''
self.volumes.pop(ID)
def getPointCoords(self, IDs=None):
'''
Returns an N-by-3 list of point coordinates if the parameter is
a list of IDs. If the parameter is just a single integer then
a single coordinate (simple 3-element list) is returned.
If the parameter is undefined (or None) all point coords will be returned
'''
        if IDs is None:
return [p[0] for p in self.points.values()]
try:
pointCoords = [self.points[pID][0] for pID in IDs]
except TypeError: #IDs was not iterable. Probably just a single ID.
pointCoords = self.points[IDs][0]
return pointCoords
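    # For example (sketch, assuming points 0 and 1 were added at [0,0] and [1,0]):
    #   getPointCoords([0, 1]) -> [[0, 0, 0], [1, 0, 0]]
    #   getPointCoords(0)      -> [0, 0, 0]
    #   getPointCoords()       -> coordinates of every point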
def pointsOnCurves(self, IDs):
'''
Returns a list of all geometric points (not nodes) on the curves
specified in IDs. IDs may be an integer or a list of integers.
'''
return self._subentitiesOnEntities(IDs, self.curves, 1)
def stuffOnSurfaces(self, IDs):
'''
Returns lists of all geometric points and curves on the surfaces
specified in IDs. IDs may be an integer or a list of integers
'''
curveSet = self._subentitiesOnEntities(IDs, self.surfaces, 1) #Curves on the outer edges
curveSet.update( self._subentityHolesOnEntities(IDs, self.surfaces, 2) ) #Curves on the holes
pointList = self.pointsOnCurves(curveSet) #Points on the curves of these surfaces.
return pointList, list(curveSet)
def stuffOnVolumes(self, IDs):
'''
Returns lists of all geometric points, curves, and surfaces on the volumes
specified in IDs. IDs may be an integer or a list of integers
'''
        surfaceSet = self._subentitiesOnEntities(IDs, self.volumes, 0) #Surfaces on the outer boundary
        surfaceSet.update( self._subentityHolesOnEntities(IDs, self.volumes, 1) ) #Surfaces on the holes
pointList, curveList = self.stuffOnSurfaces(surfaceSet)
return pointList, curveList, list(surfaceSet)
def _subentitiesOnEntities(self, IDs, entityDict, index):
'''
Duplicate code. Gets the IDs of the subentities that
make up an entity, i.e. the points that define a curve or
the curves that define a surface. Note that only the outer
subentities of surfaces and volumes can be extracted with
this function. For holes use _subentityHolesOnEntities().
'''
theSet = set()
try:
for ID in IDs:
theSet.update(entityDict[ID][index])
except TypeError: #IDs is not iterable, so it is probably a single ID
theSet.update(entityDict[IDs][index])
return theSet
def _subentityHolesOnEntities(self, IDs, entityDict, index):
'''Duplicate code. Does the same thing as _subentitiesOnEntities(), but for holes'''
theSet = set()
try:
for ID in IDs:
for hole in entityDict[ID][index]:
theSet.update(hole)
except TypeError: #IDs is not iterable, so it is probably a single ID
for hole in entityDict[IDs][index]:
theSet.update(hole)
return theSet
def addPoint(self, coord, ID=None, marker=0, elSize=1):
'''
Adds a point.
Parameters:
coord - [x, y] or [x, y, z].
List, not array.
ID - Positive integer ID of this point. If left unspecified the
point will be assigned the smallest unused point-ID.
It is recommended to specify all point-IDs or none.
marker - Marker applied to this point. Default 0.
It is not a good idea to apply non-zero markers to points
that are control points on B-splines or center points on
circles/ellipses, since this can lead to "loose" nodes
that are not part of any elements.
elSize - The size of elements at this point. Default 1. Use to make
a mesh denser or sparser here. Only affects unstructured
meshes
'''
if len(coord)==3: #A 3D point is inserted.
self.is3D = True
else: #The point is in 2D (we assume)
coord = coord+[0] #Pad with a 0. (we store points as 3D points for consistency's sake)
        if ID is None: #ID is not specified. Get an ID for this point:
ID = self._getNewPointID()
else:
self._pointIDspecified = True
self.points[ID] = [coord, elSize, marker]
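    # A minimal usage sketch (hypothetical coordinates, IDs assigned automatically):
    #   g = Geometry()
    #   g.addPoint([0, 0])               # 2D point, padded to [0, 0, 0]
    #   g.addPoint([1, 0], elSize=0.5)   # denser mesh around this point
    #   g.addPoint([1, 1], marker=10)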
def addSpline(self, points, ID=None, marker=0, elOnCurve=None, elDistribType=None, elDistribVal=None):
'''
Adds a Spline curve
points - List of indices of control points that make a Spline
[p1, p2, ... , pn]
ID - Positive integer ID of this curve. If left unspecified the
curve will be assigned the smallest unused curve-ID.
It is recommended to specify all curve-IDs or none.
marker - Integer. Marker applied to this curve. Default 0.
elOnCurv - Positive integer. Elements on curve.
The number of element edges that will be distributed
along this curve. Only works for structured meshes.
elDistribType -
String. Either "bump" or "progression".
Determines how the density of elements vary along the curve
for structured meshes. Only works for structured meshes.
                      elOnCurv and elDistribVal must be defined if this param
is used.
elDistribVal -
Float. Determines how severe the element distribution is.
Only works for structured meshes. elOnCurv and
                      elDistribType must be defined if this param is used.
bump:
Smaller value means elements are bunched up at the edges
of the curve, larger means bunched in the middle.
progression:
The edge of each element along this curve (from starting
point to end) will be larger than the preceding one by
this factor.
elDistribVal = 2 meaning for example that each line element
in the series will be twice as long as the preceding one.
elDistribVal < 1 makes each element smaller than the
                      preceding one.
'''
self._addCurve("Spline", points, ID, marker, elOnCurve, elDistribType, elDistribVal)
def addBSpline(self, points, ID=None, marker=0, elOnCurve=None, elDistribType=None, elDistribVal=None):
'''
Adds a B-Spline curve
points - List of indices of control points that make a B-spline
[p1, p2, ... , pn]
ID - Positive integer ID of this curve. If left unspecified the
curve will be assigned the smallest unused curve-ID.
It is recommended to specify all curve-IDs or none.
marker - Integer. Marker applied to this curve. Default 0.
elOnCurv - Positive integer. Elements on curve.
The number of element edges that will be distributed
along this curve. Only works for structured meshes.
elDistribType -
String. Either "bump" or "progression".
Determines how the density of elements vary along the curve
for structured meshes. Only works for structured meshes.
                      elOnCurv and elDistribVal must be defined if this param
is used.
elDistribVal -
Float. Determines how severe the element distribution is.
Only works for structured meshes. elOnCurv and
                      elDistribType must be defined if this param is used.
bump:
Smaller value means elements are bunched up at the edges
of the curve, larger means bunched in the middle.
progression:
The edge of each element along this curve (from starting
point to end) will be larger than the preceding one by
this factor.
elDistribVal = 2 meaning for example that each line element
in the series will be twice as long as the preceding one.
elDistribVal < 1 makes each element smaller than the
                      preceding one.
'''
self._addCurve("BSpline", points, ID, marker, elOnCurve, elDistribType, elDistribVal)
def addCircle(self, points, ID=None, marker=0, elOnCurve=None, elDistribType=None, elDistribVal=None):
'''
Adds a Circle arc curve.
points - list of 3 indices of point that make a circle | |
a string value for input uint64_t
sh_funcs.append("string string_convert_helper(const uint64_t toString, const string prefix)\n{")
sh_funcs.append(" using namespace StreamControl;")
sh_funcs.append(" stringstream ss;")
sh_funcs.append(' ss << toString;')
sh_funcs.append(' string final_str = prefix + ss.str();')
sh_funcs.append(" return final_str;")
sh_funcs.append("}")
sh_funcs.append('%s' % lineinfo.get())
# Add function to return a string value for input VkSurfaceFormatKHR*
sh_funcs.append("string string_convert_helper(VkSurfaceFormatKHR toString, const string prefix)\n{")
        sh_funcs.append(' string final_str = prefix + "format = " + string_VkFormat(toString.format) + ", colorSpace = " + string_VkColorSpaceKHR(toString.colorSpace);')
sh_funcs.append(" return final_str;")
sh_funcs.append("}")
sh_funcs.append('%s' % lineinfo.get())
# Add function to dynamically print out unknown struct
sh_funcs.append("string dynamic_display(const void* pStruct, const string prefix)\n{")
sh_funcs.append(" // Cast to APP_INFO ptr initially just to pull sType off struct")
sh_funcs.append(" if (pStruct == NULL) {\n")
sh_funcs.append(" return string();")
sh_funcs.append(" }\n")
sh_funcs.append(" VkStructureType sType = ((VkApplicationInfo*)pStruct)->sType;")
sh_funcs.append(' string indent = " ";')
sh_funcs.append(' indent += prefix;')
sh_funcs.append(" switch (sType)\n {")
for e in enum_type_dict:
if "StructureType" in e:
for v in sorted(enum_type_dict[e]):
struct_name = get_struct_name_from_struct_type(v)
if struct_name not in self.struct_dict:
continue
print_func_name = self._get_sh_func_name(struct_name)
#sh_funcs.append('string %s(const %s* pStruct, const string prefix);' % (self._get_sh_func_name(s), typedef_fwd_dict[s]))
sh_funcs.append(' case %s:\n {' % (v))
sh_funcs.append(' return %s((%s*)pStruct, indent);' % (print_func_name, struct_name))
sh_funcs.append(' }')
sh_funcs.append(' break;')
sh_funcs.append(" default:")
sh_funcs.append(" return string();")
sh_funcs.append('%s' % lineinfo.get())
sh_funcs.append(" }")
sh_funcs.append("}")
return "\n".join(sh_funcs)
def _genStructMemberPrint(self, member, s, array, struct_array):
(p_out, p_arg) = self._get_struct_print_formatted(self.struct_dict[s][member], pre_var_name="&m_dummy_prefix", struct_var_name="m_struct", struct_ptr=False, print_array=True)
extra_indent = ""
if array:
extra_indent = " "
if is_type(self.struct_dict[s][member]['type'], 'struct'): # print struct address for now
struct_array.insert(0, self.struct_dict[s][member])
elif self.struct_dict[s][member]['ptr']:
# Special case for void* named "pNext"
if "void" in self.struct_dict[s][member]['type'] and "pNext" == self.struct_dict[s][member]['name']:
struct_array.insert(0, self.struct_dict[s][member])
return (' %sprintf("%%*s %s", m_indent, ""%s);' % (extra_indent, p_out, p_arg), struct_array)
def _generateDisplayDefinitions(self, s):
disp_def = []
struct_array = []
# Single-line struct print function
disp_def.append("// Output 'structname = struct_address' on a single line")
disp_def.append("void %s::display_single_txt()\n{" % self.get_class_name(s))
disp_def.append(' printf(" %%*s%s = %%p", m_indent, "", (void*)m_origStructAddr);' % typedef_fwd_dict[s])
disp_def.append("}\n")
# Private helper function to print struct members
disp_def.append("// Private helper function that displays the members of the wrapped struct")
disp_def.append("void %s::display_struct_members()\n{" % self.get_class_name(s))
i_declared = False
for member in sorted(self.struct_dict[s]):
# TODO : Need to display each member based on its type
# TODO : Need to handle pNext which are structs, but of void* type
# Can grab struct type off of header of struct pointed to
# TODO : Handle Arrays
if self.struct_dict[s][member]['array']:
# Create for loop to print each element of array
if not i_declared:
disp_def.append(' uint32_t i;')
i_declared = True
disp_def.append(' for (i = 0; i<%s; i++) {' % self.struct_dict[s][member]['array_size'])
(return_str, struct_array) = self._genStructMemberPrint(member, s, True, struct_array)
disp_def.append(return_str)
disp_def.append(' }')
else:
(return_str, struct_array) = self._genStructMemberPrint(member, s, False, struct_array)
disp_def.append(return_str)
disp_def.append("}\n")
i_declared = False
# Basic print function to display struct members
disp_def.append("// Output all struct elements, each on their own line")
disp_def.append("void %s::display_txt()\n{" % self.get_class_name(s))
disp_def.append(' printf("%%*s%s struct contents at %%p:\\n", m_indent, "", (void*)m_origStructAddr);' % typedef_fwd_dict[s])
disp_def.append(' this->display_struct_members();')
disp_def.append("}\n")
# Advanced print function to display current struct and contents of any pointed-to structs
disp_def.append("// Output all struct elements, and for any structs pointed to, print complete contents")
disp_def.append("void %s::display_full_txt()\n{" % self.get_class_name(s))
disp_def.append(' printf("%%*s%s struct contents at %%p:\\n", m_indent, "", (void*)m_origStructAddr);' % typedef_fwd_dict[s])
disp_def.append(' this->display_struct_members();')
class_num = 0
# TODO : Need to handle arrays of structs here
for ms in struct_array:
swc_name = "class%s" % str(class_num)
if ms['array']:
if not i_declared:
disp_def.append(' uint32_t i;')
i_declared = True
disp_def.append(' for (i = 0; i<%s; i++) {' % ms['array_size'])
#disp_def.append(" if (m_struct.%s[i]) {" % (ms['name']))
disp_def.append(" %s %s(&(m_struct.%s[i]));" % (self.get_class_name(ms['type']), swc_name, ms['name']))
disp_def.append(" %s.set_indent(m_indent + 4);" % (swc_name))
disp_def.append(" %s.display_full_txt();" % (swc_name))
#disp_def.append(' }')
disp_def.append(' }')
elif 'pNext' == ms['name']:
# Need some code trickery here
# I'm thinking have a generated function that takes pNext ptr value
# then it checks sType and in large switch statement creates appropriate
# wrapper class type and then prints contents
disp_def.append(" if (m_struct.%s) {" % (ms['name']))
#disp_def.append(' printf("%*s This is where we would call dynamic print function\\n", m_indent, "");')
disp_def.append(' dynamic_display_full_txt(m_struct.%s, m_indent);' % (ms['name']))
disp_def.append(" }")
else:
if ms['ptr']:
disp_def.append(" if (m_struct.%s) {" % (ms['name']))
disp_def.append(" %s %s(m_struct.%s);" % (self.get_class_name(ms['type']), swc_name, ms['name']))
else:
disp_def.append(" if (&m_struct.%s) {" % (ms['name']))
disp_def.append(" %s %s(&m_struct.%s);" % (self.get_class_name(ms['type']), swc_name, ms['name']))
disp_def.append(" %s.set_indent(m_indent + 4);" % (swc_name))
disp_def.append(" %s.display_full_txt();\n }" % (swc_name))
class_num += 1
disp_def.append("}\n")
return "\n".join(disp_def)
def _generateStringHelperHeader(self):
header = []
header.append("//#includes, #defines, globals and such...\n")
for f in self.include_headers:
if 'vk_enum_string_helper' not in f:
header.append("#include <%s>\n" % f)
header.append('#include "vk_enum_string_helper.h"\n\n// Function Prototypes\n')
header.append("char* dynamic_display(const void* pStruct, const char* prefix);\n")
return "".join(header)
def _generateStringHelperHeaderCpp(self):
header = []
header.append("//#includes, #defines, globals and such...\n")
for f in self.include_headers:
if 'vk_enum_string_helper' not in f:
header.append("#include <%s>\n" % f)
header.append('#include "vk_enum_string_helper.h"\n')
header.append('using namespace std;\n\n// Function Prototypes\n')
header.append('\n')
header.append('namespace StreamControl\n')
header.append('{\n')
header.append('bool writeAddress = true;\n')
header.append('template <typename T>\n')
header.append('std::ostream& operator<< (std::ostream &out, T const* pointer)\n')
header.append('{\n')
header.append(' if(writeAddress)\n')
header.append(' {\n')
header.append(' out.operator<<(pointer);\n')
header.append(' }\n')
header.append(' else\n')
header.append(' {\n')
header.append(' std::operator<<(out, "address");\n')
header.append(' }\n')
header.append(' return out;\n')
header.append('}\n')
header.append('std::ostream& operator<<(std::ostream &out, char const*const s)\n')
header.append('{\n')
header.append(' return std::operator<<(out, s);\n')
header.append('}\n')
header.append('}\n')
header.append('\n')
header.append("string dynamic_display(const void* pStruct, const string prefix);\n")
return "".join(header)
def _generateValidateHelperFunctions(self):
sh_funcs = []
        # We do two passes; the first pass just generates prototypes for all the functions
for s in sorted(self.struct_dict):
# Wrap this in platform check since it may contain undefined structs or functions
add_platform_wrapper_entry(sh_funcs, typedef_fwd_dict[s])
sh_funcs.append('uint32_t %s(const %s* pStruct);' % (self._get_vh_func_name(s), typedef_fwd_dict[s]))
add_platform_wrapper_exit(sh_funcs, typedef_fwd_dict[s])
sh_funcs.append('\n')
for s in sorted(self.struct_dict):
# Wrap this in platform check since it may contain undefined structs or functions
add_platform_wrapper_entry(sh_funcs, typedef_fwd_dict[s])
sh_funcs.append('uint32_t %s(const %s* pStruct)\n{' % (self._get_vh_func_name(s), typedef_fwd_dict[s]))
for m in sorted(self.struct_dict[s]):
# TODO : Need to handle arrays of enums like in VkRenderPassCreateInfo struct
if is_type(self.struct_dict[s][m]['type'], 'enum') and not self.struct_dict[s][m]['ptr']:
sh_funcs.append(' if (!validate_%s(pStruct->%s))\n return 0;' % (self.struct_dict[s][m]['type'], self.struct_dict[s][m]['name']))
# TODO : Need a little refinement to this code to make sure type of struct matches expected input (ptr, const...)
if is_type(self.struct_dict[s][m]['type'], 'struct'):
if (self.struct_dict[s][m]['ptr']):
sh_funcs.append(' if (pStruct->%s && !%s((const %s*)pStruct->%s))\n return 0;' % (self.struct_dict[s][m]['name'], self._get_vh_func_name(self.struct_dict[s][m]['type']), self.struct_dict[s][m]['type'], self.struct_dict[s][m]['name']))
else:
sh_funcs.append(' if (!%s((const %s*)&pStruct->%s))\n return 0;' % (self._get_vh_func_name(self.struct_dict[s][m]['type']), self.struct_dict[s][m]['type'], self.struct_dict[s][m]['name']))
sh_funcs.append(" return 1;\n}")
# End of platform wrapped section
add_platform_wrapper_exit(sh_funcs, typedef_fwd_dict[s])
return "\n".join(sh_funcs)
def _generateValidateHelperHeader(self):
header = []
header.append("//#includes, #defines, globals and such...\n")
for f in self.include_headers:
if 'vk_enum_validate_helper' not in f:
header.append("#include <%s>\n" % f)
header.append('#include "vk_enum_validate_helper.h"\n\n// Function Prototypes\n')
#header.append("char* dynamic_display(const void* pStruct, const char* prefix);\n")
return "".join(header)
def _generateSizeHelperFunctions(self):
sh_funcs = []
# just generates prototypes for all the functions
for s in sorted(self.struct_dict):
# Wrap this in platform check since it may contain undefined structs or functions
add_platform_wrapper_entry(sh_funcs, typedef_fwd_dict[s])
sh_funcs.append('size_t %s(const %s* pStruct);' % (self._get_size_helper_func_name(s), typedef_fwd_dict[s]))
add_platform_wrapper_exit(sh_funcs, typedef_fwd_dict[s])
return "\n".join(sh_funcs)
def _generateSizeHelperFunctionsC(self):
sh_funcs = []
# generate function definitions
for s in sorted(self.struct_dict):
# Wrap this in platform check since it may contain undefined structs or functions
add_platform_wrapper_entry(sh_funcs, typedef_fwd_dict[s])
skip_list = [] # Used when struct elements need to be skipped because size already accounted for
sh_funcs.append('size_t %s(const %s* pStruct)\n{' % (self._get_size_helper_func_name(s), typedef_fwd_dict[s]))
indent = ' '
sh_funcs.append('%ssize_t structSize = 0;' % (indent))
sh_funcs.append('%sif (pStruct) {' % (indent))
indent = ' '
sh_funcs.append('%sstructSize = sizeof(%s);' % (indent, typedef_fwd_dict[s]))
i_decl = False
for m in sorted(self.struct_dict[s]):
if m in skip_list:
continue
if self.struct_dict[s][m]['dyn_array']:
if self.struct_dict[s][m]['full_type'].count('*') > 1:
if not is_type(self.struct_dict[s][m]['type'], 'struct') and not 'char' in self.struct_dict[s][m]['type'].lower():
if 'ppMemoryBarriers' == self.struct_dict[s][m]['name']:
# TODO : For now be conservative and consider all memBarrier ptrs as largest possible struct
sh_funcs.append('%sstructSize += pStruct->%s*(sizeof(%s*) + sizeof(VkImageMemoryBarrier));' % (indent, self.struct_dict[s][m]['array_size'], self.struct_dict[s][m]['type']))
else:
sh_funcs.append('%sstructSize += pStruct->%s*(sizeof(%s*) + sizeof(%s));' % (indent, self.struct_dict[s][m]['array_size'], self.struct_dict[s][m]['type'], self.struct_dict[s][m]['type']))
else: # This is an array of char* or array of struct ptrs
if not i_decl:
sh_funcs.append('%suint32_t i = 0;' % (indent))
i_decl = True
sh_funcs.append('%sfor (i = 0; i < pStruct->%s; i++) {' % (indent, self.struct_dict[s][m]['array_size']))
indent = ' '
if is_type(self.struct_dict[s][m]['type'], 'struct'):
sh_funcs.append('%sstructSize += (sizeof(%s*) + %s(pStruct->%s[i]));' % (indent, self.struct_dict[s][m]['type'], self._get_size_helper_func_name(self.struct_dict[s][m]['type']), self.struct_dict[s][m]['name']))
else:
sh_funcs.append('%sstructSize += (sizeof(char*) | |
# -*- coding: utf-8 -*-
"""
irradiance.py from pvlib
========================
Stripped down, vendorized version from:
https://github.com/pvlib/pvlib-python/
Calculate the solar position using the NREL SPA algorithm either using
numpy arrays or compiling the code to machine language with numba.
The rationale for not including this library as a strict dependency is to avoid
including a dependency on pandas, keeping load time low, and PyPy compatibility
Created by <NAME> (@alorenzo175), Univ. of Arizona, 2015
For a full list of contributors to this file, see the `pvlib` repository.
The copyright notice (BSD-3 clause) is as follows:
BSD 3-Clause License
Copyright (c) 2013-2018, Sandia National Laboratories and pvlib python
Development Team
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division
import os
import time
from datetime import datetime
import math
from math import degrees, sin, cos, tan, radians, atan, asin, atan2, sqrt, acos
from fluids.constants import deg2rad, rad2deg
from fluids.numerics import sincos
__all__ = ['julian_day_dt', 'julian_day', 'julian_ephemeris_day', 'julian_century',
'julian_ephemeris_century', 'julian_ephemeris_millennium', 'heliocentric_longitude',
'heliocentric_latitude', 'heliocentric_radius_vector', 'geocentric_longitude',
'geocentric_latitude', 'mean_elongation', 'mean_anomaly_sun', 'mean_anomaly_moon',
'moon_argument_latitude', 'moon_ascending_longitude', 'longitude_nutation',
'obliquity_nutation', 'mean_ecliptic_obliquity', 'true_ecliptic_obliquity',
'aberration_correction', 'apparent_sun_longitude', 'mean_sidereal_time',
'apparent_sidereal_time', 'geocentric_sun_right_ascension', 'geocentric_sun_declination',
'local_hour_angle', 'equatorial_horizontal_parallax', 'uterm', 'xterm', 'yterm',
'parallax_sun_right_ascension', 'topocentric_sun_right_ascension', 'topocentric_sun_declination',
'topocentric_local_hour_angle', 'topocentric_elevation_angle_without_atmosphere',
'atmospheric_refraction_correction', 'topocentric_elevation_angle', 'topocentric_zenith_angle',
'topocentric_astronomers_azimuth', 'topocentric_azimuth_angle', 'sun_mean_longitude',
'equation_of_time', 'calculate_deltat', 'longitude_obliquity_nutation',
'transit_sunrise_sunset',
]
nan = float("nan")
HELIO_RADIUS_TABLE_LIST_0 = [[100013989.0, 0.0, 0.0],
[1670700.0, 3.0984635, 6283.07585],
[13956.0, 3.05525, 12566.1517],
[3084.0, 5.1985, 77713.7715],
[1628.0, 1.1739, 5753.3849],
[1576.0, 2.8469, 7860.4194],
[925.0, 5.453, 11506.77],
[542.0, 4.564, 3930.21],
[472.0, 3.661, 5884.927],
[346.0, 0.964, 5507.553],
[329.0, 5.9, 5223.694],
[307.0, 0.299, 5573.143],
[243.0, 4.273, 11790.629],
[212.0, 5.847, 1577.344],
[186.0, 5.022, 10977.079],
[175.0, 3.012, 18849.228],
[110.0, 5.055, 5486.778],
[98.0, 0.89, 6069.78],
[86.0, 5.69, 15720.84],
[86.0, 1.27, 161000.69],
[65.0, 0.27, 17260.15],
[63.0, 0.92, 529.69],
[57.0, 2.01, 83996.85],
[56.0, 5.24, 71430.7],
[49.0, 3.25, 2544.31],
[47.0, 2.58, 775.52],
[45.0, 5.54, 9437.76],
[43.0, 6.01, 6275.96],
[39.0, 5.36, 4694.0],
[38.0, 2.39, 8827.39],
[37.0, 0.83, 19651.05],
[37.0, 4.9, 12139.55],
[36.0, 1.67, 12036.46],
[35.0, 1.84, 2942.46],
[33.0, 0.24, 7084.9],
[32.0, 0.18, 5088.63],
[32.0, 1.78, 398.15],
[28.0, 1.21, 6286.6],
[28.0, 1.9, 6279.55],
[26.0, 4.59, 10447.39]]
HELIO_RADIUS_TABLE_LIST_1 = [[103019.0, 1.10749, 6283.07585],
[1721.0, 1.0644, 12566.1517],
[702.0, 3.142, 0.0],
[32.0, 1.02, 18849.23],
[31.0, 2.84, 5507.55],
[25.0, 1.32, 5223.69],
[18.0, 1.42, 1577.34],
[10.0, 5.91, 10977.08],
[9.0, 1.42, 6275.96],
[9.0, 0.27, 5486.78],
]
HELIO_RADIUS_TABLE_LIST_2 = [[4359.0, 5.7846, 6283.0758],
[124.0, 5.579, 12566.152],
[12.0, 3.14, 0.0],
[9.0, 3.63, 77713.77],
[6.0, 1.87, 5573.14],
[3.0, 5.47, 18849.23]]
HELIO_RADIUS_TABLE_LIST_3 = [[145.0, 4.273, 6283.076],
[7.0, 3.92, 12566.15]]
HELIO_RADIUS_TABLE_LIST_4 = [[4.0, 2.56, 6283.08]]
NUTATION_YTERM_LIST_0 = [0.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, 0.0, 0.0, -2.0, -2.0, -2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 0.0, -2.0, 0.0, 2.0, 0.0, 0.0, -2.0, 0.0, -2.0, 0.0, 0.0, 2.0, -2.0, 0.0, -2.0, 0.0, 0.0, 2.0, 2.0, 0.0, -2.0, 0.0, 2.0, 2.0, -2.0, -2.0, 2.0, 2.0, 0.0, -2.0, -2.0, 0.0, -2.0, -2.0, 0.0, -1.0, -2.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 2.0, 0.0, 2.0]
NUTATION_YTERM_LIST_1 = [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 2.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -1.0, 1.0, -1.0, -1.0, 0.0, -1.0]
NUTATION_YTERM_LIST_2 = [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, -1.0, 0.0, 1.0, -1.0, -1.0, 1.0, 2.0, -2.0, 0.0, 2.0, 2.0, 1.0, 0.0, 0.0, -1.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 2.0, -1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 2.0, 1.0, -2.0, 0.0, 1.0, 0.0, 0.0, 2.0, 2.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, -2.0, 1.0, 1.0, 1.0, -1.0, 3.0, 0.0]
NUTATION_YTERM_LIST_3 = [0.0, 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0, 0.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, -2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, -2.0, 0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0]
NUTATION_YTERM_LIST_4 = [1.0, 2.0, 2.0, 2.0, 0.0, 0.0, 2.0, 1.0, 2.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 1.0, 1.0, 0.0, 1.0, 2.0, 2.0, 0.0, 2.0, 0.0, 0.0, 1.0, 0.0, 1.0, 2.0, 1.0, 1.0, 1.0, 0.0, 1.0, 2.0, 2.0, 0.0, 2.0, 1.0, 0.0, 2.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0]
NUTATION_ABCD_LIST = [[-171996.0, -174.2, 92025.0, 8.9],
[-13187.0, -1.6, 5736.0, -3.1],
[-2274.0, -0.2, 977.0, -0.5],
[2062.0, 0.2, -895.0, 0.5],
[1426.0, -3.4, 54.0, -0.1],
[712.0, 0.1, -7.0, 0.0],
[-517.0, 1.2, 224.0, -0.6],
[-386.0, -0.4, 200.0, 0.0],
[-301.0, 0.0, 129.0, -0.1],
[217.0, -0.5, -95.0, 0.3],
[-158.0, 0.0, 0.0, 0.0],
[129.0, 0.1, -70.0, 0.0],
[123.0, 0.0, -53.0, 0.0],
[63.0, 0.0, 0.0, 0.0],
[63.0, 0.1, -33.0, 0.0],
[-59.0, 0.0, 26.0, 0.0],
[-58.0, -0.1, 32.0, 0.0],
[-51.0, 0.0, 27.0, 0.0],
[48.0, 0.0, 0.0, 0.0],
[46.0, 0.0, -24.0, 0.0],
[-38.0, 0.0, 16.0, 0.0],
[-31.0, 0.0, 13.0, 0.0],
[29.0, 0.0, 0.0, 0.0],
[29.0, 0.0, -12.0, 0.0],
[26.0, 0.0, 0.0, 0.0],
[-22.0, 0.0, 0.0, 0.0],
[21.0, 0.0, -10.0, 0.0],
[17.0, -0.1, 0.0, 0.0],
[16.0, 0.0, -8.0, 0.0],
[-16.0, 0.1, 7.0, 0.0],
[-15.0, 0.0, 9.0, 0.0],
[-13.0, 0.0, 7.0, 0.0],
[-12.0, 0.0, 6.0, 0.0],
[11.0, 0.0, 0.0, 0.0],
[-10.0, 0.0, 5.0, 0.0],
[-8.0, 0.0, 3.0, 0.0],
[7.0, 0.0, -3.0, 0.0],
[-7.0, 0.0, 0.0, 0.0],
[-7.0, 0.0, 3.0, 0.0],
[-7.0, 0.0, 3.0, 0.0],
[6.0, 0.0, 0.0, 0.0],
[6.0, 0.0, -3.0, 0.0],
[6.0, 0.0, -3.0, 0.0],
[-6.0, 0.0, 3.0, 0.0],
[-6.0, 0.0, 3.0, 0.0],
[5.0, 0.0, 0.0, 0.0],
[-5.0, 0.0, 3.0, 0.0],
[-5.0, 0.0, 3.0, 0.0],
[-5.0, 0.0, 3.0, 0.0],
[4.0, 0.0, 0.0, 0.0],
[4.0, 0.0, 0.0, 0.0],
[4.0, 0.0, 0.0, 0.0],
[-4.0, 0.0, 0.0, 0.0],
[-4.0, 0.0, 0.0, 0.0],
[-4.0, 0.0, 0.0, 0.0],
[3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0]]
HELIO_LAT_TABLE_LIST_0 = [[280.0, 3.199, 84334.662],
[102.0, 5.422, 5507.553],
[80.0, 3.88, 5223.69],
[44.0, 3.7, 2352.87],
[32.0, 4.0, 1577.34]]
HELIO_LAT_TABLE_LIST_1 = [[9.0, 3.9, 5507.55],
[6.0, 1.73, 5223.69]]
#HELIO_LONG_TABLE_LIST = HELIO_LONG_TABLE.tolist()
HELIO_LONG_TABLE_LIST_0 = [[175347046.0, 0.0, 0.0],
[3341656.0, 4.6692568, 6283.07585],
[34894.0, 4.6261, 12566.1517],
[3497.0, 2.7441, 5753.3849],
[3418.0, 2.8289, 3.5231],
[3136.0, 3.6277, 77713.7715],
[2676.0, 4.4181, 7860.4194],
[2343.0, 6.1352, 3930.2097],
[1324.0, 0.7425, 11506.7698],
[1273.0, 2.0371, 529.691],
[1199.0, 1.1096, 1577.3435],
[990.0, 5.233, 5884.927],
[902.0, 2.045, 26.298],
[857.0, 3.508, 398.149],
[780.0, 1.179, 5223.694],
[753.0, 2.533, 5507.553],
[505.0, 4.583, 18849.228],
[492.0, 4.205, 775.523],
[357.0, 2.92, 0.067],
[317.0, 5.849, 11790.629],
[284.0, 1.899, 796.298],
[271.0, 0.315, 10977.079],
[243.0, 0.345, 5486.778],
[206.0, 4.806, 2544.314],
[205.0, 1.869, 5573.143],
[202.0, 2.458, 6069.777],
[156.0, 0.833, 213.299],
[132.0, 3.411, 2942.463],
[126.0, 1.083, 20.775],
[115.0, 0.645, 0.98],
[103.0, 0.636, 4694.003],
[102.0, 0.976, 15720.839],
[102.0, 4.267, 7.114],
[99.0, 6.21, 2146.17],
[98.0, 0.68, 155.42],
[86.0, 5.98, 161000.69],
[85.0, 1.3, 6275.96],
[85.0, 3.67, 71430.7],
[80.0, 1.81, 17260.15],
[79.0, 3.04, 12036.46],
[75.0, 1.76, 5088.63],
[74.0, 3.5, 3154.69],
[74.0, | |
"""
Get arguments corresponding to parser.
"""
return parser.parse_args()
def getSourceListArgs(parser, wildcards = True):
"""
Create a list of tuples that contain source ROOT file names
and lists of path in these files as well as the original arguments
"""
args = getArgs(parser)
inputFiles = []
try:
inputFiles = args.FILE
except:
inputFiles = args.SOURCE
sourceList = \
[tup for pattern in inputFiles \
for tup in patternToFileNameAndPathSplitList(pattern,wildcards)]
return sourceList, args
def getSourceListOptDict(parser, wildcards = True):
"""
Get the list of tuples and the dictionary with options
returns:
sourceList: a list of tuples with one list element per file
the first tuple entry being the root file,
the second a list of subdirectories,
each being represented as a list itself with a string per level
e.g.
rootls tutorial/tmva/TMVA.root:Method_BDT/BDT turns into
[('tutorials/tmva/TMVA.root', [['Method_BDT','BDT']])]
vars(args): a dictionary of matched options, e.g.
{'longListing': False,
'oneColumn': False,
'treeListing': False,
'FILE': ['tutorials/tmva/TMVA.root:Method_BDT/BDT']
}
"""
sourceList, args = getSourceListArgs(parser, wildcards)
if sourceList == []:
logging.error("Input file(s) not found!")
return sourceList, vars(args)
def getSourceDestListOptDict(parser, wildcards = True):
"""
Get the list of tuples of sources, create destination name, destination pathSplit
and the dictionary with options
"""
sourceList, args = getSourceListArgs(parser, wildcards)
destList = \
patternToFileNameAndPathSplitList( \
args.DEST,wildcards=False)
if destList != []:
destFileName,destPathSplitList = destList[0]
destPathSplit = destPathSplitList[0]
else:
destFileName = ""
destPathSplit = []
return sourceList, destFileName, destPathSplit, vars(args)
# The end of the set of functions to put the arguments in shape
##########
##########
# Several functions shared by rootcp, rootmv and rootrm
TARGET_ERROR = "target '{0}' is not a directory"
OMITTING_FILE_ERROR = "omitting file '{0}'"
OMITTING_DIRECTORY_ERROR = "omitting directory '{0}'"
OVERWRITE_ERROR = "cannot overwrite non-directory '{0}' with directory '{1}'"
def copyRootObject(sourceFile,sourcePathSplit,destFile,destPathSplit,oneSource,recursive,replace):
"""
Initialize the recursive function 'copyRootObjectRecursive', written to be as unix-like as possible
"""
retcode = 0
isMultipleInput = not (oneSource and sourcePathSplit != [])
recursiveOption = recursive
# Multiple input and un-existing or non-directory destination
# TARGET_ERROR
if isMultipleInput and destPathSplit != [] \
and not (isExisting(destFile,destPathSplit) \
and isDirectory(destFile,destPathSplit)):
logging.warning(TARGET_ERROR.format(destPathSplit[-1]))
retcode += 1
# Entire ROOT file or directory in input omitting "-r" option
# OMITTING_FILE_ERROR or OMITTING_DIRECTORY_ERROR
if not recursiveOption:
if sourcePathSplit == []:
logging.warning(OMITTING_FILE_ERROR.format( \
sourceFile.GetName()))
retcode += 1
elif isDirectory(sourceFile,sourcePathSplit):
logging.warning(OMITTING_DIRECTORY_ERROR.format( \
sourcePathSplit[-1]))
retcode += 1
# Run copyRootObjectRecursive function with the wish
# to follow the unix copy behaviour
if sourcePathSplit == []:
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit,replace)
else:
setName = ""
if not isMultipleInput and (destPathSplit != [] \
and not isExisting(destFile,destPathSplit)):
setName = destPathSplit[-1]
objectName = sourcePathSplit[-1]
if isDirectory(sourceFile,sourcePathSplit):
if setName != "":
createDirectory(destFile,destPathSplit[:-1]+[setName])
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1]+[setName],replace)
elif isDirectory(destFile,destPathSplit):
if not isExisting(destFile,destPathSplit+[objectName]):
createDirectory(destFile,destPathSplit+[objectName])
if isDirectory(destFile,destPathSplit+[objectName]):
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit+[objectName],replace)
else:
logging.warning(OVERWRITE_ERROR.format( \
objectName,objectName))
retcode += 1
else:
logging.warning(OVERWRITE_ERROR.format( \
destPathSplit[-1],objectName))
retcode += 1
else:
if setName != "":
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1],replace,setName)
elif isDirectory(destFile,destPathSplit):
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit,replace)
else:
setName = destPathSplit[-1]
retcode += copyRootObjectRecursive(sourceFile,sourcePathSplit, \
destFile,destPathSplit[:-1],replace,setName)
return retcode
DELETE_ERROR = "object {0} was not existing, so it is not deleted"
def deleteObject(rootFile,pathSplit):
"""
Delete the object 'pathSplit[-1]' from (rootFile,pathSplit[:-1])
"""
retcode = changeDirectory(rootFile,pathSplit[:-1])
if retcode == 0:
fileName = pathSplit[-1]
if isExisting(rootFile,pathSplit):
ROOT.gDirectory.Delete(fileName+";*")
else:
logging.warning(DELETE_ERROR.format(fileName))
retcode += 1
return retcode
def copyRootObjectRecursive(sourceFile,sourcePathSplit,destFile,destPathSplit,replace,setName=""):
"""
Copy objects from a file or directory (sourceFile,sourcePathSplit)
to an other file or directory (destFile,destPathSplit)
    - Aims to follow the unix copy behaviour
    - This is a recursive function
- Python adaptation of a root input/output tutorial :
$ROOTSYS/tutorials/io/copyFiles.C
"""
retcode = 0
replaceOption = replace
seen = {}
for key in getKeyList(sourceFile,sourcePathSplit):
objectName = key.GetName()
# write keys only if the cycle is higher than before
if objectName not in seen.keys():
seen[objectName] = key
else:
if seen[objectName].GetCycle() < key.GetCycle():
seen[objectName] = key
else:
continue
if isDirectoryKey(key):
if not isExisting(destFile,destPathSplit+[objectName]):
createDirectory(destFile,destPathSplit+[objectName])
if isDirectory(destFile,destPathSplit+[objectName]):
retcode +=copyRootObjectRecursive(sourceFile, \
sourcePathSplit+[objectName], \
destFile,destPathSplit+[objectName],replace)
else:
logging.warning(OVERWRITE_ERROR.format( \
objectName,objectName))
retcode += 1
elif isTreeKey(key):
T = key.GetMotherDir().Get(objectName+";"+str(key.GetCycle()))
if replaceOption and isExisting(destFile,destPathSplit+[T.GetName()]):
retcodeTemp = deleteObject(destFile,destPathSplit+[T.GetName()])
if retcodeTemp:
retcode += retcodeTemp
continue
changeDirectory(destFile,destPathSplit)
newT = T.CloneTree(-1,"fast")
if setName != "":
newT.SetName(setName)
newT.Write()
else:
obj = key.ReadObj()
if replaceOption and isExisting(destFile,destPathSplit+[setName]):
changeDirectory(destFile,destPathSplit)
otherObj = getFromDirectory(setName)
if not otherObj == obj:
retcodeTemp = deleteObject(destFile,destPathSplit+[setName])
if retcodeTemp:
retcode += retcodeTemp
continue
else:
obj.SetName(setName)
changeDirectory(destFile,destPathSplit)
obj.Write()
else:
obj.SetName(setName)
changeDirectory(destFile,destPathSplit)
obj.Write()
elif issubclass(obj.__class__, ROOT.TCollection):
# probably the object was written with kSingleKey
changeDirectory(destFile,destPathSplit)
obj.Write(setName, ROOT.TObject.kSingleKey)
else:
if setName != "":
obj.SetName(setName)
else:
obj.SetName(objectName)
changeDirectory(destFile,destPathSplit)
obj.Write()
obj.Delete()
changeDirectory(destFile,destPathSplit)
ROOT.gDirectory.SaveSelf(ROOT.kTRUE)
return retcode
FILE_REMOVE_ERROR = "cannot remove '{0}': Is a ROOT file"
DIRECTORY_REMOVE_ERROR = "cannot remove '{0}': Is a directory"
ASK_FILE_REMOVE = "remove '{0}' ? (y/n) : "
ASK_OBJECT_REMOVE = "remove '{0}' from '{1}' ? (y/n) : "
def deleteRootObject(rootFile, pathSplit, interactive, recursive):
"""
Remove the object (rootFile,pathSplit)
-interactive : prompt before every removal
-recursive : allow directory, and ROOT file, removal
"""
retcode = 0
if not recursive and isDirectory(rootFile,pathSplit):
if pathSplit == []:
logging.warning(FILE_REMOVE_ERROR.format(rootFile.GetName()))
retcode += 1
else:
logging.warning(DIRECTORY_REMOVE_ERROR.format(pathSplit[-1]))
retcode += 1
else:
if interactive:
if pathSplit != []:
answer = _input(ASK_OBJECT_REMOVE \
.format("/".join(pathSplit),rootFile.GetName()))
else:
answer = _input(ASK_FILE_REMOVE \
.format(rootFile.GetName()))
remove = answer.lower() == 'y'
else:
remove = True
if remove:
if pathSplit != []:
retcode += deleteObject(rootFile,pathSplit)
else:
rootFile.Close()
os.remove(rootFile.GetName())
return retcode
# End of functions shared by rootcp, rootmv and rootrm
##########
##########
# Help strings for ROOT command line tools
# Arguments
SOURCE_HELP = "path of the source."
SOURCES_HELP = "path of the source(s)."
DEST_HELP = "path of the destination."
# Options
COMPRESS_HELP = \
"""change the compression settings of the
destination file (if not already existing)."""
INTERACTIVE_HELP = "prompt before every removal."
RECREATE_HELP = "recreate the destination file."
RECURSIVE_HELP = "recurse inside directories"
REPLACE_HELP = "replace object if already existing"
# End of help strings
##########
##########
# ROOTBROWSE
def _openBrowser(rootFile=None):
browser = ROOT.TBrowser()
_input("Press enter to exit.")
def rootBrowse(fileName=None):
if fileName:
rootFile = openROOTFile(fileName)
if not rootFile: return 1
_openBrowser(rootFile)
rootFile.Close()
else:
_openBrowser()
return 0
# End of ROOTBROWSE
##########
##########
# ROOTCP
def _copyObjects(fileName, pathSplitList, destFile, destPathSplit, oneFile, \
recursive, replace):
retcode = 0
destFileName = destFile.GetName()
rootFile = openROOTFile(fileName) \
if fileName != destFileName else \
destFile
if not rootFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(rootFile) # Fast copy necessity
for pathSplit in pathSplitList:
oneSource = oneFile and len(pathSplitList)==1
retcode += copyRootObject(rootFile, pathSplit, destFile, destPathSplit, \
oneSource, recursive, replace)
if fileName != destFileName: rootFile.Close()
return retcode
def rootCp(sourceList, destFileName, destPathSplit, \
compress=None, recreate=False, recursive=False, replace=False):
# Check arguments
if sourceList == [] or destFileName == "": return 1
if recreate and destFileName in [n[0] for n in sourceList]:
logging.error("cannot recreate destination file if this is also a source file")
return 1
# Open destination file
destFile = openROOTFileCompress(destFileName, compress, recreate)
if not destFile: return 1
ROOT.gROOT.GetListOfFiles().Remove(destFile) # Fast copy necessity
# Loop on the root files
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _copyObjects(fileName, pathSplitList, destFile, destPathSplit, \
len(sourceList)==1, recursive, replace)
destFile.Close()
return retcode
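# A minimal usage sketch (hypothetical file names): copy Method_BDT/BDT from TMVA.root
# into the top level of a freshly created out.root. The sourceList tuple shape follows
# the convention documented in getSourceListOptDict above.
def _rootcp_example():
    sourceList = [("TMVA.root", [["Method_BDT", "BDT"]])]
    return rootCp(sourceList, "out.root", [], recreate=True, recursive=True)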
# End of ROOTCP
##########
##########
# ROOTEVENTSELECTOR
def _setBranchStatus(tree,branchSelectionString,status=0):
"""This is used by _copyTreeSubset() to turn on/off branches"""
for branchToModify in branchSelectionString.split(","):
logging.info("Setting branch status to %d for %s"%(status,branchToModify) )
tree.SetBranchStatus(branchToModify,status)
return tree
def _copyTreeSubset(sourceFile,sourcePathSplit,destFile,destPathSplit,firstEvent,lastEvent,selectionString,
branchinclude, branchexclude):
"""Copy a subset of the tree from (sourceFile,sourcePathSplit)
to (destFile,destPathSplit) according to options in optDict"""
retcode = changeDirectory(sourceFile,sourcePathSplit[:-1])
if retcode != 0: return retcode
bigTree = getFromDirectory(sourcePathSplit[-1])
nbrEntries = bigTree.GetEntries()
# changeDirectory for the small tree not to be memory-resident
retcode = changeDirectory(destFile,destPathSplit)
if retcode != 0: return retcode
if lastEvent == -1:
lastEvent = nbrEntries-1
numberOfEntries = (lastEvent-firstEvent)+1
# "Skim" events based on branch values using selectionString
# as well as selecting a range of events by index
outputTree = bigTree.CopyTree(selectionString,"",numberOfEntries,firstEvent)
# "Slim" tree by removing branches -
# This is done after the skimming to allow for the user to skim on a
# branch they no longer need to keep
if branchexclude:
_setBranchStatus(outputTree,branchexclude,0)
if branchinclude:
_setBranchStatus(outputTree,branchinclude,1)
if branchexclude or branchinclude:
outputTree = outputTree.CloneTree()
outputTree.Write()
return retcode
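# Sketch of the skim/slim distinction (cut and branch names are illustrative only):
# selectionString="pt>20" keeps only entries passing the cut (skim), while
# branchexclude="trkPt,trkEta" then drops those branches from the copied tree (slim).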
def _copyTreeSubsets(fileName, pathSplitList, destFile, destPathSplit, first, last, selectionString,
branchinclude, branchexclude):
retcode = 0
destFileName = destFile.GetName()
rootFile = openROOTFile(fileName) \
if fileName != destFileName else \
destFile
if not rootFile: return 1
for pathSplit in pathSplitList:
if isTree(rootFile,pathSplit):
retcode += _copyTreeSubset(rootFile,pathSplit, \
destFile,destPathSplit,first,last,selectionString,branchinclude, branchexclude)
if fileName != destFileName: rootFile.Close()
return retcode
def | |
<filename>Sprint Week/Main.py
# Filename Main.py
# The Sprint Project Company Main Menu
# Authors:<NAME>, <NAME>, <NAME>.
# Date November 17, 2021
# Imports
import time
import datetime
# Functions
def As_Dollars_Pad(Number):
"""Format Dollars amounts to strings & Pad Right 10 Spaces"""
Number_Display = f"${Number:,.2f}"
Number_Display = f"{Number_Display:>10}"
return Number_Display
def Number_Pad(Number):
"""Format Dollars amounts to strings & Pad Right 10 Spaces"""
Number_Display = f"{Number:,}"
Number_Display = f"{Number_Display:>10}"
return Number_Display
def Write(Variable, f):
"""Function to Convert None Strings to Strings and Format to write to file with ,"""
import datetime
if isinstance(Variable, str) == False:
if isinstance(Variable, datetime.datetime) == True:
return f.write(f"{Variable.strftime('%Y-%m-%d')},")
else:
Variable = round(Variable, 2)
return f.write(f"{str(Variable)},")
elif isinstance(Variable, str) == True:
return f.write(f"{(Variable)},")
def Write_Space(Variable, f):
"""Function to Convert None Strings to Strings and Format to write to file with Space"""
import datetime
if isinstance(Variable, str) == False:
if isinstance(Variable, datetime.datetime) == True:
return f.write(f"{Variable.strftime('%Y-%m-%d')}\n")
else:
Variable = round(Variable, 2)
return f.write(f"{str(Variable)}\n")
elif isinstance(Variable, str) == True:
return f.write(f"{(Variable)}\n")
def Name_Validation(Name):
""" Function to Validate a Name for Input: Allowing Spaces, - and '"""
for Char in Name:
if ("A" <= Char <= "Z" or "a" <= Char <= "z"
or Char == "-" or Char == "'"):
continue
else:
return False
return True
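# Quick sketch (illustrative values): letters, hyphens and apostrophes pass,
# digits and spaces do not.
#   Name_Validation("O'Brien-Smith") -> True
#   Name_Validation("Mary Jane")     -> False
#   Name_Validation("R2D2")          -> False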
def Business_Day(Date):
"""Function to Work Within Business Days Only Mon-Fri"""
Weekday = Date.weekday()
if Weekday == 5:
Date += datetime.timedelta(days=2)
elif Weekday == 6:
Date += datetime.timedelta(days=1)
return Date
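# Quick sketch: 2021-11-20 is a Saturday (weekday 5), so
# Business_Day(datetime.datetime(2021, 11, 20)) returns Monday 2021-11-22;
# weekday dates are returned unchanged.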
# Part One Simple IPO Program
def Part_One():
# CONSTANTS
RENT_RATE = 35.00
KM_RATE = 0.10
HST_TAX = 0.15
# INPUT STATEMENTS
print()
print(" Edsel Car Rental Company ")
print()
print("Please answer the following questions!")
print()
print("Rental Details")
print()
while True:
try:
Num_Days_Rented = int(input(" Number of days the automobile was rented: "))
except:
print("Invalid Entry: Please input a Number")
else:
if Num_Days_Rented == "":
print("Cannot be blank! Please try again")
elif Num_Days_Rented < 1:
print("Invalid Entry Number Days Rented Cannot be Less than 1")
else:
break
print()
print(" Mileage")
print()
while True:
try:
Start_Mileage = int(input(" Starting mileage when car was rented: "))
End_Mileage = int(input(" Ending mileage when car was returned: "))
except:
print("Invalid Entry: Please Input KM Amount")
else:
if Start_Mileage == "" or End_Mileage == "":
print("Cannot be blank - Please try again!")
elif Start_Mileage < 100:
print("Invalid Entry Starting Mileage cannot be less than 100km")
elif End_Mileage < Start_Mileage:
print("Invalid Entry: Ending Mileage Cannot be less than starting Mileage")
else:
break
print()
AnyKey = input("Press any key to Display Rental Invoice....")
print()
# CALCULATIONS PROCESSING
Km_Travelled = End_Mileage - Start_Mileage
Daily_Cost = Num_Days_Rented * RENT_RATE
Mileage_Cost = Km_Travelled * KM_RATE
Rent_Cost = Daily_Cost + Mileage_Cost
Tax_Cost = Daily_Cost * HST_TAX
Total_Rent_Cost = Rent_Cost + Tax_Cost
# CALCULATIONS FOR OUTPUT TO USER
print()
print(" Edsel")
print(" Car Rental Company")
print()
print(" Mileage:")
print("~" * 30)
print(" Km's Travelled {}".format(Number_Pad(Km_Travelled)))
print("~" * 30)
print(" Costs:")
print("~" * 30)
print(" Daily Cost {}".format(As_Dollars_Pad(Daily_Cost)))
print(" Mileage Cost {}".format(As_Dollars_Pad(Mileage_Cost)))
print(' ------------ {:>10}'.format("-" * 9))
print(" Rental Cost {}".format(As_Dollars_Pad(Rent_Cost)))
print(" Tax/HST Cost {}".format(As_Dollars_Pad(Tax_Cost)))
print(' ------------ {:>10}'.format("-" * 9))
print(" Total Cost {}".format(As_Dollars_Pad(Total_Rent_Cost)))
print("~" * 30)
print()
print()
AnyKey = input("Press any key to Return to Menu....")
print()
print("Loading ", end="")
for wait in range(1, 11):
print('*', end=' ')
time.sleep(.2)
print()
return
# Option 2 If and Loop Sample
def FizzBuzz(X=5,Y=8):
"""Number Divisible by 5 = Fizz and Number Divisible by 8 = Buzz, both = FizzBuzz"""
for Number in range(1, 101):
Fizz = Number % X
Buzz = Number % Y
if Fizz == 0 and Buzz == 0:
print("FizzBuzz")
elif Fizz == 0:
print("Fizz")
elif Buzz == 0:
print("Buzz")
else:
print(Number)
print()
AnyKey = input("Press any key to Return to Menu....")
print()
print("Loading ", end="")
for wait in range(1, 11):
print('*', end=' ')
time.sleep(.2)
print()
return
# Strings and Dates
def Part_Three():
# inputs
print()
print("Employee Information Page")
print()
while True:
First = input("First Name: ").title().lstrip().rstrip()
if First == "":
print("First Name cannot be blank: Please Re-Enter")
elif len(First) > 25:
print("Invalid First Name Length: Cannot be longer than 25 letters ")
elif Name_Validation(First) == False: # Function to Validate Name Input
print("Invalid Name Entered: Please use letters between (a-z), (-) and (') No Spaces")
else:
break
while True:
Last = input("Last Name: ").title().lstrip().rstrip()
if Last == "":
print("Last Name cannot be blank: Please Re-Enter")
elif len(Last) > 30:
print("Invalid Last Name Length: Cannot be longer than 30 letters ")
elif Name_Validation(Last) == False: # Function to Validate Name Input
print("Invalid Name Entered: Please use letters between (a-z), (-) and (') ")
else:
break
while True:
try:
StartDate = input("What date did they start? (YYYY-MM-DD): ")
StartDate = datetime.datetime.strptime(StartDate, "%Y-%m-%d")
except:
print("Invalid start date - must be in the format YYYY-MM-DD).")
else:
break
while True:
try:
BirthDate = input("What is the Employee's Birthday? (YYYY-MM-DD): ")
BirthDate = datetime.datetime.strptime(BirthDate, "%Y-%m-%d")
except:
print("Invalid Birthdate - must be in the format YYYY-MM-DD).")
else:
break
while True:
try:
Salary = int(input("What is their yearly salary?: "))
except:
print("Invalid Entry: Please input the Yearly Salary")
else:
if Salary <= 0:
print("Salary must be greater than 0!")
else:
break
# Formatting and Calculations
StartDateStr = str(StartDate)
BirthDateStr = str(BirthDate)
EmployeeNo = f"{First[0]}{Last[0]}-{StartDateStr[0:4]}-{BirthDateStr[5:7]}"
ReviewDate = StartDate + datetime.timedelta(weeks=52)
Probation = StartDate + datetime.timedelta(days=90)
# days to next birthday
Today = datetime.date.today()
if (Today.month == BirthDate.month and Today.day >= BirthDate.day or Today.month > BirthDate.month):
NextBirthdayYear = Today.year + 1
else:
NextBirthdayYear = Today.year
NextBirthday = datetime.date(NextBirthdayYear, BirthDate.month, BirthDate.day)
DaysToNextBirthday = NextBirthday - Today
if (Today.month == StartDate.month and Today.day >= StartDate.day or Today.month > StartDate.month):
NextReviewYear = Today.year + 1
else:
NextReviewYear = Today.year
NextReview = datetime.date(NextReviewYear, StartDate.month, StartDate.day)
# Output
print()
print("Employee Information:")
print()
print(f"Employee Name: {First} {Last}; {First[0].title()}.{Last}; {Last},{First[0]}.")
print("Employee Number: ", EmployeeNo)
print(f"Employee Review Date: {ReviewDate.strftime('%Y-%m-%d')}")
print("Days left to Employee's next Birthday:", DaysToNextBirthday.days)
print(f"Salary: ${Salary:,}")
print()
print(f"{EmployeeNo}:({First} {Last})")
print("-" * 50)
if ReviewDate > datetime.datetime.now():
print(f"Next Annual Review Date: {Business_Day(ReviewDate.date())}")
else:
print(f"Next Annual Review Date: {Business_Day(NextReview)}")
print(f"Probation Period End: {Probation.strftime('%Y-%m-%d')}")
print()
print(f"Eligible for company benefits on: {Probation.strftime('%B %d, %Y')} ")
print()
AnyKey = input("Press any key to Return to Menu....")
print()
print("Loading ", end="")
for wait in range(1, 11):
print('*', end=' ')
time.sleep(.2)
print()
return
#Option 4 Data Files and Default Values.
def Part_Four():
# opening Default values from file
with open('ECRDef.dat', 'r') as f:
RENTAL_NUMBER = int(f.readline())
RENT_RATE = int(f.readline())
KM_RATE = float(f.readline())
TAX = float(f.readline())
while True: #Program end loop
# INPUT STATEMENTS
print()
print(" Edsel Car Rental Company ")
print()
print("Please answer the following questions")
print()
print("*" * 40)
print(" To End Program and Return to Menu ")
print(" Enter 0 for Travel Days")
print("*" * 40)
print()
print(" Rental Details")
print()
while True:
try:
Num_Days_Rented = int(input(" Number of days the automobile was rented: "))
except:
print("Invalid Entry: Please input a Number")
else:
if Num_Days_Rented == 0:
print()
print("Loading ", end="")
for wait in range(1, 11):
print('*', end=' ')
time.sleep(.2)
print()
break
elif Num_Days_Rented < 0:
print("Invalid Entry Number Days Rented Cannot be Less than 0")
else:
break
if Num_Days_Rented == 0:
break
print()
print(" Mileage")
print()
while True:
try:
Start_Mileage = int(input(" Starting mileage when car was rented: "))
End_Mileage = int(input(" Ending mileage when car was returned: "))
except:
print("Invalid Entry: Please Input KM Amount")
else:
if Start_Mileage == "" or End_Mileage == "":
print("Cannot be blank - Please try again!")
elif Start_Mileage < 100:
print("Invalid Entry Starting Mileage cannot be less than 100km")
elif End_Mileage < Start_Mileage:
print("Invalid Entry: Ending Mileage Cannot be less than starting Mileage")
else:
break
print()
AnyKey = input("Press any key to Display Rental Invoice....")
print()
# CALCULATIONS PROCESSING
Km_Travelled = End_Mileage - Start_Mileage
Daily_Cost = Num_Days_Rented * RENT_RATE
Mileage_Cost = Km_Travelled * KM_RATE
Rent_Cost = Daily_Cost + Mileage_Cost
Tax_Cost = Daily_Cost * TAX
Total_Rent_Cost = Rent_Cost + Tax_Cost
# CALCULATIONS FOR OUTPUT TO USER
print()
print(" Edsel Car Rental Company ")
print(f" Rental Number")
print(f" #{RENTAL_NUMBER}")
print()
print(" Mileage:")
print("~" | |
# Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import ntpath
import os
import re
import shutil
import tempfile
import six
from argus import config as argus_config
from argus import exceptions
from argus.introspection.cloud import base
from argus import util
CONFIG = argus_config.CONFIG
# escaped characters for powershell paths
ESC = "( )"
SEP = "----" # default separator for network details blocks
NIC_KEYS = ["mac", "address", "gateway", "netmask", "dns", "dhcp"]
Address = collections.namedtuple("Address", ["v4", "v6"])
NICDetails = collections.namedtuple("NICDetails", NIC_KEYS)
Interface = collections.namedtuple('Interface', ['name', 'mtu'])
@contextlib.contextmanager
def _create_tempdir():
tempdir = tempfile.mkdtemp(prefix="cloudbaseinit-ci-tests")
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
@contextlib.contextmanager
def _create_tempfile(content=None):
with _create_tempdir() as temp:
file_desc, path = tempfile.mkstemp(dir=temp)
os.close(file_desc)
if content:
with open(path, 'w') as stream:
stream.write(content)
yield path
def _get_ntp_peers(output):
peers = []
for line in output.splitlines():
if not line.startswith("Peer: "):
continue
_, _, entry_peers = line.partition(":")
peers.extend(entry_peers.split(","))
return list(filter(None, map(six.text_type.strip, peers)))
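# Example (assumed w32tm output line): a line such as "Peer: pool.ntp.org" yields
# ['pool.ntp.org']; entries separated by commas on the same line are returned as separate items.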
def escape_path(path):
"""Escape the spaces in the given path in order to work with Powershell."""
for char in ESC:
path = path.replace(char, "`{}".format(char))
return path
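# Illustrative example (hypothetical path): escape_path(r"C:\Program Files (x86)")
# returns r"C:\Program` Files` `(x86`)", back-quoting each space and parenthesis
# so the path can be passed to PowerShell unquoted.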
def _get_ips(ips_as_string):
"""Returns viable v4 and v6 IPs from a space separated string."""
ips = ips_as_string.split(" ")[1:] # skip the header
ips_v4, ips_v6 = [], []
# There is no guarantee if all the IPs are valid and sorted by type.
for ip in ips:
if not ip:
continue
if "." in ip and ":" not in ip:
ips_v4.append(ip)
else:
ips_v6.append(ip)
return ips_v4, ips_v6
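# Example (hypothetical detail line): _get_ips("address 10.0.0.5 fe80::1") skips the
# "address" header and returns (['10.0.0.5'], ['fe80::1']).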
def _get_nic_details(details):
"""Get parsed network details from the raw ones."""
nic_details = dict.fromkeys(NIC_KEYS)
for detail in details:
if detail.startswith("mac"):
nic_details["mac"] = detail.split(" ")[1]
elif detail.startswith("address"):
v4s, v6s = _get_ips(detail)
if len(v6s) >= 2:
v6 = v6s[1]
else:
v6 = None
nic_details["address"] = Address(v4s[0], v6)
elif detail.startswith("gateway"):
v4s, v6s = _get_ips(detail)
v4 = v4s[0] if v4s else None
v6 = v6s[0] if v6s else None
nic_details["gateway"] = Address(v4, v6)
elif detail.startswith("netmask"):
# Similar to "address" field.
v4s, v6s = _get_ips(detail)
v4 = v4s[0]
if len(v6s) >= 2:
v6 = v6s[1]
else:
v6 = None
nic_details["netmask"] = Address(v4, v6)
elif detail.startswith("dns"):
v4s, v6s = _get_ips(detail)
nic_details["dns"] = Address(v4s, v6s)
elif detail.startswith("dhcp"):
nic_details["dhcp"] = detail.split(" ")[1].lower() == "true"
return NICDetails(**nic_details)
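# Example (hypothetical detail lines): ["mac 00:15:5D:01:02:03", "dhcp True"] produces a
# NICDetails whose mac is "00:15:5D:01:02:03" and dhcp is True; fields that never appear
# stay None because the dict is pre-filled with dict.fromkeys(NIC_KEYS).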
def get_cbinit_dir(execute_function):
"""Get the location of Cloudbase-Init from the instance."""
stdout = execute_function(
'$ENV:PROCESSOR_ARCHITECTURE', command_type=util.POWERSHELL)
architecture = stdout.strip()
locations = [execute_function('echo "$ENV:ProgramFiles"',
command_type=util.POWERSHELL)]
if architecture == 'AMD64':
location = execute_function(
'echo "${ENV:ProgramFiles(x86)}"',
command_type=util.POWERSHELL)
locations.append(location)
for location in locations:
location = location.strip()
_location = escape_path(location)
status = execute_function(
'Test-Path "{}\\Cloudbase` Solutions"'.format(
_location), command_type=util.POWERSHELL).strip().lower()
if status == "true":
return ntpath.join(
location,
"Cloudbase Solutions",
"Cloudbase-Init"
)
raise exceptions.ArgusError('Cloudbase-Init installation directory'
' not found')
def set_config_option(option, value, execute_function):
"""Set the value for the given *option* to *value*."""
line = "{} = {}".format(option, value)
cbdir = get_cbinit_dir(execute_function)
conf = ntpath.join(cbdir, "conf", "cloudbase-init.conf")
cmd = ('((Get-Content {0!r}) + {1!r}) |'
' Set-Content {0!r}'.format(conf, line))
execute_function(cmd, command_type=util.POWERSHELL)
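# Illustrative call (option name is hypothetical):
# set_config_option("ntp_use_dhcp_config", "false", run_cmd) appends the line
# "ntp_use_dhcp_config = false" to conf\cloudbase-init.conf via the PowerShell snippet above.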
def get_python_dir(execute_function):
"""Find python directory from the Cloudbase-Init installation."""
cbinit_dir = get_cbinit_dir(execute_function)
command = 'dir "{}" /b'.format(cbinit_dir)
stdout = execute_function(command,
command_type=util.CMD).strip()
names = list(filter(None, stdout.splitlines()))
for name in names:
if "python" in name.lower():
return ntpath.join(cbinit_dir, name)
def get_cbinit_key(execute_function):
"""Get the proper registry key for Cloudbase-Init."""
key = ("HKLM:SOFTWARE\\Cloudbase` Solutions\\"
"Cloudbase-init")
key_x64 = ("HKLM:SOFTWARE\\Wow6432Node\\Cloudbase` Solutions\\"
"Cloudbase-init")
cmd = 'Test-Path {}'.format(key)
result = execute_function(cmd, command_type=util.POWERSHELL)
if result.strip().lower() == "true":
return key
return key_x64
def get_os_version(client, field):
"""Gets the specified version from the OS.
:param client: represents the client object on which to run the command.
:param field: the version type.
"""
cmd = "[System.Environment]::OSVersion.Version.{}".format(field)
stdout, _, _ = client.run_command_with_retry(cmd,
command_type=util.POWERSHELL)
return util.get_int_from_str(stdout.strip())
def parse_netsh_output(output):
output = output.strip()
blocks = re.split(r"SubInterface\s+(.*?)-{46}\s+", output,
flags=re.DOTALL)
blocks = blocks[1:] # empty space
interfaces = blocks[0::2]
content = blocks[1::2]
Interfaces = []
for interface, block in zip(interfaces, content):
interface = interface.strip()
mtu = re.search(r"MTU\s*:\s*(\d+)\s+", block)
name, _, _ = interface.partition('Parameters')
if 'loopback' not in interface.lower():
Interfaces.append(Interface(name=name.strip(), mtu=mtu.group(1)))
return Interfaces
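# Sketch of the expected input (format assumed from the regex above): each block starts with
# "SubInterface <name> Parameters", is followed by a 46-dash underline, and contains an
# "MTU : <n>" field, yielding e.g. Interface(name='Ethernet', mtu='1500'); loopback
# subinterfaces are dropped.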
class InstanceIntrospection(base.CloudInstanceIntrospection):
"""Utilities for introspecting a Windows instance."""
def __init__(self, remote_client):
super(InstanceIntrospection, self).__init__(remote_client)
self._cmdlet = remote_client.manager.WINDOWS_MANAGEMENT_CMDLET
def get_disk_size(self):
cmd = ('({} win32_logicaldisk | where {{$_.DeviceID '
'-Match "C:"}}).Size').format(self._cmdlet)
return int(self.remote_client.run_command_verbose(
cmd, command_type=util.POWERSHELL))
def username_exists(self, username):
cmd = ('{0} Win32_Account | '
'where {{$_.Name -contains "{1}"}}'
.format(self._cmdlet, username))
stdout = self.remote_client.run_command_verbose(
cmd, command_type=util.POWERSHELL)
return bool(stdout)
def get_instance_ntp_peers(self):
command = 'w32tm /query /peers'
stdout = self.remote_client.run_command_verbose(command,
command_type=util.CMD)
return _get_ntp_peers(stdout)
def get_instance_keys_path(self):
cmd = 'echo %cd%'
stdout = self.remote_client.run_command_verbose(cmd,
command_type=util.CMD)
homedir, _, _ = stdout.rpartition(ntpath.sep)
return ntpath.join(
homedir, CONFIG.cloudbaseinit.created_user,
".ssh", "authorized_keys")
def get_instance_file_content(self, filepath):
cmd = '[io.file]::ReadAllText("%s")' % filepath
return self.remote_client.run_command_verbose(
cmd, command_type=util.POWERSHELL)
def get_userdata_executed_plugins(self):
cmd = r'(Get-ChildItem -Path C:\ *.txt).Count'
stdout = self.remote_client.run_command_verbose(
cmd, command_type=util.POWERSHELL)
return int(stdout)
def get_instance_mtu(self):
cmd = 'netsh interface ipv4 show subinterfaces level=verbose'
stdout = self.remote_client.run_command_verbose(
cmd, command_type=util.CMD)
return parse_netsh_output(stdout)[0]
def get_cloudbaseinit_traceback(self):
code = util.get_resource('windows/get_traceback.ps1')
remote_script = "C:\\{}.ps1".format(util.rand_name())
with _create_tempfile(content=code) as tmp:
self.remote_client.copy_file(tmp, remote_script)
stdout = self.remote_client.run_command_verbose(
remote_script,
command_type=util.POWERSHELL_SCRIPT_REMOTESIGNED)
return stdout.strip()
def _file_exist(self, filepath):
stdout = self.remote_client.run_command_verbose(
'Test-Path {}'.format(filepath), command_type=util.POWERSHELL)
return stdout.strip() == 'True'
def instance_exe_script_executed(self):
return self._file_exist("C:\\Scripts\\exe.output")
def get_group_members(self, group):
cmd = "net localgroup {}".format(group)
std_out = self.remote_client.run_command_verbose(
cmd, command_type=util.CMD)
member_search = re.search(
r"Members\s+-+\s+(.*?)The\s+command",
std_out, re.MULTILINE | re.DOTALL)
if not member_search:
raise ValueError('Unable to get members.')
return list(filter(None, member_search.group(1).split()))
def list_location(self, location):
command = "dir {} /b".format(location)
stdout = self.remote_client.run_command_verbose(
command, command_type=util.CMD)
return list(filter(None, stdout.splitlines()))
def get_trim_state(self):
# Query the current state of DisableDeleteNotify
# 1 - DeleteNotify is disabled
# 0 - DeleteNotify is enabled
command = "fsutil.exe behavior query disabledeletenotify"
stdout = self.remote_client.run_command_verbose(
command, command_type=util.CMD)
return "DisableDeleteNotify = 0" in stdout
def get_san_policy(self):
"""Get the SAN policy."""
return self.remote_client.manager.get_san_policy()
def get_power_setting_value(self):
command = ('powercfg.exe -query SCHEME_CURRENT SUB_VIDEO VIDEOIDLE'
' | findstr /R /C:"Current AC Power Setting Index"')
stdout = self.remote_client.run_command_verbose(
command, command_type=util.CMD)
return stdout.strip()
def get_service_triggers(self, service):
"""Get the triggers of the given service.
Return a tuple of two elements, where the first is the start
trigger and the second is the end trigger.
"""
command = "sc qtriggerinfo {}".format(service)
stdout = self.remote_client.run_command_verbose(
command, command_type=util.CMD)
match = re.search(r"START SERVICE\s+(.*?):.*?STOP SERVICE\s+(.*?):",
stdout, re.DOTALL)
if not match:
raise ValueError("Unable to get the triggers for the "
"given service.")
return (match.group(1).strip(), match.group(2).strip())
def get_instance_os_version(self):
"""Get the version of the underlying OS
Return a tuple of two elements, the major and the minor
version.
"""
major_version = get_os_version(self.remote_client, 'Major')
minor_version = get_os_version(self.remote_client, 'Minor')
return (major_version, minor_version)
def get_cloudconfig_executed_plugins(self):
expected = {
'b64', 'b64_1',
'gzip', 'gzip_1',
'gzip_base64', 'gzip_base64_1', 'gzip_base64_2'
}
files = {}
for basefile in expected:
path = ntpath.join("C:\\", basefile)
content = self.get_instance_file_content(path)
files[basefile] = content.strip()
return files
def get_timezone(self):
command = "tzutil /g"
stdout = self.remote_client.run_command_verbose(
"{}".format(command), command_type=util.POWERSHELL)
return stdout
def get_instance_hostname(self):
command = "hostname"
stdout = self.remote_client.run_command_verbose(
command, command_type=util.CMD)
return stdout.lower().strip()
def get_network_interfaces(self):
"""Get a list with dictionaries of network details.
If a value is an empty string, then that value is missing.
"""
location = r"C:\network_details.ps1"
self.remote_client.manager.download_resource(
resource_location="windows/network_details.ps1",
location=location)
# Run and parse the output, where each adapter details
# block is separated by a specific separator.
# Each block contains multiple fields separated by EOLs
# and each field contains multiple details separated by spaces.
output = self.remote_client.run_command_verbose(
location, command_type=util.POWERSHELL)
output = output.replace(SEP, "", 1)
nics = []
for block in output.split(SEP):
details = block.strip().splitlines()
if len(details) < 6:
continue # not enough, invalid data block
# Must follow `argus.util.NETWORK_KEYS` model.
nic_details = _get_nic_details(details)
nic = {
"mac": nic_details.mac,
"address": nic_details.address.v4,
"address6": nic_details.address.v6,
"gateway": nic_details.gateway.v4,
"gateway6": nic_details.gateway.v6,
"netmask": nic_details.netmask.v4,
"netmask6": nic_details.netmask.v6,
"dns": nic_details.dns.v4,
"dns6": nic_details.dns.v6,
"dhcp": nic_details.dhcp
}
nics.append(nic)
return nics
def get_user_flags(self, user):
cmd = self.remote_client.manager.get_agent_command(
agent_action="get_user_flags", source=user)
stdout = self.remote_client.run_command_verbose(cmd)
return stdout.strip()
def get_swap_status(self):
"""Get the swap memory status."""
swap_query = (r"HKLM:\SYSTEM\CurrentControlSet\Control\Session"
r" Manager\Memory Management")
cmd = r"(Get-ItemProperty '{}').PagingFiles".format(swap_query)
stdout = self.remote_client.run_command_verbose(cmd)
return stdout.strip()
def get_kms_host_settings(self):
licensing_query | |
<filename>examples/run_experiments.py
import argparse
import collections
import itertools
import glob
import json
import logging
import os
import shlex
import subprocess
import natsort
import numpy as np
import yaml
logger = logging.getLogger(__name__)
TrainedModel = collections.namedtuple('TrainedModel', [
'base_directory', 'model', 'environment', 'model_info', 'parameters',
])
def model_train(args, model, environment, dry_run, results_dir_base, skip_existing_train=False):
"""Run model training script."""
cmd_str = ""
needs_train = True
output_directory = os.path.join(results_dir_base, model['name'].replace(' ', '-'), environment)
try:
os.makedirs(output_directory)
except OSError:
logger.warning("Experiment directory '{}' already exists. Assuming trained model.".format(output_directory))
if skip_existing_train:
logger.warning("Skip existing train flag enabled, returning.")
return
needs_train = False
if needs_train:
process = model['train']['command'].strip().format(
environment=environment,
output=output_directory,
**model['hyperparameters']
)
process = shlex.split(process)
cmd_str = " ".join(process)
if dry_run:
logger.debug("[DRY RUN] Running: '{}'".format(cmd_str))
else:
logger.debug("Running: '{}'".format(cmd_str))
try:
subprocess.run(process, check=True)
except subprocess.CalledProcessError:
logger.error("Failed to train model '{}' on environment '{}'.".format(model['name'], environment))
return
    # Locate the trained model output file (the glob check is skipped on dry runs or when output_no_check is set).
if dry_run or model['train'].get('output_no_check', False):
output_model = os.path.join(output_directory, model['train']['output'])
else:
output_model = glob.glob(os.path.join(output_directory, model['train']['output']))
if not output_model:
logger.error("Unable to find trained model output file.")
return
# Fixes issue of 'normalize' file inside checkpoint folder
output_model = [f for f in output_model if 'normalize' not in f]
if not output_model:
logger.error("Unable to find trained model output file.")
return
output_model = natsort.natsorted(output_model, reverse=True)[0]
# Get all parameters used during training.
parameters = []
if 'parameters' in model['train']:
for parameters_filename in glob.glob(os.path.join(output_directory, model['train']['parameters'])):
with open(parameters_filename) as parameters_file:
for line in parameters_file:
try:
parameters.append(json.loads(line))
except ValueError:
continue
return TrainedModel(
base_directory=output_directory,
model=output_model,
model_info=model,
environment=environment,
parameters=parameters,
), cmd_str
def model_evaluate(args, model, environment, trained_model, dry_run):
"""Run model evaluation script."""
cmd_str = ""
needs_evaluate = True
output_directory = os.path.join(trained_model.base_directory, 'evaluations', environment)
# make sure file exists before running the rest
if not os.path.isfile(trained_model.model):
logger.error("Checkpoint does not exist: '{}', stopping eval.".format(
trained_model.model
))
return
try:
os.makedirs(output_directory)
except OSError:
if not args.force_evaluate:
logger.warning("Experiment directory '{}' already exists. Assuming evaluation done.".format(
output_directory
))
needs_evaluate = False
if needs_evaluate:
process = model['evaluate']['command'].strip().format(
environment=environment,
output=output_directory,
model=trained_model.model,
#**model['hyperparameters']
)
process = shlex.split(process)
cmd_str = " ".join(process)
if dry_run:
logger.debug("[DRY RUN] Running: '{}'".format(cmd_str))
else:
logger.debug("Running: '{}'".format(cmd_str))
try:
subprocess.run(process, check=True)
except subprocess.CalledProcessError:
logger.error("Failed to evaluate model '{}' on environment '{}'.".format(model['name'], environment))
return
# We can't evaluate the reward statistics on a dry run, so return a dummy dict
if dry_run:
return {}, cmd_str
else:
# Get evaluation results.
evaluation = glob.glob(os.path.join(output_directory, model['evaluate']['output']))
if not evaluation:
logger.error("Unable to find evaluation output file.")
return
evaluation = natsort.natsorted(evaluation, reverse=True)[0]
with open(evaluation) as evaluation_file:
episodes = []
for line in evaluation_file:
try:
episodes.append(json.loads(line))
except ValueError:
pass
rewards = np.asarray([data['reward'] for data in episodes])
return {
'model': trained_model.model_info,
'trained_on': trained_model.environment,
'trained_parameters': trained_model.parameters,
'evaluated_on': environment,
'episodes': episodes,
'rewards': {
'count': len(rewards),
'mean': float(np.mean(rewards)),
'median': float(np.median(rewards)),
'std': float(np.std(rewards)),
'min': float(np.min(rewards)),
'max': float(np.max(rewards)),
}
}, cmd_str
def random_evaluate(args, model, environment, results_dir, dry_run):
"""Similar to model_evaluate(), but doesn't take trained model as input.
Whereas the log dir format for regular (non-random) models is:
results_dir/model-name/training-env-name/progress.csv (logs, etc)
results_dir/model-name/training-env-name/checkpoints/* (saved model files)
results_dir/model-name/training-env-name/evaluations/testing-env-name/results.json (testing results)
The log dir format for random models is simply:
results_dir/Random/evaluations/testing-env-name/log.txt (testing log, etc.)
results_dir/Random/evaluations/testing-env-name/results.json (testing results)
"""
cmd_str = ""
needs_evaluate = True
output_directory = os.path.join(results_dir, model['name'], 'evaluations', environment)
try:
os.makedirs(output_directory)
except OSError:
if not args.force_evaluate:
logger.warning("Experiment directory '{}' already exists. Assuming evaluation done.".format(
output_directory
))
needs_evaluate = False
if needs_evaluate:
process = model['evaluate']['command'].strip().format(
environment=environment,
output=output_directory,
)
process = shlex.split(process)
cmd_str = " ".join(process)
if dry_run:
logger.debug("[DRY RUN] Running: '{}'".format(cmd_str))
else:
logger.debug("Running: '{}'".format(cmd_str))
try:
subprocess.run(process, check=True)
except subprocess.CalledProcessError:
logger.error("Failed to evaluate model '{}' on environment '{}'.".format(model['name'], environment))
return
# We can't evaluate the reward statistics on a dry run, so return a dummy dict
if dry_run:
return {}, cmd_str
else:
# Get evaluation results.
evaluation = glob.glob(os.path.join(output_directory, model['evaluate']['output']))
if not evaluation:
logger.error("Unable to find evaluation output file.")
return
evaluation = natsort.natsorted(evaluation, reverse=True)[0]
with open(evaluation) as evaluation_file:
episodes = []
for line in evaluation_file:
try:
episodes.append(json.loads(line))
except ValueError:
pass
rewards = np.asarray([data['reward'] for data in episodes])
return {
'model': model['name'],
'trained_on': 'N/A',
'trained_parameters': 'N/A',
'evaluated_on': environment,
'episodes': episodes,
'rewards': {
'count': len(rewards),
'mean': float(np.mean(rewards)),
'median': float(np.median(rewards)),
'std': float(np.std(rewards)),
'min': float(np.min(rewards)),
'max': float(np.max(rewards)),
}
}, cmd_str
def record_result(results_dir, result):
"""Record evaluation result."""
with open(os.path.join(results_dir, 'results.json'), 'a') as results_file:
results_file.write(json.dumps(result))
results_file.write('\n')
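# Note: results.json is appended to in JSON Lines form, one JSON object per recorded
# result, so it can be read back one line at a time.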
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=None)
parser.add_argument('experiments', type=str,
help="Experiments definition file (.yml)")
parser.add_argument('output', type=str,
help="Output directory for all experiments")
parser.add_argument('--filter-models', type=str, nargs='+',
help="Only run experiments on specified models")
parser.add_argument('--filter-envs', type=str, nargs='+',
help="Only run experiments on specified environments")
parser.add_argument('--force-evaluate', action='store_true',
help="Run experiment even if directory already exists")
# NOTE: This creates dummy files. Make sure to run on empty output
# directory and delete afterwards
parser.add_argument('--dry-run', action='store_true',
help="Print the expr. commands but don't run them")
parser.add_argument('--dry-run-file', default='run_experiments_cmds',
help="Where to store the commands (saves both .txt and .json)")
# NOTE: If used with --dry-run on an experiments.yml with explicit
# testing on train envs, script will fail
parser.add_argument('--eval-train', action='store_true',
help="Implicitly evaluate on the training environment")
parser.add_argument('--skip-existing-train', action='store_true',
help="Skip models dirs with existing checkpoint files but no evaluate dir")
parser.add_argument('--skip-existing-eval', action='store_true',
help="Skip models dirs with existing evaluate dir")
args = parser.parse_args()
# Configure logger.
logging.basicConfig(
format='%(asctime)-15s %(levelname)s: %(message)s',
level=logging.DEBUG,
)
# Store then write out the commands to be run (dry run only)
# Dict should look like:
# commands = {
# 'PPO': [{
# 'train': 'run xyz_command',
# 'test': ['run a_command', 'run b_command', ...]
# }],
#     'TRPO': [{
#         ...
#     }],
# }
if args.dry_run:
commands = {}
# Load experiments definitions.
logger.info("Loading experiment plan '{}'.".format(args.experiments))
with open(args.experiments) as experiments_file:
config = yaml.safe_load(experiments_file)
logger.info("Models:")
for model in config['models']:
logger.info(' * {}'.format(model['name']))
logger.info("Loaded {} environment sets.".format(len(config['environments'])))
for model in config['models']:
if args.filter_models and model['name'] not in args.filter_models:
continue
random_model = model['name'] == 'Random'
if random_model:
# Hyperparameters don't make sense for Random
assert 'hyperparameters' not in model
hp_dicts = [{}]
else:
# Check in advance to see if any model hyperparameters are specified as lists
# If so, then have an outer-loop that does a full sweep based on the list(s)
hp_original_copy = model['hyperparameters'].copy()
hps_with_list = list(filter(lambda kv: isinstance(kv[1], list), model['hyperparameters'].items()))
if len(hps_with_list) > 0:
logger.info("Found hyperparameter(s) specified as list, performing hyperparameter sweep.")
# Generate Cartesian product of all hyperparameters specified as lists
hp_names, hp_vals = zip(*hps_with_list)
hp_vals_combos = list(itertools.product(*hp_vals))
logger.info("Testing following combinations for {}: {}.".format(hp_names, hp_vals_combos))
# Create a different hyperparam. dict. for each combination
hp_dicts = []
for combo in hp_vals_combos:
base_dict = model['hyperparameters'].copy()
# Overwrite (list) values with specific combination
base_dict.update(dict(zip(hp_names, combo)))
hp_dicts.append(base_dict)
else:
hp_dicts = [model['hyperparameters']]
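# Illustrative sweep (hypothetical hyperparameters): lr: [0.1, 0.01] and gamma: [0.9, 0.99]
# expand via itertools.product into 4 hp_dicts, each later written to its own suffixed output dir.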
# Dump all the dicts for the log
logger.info("Full hyperparameter set(s) [total %d]:" % len(hp_dicts))
for hpd in hp_dicts:
logger.info(hpd)
# Loop over each hyperparameter configuration,
# and write-out to a different results DIR for each
output_dir_base = str(args.output)
for hp_dict in hp_dicts:
output_dir = output_dir_base
if not random_model:
# Code to modify the provided log dir (args.output) with hyperparameter suffix
model['hyperparameters'] = hp_dict
# Only append to output DIR if we're doing a hyperparameter sweep
if len(hp_dicts) > 1:
# Sort by key so that we get consistent log dirs
hp_dict_sorted = [(k, hp_dict[k]) for k in sorted(hp_dict)]
suffix = '_'.join(['{0}-{1}'.format(k, v) for k, v in hp_dict_sorted])
output_dir = output_dir_base + '___' + suffix
logger.info("Hyperparameter sweep dict: {}".format(hp_dict))
logger.info("Logging to: {}".format(output_dir))
logger.info("Evaluating model '{}'.".format(model['name']))
if args.dry_run:
# Algorithm -> list of train/test combos to try
if model['name'] not in commands:
commands[model['name']] = []
for environment in config['environments']:
if args.filter_envs and environment['train'] not in args.filter_envs:
continue
if args.skip_existing_train:
output_directory = os.path.join(output_dir_base, model['name'].replace(' ', '-'), environment['train'])
if os.path.isdir(output_directory):
logger.warning("Experiment directory '{}' already exists, and --skip-existing-train flag enabled. Skipping environment.".format(output_directory))
continue
if random_model:
# 'Random' is a special case where we don't actually train anything
logger.info("'Random' model specified, skipping training.")
else:
logger.info("Training on '{}'.".format(environment['train']))
ret = model_train(args, model, environment['train'], args.dry_run, output_dir, args.skip_existing_train)
if not ret:
continue
else:
trained_model, cmd = ret
if args.dry_run:
# Store the command for training on the train env
env_exprs = {}
if not random_model:
env_exprs['train'] = cmd
env_exprs['test'] = []
# Evaluate on the train environment
# In the current | |
SimTK::Array_< int >::size_type n, int const & fillValue)
Parameters
----------
n: SimTK::Array_< int >::size_type
fillValue: int const &
"""
return _simbody.SimTKArrayInt_assign(self, n, fillValue)
def fill(self, fillValue):
"""
fill(SimTKArrayInt self, int const & fillValue)
Parameters
----------
fillValue: int const &
"""
return _simbody.SimTKArrayInt_fill(self, fillValue)
def swap(self, other):
"""
swap(SimTKArrayInt self, SimTKArrayInt other)
Parameters
----------
other: SimTK::Array_< int > &
"""
return _simbody.SimTKArrayInt_swap(self, other)
def adoptData(self, *args):
"""
adoptData(SimTKArrayInt self, int * newData, SimTK::Array_< int >::size_type dataSize, SimTK::Array_< int >::size_type dataCapacity) -> SimTKArrayInt
Parameters
----------
newData: int *
dataSize: SimTK::Array_< int >::size_type
dataCapacity: SimTK::Array_< int >::size_type
adoptData(SimTKArrayInt self, int * newData, SimTK::Array_< int >::size_type dataSize) -> SimTKArrayInt
Parameters
----------
newData: int *
dataSize: SimTK::Array_< int >::size_type
"""
return _simbody.SimTKArrayInt_adoptData(self, *args)
def shareData(self, *args):
"""
shareData(SimTKArrayInt self, int * newData, SimTK::Array_< int >::size_type dataSize) -> SimTKArrayInt
Parameters
----------
newData: int *
dataSize: SimTK::Array_< int >::size_type
shareData(SimTKArrayInt self, int * first, int const * last1) -> SimTKArrayInt
Parameters
----------
first: int *
last1: int const *
"""
return _simbody.SimTKArrayInt_shareData(self, *args)
def size(self):
"""
size(SimTKArrayInt self) -> SimTK::Array_< int >::size_type
Parameters
----------
self: SimTK::Array_< int > const *
"""
return _simbody.SimTKArrayInt_size(self)
def max_size(self):
"""
max_size(SimTKArrayInt self) -> SimTK::Array_< int >::size_type
Parameters
----------
self: SimTK::Array_< int > const *
"""
return _simbody.SimTKArrayInt_max_size(self)
def empty(self):
"""
empty(SimTKArrayInt self) -> bool
Parameters
----------
self: SimTK::Array_< int > const *
"""
return _simbody.SimTKArrayInt_empty(self)
def capacity(self):
"""
capacity(SimTKArrayInt self) -> SimTK::Array_< int >::size_type
Parameters
----------
self: SimTK::Array_< int > const *
"""
return _simbody.SimTKArrayInt_capacity(self)
def resize(self, *args):
"""
resize(SimTKArrayInt self, SimTK::Array_< int >::size_type n)
Parameters
----------
n: SimTK::Array_< int >::size_type
resize(SimTKArrayInt self, SimTK::Array_< int >::size_type n, int const & initVal)
Parameters
----------
n: SimTK::Array_< int >::size_type
initVal: int const &
"""
return _simbody.SimTKArrayInt_resize(self, *args)
def reserve(self, n):
"""
reserve(SimTKArrayInt self, SimTK::Array_< int >::size_type n)
Parameters
----------
n: SimTK::Array_< int >::size_type
"""
return _simbody.SimTKArrayInt_reserve(self, n)
def shrink_to_fit(self):
"""
shrink_to_fit(SimTKArrayInt self)
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_shrink_to_fit(self)
def allocated(self):
"""
allocated(SimTKArrayInt self) -> SimTK::Array_< int >::size_type
Parameters
----------
self: SimTK::Array_< int > const *
"""
return _simbody.SimTKArrayInt_allocated(self)
def isOwner(self):
"""
isOwner(SimTKArrayInt self) -> bool
Parameters
----------
self: SimTK::Array_< int > const *
"""
return _simbody.SimTKArrayInt_isOwner(self)
def cbegin(self):
"""
cbegin(SimTKArrayInt self) -> int const *
Parameters
----------
self: SimTK::Array_< int > const *
"""
return _simbody.SimTKArrayInt_cbegin(self)
def begin(self, *args):
"""
begin(SimTKArrayInt self) -> int const
begin(SimTKArrayInt self) -> int *
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_begin(self, *args)
def cend(self):
"""
cend(SimTKArrayInt self) -> int const *
Parameters
----------
self: SimTK::Array_< int > const *
"""
return _simbody.SimTKArrayInt_cend(self)
def end(self, *args):
"""
end(SimTKArrayInt self) -> int const
end(SimTKArrayInt self) -> int *
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_end(self, *args)
def crbegin(self):
"""
crbegin(SimTKArrayInt self) -> SimTK::Array_< int >::const_reverse_iterator
Parameters
----------
self: SimTK::Array_< int > const *
"""
return _simbody.SimTKArrayInt_crbegin(self)
def rbegin(self, *args):
"""
rbegin(SimTKArrayInt self) -> SimTK::Array_< int >::const_reverse_iterator
rbegin(SimTKArrayInt self) -> SimTK::Array_< int >::reverse_iterator
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_rbegin(self, *args)
def crend(self):
"""
crend(SimTKArrayInt self) -> SimTK::Array_< int >::const_reverse_iterator
Parameters
----------
self: SimTK::Array_< int > const *
"""
return _simbody.SimTKArrayInt_crend(self)
def rend(self, *args):
"""
rend(SimTKArrayInt self) -> SimTK::Array_< int >::const_reverse_iterator
rend(SimTKArrayInt self) -> SimTK::Array_< int >::reverse_iterator
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_rend(self, *args)
def cdata(self):
"""
cdata(SimTKArrayInt self) -> int const *
Parameters
----------
self: SimTK::Array_< int > const *
"""
return _simbody.SimTKArrayInt_cdata(self)
def data(self, *args):
"""
data(SimTKArrayInt self) -> int const
data(SimTKArrayInt self) -> int *
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_data(self, *args)
def at(self, *args):
"""
at(SimTKArrayInt self, SimTK::Array_< int >::index_type i) -> int const
Parameters
----------
i: SimTK::Array_< int >::index_type
at(SimTKArrayInt self, SimTK::Array_< int >::index_type i) -> int &
Parameters
----------
i: SimTK::Array_< int >::index_type
"""
return _simbody.SimTKArrayInt_at(self, *args)
def getElt(self, i):
"""
getElt(SimTKArrayInt self, SimTK::Array_< int >::index_type i) -> int const &
Parameters
----------
i: SimTK::Array_< int >::index_type
"""
return _simbody.SimTKArrayInt_getElt(self, i)
def updElt(self, i):
"""
updElt(SimTKArrayInt self, SimTK::Array_< int >::index_type i) -> int &
Parameters
----------
i: SimTK::Array_< int >::index_type
"""
return _simbody.SimTKArrayInt_updElt(self, i)
def front(self, *args):
"""
front(SimTKArrayInt self) -> int const
front(SimTKArrayInt self) -> int &
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_front(self, *args)
def back(self, *args):
"""
back(SimTKArrayInt self) -> int const
back(SimTKArrayInt self) -> int &
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_back(self, *args)
def push_back(self, *args):
"""
push_back(SimTKArrayInt self, int const & value)
Parameters
----------
value: int const &
push_back(SimTKArrayInt self)
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_push_back(self, *args)
def raw_push_back(self):
"""
raw_push_back(SimTKArrayInt self) -> int *
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_raw_push_back(self)
def pop_back(self):
"""
pop_back(SimTKArrayInt self)
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_pop_back(self)
def erase(self, *args):
"""
erase(SimTKArrayInt self, int * first, int const * last1) -> int
Parameters
----------
first: int *
last1: int const *
erase(SimTKArrayInt self, int * p) -> int *
Parameters
----------
p: int *
"""
return _simbody.SimTKArrayInt_erase(self, *args)
def eraseFast(self, p):
"""
eraseFast(SimTKArrayInt self, int * p) -> int *
Parameters
----------
p: int *
"""
return _simbody.SimTKArrayInt_eraseFast(self, p)
def clear(self):
"""
clear(SimTKArrayInt self)
Parameters
----------
self: SimTK::Array_< int > *
"""
return _simbody.SimTKArrayInt_clear(self)
def insert(self, *args):
"""
insert(SimTKArrayInt self, int * p, SimTK::Array_< int >::size_type n, int const & value) -> int
Parameters
----------
p: int *
n: SimTK::Array_< int >::size_type
value: int const &
insert(SimTKArrayInt self, int * p, int const & value) -> int *
Parameters
----------
p: int *
value: int const &
"""
return _simbody.SimTKArrayInt_insert(self, *args)
SimTKArrayInt_swigregister = _simbody.SimTKArrayInt_swigregister
SimTKArrayInt_swigregister(SimTKArrayInt)
class SimTKArrayRotation(_object):
"""Proxy of C++ SimTK::Array_<(SimTK::Rotation_<(double)>)> class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SimTKArrayRotation, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SimTKArrayRotation, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(SimTK::Array_<(SimTK::Rotation_<(double)>)> self) -> SimTKArrayRotation
__init__(SimTK::Array_<(SimTK::Rotation_<(double)>)> self, SimTK::Array_< SimTK::Rotation_< double > >::size_type n) -> SimTKArrayRotation
Parameters
----------
n: SimTK::Array_< SimTK::Rotation_< double > >::size_type
__init__(SimTK::Array_<(SimTK::Rotation_<(double)>)> self, SimTK::Array_< SimTK::Rotation_< double > >::size_type n, Rotation initVal) -> SimTKArrayRotation
Parameters
----------
n: SimTK::Array_< SimTK::Rotation_< double > >::size_type
initVal: SimTK::Rotation_< double > const &
__init__(SimTK::Array_<(SimTK::Rotation_<(double)>)> self, SimTKArrayRotation src) -> SimTKArrayRotation
Parameters
----------
src: SimTK::Array_< SimTK::Rotation_< double > > const &
__init__(SimTK::Array_<(SimTK::Rotation_<(double)>)> self, Rotation first, Rotation last1, DontCopy arg4) -> SimTKArrayRotation
Parameters
----------
first: SimTK::Rotation_< double > *
last1: SimTK::Rotation_< double > const *
arg4: SimTK::DontCopy const &
"""
this = _simbody.new_SimTKArrayRotation(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _simbody.delete_SimTKArrayRotation
__del__ = lambda self: None
def deallocate(self):
"""
deallocate(SimTKArrayRotation self) -> SimTKArrayRotation
Parameters
----------
self: SimTK::Array_< SimTK::Rotation_< double > > *
"""
return _simbody.SimTKArrayRotation_deallocate(self)
def assign(self, n, fillValue):
"""
assign(SimTKArrayRotation self, SimTK::Array_< SimTK::Rotation_< double > >::size_type n, Rotation fillValue)
Parameters
----------
n: SimTK::Array_< SimTK::Rotation_< double > >::size_type
fillValue: SimTK::Rotation_< double > const &
"""
return _simbody.SimTKArrayRotation_assign(self, n, fillValue)
def fill(self, fillValue):
"""
fill(SimTKArrayRotation self, Rotation fillValue)
Parameters
----------
fillValue: SimTK::Rotation_< double > const &
"""
return _simbody.SimTKArrayRotation_fill(self, fillValue)
def swap(self, other):
"""
swap(SimTKArrayRotation self, SimTKArrayRotation other)
Parameters
----------
other: SimTK::Array_< SimTK::Rotation_< double > > &
"""
return _simbody.SimTKArrayRotation_swap(self, other)
def adoptData(self, *args):
"""
adoptData(SimTKArrayRotation self, Rotation newData, SimTK::Array_< SimTK::Rotation_< double > >::size_type dataSize, SimTK::Array_< SimTK::Rotation_< double > >::size_type dataCapacity) -> SimTKArrayRotation
Parameters
----------
newData: SimTK::Rotation_< double > *
dataSize: SimTK::Array_< SimTK::Rotation_< double > >::size_type
dataCapacity: SimTK::Array_< SimTK::Rotation_< double > >::size_type
adoptData(SimTKArrayRotation self, Rotation newData, SimTK::Array_< SimTK::Rotation_< double > >::size_type dataSize) -> SimTKArrayRotation
Parameters
----------
newData: SimTK::Rotation_< double > *
dataSize: SimTK::Array_< SimTK::Rotation_< double > >::size_type
"""
return _simbody.SimTKArrayRotation_adoptData(self, *args)
def shareData(self, *args):
"""
shareData(SimTKArrayRotation self, Rotation newData, SimTK::Array_< SimTK::Rotation_< double > >::size_type dataSize) -> SimTKArrayRotation
Parameters
----------
newData: SimTK::Rotation_< double > *
dataSize: SimTK::Array_< SimTK::Rotation_< double > >::size_type
shareData(SimTKArrayRotation self, Rotation first, Rotation last1) -> SimTKArrayRotation
Parameters
----------
first: SimTK::Rotation_< double > *
last1: SimTK::Rotation_< double > const *
| |
increase lineN when the last line doesn't end with '\n'
if lineN < len(lines)-1 or line[-1] == '\n' :
lineN += 1
else:
raise ValueError("Unknown lineBreaking option ({}) is"
"specified.".format(lineBreaking))
# convert the vertices to stimulus units
self._rawVerts = vertices / self._pixelScaling
# thisW = current[0] - glyph.advance[0] + glyph.size[0] * alphaCorrection
# calculate final self.size and tightBox
if np.isnan(self._requestedSize[0]):
self.size[0] = max(self._lineWidths) + self.padding*2
if np.isnan(self._requestedSize[1]):
self.size[1] = ((lineN + 1) * self._lineHeight / self._pixelScaling
+ self.padding * 2)
# if we had to add more glyphs to make possible then
if self.glFont._dirty:
self.glFont.upload()
self.glFont._dirty = False
self._needVertexUpdate = True
def _getStartingVertices(self):
"""Returns vertices for a single non-printing char as a proxy
(needed to get location for caret when there are no actual chars)"""
yTop = self._anchorOffsetY - (self.glFont.height - self.glFont.ascender) * self.lineSpacing
yBot = yTop - self._lineHeight
x = 0
theseVertices = np.array([[x, yTop], [x, yBot], [x, yBot], [x, yTop]])
return theseVertices
def draw(self):
"""Draw the text to the back buffer"""
# Border width
self.box.setLineWidth(self.pallette['lineWidth']) # Use 1 as base if border width is none
#self.borderWidth = self.box.lineWidth
# Border colour
self.box.setLineColor(self.pallette['lineColor'], colorSpace='rgb')
#self.borderColor = self.box.lineColor
# Background
self.box.setFillColor(self.pallette['fillColor'], colorSpace='rgb')
#self.fillColor = self.box.fillColor
if self._needVertexUpdate:
#print("Updating vertices...")
self._updateVertices()
if self.fillColor is not None or self.borderColor is not None:
self.box.draw()
# self.boundingBox.draw() # could draw for debug purposes
gl.glPushMatrix()
self.win.setScale('pix')
gl.glActiveTexture(gl.GL_TEXTURE0)
gl.glBindTexture(gl.GL_TEXTURE_2D, self.glFont.textureID)
gl.glEnable(gl.GL_TEXTURE_2D)
gl.glDisable(gl.GL_DEPTH_TEST)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_COLOR_ARRAY)
gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glVertexPointer(2, gl.GL_DOUBLE, 0, self.verticesPix.ctypes)
gl.glColorPointer(4, gl.GL_DOUBLE, 0, self._colors.ctypes)
gl.glTexCoordPointer(2, gl.GL_DOUBLE, 0, self._texcoords.ctypes)
self.shader.bind()
self.shader.setInt('texture', 0)
self.shader.setFloat('pixel', [1.0 / 512, 1.0 / 512])
nVerts = len(self._text)*4
gl.glDrawArrays(gl.GL_QUADS, 0, nVerts)
self.shader.unbind()
# removed the colors and font texture
gl.glDisableClientState(gl.GL_COLOR_ARRAY)
gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY)
gl.glDisableVertexAttribArray(1)
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glActiveTexture(gl.GL_TEXTURE0)
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
gl.glDisable(gl.GL_TEXTURE_2D)
if self.hasFocus: # draw caret line
self.caret.draw()
gl.glPopMatrix()
def reset(self):
# Reset contents
self.text = self.startText
def clear(self):
# Clear contents
self.text = ""
def contains(self, x, y=None, units=None, tight=False):
"""Returns True if a point x,y is inside the stimulus' border.
Can accept variety of input options:
+ two separate args, x and y
+ one arg (list, tuple or array) containing two vals (x,y)
+ an object with a getPos() method that returns x,y, such
as a :class:`~psychopy.event.Mouse`.
Returns `True` if the point is within the area defined either by its
`border` attribute (if one defined), or its `vertices` attribute if
there is no .border. This method handles
complex shapes, including concavities and self-crossings.
Note that, if your stimulus uses a mask (such as a Gaussian) then
this is not accounted for by the `contains` method; the extent of the
stimulus is determined purely by the size, position (pos), and
orientation (ori) settings (and by the vertices for shape stimuli).
See Coder demos: shapeContains.py
"""
if tight:
return self.boundingBox.contains(x, y, units)
else:
return self.box.contains(x, y, units)
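# Example (hypothetical): textbox.contains(mouse) tests the padded box, while
# textbox.contains(mouse, tight=True) tests only the tight bounding box around the glyphs.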
def overlaps(self, polygon, tight=False):
"""Returns `True` if this stimulus intersects another one.
If `polygon` is another stimulus instance, then the vertices
and location of that stimulus will be used as the polygon.
Overlap detection is typically very good, but it
can fail with very pointy shapes in a crossed-swords configuration.
Note that, if your stimulus uses a mask (such as a Gaussian blob)
then this is not accounted for by the `overlaps` method; the extent
of the stimulus is determined purely by the size, pos, and
orientation settings (and by the vertices for shape stimuli).
Parameters
See coder demo, shapeContains.py
"""
if tight:
return self.boundingBox.overlaps(polygon)
else:
return self.box.overlaps(polygon)
def _updateVertices(self):
"""Sets Stim.verticesPix and ._borderPix from pos, size, ori,
flipVert, flipHoriz
"""
# check whether stimulus needs flipping in either direction
flip = np.array([1, 1])
if hasattr(self, 'flipHoriz') and self.flipHoriz:
flip[0] = -1 # True=(-1), False->(+1)
if hasattr(self, 'flipVert') and self.flipVert:
flip[1] = -1 # True=(-1), False->(+1)
font = self.glFont
# to start with the anchor is bottom left of *first line*
if self._anchorY == 'top':
self._anchorOffsetY = (-font.ascender / self._pixelScaling
- self.padding)
boxOffsetY = - self.size[1] / 2.0
elif self._anchorY == 'center':
self._anchorOffsetY = (
self.size[1] / 2
- (font.height / 2 - font.descender) / self._pixelScaling
- self.padding
)
boxOffsetY = 0
elif self._anchorY == 'bottom':
self._anchorOffsetY = (
self.size[1]
- (font.height / 2 + font.ascender) / self._pixelScaling
)
# self._anchorOffsetY = (-font.ascender / self._pixelScaling
# - self.padding)
boxOffsetY = + (self.size[1]) / 2.0
else:
raise ValueError('Unexpected value for _anchorY')
# calculate anchor offsets (text begins on left=0, box begins center=0)
if self._anchorX == 'right':
self._anchorOffsetX = - self.size[0] + self.padding
boxOffsetX = - self.size[0] / 2.0
elif self._anchorX == 'center':
self._anchorOffsetX = - self.size[0] / 2.0 + self.padding
boxOffsetX = 0
elif self._anchorX == 'left':
self._anchorOffsetX = 0 + self.padding
boxOffsetX = + self.size[0] / 2.0
else:
raise ValueError('Unexpected value for _anchorX')
self.vertices = self._rawVerts + (self._anchorOffsetX, self._anchorOffsetY)
vertsPix = convertToPix(vertices=self.vertices,
pos=self.pos,
win=self.win, units=self.units)
self.__dict__['verticesPix'] = vertsPix
# tight bounding box
if self.vertices.shape[0] < 1: # editable box with no letters?
self.boundingBox.size = 0, 0
self.boundingBox.pos = self.pos
else:
L = self.vertices[:, 0].min()
R = self.vertices[:, 0].max()
B = self.vertices[:, 1].min()
T = self.vertices[:, 1].max()
tightW = R-L
Xmid = (R+L)/2
tightH = T-B
Ymid = (T+B)/2
# for the tight box anchor offset is included in vertex calcs
self.boundingBox.size = tightW, tightH
self.boundingBox.pos = self.pos + (Xmid, Ymid)
# box (larger than bounding box) needs anchor offest adding
self.box.pos = self.pos + (boxOffsetX, boxOffsetY)
self.box.size = self.size # this might have changed from _requested
self._needVertexUpdate = False
def _onText(self, chr):
"""Called by the window when characters are received"""
if chr == '\t':
self.win.nextEditable()
return
if chr == '\r': # make it newline not Carriage Return
chr = '\n'
self.addCharAtCaret(chr)
if self.onTextCallback:
self.onTextCallback()
def _onCursorKeys(self, key):
"""Called by the window when cursor/del/backspace... are received"""
if key == 'MOTION_UP':
self.caret.row -= 1
elif key == 'MOTION_DOWN':
self.caret.row += 1
elif key == 'MOTION_RIGHT':
self.caret.char += 1
elif key == 'MOTION_LEFT':
self.caret.char -= 1
elif key == 'MOTION_BACKSPACE':
self.deleteCaretLeft()
elif key == 'MOTION_DELETE':
self.deleteCaretRight()
elif key == 'MOTION_NEXT_WORD':
pass
elif key == 'MOTION_PREVIOUS_WORD':
pass
elif key == 'MOTION_BEGINNING_OF_LINE':
self.caret.char = 0
elif key == 'MOTION_END_OF_LINE':
self.caret.char = END_OF_THIS_LINE
elif key == 'MOTION_NEXT_PAGE':
pass
elif key == 'MOTION_PREVIOUS_PAGE':
pass
elif key == 'MOTION_BEGINNING_OF_FILE':
pass
elif key == 'MOTION_END_OF_FILE':
pass
else:
print("Received unhandled cursor motion type: ", key)
@property
def hasFocus(self):
if self.win and self.win.currentEditable == self:
return True
return False
@hasFocus.setter
def hasFocus(self, focus):
if focus is False and self.hasFocus:
# If focus is being set to False, tell window to
# give focus to next editable.
if self.win:
self.win.nextEditable()
elif focus is True and self.hasFocus is False:
# If focus is being set True, set textbox instance to be
# window.currentEditable.
if self.win:
self.win.currentEditable=self
return False
def getText(self):
"""Returns the current text in the box, including formating tokens."""
return self.text
@property
def visibleText(self):
"""Returns the current visible text in the box"""
return self._text
def getVisibleText(self):
"""Returns the current visible text in the box"""
return self.visibleText
@attributeSetter
def pos(self, value):
"""The position of the center of the TextBox in the stimulus
:ref:`units <units>`
`value` should be an :ref:`x,y-pair <attrib-xy>`.
:ref:`Operations <attrib-operations>` are also supported.
Example::
stim.pos = (0.5, 0) # Set slightly to the right of center
stim.pos += (0.5, -1) # Increment pos rightwards and upwards.
Is now (1.0, -1.0)
stim.pos *= 0.2 # Move stim towards the center.
Is now (0.2, -0.2)
Tip: If you need the position of stim in pixels, you can obtain
it like this:
from psychopy.tools.monitorunittools import posToPix
posPix = posToPix(stim)
"""
self.__dict__['pos'] = val2array(value, False, False)
try:
self.box.pos = (self.__dict__['pos'] +
(self._anchorOffsetX, self._anchorOffsetY))
except AttributeError:
pass # may not be created yet, which is fine
self._needVertexUpdate = True
self._needUpdate = True
def setText(self, text=None, log=None):
"""Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message.
"""
setAttribute(self, |