filename (string, 13-19 chars) | text (string, 134-1.04M chars)
---|---|
the-stack_0_15872 | from collections import OrderedDict
import six
from django.http import Http404
from django.utils.encoding import force_text
from rest_framework import exceptions, status
from rest_framework.compat import set_rollback
from django.core.exceptions import PermissionDenied
from rest_framework.response import Response
from django.utils.translation import ugettext_lazy as _
class APIError(Exception):
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
default_detail = _('A server error occurred.')
default_error_slug = 'internal_error'
def __init__(self, detail=None, error_slug=None):
if detail is not None:
self.detail = force_text(detail)
self.error_slug = force_text(error_slug)
else:
self.detail = force_text(self.default_detail)
self.error_slug = force_text(self.default_error_slug)
def __str__(self):
return self.detail
def custom_exception_handler(exc, context):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's built-in `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
"""
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, 'auth_header', None):
headers['WWW-Authenticate'] = exc.auth_header
if getattr(exc, 'wait', None):
headers['Retry-After'] = '%d' % exc.wait
if isinstance(exc.detail, (list, dict)):
# Concatenate all field and non_field errors for message:
message = ''
for key in exc.detail:
try:
if isinstance(exc.detail[key], str):
message += exc.detail[key] + ' '
else:
for error in exc.detail[key]:
# Don't include duplicates in universal error message
if error not in message:
message += error + ' '
except TypeError:
if key == 'non_field_errors':
message = exc.detail[key][0]
else:
message = _('Invalid request.')
if message.endswith(' '):
message = message[:-1] # remove last space
data = OrderedDict([('status', 'error'), ('message', message), ('data', exc.detail)])
else:
data = OrderedDict([('status', 'error'), ('message', exc.detail)])
set_rollback()
return Response(data, status=exc.status_code, headers=headers)
elif isinstance(exc, Http404):
msg = _('Not found.')
data = {'status': 'error', 'message': six.text_type(msg)}
set_rollback()
return Response(data, status=status.HTTP_404_NOT_FOUND)
elif isinstance(exc, PermissionDenied):
msg = _('Permission denied.')
data = {'status': 'error', 'message': six.text_type(msg)}
set_rollback()
return Response(data, status=status.HTTP_403_FORBIDDEN)
# Note: Unhandled exceptions will raise a 500 error.
return None
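# A minimal wiring sketch (assumption: this module lives at e.g. myapp/exceptions.py).
# Django REST framework picks up a custom handler through the EXCEPTION_HANDLER setting:
#
#     REST_FRAMEWORK = {
#         'EXCEPTION_HANDLER': 'myapp.exceptions.custom_exception_handler',
#     }
#
# DRF then calls custom_exception_handler(exc, context) for every exception raised in a
# view; returning None (as above for unhandled exceptions) falls back to a standard 500.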
|
the-stack_0_15873 | import torch
import sys
import csv
import nestedtensor
import utils
import torchvision
from torch.nn import functional as F
import random
class DETRNestedTensor(object):
def __init__(self, tensors, mask):
self.tensors = tensors
self.mask = mask
def to(self, *args, **kwargs):
cast_tensor = self.tensors.to(*args, **kwargs)
cast_mask = self.mask.to(
*args, **kwargs) if self.mask is not None else None
return type(self)(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
@classmethod
def from_tensor_list(cls, tensor_list):
# TODO make this more general
if tensor_list[0].ndim == 3:
# TODO make it support different-sized images
max_size = tuple(max(s)
for s in zip(*[img.shape for img in tensor_list]))
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = (len(tensor_list),) + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1],
: img.shape[2]].copy_(img)
m[: img.shape[1], :img.shape[2]] = False
else:
raise ValueError('not supported')
return cls(tensor, mask)
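# Illustrative usage sketch (not part of the benchmark): from_tensor_list pads a list of
# C x H x W images to the per-batch maximum H and W and returns a boolean mask that is
# False over valid pixels and True over padding, e.g.
#
#     imgs = [torch.randn(3, 4, 5), torch.randn(3, 6, 2)]
#     padded, mask = DETRNestedTensor.from_tensor_list(imgs).decompose()
#     # padded.shape == (2, 3, 6, 5); mask.shape == (2, 6, 5)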
# Performance tanks hard for lots of small Tensors as expected
DEVICE = torch.device('cuda')
NDIM = 256
NHEAD = 8
MODEL = torch.nn.MultiheadAttention(NDIM, NHEAD).to(DEVICE).eval()
def run_benchmark(bsz, mean_i, mean_j, var, writer):
RAND_INTS = [(int(random.gauss(mean_j, var)), int(
random.gauss(mean_i, var))) for _ in range(bsz)]
src_ = nestedtensor.nested_tensor(
[torch.randn(NDIM * i * j).float().reshape(NDIM, i, j) for (i, j) in RAND_INTS], device=DEVICE, dtype=torch.float)
src = []
for i, s in enumerate(src_):
src.append(i*len(s) + s)
detr_nt_src = DETRNestedTensor.from_tensor_list(src)
sparsity = int(detr_nt_src.decompose()[1].float().mean().item() * 10) / 10
def gen_t_loop_mha(src):
detr_nt_src = DETRNestedTensor.from_tensor_list(src)
src, mask = detr_nt_src.decompose()
src = src.flatten(2).permute(2, 0, 1).contiguous()
mask = mask.flatten(1).contiguous()
def te():
MODEL(src, src, src, key_padding_mask=mask,
need_weights=False)
return te
def gen_nt_mha(src):
src = nestedtensor.nested_tensor([t.flatten(1).permute(
1, 0) for t in src], device=DEVICE, dtype=torch.float)
def nt():
MODEL(src, src, src, need_weights=False)
return nt
result_t = {**utils.benchmark_fn(gen_t_loop_mha(src), 5.0, cuda=True), "bsz": bsz,
"sparsity": sparsity, "var": var, "mean_i": mean_i, "mean_j": mean_j}
result_t["numel"] = sum([x.numel() for x in src_])
result_t["numel_div_avg_us"] = result_t["numel"] / result_t["avg_us"]
result_t["avg_ns_div_numel"] = result_t["avg_us"] / \
result_t["numel"] * 1000
writer.writerow(result_t)
result_nt = {**utils.benchmark_fn(gen_nt_mha(src), 5.0, cuda=True),
"bsz": bsz, "sparsity": 0.0, "var": var, "mean_i": mean_i, "mean_j": mean_j}
result_nt["numel"] = sum([x.numel() for x in src_])
result_nt["numel_div_avg_us"] = result_nt["numel"] / result_nt["avg_us"]
result_nt["avg_ns_div_numel"] = result_nt["avg_us"] / \
result_nt["numel"] * 1000
writer.writerow(result_nt)
if __name__ == "__main__":
random.seed(1011)
torch.manual_seed(1011)
writer = csv.DictWriter(sys.stdout, fieldnames=[
"name", "avg_us", "std_us", "runs", "bsz", "sparsity",
"var", "mean_i", "mean_j", "numel", "numel_div_avg_us",
"avg_ns_div_numel"])
writer.writeheader()
for var in [float(i) / 10 for i in range(0, 100, 50)]:
for batch_size in [2, 8, 16]:
run_benchmark(batch_size, 30, 30, var, writer)
|
the-stack_0_15876 | from src import Env, LoggerFactory as Logger
LEGEND = '''
Legend:
R : Robot
B : Baby
C : Corral
# : Obstacle
* : Dirt
- : Empty cell
'''
def main(args, log):
robot = 'Reagent'
robotA = 'Practical'
if args.practical:
robot = 'Practical'
robotA = 'Reagent'
#//HACKME:Monkey patch current_agent
#src.agent.current_agent.func = current_agent
house = None
while True:
e = Env(args.rows, args.columns, args.dirtiness, args.obstacules, args.babies, args.time, args.bernoulli, robot)
house = e.copy_house()
log.info('The generated environment is:')
print(e)
        print(LEGEND)
        print('Is this environment ok for you? Enter REPEAT to re-generate, or anything else to continue')
s = input()
if s != 'REPEAT':
break
mean, mess = e.simulate(args.interactive)
if e.fired:
log.info('The time is over, task failed for robot')
log.info(f'The amount of dirt at the end of this simulation is: {mess}')
if args.simulation:
mean_first = mean
print('\n')
eA = Env(args.rows, args.columns, args.dirtiness, args.obstacules, args.babies, args.time, args.bernoulli, robotA)
eA.house = house
mean, mess = eA.simulate(args.interactive)
if eA.fired:
log.info('The time is over, task failed for robot')
log.info(f'The amount of dirt at the end of this simulation is: {mess}')
print('\n')
print('*************************************************************')
        print('Final Results:')
print('\n')
print(f'Task completed by {robot} agent: {e.succeded}')
print(f'Task completed by {robotA} agent: {eA.succeded}')
print('\n')
print(f'{robot} agent fired: {e.fired}')
print(f'{robotA} agent fired: {eA.fired}')
print('\n')
print(f'Percentage of dirt at the end of this simulation of the {robot} agent: {mean_first}')
print(f'Percentage of dirt at the end of this simulation of the {robotA} agent: {mean}')
print('\n')
print(f'Final house env of {robotA} agent')
print('\n')
print(eA)
print('\n')
print(f'Final house env of {robot} agent')
print('\n')
print(e)
print('*************************************************************')
if __name__ == '__main__':
import argparse
import os
    parser = argparse.ArgumentParser(description='Kindergarten simulator')
parser.add_argument('-r', '--rows', type=int, default=9, help='number of rows of the house')
parser.add_argument('-c', '--columns', type=int, default=8, help='number of columns of the house')
parser.add_argument('-d', '--dirtiness', type=int, default=30, help='percentage of dirty cells at the start of the simulation')
    parser.add_argument('-o', '--obstacules', type=int, default=20, help='percentage of obstacles at the start of the simulation')
parser.add_argument('-b', '--babies', type=int, default=5, help='number of babies in the house')
parser.add_argument('-t', '--time', type=int, default=5, help='number of turns between changes of the environment')
parser.add_argument('-p', '--practical', type=bool, const=True, nargs='?', help='set if you want to simulate Practical agent. Reagent agent is default')
parser.add_argument('-P', '--bernoulli', type=float, default=0.5, help='probability of a baby moving in an environment change (0 to 1)')
parser.add_argument('-l', '--level', type=str, default='INFO', help='log level')
parser.add_argument('-f', '--file', type=bool, const=True, nargs='?', help='set if you want log to a file')
parser.add_argument('-i', '--interactive', type=bool, const=True, nargs='?', help='set if you want to see what happens in every turn')
parser.add_argument('-s', '--simulation', type=bool, const=True, nargs='?', help='set if you want to simulate all the agents')
args = parser.parse_args()
if not os.path.exists('./logs'):
os.mkdir('./logs/')
log = Logger(name='Kindergarden', log=args.file)
log.setLevel(args.level)
main(args, log)
|
the-stack_0_15879 | import os
def ui2main(ui_name, file_name = 'main.py', model_name = '*_ui2py.py'):
if model_name == '*_ui2py.py':
model_name = ui_name.split('.')[0] + '_ui2py.py'
if os.path.exists(ui_name):
if os.path.exists(model_name):
            print('The .py file converted directly from the .ui file, ' + model_name + ', already exists')
        else:
            print('Converting the .ui file to: ' + model_name)
# os.system('pyuic5 -o ' + model_name + ' ' + ui_name)
os.system('python3 -m PyQt5.uic.pyuic -o ' + model_name + ' ' + ui_name)
while True:
if os.path.exists(model_name):
break
if os.path.exists(file_name):
            print('The .py file for implementing the functionality (run this function), ' + file_name + ', already exists')
        else:
            print('Generating the main program file at: ' + file_name)
model_text = open(model_name,encoding='utf8').read().split('\n')
            msg = {'model_class':'not recognized', 'button':[], 'action':[], 'combobox':[]}
for line in model_text:
if 'class ' in line:
msg['model_class'] = line.split(' ')[1].split('(')[0]
elif 'QtWidgets.QPushButton(' in line:
button_name = line.split(' = ')[0] # .replace(' ','')
msg['button'].append(button_name)
elif '= QtWidgets.QAction(' in line:
action_name = line.split(' = ')[0]
msg['action'].append(action_name)
elif 'QtWidgets.QComboBox(' in line:
combobox_name = line.split(' = ')[0]
msg['combobox'].append(combobox_name)
            buttonactive_test = '\n # Enable all buttons, menu actions and combo boxes for testing; comment out in real use\n'
            button_text = '\n # Connect events to handler functions\n'
            buttonfun_text = '\n\n # Buttons\n'
for button in msg['button']:
buttonactive_test += button + '.setEnabled(True)\n'
button_text += button +'.clicked.connect(' + button.replace(' ','') + '_ClickFun)\n'
                buttonfun_text += ' def ' + button.replace(' ','').replace('self.', '') + '_ClickFun(self):\n print("You pressed the " + ' + button.replace(' ','') + '.text() + " button")\n\n'
actionactive_test = '\n'
action_text = '\n'
            actionfun_text = '\n # Menu actions\n'
for action in msg['action']:
actionactive_test += action + '.setEnabled(True)\n'
action_text += action + '.triggered.connect(' + action.replace(' ', '') + '_ClickFun)\n'
                actionfun_text += ' def ' + action.replace(' ', '').replace('self.', '') + '_ClickFun(self):\n print("You pressed the " + ' + action.replace(' ', '') + '.text() + " menu action")\n\n'
comboboxactive_test = '\n'
combobox_text = '\n'
            comboboxfun_text = '\n # Combo boxes\n'
for combobox in msg['combobox']:
comboboxactive_test += combobox + '.setEnabled(True)\n'
combobox_text += combobox + '.currentIndexChanged.connect(' + combobox.replace(' ', '') + '_ClickFun)\n'
                comboboxfun_text += ' def ' + combobox.replace(' ', '').replace('self.', '') + '_ClickFun(self):\n print("You changed this combo box to " + ' + combobox.replace(' ', '') + '.currentText())\n\n'
sum_test = buttonactive_test + actionactive_test + comboboxactive_test +\
button_text + action_text + combobox_text +\
buttonfun_text + actionfun_text + comboboxfun_text
file_text = open(str(__file__).replace('__init__.py', 'model.txt'), encoding='utf8').read()
file_text = file_text.replace('MyFunction',
str(os.path.realpath(__file__)).replace('\\', '/').split('/')[-3])
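            # NOTE: the tokens replaced below ('模板类', '模板', '此处是一堆连接') are placeholders that must
            # match the bundled model.txt template verbatim, so they are intentionally left untranslated.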
file_text = file_text.replace('模板类', msg['model_class'])
file_text = file_text.replace('模板', model_name.split('.')[0])
file_text = file_text.replace('此处是一堆连接', sum_test)
open(file_name, 'w+', encoding='utf8').write(file_text)
            print('Done')
else:
        print('File ' + ui_name + ' does not exist! Exiting now')
# if __name__ == '__main__':
# CreateWritngFile('main.ui', 'test.py') |
the-stack_0_15880 | from collections import defaultdict
all_ingredients = []
candidates = defaultdict(list)
with open('in', 'r') as f:
for line in f.readlines():
ingredients, allergens = line.split(' (')
ingredients = ingredients.strip().split()
allergens = allergens.replace('contains ', '').replace(')', '').strip().split(', ')
all_ingredients += ingredients
for allergen in allergens:
candidates[allergen].append(set(ingredients))
are_allergens = set()
for allergen in candidates.keys():
options = set(all_ingredients)
for i in candidates[allergen]:
options = options.intersection(i)
for i in options:
are_allergens.add(i)
count = 0
for ingredient in all_ingredients:
if ingredient not in are_allergens:
count += 1
print(count)
|
the-stack_0_15881 | #standard imports
from typing import Tuple,Union
#scientific imports
import numpy as np
from sympy.ntheory import factorint
#project imports
from data_handler.signal_features import get_time_step
def calculate_flicker_amplitude(data:np.ndarray) -> float:
"""
Computes the flicker amplitude after Bastien et al. (2013)
:param data: data consisting of the full lightcurve
:return: flicker amplitude
"""
flicker_time = get_flicker_time(data)
t_step = get_time_step(data)
elements = data[0].shape
box_size = np.round(flicker_time/t_step)
    bin_count = int(elements[0] / box_size)
    points_left = elements[0] - box_size * bin_count
index_shift,cols = get_index_shift(points_left)
mean_array, subtract_array_amplitude = get_flicker_arrays(data, elements, cols, index_shift, box_size, flicker_time)
mean_amp = np.mean(mean_array)
subtract_array_amplitude = np.unique(subtract_array_amplitude)
flic_amplitude = 0
for i in range(0, len(subtract_array_amplitude)):
flic_amplitude += (subtract_array_amplitude[i] - mean_amp) ** 2
denominator = float(len(subtract_array_amplitude))
amp_flic = np.sqrt(flic_amplitude / denominator)
return amp_flic
def flicker_amplitude_to_frequency(flicker_amplitude : float) -> float:
"""
Converts the flicker amplitude to the first filter frequency according to Kallinger et al. (2016)
:param flicker_amplitude: Flicker amplitude calculated according to Bastien et. al (2013)
:return: First filter frequency
"""
return 10 ** (5.187) / (flicker_amplitude ** (1.560))
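# Rough usage sketch (illustrative; assumes `data` is a 2 x N array with data[0] holding the
# time stamps and data[1] the flux, which is how the functions in this module index it):
#
#     amp = calculate_flicker_amplitude(data)
#     nu_filter = flicker_amplitude_to_frequency(amp)  # first filter frequency, Kallinger et al. (2016)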
def get_flicker_time(data : np.ndarray) -> float:
"""
Returns the flicker time. 2.5 hours for SC, 5 days for LC data
:param data: data consisting of the full lightcurve
:return: flicker time
"""
t_step = get_time_step(data)
t_step *= 24*60
if t_step < 10:
return 2.5/(60*24) #2.5 hours time for SC data
else:
return 5/24 #5 days for LC data
def get_index_shift(points_left : int) -> Tuple[int,int]:
"""
    Given the number of points left over after binning, this method returns the corresponding
    index_shift for the data as well as the number of columns (cols) over which it has to be iterated.
    :param points_left: points left over after binning
    :return: index_shift, cols
"""
index_shift = 0
cols = 1
if points_left > 1:
factors=factorint(points_left, multiple=True)
if len(factors) > 1:
index_shift = factors[0]**factors[1]
else:
index_shift = factors[0]
cols = int(points_left / index_shift + 1)
elif points_left == 1:
cols = 2
index_shift = 1
return index_shift,cols
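# Worked example (illustrative): for points_left = 12, factorint(12, multiple=True) returns
# [2, 2, 3], so index_shift = 2**2 = 4 and cols = int(12 / 4 + 1) = 4; for a prime remainder
# such as points_left = 7 the factor list is just [7], giving index_shift = 7 and cols = 2.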
def get_flicker_arrays(data : np.ndarray, elements : Union[int,Tuple[int,]], cols : int, index_shift : int, box_size : int
, filter_time :float) -> Tuple[float,np.ndarray]:
"""
    This method creates, depending on the index shift, box size and filter time, the appropriate
    arrays for which the flicker amplitude is calculated. It computes the mean of every box of the given box size.
"""
if isinstance(elements,tuple) and len(elements) > 1:
raise ValueError("Elements is not allowed to be a tuple longer than 1!")
else:
elements = elements[0]
bin_count = int(elements / box_size)
points_left = elements - box_size * bin_count
array_mean = np.zeros(cols)
for k in range(0,cols):
array_rebin = np.zeros(int(elements-points_left))
n_points_bin_array = np.zeros(int(elements-points_left))
i = k * index_shift
for j in range(0,bin_count):
mean_bin = 0.0
            time_reference = i
count = 1
            while i < (int(elements)-1) and (data[0][i] - data[0][time_reference])/(3600*24) < filter_time:
mean_bin +=data[1][i]
if data[1][i] != 0:
count +=1
i+=1
mean_bin += data[1][i]
if data[1][i] != 0:
count +=1
if count > 1:
mean_bin /= count-1
            array_rebin[time_reference - k * index_shift:(i - 1) - k * index_shift] = mean_bin
            n_points_bin_array[time_reference - k * index_shift:(i - 1) - k * index_shift] = count
subtract_array_amplitude = data[1][k * index_shift:k * index_shift + len(array_rebin)] - array_rebin
subtract_array_amplitude = subtract_array_amplitude[n_points_bin_array >= box_size / 2]
array_mean[k] = np.mean(subtract_array_amplitude)
return array_mean,subtract_array_amplitude |
the-stack_0_15884 | import requests
import pprint
import json
import os
from bs4 import BeautifulSoup
if os.path.isfile('flipkart.json'):
with open('flipkart.json','r')as file:
file_data=json.load(file)
print(file_data)
else:
link="https://www.flipkart.com/search?q=mi+all+mobile&sid=tyy%2C4io&as=on&as-show=on&otracker=AS_QueryStore_OrganicAutoSuggest_0_6&otracker1=AS_QueryStore_OrganicAutoSuggest_0_6&as-pos=0&as-type=RECENT&as-searchtext=mi%20all%20"
req=requests.get(link)
# print(req)
page=req.text
# print(page)
soup=BeautifulSoup(page,'html.parser')
# print(soup)
main_div=soup.find_all('div',class_="_1UoZlX")
# print(main_div)
list_for_all_phones=[]
list_for_rupess=[]
list_for_rating=[]
list_for_all_detail=[]
dictionary={}
dic_list=[]
for i in main_div:
# print(i)
col=(i.find('div',class_='_1-2Iqu row'))
# print(col)
n=col.find('div',class_='_3wU53n')
text=(n.text)
list_for_all_phones.append(text)
for k in list_for_all_phones:
dictionary['mobile_name']=k
price=col.find('div',class_='_1vC4OE _2rQ-NK')
price_text=(price.text)
list_for_rupess.append(price_text)
for l in list_for_rupess:
dictionary['price']=l
rating=col.find('div',class_='hGSR34')
rt=(rating.text)
list_for_rating.append(rt)
for m in list_for_rating:
dictionary['rating']=m
b=col.find('div',class_='_3ULzGw')
c=b.find('ul')
for j in c:
sp=(j.text).split('\n')
list_for_all_detail.append(sp)
dictionary['ram']=list_for_all_detail[0]
dictionary['Display']=list_for_all_detail[1]
dictionary['camera']=list_for_all_detail[2]
dictionary['battery']=list_for_all_detail[3]
dictionary['processor']=list_for_all_detail[4]
dictionary['warranty']=list_for_all_detail[5]
# pprint.pprint(dictionary)
        dic_list.append(dict(dictionary))  # copy, so each appended entry keeps its own values
# pprint.pprint(dic_list)
with open('flipkart.json','w')as file:
json.dump(dic_list,file)
# print(list_for_all_phones)
# print(list_for_rupess)
# print(list_for_rating)
# print(list_for_all_detail)
|
the-stack_0_15885 | import glob
import json
import time
import yaml
import luigi
from luigi.contrib.spark import PySparkTask
from py4j.protocol import Py4JJavaError
from pyspark import SparkContext
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.utils import IllegalArgumentException
from pyspark.sql.functions import lit, input_file_name, regexp_extract
from pyspark.sql.types import StructType, StructField, StringType
from etl import logger
from etl.constants import Constants
from etl.jobs.util.cleaner import trim_all_str
from etl.source_files_conf_reader import read_module
from etl.workflow.config import PdcmConfig
ROOT_FOLDER = "data/UPDOG"
def build_schema_from_cols(columns):
schema = []
for column in columns:
schema.append(StructField(column, StringType(), True))
return StructType(schema)
def select_rows_with_data(df: DataFrame, columns) -> DataFrame:
if "Field" in df.columns:
df = df.select(columns).where("nvl(field, '') not like '#%'")
else:
df = df.select(columns)
return df
def clean_column_names(df: DataFrame):
columns = df.columns
for column in columns:
df = df.withColumnRenamed(column, trim_all_str(column))
return df
def read_files(session, path_patterns, schema):
start = time.time()
df = session.read.option('sep', '\t').option('header', True).option('schema', schema).csv(path_patterns)
df = clean_column_names(df)
df = select_rows_with_data(df, schema.fieldNames())
datasource_pattern = "{0}\\/([a-zA-Z-]+)(\\/)".format(ROOT_FOLDER.replace("/", "\\/"))
df = df.withColumn("_data_source", lit(input_file_name()))
df = df.withColumn(Constants.DATA_SOURCE_COLUMN, regexp_extract("_data_source", datasource_pattern, 1))
df = df.drop("_data_source")
end = time.time()
logger.info(
"Read from path {0} count: {1} in {2} seconds".format(path_patterns, df.count(), round(end - start, 4)))
return df
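# Illustrative example: for an input file such as
# "hdfs:///input/data/UPDOG/provider-a/provider-a_molecular_metadata.tsv" (hypothetical path),
# the datasource_pattern above captures "provider-a" into Constants.DATA_SOURCE_COLUMN.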
def read_json(session, json_content):
df = session.read.option("multiline", True).json(session.sparkContext.parallelize([json_content]))
return df
class ReadByModuleAndPathPatterns(PySparkTask):
raw_folder_name = luigi.Parameter()
path_patterns = luigi.ListParameter()
columns_to_read = luigi.ListParameter()
data_dir_out = luigi.Parameter()
def output(self):
return PdcmConfig().get_target(
"{0}/{1}/{2}".format(self.data_dir_out, Constants.RAW_DIRECTORY, self.raw_folder_name))
def app_options(self):
return [
'|'.join([p for p in self.path_patterns]),
','.join(self.columns_to_read),
self.output().path]
def main(self, sc: SparkContext, *args):
spark = SparkSession(sc)
path_patterns = args[0].split('|')
columns_to_read = args[1].split(',')
output_path = args[2]
schema = build_schema_from_cols(columns_to_read)
if sc.master == "yarn":
hadoop = sc._jvm.org.apache.hadoop
fs = hadoop.fs.FileSystem
current_fs = fs.get(sc._jsc.hadoopConfiguration())
path_patterns = [path for path in path_patterns if
path != "" and current_fs.globStatus(hadoop.fs.Path(path))]
try:
df = read_files(spark, path_patterns, schema)
except (Py4JJavaError, IllegalArgumentException, FileNotFoundError, IOError) as error:
no_empty_patterns = list(filter(lambda x: x != '', path_patterns))
if "java.io.FileNotFoundException" in str(error) or len(no_empty_patterns) == 0 or error.__class__ in [FileNotFoundError, IOError]:
empty_df = spark.createDataFrame(sc.emptyRDD(), schema)
df = empty_df
df = df.withColumn(Constants.DATA_SOURCE_COLUMN, lit(""))
else:
raise error
df.write.mode("overwrite").parquet(output_path)
def build_path_patterns(data_dir, providers, file_patterns):
data_dir_root = "{0}/{1}".format(data_dir, ROOT_FOLDER)
paths_patterns = []
for file_pattern in file_patterns:
matching_providers = []
for provider in providers:
current_file_pattern = str(file_pattern).replace("$provider", provider)
if glob.glob("{0}/{1}/{2}".format(data_dir_root, provider,
current_file_pattern)) or PdcmConfig().deploy_mode == "cluster":
matching_providers.append(provider)
if matching_providers:
joined_providers_list = ','.join([p for p in matching_providers])
providers_pattern = "{" + joined_providers_list + "}"
path_pattern = "{0}/{1}/{2}".format(
data_dir_root, providers_pattern, file_pattern.replace("$provider", providers_pattern))
paths_patterns.append(path_pattern)
return paths_patterns
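# Illustrative example (hypothetical providers and file pattern): with data_dir="/input",
# providers=["provider-a", "provider-b"] and file_patterns=["$provider_sample.tsv"], a resulting
# glob pattern looks like
#     "/input/data/UPDOG/{provider-a,provider-b}/{provider-a,provider-b}_sample.tsv"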
def build_path_pattern_by_provider(data_dir, provider, file_pattern):
data_dir_root = "{0}/{1}".format(data_dir, ROOT_FOLDER)
path_pattern = "{0}/{1}/{2}".format(data_dir_root, provider, file_pattern.replace("$provider", provider))
return path_pattern
def get_tsv_extraction_task_by_module(data_dir, providers, data_dir_out, module_name):
module = read_module(module_name)
file_patterns = module["name_patterns"]
columns = module["columns"]
path_patterns = build_path_patterns(data_dir, list(providers), file_patterns)
return ReadByModuleAndPathPatterns(module_name, path_patterns, columns, data_dir_out)
def extract_provider_name(path: str):
init_index = path.index(ROOT_FOLDER) + len(ROOT_FOLDER) + 1
next_slash = path.index("/", init_index)
return path[init_index:next_slash]
def get_json_by_yaml(yaml_content):
yaml_as_json = yaml.safe_load(yaml_content)
yaml_as_json = json.dumps(yaml_as_json)
yaml_as_json = yaml_as_json.encode("unicode_escape").decode("utf-8")
return yaml_as_json
class ReadYamlsByModule(PySparkTask):
raw_folder_name = luigi.Parameter()
yaml_paths = luigi.ListParameter()
columns_to_read = luigi.ListParameter()
data_dir_out = luigi.Parameter()
def output(self):
return PdcmConfig().get_target(
"{0}/{1}/{2}".format(self.data_dir_out, Constants.RAW_DIRECTORY, self.raw_folder_name))
def app_options(self):
return [
','.join(self.yaml_paths),
','.join(self.columns_to_read),
PdcmConfig().deploy_mode,
self.output().path]
def main(self, sc, *args):
spark = SparkSession(sc)
yaml_file_paths = args[0].split(',')
columns_to_read = args[1].split(',')
deploy_mode = args[2]
output_path = args[3]
all_json_and_providers = []
if deploy_mode == "cluster":
for yaml_file_path in yaml_file_paths:
yaml_as_json = sc.wholeTextFiles(yaml_file_path).collect()[0][1]
yaml_as_json = get_json_by_yaml(yaml_as_json)
json_content_and_provider = (yaml_as_json, extract_provider_name(yaml_file_path))
all_json_and_providers.append(json_content_and_provider)
else:
for yaml_file_path in yaml_file_paths:
with open(yaml_file_path, 'r') as stream:
yaml_as_json = get_json_by_yaml(stream)
json_content_and_provider = (yaml_as_json, extract_provider_name(yaml_file_path))
all_json_and_providers.append(json_content_and_provider)
source_df = spark.createDataFrame(spark.sparkContext.emptyRDD(), build_schema_from_cols(columns_to_read))
source_df = source_df.withColumn(Constants.DATA_SOURCE_COLUMN, lit(None).astype(StringType()))
for json_and_provider in all_json_and_providers:
json_content = json_and_provider[0]
provider = json_and_provider[1]
df = read_json(spark, json_content)
df = df.select(columns_to_read)
df = df.withColumn(Constants.DATA_SOURCE_COLUMN, lit(provider))
source_df = source_df.union(df)
source_df.write.mode("overwrite").parquet(output_path)
def get_yaml_extraction_task_by_module(data_dir, providers, data_dir_out, module_name):
module = read_module(module_name)
file_patterns = module["name_patterns"]
columns = module["columns"]
# There should be only one yaml file by module
file_path = str(file_patterns[0])
yaml_paths = []
for provider in providers:
yaml_file_path = build_path_pattern_by_provider(data_dir, provider, file_path)
yaml_paths.append(yaml_file_path)
return ReadYamlsByModule(module_name, yaml_paths, columns, data_dir_out)
if __name__ == "__main__":
luigi.run()
|
the-stack_0_15886 | #!/usr/bin/env python3
import sys
import warnings
import tcod
import g
import states.mainmenu
def main() -> None:
screen_width = 720
screen_height = 480
tileset = tcod.tileset.load_tilesheet(
"data/cp437-14.png", 32, 8, tcod.tileset.CHARMAP_CP437
)
with tcod.context.new(
width=screen_width,
height=screen_height,
tileset=tileset,
title="libtcod tutorial revised",
renderer=tcod.RENDERER_SDL2,
vsync=True,
) as g.context:
g.console = tcod.Console(*g.context.recommended_console_size())
states.mainmenu.MainMenu().loop()
if __name__ == "__main__":
if not sys.warnoptions:
warnings.simplefilter("default") # Show all warnings once by default.
main()
|
the-stack_0_15889 | # This file is exec'd from settings.py, so it has access to and can
# modify all the variables in settings.py.
# If this file is changed in development, the development server will
# have to be manually restarted because changes will not be noticed
# immediately.
DEBUG = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
# Django needs to make databases in the test mysql server
'NAME': 'travismep',
'USER': 'root',
'PASSWORD': '',
'HOST': '127.0.0.1',
'OPTIONS': {
# In each case, we want strict mode on to catch truncation issues
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
# Load the socket info from .my.cnf in the travis user
'read_default_file': '~travis/.my.cnf'
},
'PORT': '',
'TEST': {
            # We also want the test database to use utf8 and the general
# collation to keep case sensitive unicode searches working
# as we would expect on production
'CHARSET': 'utf8',
'COLLATION': 'utf8_general_ci',
},
},
}
SOLR_CONNECTIONS = {
'default': {
'URL': 'http://localhost:8983/solr/',
'COLLECTION': 's-and-co',
'CONFIGSET': 'sandco',
'TEST': {
# aggressive commitWithin for test only
'COMMITWITHIN': 750,
}
}
}
# required by mezzanine for unit tests
ALLOWED_HOSTS = ['*']
# secret key added as a travis build step
|
the-stack_0_15894 | import sys
import os
import numpy as np
import pandas as pd
import copy
# extract names, temperature, pressures and make folder
# function for mech reading
def readmechanism(input_type, cwd):
'''
method to read the mechanism file depending on the input type
'''
if input_type == 'MESS':
# extract parameters
P_LIST, T_LIST, species_names, species_names_bimol_frag2 = data_names_mess(
os.path.join(cwd, 'inp'))
# extract matrix of rate constants
rates = MATRIX(os.path.join(cwd, 'inp'), P_LIST, T_LIST, species_names)
mech_dict = dict(zip(['P_VECT_MESS', 'T_VECT_MESS', 'SPECIES', 'SPECIES_BIMOL', 'rates'], [
P_LIST, T_LIST, species_names, species_names_bimol_frag2, rates]))
elif input_type == 'CKI':
species_names, species_names_bimol_frag2 = data_names_CKI(
os.path.join(cwd, 'inp'))
mech_dict = dict(zip(['SPECIES', 'SPECIES_BIMOL'], [
species_names, species_names_bimol_frag2]))
return mech_dict
########################### extract and process MESS type mechanism #####################
def data_names_mess(cwd):
"""
Extract from the input file me_ktp.inp useful data:
- list of pressures
- list of temperatures
- list of species (names)
"""
species_names_unimol = np.array([], dtype='<U16')
species_names_bimol = np.array([], dtype='<U16')
species_names_unimol_frag2 = np.array([], dtype='<U16')
species_names_bimol_frag2 = np.array([], dtype='<U16')
look_for_species = 0
look_for_bimol_fragment = 0
bad_wellwrds = ['WellDepth', 'WellCutoff', 'WellExtension',
'WellReductionThreshold', 'WellPartitionMethod', 'WellProjectionThreshold']
with open(os.path.join(cwd, 'me_ktp.inp')) as myfile:
for line in myfile:
# do not read comments
if line[0] == '!':
line = '' # empty line
elif len(line.split('!')) > 1: # remove commented part
line = line.split('!')[0]
# if you enter the model section: start looking for species
if line.find('Model') != -1 and line.find('ModelEnergyLimit') == -1:
look_for_species = 1
if line.find('PressureList') != -1:
                # check whether plain line.split could simply be used here instead
pressures = [x.strip() for x in line.split()]
del pressures[0]
# print(pressures)
# print(len(pressures))
if line.find('TemperatureList') != -1:
temperatures = [x.strip() for x in line.split()]
del temperatures[0]
if (line.find('Well') != -1 and all(line.find(bad) == -1 for bad in bad_wellwrds)) and look_for_species == 1:
full_line = [x.strip() for x in line.split()]
species_names_unimol = np.append(
species_names_unimol, full_line[1])
species_names_unimol_frag2 = np.append(
species_names_unimol_frag2, '')
if (line.find('Bimolecular')) != -1 and look_for_species == 1:
look_for_bimol_fragment = 1
full_line = [x.strip() for x in line.split()]
species_names_bimol = np.append(
species_names_bimol, full_line[1])
if (line.find('Fragment')) != -1 and look_for_bimol_fragment > 0:
look_for_bimol_fragment += 1
if look_for_bimol_fragment == 3:
full_line = [x.strip() for x in line.split()]
species_names_bimol_frag2 = np.append(
species_names_bimol_frag2, full_line[1])
look_for_bimol_fragment = 0
myfile.close()
# write files
P_LIST = np.unique(np.array(pressures, dtype=np.float32))
T_LIST = np.unique(np.array(temperatures, dtype=np.int16))
species_names = np.append(species_names_unimol, species_names_bimol)
species_names_frag2 = np.append(
species_names_unimol_frag2, species_names_bimol_frag2)
# check that bimol fragments have different names
if len(list(set(species_names_bimol_frag2))) != len(list(species_names_bimol_frag2)):
print('*Warning: some bimol fragments share the same names. check that they are isomers')
return P_LIST, T_LIST, species_names, species_names_frag2
def MATRIX(cwd, P_LIST, T_LIST, species_names):
"""
Extract from rate.out all the rate constants in the form of a list
"""
# pre-allocation
matrix_list = []
capture_list = []
# define checks for temperature and pressure and read the file
# len works on b
check_P = len(T_LIST)*len(species_names)*len(P_LIST)
# define current checks
check_P_curr = 0
check_list = 0
with open(os.path.join(cwd, 'rate.out')) as myfile:
for line in myfile:
# find the 'Temperature-Species rate tables to extract the rates
if line.find('Temperature-Species Rate Tables:') != -1:
check_list = 1
if ((check_list == 1) and (check_P_curr < check_P)):
# add the check on 'Pressure' in case the values of temperature and pressure are accidentally the same
if any(line.find(T) != -1 for T in np.array(T_LIST, dtype=str)) and (line.find('Pressure') == -1):
check_P_curr += 1
rates = [x.strip() for x in line.split()]
# replace '***' values with 0
rates = [x.replace('***', '0') for x in rates]
matrix_list.append(rates[1:-2]) # -2 excluded
capture_list.append(float(rates[-1]))
if line.find('Temperature-Pressure Rate Tables:') != -1:
check_list = 0 # don't read the file anylonger
myfile.close()
matrix_float = np.array(matrix_list, dtype=np.float64)
# remove negative values from the matrix
n_T = len(T_LIST)
n_P = len(P_LIST)
warnings_neg = '' # generate list of warnings for negative values
for ii, row in enumerate(matrix_float):
mask_neg = np.where(row < 0)
mask_toohigh = np.where(row > capture_list[ii])
row[mask_toohigh] = 0
for mask_neg_i in mask_neg[0]:
row[mask_neg_i] = 0
R = int(ii/n_T/n_P)
P = int((ii-R*n_T*n_P)/n_T)
T = ii-R*n_T*n_P-P*n_T
warnings_neg = warnings_neg + \
'removed negative k_{R} at {T} K and {P} atm, be careful \n'.format(
R=species_names[R], T=T_LIST[T], P=P_LIST[P])
np.savetxt('warnings_negval_messrates.txt', [warnings_neg], fmt='%s')
return matrix_float
def MATRIX_TP(T, P, T_LIST, P_LIST, species_names, matrix_float):
"""
Extract square matrix of k_ij at the selected temperature and pressure
T,P are expected to be numbers, either floating or integers, to be compared with T,P in the lists
"""
# transform input types
if not isinstance(T, int):
try:
T = int(T)
except:
print('input T not convertible to number')
return None
if not isinstance(P, float):
try:
P = float(P)
except:
print('input P not convertible to number')
return None
n_T = len(T_LIST)
n_P = len(P_LIST)
# preallocate the matrix
n_species = len(species_names)
mat_TP = np.zeros((n_species, n_species))
# reconstruct indices
# set as option the exception case: if you put a value not present
# index [0][0] to save just the index itself
P_index = np.where((P == P_LIST))[0][0]
T_index = np.where((T == T_LIST))[0][0]
for ii in range(0, n_species):
# identify the string to place in the row: ki->prods
rates_row = matrix_float[ii*(n_P)*(n_T)+P_index*(n_T)+T_index, :]
# use list comprehension: kij = rate from reactant i to product j
col_indices = np.array([jj != ii for jj in range(0, n_species)])
mat_TP[ii, col_indices] = rates_row
# this returns the matrix at a certain temperature and pressure
return mat_TP
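# Worked example (illustrative): with 3 temperatures, 2 pressures and species ['W1', 'W2', 'P1'],
# the rates of reactant 'W2' (ii = 1) at the second pressure (P_index = 1) and first temperature
# (T_index = 0) sit in row 1*2*3 + 1*3 + 0 = 9 of matrix_float, and MATRIX_TP copies that row
# (excluding the diagonal entry) into row 1 of the returned 3 x 3 matrix.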
def REAC_P(P, reac, P_LIST, T_LIST, species_names, matrix_float):
"""
Method: only needed for MESS input. it extracts from the matrix of rate constants the reactivity of the selected reactant at a certain pressure
output: matrix [n_T*n_species-1]|P
useful if: you need to check the reactivity of the reactant in the full range of temperature
"""
n_T = len(T_LIST)
n_P = len(P_LIST)
# select the reactant
if not isinstance(reac, str):
print('input is not a string. please insert "reac" as string')
return None
# if the input is not a string: show an error
elif sum(np.array([reac == r_list for r_list in species_names])) == 0:
print('selected reactant not found in the list of species. select another reactant')
return None
# if the reactant is not in the list: show an error
else:
try:
float(P)
except ValueError as e:
print('P not convertible to number, error: ' + str(e))
return None
# derive the reactivity matrix at all the different temperatures
P_index = np.where((P == P_LIST))[0][0]
reac_index = np.array(
[reac == r_list for r_list in species_names], dtype=int)
ii_reac = np.where(reac_index == 1)[0][0] # index of the reactant
ii_in = ii_reac*(n_P)*(n_T)+P_index*(n_T)
rates_reac = matrix_float[ii_in:ii_in+n_T, :]
return rates_reac
# extract and process CHEMKIN type mechanism ########################
def data_names_CKI(cwd):
'''
extract species names of CKI input mechanism
'''
# names of all the primary species
species_names = np.array([], dtype='<U32')
# names of the "second" reactant found in bimolecular reaction channels
species_names_bimol = np.array([], dtype='<U32')
# check in the file if you reading the part with all the reactions
check_reactions = 0
with open(os.path.join(cwd, 'kin.CKI')) as myfile:
for line in myfile:
if line.find('REACTIONS') != -1:
check_reactions += 1
# read the lines with the reaction rates;
# only irreversible reactions are considered ('=>')
# lines starting with a comment ('!') are not considered
if check_reactions == 1 and line.find('=>') != -1 and line.strip()[0] != '!':
line = line.split('!')[0] # remove the comments
REACS = [x.strip() for x in line.split('=>')][0]
REACS = [x.strip() for x in REACS.split('+')]
rest_ofline = [x.strip() for x in line.split('=>')][1]
# the arrhenius parameters will be the last three elements in the split
# ARR_PAR = rest_ofline.split()[-3:]
PRODS = ''.join(rest_ofline.split()[:-3])
PRODS = PRODS.split('+')
reacting_species = [REACS, PRODS]
# ALLOCATE THE SPECIES INTO THE ARRAYS
# if species are not into the array of species: append them
for x in reacting_species:
# IF LEN IS 1 AND REACTION IS LIKE 2A=>PRODS OR REACS=>2B: RECOGNIZE SPECIES B
# and add another product
if x[0][0] == '2':
x[0] = x[0][1:]
x.append(x[0])
if len(x) == 1 and np.array([x[0] == SP for SP in species_names]).any() != True:
species_names = np.append(species_names, x[0])
species_names_bimol = np.append(
species_names_bimol, '')
elif len(x) == 2:
r1 = x[0]
r2 = x[1]
# check that the combination is not present
flag = 0
for i in np.arange(0, len(species_names)):
s1 = species_names[i]
s2 = species_names_bimol[i]
# set flag to 1 if you find the same set of species
if (r1==s1 and r2==s2) or (r1==s2 and r2==s1):
flag = 1
if flag == 0:
species_names = np.append(species_names, r1)
species_names_bimol = np.append(species_names_bimol, r2)
if len(x) > 2:
print(
'Wrong number of elements when reading the reaction: line ' + line)
print('exiting now..')
sys.exit()
return species_names, species_names_bimol
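# Illustrative example: a CHEMKIN line such as
#     C3H6+OH=>C3H5+H2O   1.0E+12  0.0  1000.
# yields REACS = ['C3H6', 'OH'] and PRODS = ['C3H5', 'H2O'], so ('C3H6', 'OH') and ('C3H5', 'H2O')
# are appended to species_names/species_names_bimol if that combination is not already present.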
def copy_CKI_processed(oldpath, newpath, PRODSINKS, ISOM_EQUIL, REAC, PRODS):
'''
IN THIS METHOD, THE MECHANISM IS COPIED TO NEWPATH FOLDER AFTER PREPROCESSING
PRODSINKS = 1: THE PRODUCTS ARE SET AS IRREVERSIBLE SINKS, SO THE LINES ARE COMMENTED WITH !
ISOM_EQUIL = 1: ALL BIMOLECULAR REACTIONS ARE DELETED,SO THAT EQUILIBRIUM WITHIN SETS OF ISOMERS IS REACHED.
=> ALL BIMOLECULAR REACTIONS COMMENTED WITH !
'''
if os.path.isdir(newpath) == False:
raise RuntimeError(
'The destination folder for the new mech does not exist ')
else:
with open(os.path.join(oldpath, 'kin.CKI'), mode='r') as mech_orig_file:
mech_orig = mech_orig_file.readlines()
newfile = copy.deepcopy(mech_orig)
for idx, row in enumerate(newfile):
if ISOM_EQUIL == 1 and row.find('=>') != -1 and row.strip()[0] != '!':
# check first if you only want isomer equilibrium: so you delete all bimolecular reactions
reactant = [x.strip() for x in row.split('=>')][0]
reactant = [x.strip() for x in reactant.split('+')][0]
product = [x.strip() for x in row.split('=>')][1]
# only bimolecular products will be meaningful
product = [x.strip() for x in product.split('+')][0]
product = product.split()[0]
# delete all the reactions involving species other than the reactants
if np.array([reactant == REAC]).any() and np.array([product == REAC]).any():
delete = 'NO'
else:
delete = 'YES'
elif PRODSINKS == 1 and row.find('=>') != -1 and row.strip()[0] != '!':
# if there is no isom_equil, you did not delete all bimol. reactions:
# so if you set product as sinks, then you have to delete those lines.
# NB product sinks are incompatible with isom_equil
# extract the first reactant of each row
reactant = [x.strip() for x in row.split('=>')][0]
reactant = [x.strip() for x in reactant.split('+')][0]
# if the reactant of the line is in the list of products, comment the line
if np.array([reactant == np.array(PRODS)]).any():
delete = 'YES'
else:
delete = 'NO'
else: # any other case (read a reaction or an empty line with no need of deleting anything)
delete = 'NO'
# ON THE BASIS OF THE CONDITIONS ABOVE: DELETE THE REACTION OR NOT
if delete == 'YES':
# comment the line
newfile[idx] = '!' + row
# if duplicate reaction: comment also that
# the "lower" notation is to trasfer all to lower cases so that you make the search independent of the font
if newfile[idx+1].lower().find('DUPLICATE'.lower()) != -1 or newfile[idx+1].lower().find('DUP'.lower()) != -1:
newfile[idx+1] = '!' + newfile[idx+1]
# if PLOG: comment also all the lines below until you don't find PLOG anymore
check_plog = 0
iline = 1
while check_plog == 0:
if newfile[idx+iline].find('PLOG') != -1:
newfile[idx+iline] = '!' + newfile[idx+iline]
iline += 1
else:
check_plog = 1
# check for duplicates
if newfile[idx+iline].lower().find('DUPLICATE'.lower()) != -1 or newfile[idx+iline].lower().find('DUP'.lower()) != -1:
newfile[idx+iline] = '!' + newfile[idx+iline]
elif delete == 'NO':
# copy the line as it is
newfile[idx] = row
# remove the file if it exists and write the new one
if os.path.isfile(os.path.join(newpath, 'kin.txt')):
os.remove(os.path.join(newpath, 'kin.txt'))
with open(os.path.join(newpath, 'kin.txt'), mode='x') as inp:
inp.writelines(newfile)
|
the-stack_0_15896 | import random, math
from agent import Agent
from variables import *
MAX_NEIGHBOR_FORCE = abs(math.log(FISH_SENSING_DISTANCE/FISH_DESIRED_DIST))
# Model attempt some sort of propagation wave.
# Affected by propagation wave that pushes it farther away.
# If a marked fish is in a certain PROP_DIST (meaning getting
# attacked by Predator), then it experiences repulsive force.
# Smaller repulsive farther from the marked fish it is.
class PropagationFish(Agent):
def __init__(self, sim, start_loc = None):
random.seed()
super().__init__(sim, start_loc)
blue = random.randint(150, 255)
green = random.randint(0, 100)
red = random.randint(0, 100)
missing = 400-blue-green-red
red += missing//2
if red > 255:
red = 255
green += missing//2
if green > 255:
green = 255
self.nearby_predators = []
self.nearby_marked = []
self.marked = False
self.color = (red, green, blue)
def update(self):
total_x_vec = 0.0
total_y_vec = 0.0
# Computing effect that nearby predators on fish.
if len(self.nearby_predators) > 0:
predators = self.nearby_predators
for predator in predators:
x_vec = 0.0
y_vec = 0.0
pred, dist = predator
target = self.get_perceived_target_pos(pred.loc)
x, y = self.get_vector_to_target(target)
total_force = - PREDATOR_FISH_FORCE * pow((1.0)/dist, 4)
total_x_vec += x * total_force
total_y_vec += y * total_force
# Compute effect of propagation wave and compute other effects normally.
elif len(self.neighbors) > 0 or len(self.nearby_marked) > 0:
marked = self.nearby_marked
for fish in marked:
x_vec = 0.0
y_vec = 0.0
a, dist = fish
target = self.get_perceived_target_pos(a.loc)
x, y = self.get_vector_to_target(target)
total_force = -(1.0)*pow((1.0)/dist, 0.2)
total_x_vec += x * total_force
total_y_vec += y * total_force
neighbors = self.neighbors
if len(neighbors) > FISH_MAX_NEIGHBORS:
neighbors = sorted(self.neighbors, key=lambda x: x[1])
neighbors = neighbors[:FISH_MAX_NEIGHBORS]
for neighbor in neighbors:
x_vec = 0
y_vec = 0
fish, dist = neighbor
target = self.get_perceived_target_pos(fish.loc)
x, y = self.get_vector_to_target(target)
# Made the force between fishes stronger. Maybe change neighbor
# force constant in variables.py?
if dist > FISH_DESIRED_DIST:
total_force = 10*FISH_NEIGHBOR_FORCE * math.log(dist/FISH_DESIRED_DIST)/MAX_NEIGHBOR_FORCE
else:
total_force = -pow(FISH_DESIRED_DIST-dist, 1.5)
total_x_vec += x * total_force
total_y_vec += y * total_force
elif len(self.nearby_predators) == 0 and len(self.neighbors) == 0:
# randomly adjust speed
total_x_vec = (random.random() - 0.5) * 2 * FISH_ACCEL
total_y_vec = (random.random() - 0.5) * 2 * FISH_ACCEL
# normalize acceleration
accel = abs(total_x_vec) + abs(total_y_vec)
if accel > FISH_ACCEL:
adj = FISH_ACCEL/accel
total_x_vec *= adj
total_y_vec *= adj
self.x_speed += total_x_vec
self.y_speed += total_y_vec
# normalize speed
speed = abs(self.x_speed) + abs(self.y_speed)
if speed > FISH_SPEED:
adj = FISH_SPEED/speed
self.x_speed *= adj
self.y_speed *= adj
self.move()
|
the-stack_0_15897 | #
# (c) Copyright 2017-2018 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
def load_proxy():
# docker exposes all of these variables as build args
# except for all_proxy
proxy_keys = ('http_proxy', 'https_proxy',
'no_proxy', 'all_proxy', 'ftp_proxy')
return {key: os.environ[key] for key in proxy_keys if key in os.environ}
def split_image(image):
"""Get the tag from a full image name.
127.0.0.1:5000/image:latest => 127.0.0.1:5000/image, latest
image:tag => image, tag
"""
parts = image.split('/', 1)
if len(parts) == 1 or (
'.' not in parts[0] and
':' not in parts[0]):
host, img = '', image
else:
host, img = parts
if ':' in img:
imagename, tag = img.rsplit(':', 1)
else:
imagename, tag = img, 'latest'
if host:
return host + "/" + imagename, tag
return imagename, tag
def all_blob_names(tree, parent=''):
"""This returns all blobs names from git tree object
"""
for t in tree.trees:
for b in all_blob_names(t, os.path.join(parent, tree.name)):
yield b
for b in tree.blobs:
yield os.path.join(parent, tree.name, b.name)
|
the-stack_0_15898 | import subprocess
import os
import json
from uuid import uuid4
from vo2mft.util import _solve_front_path, _twodof_solve_front_path, _twodof_body_fixed_solve_front_path
def solve(env, eps=1e-8, ions=False, flags=None, twodof=False, twodof_body_indep=False):
'''Return the solved final env corresponding to the given env, solved to
accuracy given by eps.
'''
solver_path = _solve_front_path()
if twodof and twodof_body_indep:
solver_path = _twodof_solve_front_path()
elif twodof:
solver_path = _twodof_body_fixed_solve_front_path()
in_path, out_path = str(uuid4()), str(uuid4())
write_env_file(env, in_path)
# Run solver.
solver_call = None
if twodof:
solver_call = [solver_path, "--eps", str(eps)]
if ions:
solver_call.append("--ions")
if flags != None:
solver_call.extend(flags)
solver_call.extend([in_path, out_path])
elif ions and not twodof:
solver_call = [solver_path, "--eps", str(eps), "--ions"]
if flags != None:
solver_call.extend(flags)
solver_call.extend([in_path, out_path])
subprocess.call(solver_call)
# Read solver output, if it exists.
final_env_path = out_path + "_fenv.json"
final_env = None
try:
final_env = read_env_file(final_env_path)
except FileNotFoundError:
pass
# Clean up solver input/output.
try:
os.remove(in_path)
os.remove(final_env_path)
except FileNotFoundError:
pass
return final_env
def solve_set(envs, eps=1e-8, ions=False, flags=None, twodof=False, twodof_body_indep=False):
'''Return a list of solved final envs corresponding to the given list of
envs, solved to accuracy given by eps.
The set of envs is solved serially (only one process is invoked).
'''
final_envs = []
for i, initial_env in enumerate(envs):
this_flags = None
if flags != None:
this_flags = flags[i]
this_final_env = solve(initial_env, eps, ions, this_flags, twodof, twodof_body_indep)
final_envs.append(this_final_env)
return final_envs
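# Rough usage sketch (illustrative; `env`, `env1` and `env2` stand for environment dicts produced
# elsewhere in vo2mft):
#
#     final_env = solve(env, eps=1e-6, ions=True)
#     final_envs = solve_set([env1, env2], eps=1e-6)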
def write_env_file(env, env_path):
env_str = json.dumps(env)
with open(env_path, 'w') as fp:
fp.write(env_str)
def read_env_file(env_path):
env = None
with open(env_path, 'r') as fp:
env_str = fp.read()
env = json.loads(env_str)
return env
|
the-stack_0_15899 | import joblib
import pytest
import os
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from mlserver.settings import ModelSettings, ModelParameters
from mlserver.types import InferenceRequest
from mlserver_sklearn import SKLearnModel
TESTS_PATH = os.path.dirname(__file__)
TESTDATA_PATH = os.path.join(TESTS_PATH, "testdata")
def pytest_collection_modifyitems(items):
"""
Add pytest.mark.asyncio marker to every test.
"""
for item in items:
item.add_marker("asyncio")
@pytest.fixture
def model_uri(tmp_path) -> str:
n = 4
X = np.random.rand(n)
y = np.random.rand(n)
clf = DummyClassifier(strategy="prior")
clf.fit(X, y)
model_uri = os.path.join(tmp_path, "sklearn-model.joblib")
joblib.dump(clf, model_uri)
return model_uri
@pytest.fixture
def model_settings(model_uri: str) -> ModelSettings:
return ModelSettings(
name="sklearn-model",
parameters=ModelParameters(uri=model_uri, version="v1.2.3"),
)
@pytest.fixture
async def model(model_settings: ModelSettings) -> SKLearnModel:
model = SKLearnModel(model_settings)
await model.load()
return model
@pytest.fixture
def inference_request() -> InferenceRequest:
payload_path = os.path.join(TESTDATA_PATH, "inference-request.json")
return InferenceRequest.parse_file(payload_path)
@pytest.fixture
async def regression_model(tmp_path) -> SKLearnModel:
# Build a quick DummyRegressor
n = 4
X = np.random.rand(n)
y = np.random.rand(n)
clf = DummyRegressor()
clf.fit(X, y)
model_uri = os.path.join(tmp_path, "sklearn-regression-model.joblib")
joblib.dump(clf, model_uri)
settings = ModelSettings(
name="sklearn-regression-model",
parameters=ModelParameters(uri=model_uri, version="v1.2.3"),
)
model = SKLearnModel(settings)
await model.load()
return model
@pytest.fixture
def pandas_model_uri(tmp_path) -> str:
data: pd.DataFrame = pd.DataFrame(
{"a": [1, 2, 3], "op": ["+", "+", "-"], "y": [11, 22, -33]}
)
X: pd.DataFrame = data.drop("y", axis=1)
y: pd.DataFrame = data["y"]
numeric_features = ["a"]
numeric_transformer = StandardScaler()
categorical_features = ["op"]
categorical_transformer = OneHotEncoder(handle_unknown="ignore")
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
]
)
model = Pipeline(
steps=[("preprocessor", preprocessor), ("regression", DummyRegressor())]
)
model.fit(X, y)
model_uri = os.path.join(tmp_path, "sklearn-pandas-model.joblib")
joblib.dump(model, model_uri)
return model_uri
@pytest.fixture
def pandas_model_settings(pandas_model_uri: str) -> ModelSettings:
return ModelSettings(
name="sklearn-pandas-model",
parameters=ModelParameters(uri=pandas_model_uri, version="v1.2.3"),
)
@pytest.fixture
async def pandas_model(pandas_model_settings: ModelSettings) -> SKLearnModel:
model = SKLearnModel(pandas_model_settings)
await model.load()
return model
@pytest.fixture
def pandas_inference_request() -> InferenceRequest:
inference_request = {
"parameters": {"content_type": "pd"},
"inputs": [
{"name": "a", "datatype": "INT32", "data": [10], "shape": [1]},
{
"name": "op",
"datatype": "BYTES",
"data": ["-"],
"shape": [1],
"parameters": {"content_type": "str"},
},
],
}
return InferenceRequest.parse_obj(inference_request)
|
the-stack_0_15900 | """Settings from flow_manager NApp."""
# Polling frequency
STATS_INTERVAL = 30
FLOWS_DICT_MAX_SIZE = 10000
# Time (in seconds) to wait before retrieving a box from storehouse
BOX_RESTORE_TIMER = 0.1
ENABLE_CONSISTENCY_CHECK = True
# List of flows ignored by the consistency check
# To filter by a cookie or `table_id` use [value]
# To filter by a cookie or `table_id` range [(value1, value2)]
CONSISTENCY_COOKIE_IGNORED_RANGE = []
CONSISTENCY_TABLE_ID_IGNORED_RANGE = []
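# Example (illustrative) of the accepted formats described above:
#     CONSISTENCY_COOKIE_IGNORED_RANGE = [0xAB00000000000001, (0xAC00000000000000, 0xAC00000000000010)]
#     CONSISTENCY_TABLE_ID_IGNORED_RANGE = [2, (100, 200)]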
|
the-stack_0_15902 | """
White space is used to control how whitespace is rendered.
"""
from ..defaults import BREAKPOINTS, UP, DOWN, FULL, ONLY
from ...core import CssModule
vals = [
('n', 'normal'),
('nw', 'nowrap'),
('p', 'pre'),
]
mdl = CssModule(
'White space',
[UP],
dynamic={'.ws': ['white-space']},
values=vals,
docstring=__doc__
)
|
the-stack_0_15904 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# variablecube2.py
#
from copy import copy
from pagebot.elements.element import Element
from pagebot.style import makeStyle
from pagebot.toolbox.units import pointOffset
from pagebot.toolbox.color import blackColor
class VariableCube2(Element):
"""
>>> from pagebot.fonttoolbox.objects.font import findFont
>>> from pagebot.document import Document
>>> vfFont = findFont('RobotoDelta_v2-VF')
>>> doc = Document(w=500, h=500, autoPages=1)
>>> page = doc[1]
>>> page.padding = 40
>>> vc = VariableCube2(vfFont, parent=page, x=40, y=40, w=page.pw)
"""
# Initialize the default behavior tags as different from Element.
def __init__(self, font, point=None, parent=None, style=None,
name=None, captionStyle=None, caption=None,
location=None, dimensions=None,
clipRect=None, mask=None, imo=None, **kwargs):
Element.__init__(self, point=point, parent=parent, style=style,
name=name, **kwargs)
self.vfFont = font
self.style = makeStyle(style, **kwargs) # Combine self.style from
# Try to figure out the requested dimensions if the element display per axes.
if dimensions is None:
dimensions = dict(wght=5, wdth=5, opsz=5)
self.dimensions = dimensions
# Each element should check at this point if the minimum set of style values
# are set and if their values are valid.
assert self.w is not None and self.h is not None # Make sure that these are defined.
# Make sure that this is a formatted string. Otherwise create it with the current style.
# Note that in case there is potential clash in the double usage of fill and stroke.
# FIXME: Review this: the 's' variable below is undefined.
#self.glyphNames = s or 'e'
self.glyphNames = 'e'
# Store the external location, to allow other axis values to be set.
if location is None:
location = {}
self.location = copy(location)
def draw(self, view, origin):
c = self.doc.context
p = pointOffset(self.origin, origin)
p = self._applyScale(view, p)
px, py, _ = self._applyAlignment(p) # Ignore z-axis for now.
fillColor = self.style.get('fill')
if fillColor is not None:
c.fill(fillColor)
c.stroke((0.8, 0.8, 0.8), 0.5)
c.rect(px, py, self.w, self.h)
if len(self.dimensions) == 1:
raise ValueError('Not supporting 1 axis now')
if len(self.dimensions) > 2:
raise ValueError('Not supporting >2 axis now')
axisNames = sorted(self.dimensions.keys())
axisX = axisNames[0]
sizeX = self.dimensions[axisX]
axisY = axisNames[1]
sizeY = self.dimensions[axisY]
stepX = self.w / (sizeX+1)
stepY = self.h / (sizeY+1)
"""Add more parametric layout behavior here."""
RANGE = 1000
for indexX in range(sizeX+1):
for indexY in range(sizeY+1):
ox = 30
oy = 25
ppx = ox + px + indexX * stepX
ppy = oy + py + indexY * stepY
self.location[axisX] = indexX * RANGE / sizeX
self.location[axisY] = indexY * RANGE / sizeY
                glyphPathScale = self.fontSize/self.vfFont.info.unitsPerEm
c.drawGlyphPath(c, self.vfFont.ttFont, self.glyphNames[0],
ppx, ppy, self.location, s=glyphPathScale,
fillColor=(0, 0, 0))
bs = c.newString('%s %d\n%s %d' % (axisX,
indexX * RANGE / sizeX,
axisY,
indexY * RANGE / sizeY),
fontSize=6,
fill=blackColor)
w, h = bs.textSize()
c.text(bs, ppx - stepX/4, ppy - 16)
# Bit of hack, we need the width of the glyph here.
bs = c.newString('Other axes: %s' % self.location,
fontSize=6, fill=blackColor)
w, h = bs.textSize()
c.text(bs, px, py - 16)
if __name__ == '__main__':
import doctest
import sys
sys.exit(doctest.testmod()[0])
|
the-stack_0_15905 | # -*- coding: utf-8 -*-
from pydrake.multibody.parsing import (
Parser,
PackageMap,
LoadModelDirectives,
ProcessModelDirectives,
ModelInstanceInfo,
AddFrame,
GetScopedFrameByName,
GetScopedFrameName,
)
import os
import unittest
from pydrake.common import FindResourceOrThrow
from pydrake.multibody.tree import (
ModelInstanceIndex,
)
from pydrake.multibody.plant import (
MultibodyPlant,
)
class TestParsing(unittest.TestCase):
def test_package_map(self):
dut = PackageMap()
tmpdir = os.environ.get('TEST_TMPDIR')
model = FindResourceOrThrow(
"drake/examples/atlas/urdf/atlas_minimal_contact.urdf")
# Simple coverage test for Add, Contains, size, GetPath, AddPackageXml.
dut.Add(package_name="root", package_path=tmpdir)
self.assertEqual(dut.size(), 1)
self.assertTrue(dut.Contains(package_name="root"))
self.assertEqual(dut.GetPath(package_name="root"), tmpdir)
dut.AddPackageXml(filename=FindResourceOrThrow(
"drake/multibody/parsing/test/box_package/package.xml"))
# Simple coverage test for Drake paths.
dut.PopulateUpstreamToDrake(model_file=model)
self.assertGreater(dut.size(), 1)
# Simple coverage test for folder and environment.
dut.PopulateFromEnvironment(environment_variable='TEST_TMPDIR')
dut.PopulateFromFolder(path=tmpdir)
def test_parser_file(self):
"""Calls every combination of arguments for the Parser methods which
use a file_name (not contents) and inspects their return type.
"""
sdf_file = FindResourceOrThrow(
"drake/multibody/benchmarks/acrobot/acrobot.sdf")
urdf_file = FindResourceOrThrow(
"drake/multibody/benchmarks/acrobot/acrobot.urdf")
for dut, file_name, model_name, result_dim in (
(Parser.AddModelFromFile, sdf_file, None, int),
(Parser.AddModelFromFile, sdf_file, "", int),
(Parser.AddModelFromFile, sdf_file, "a", int),
(Parser.AddModelFromFile, urdf_file, None, int),
(Parser.AddModelFromFile, urdf_file, "", int),
(Parser.AddModelFromFile, urdf_file, "a", int),
(Parser.AddAllModelsFromFile, sdf_file, None, list),
(Parser.AddAllModelsFromFile, urdf_file, None, list),
):
plant = MultibodyPlant(time_step=0.01)
parser = Parser(plant=plant)
if model_name is None:
result = dut(parser, file_name=file_name)
else:
result = dut(parser, file_name=file_name,
model_name=model_name)
if result_dim is int:
self.assertIsInstance(result, ModelInstanceIndex)
else:
assert result_dim is list
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], ModelInstanceIndex)
def test_parser_string(self):
"""Checks parsing from a string (not file_name)."""
sdf_file = FindResourceOrThrow(
"drake/multibody/benchmarks/acrobot/acrobot.sdf")
with open(sdf_file, "r") as f:
sdf_contents = f.read()
plant = MultibodyPlant(time_step=0.01)
parser = Parser(plant=plant)
result = parser.AddModelFromString(
file_contents=sdf_contents, file_type="sdf")
self.assertIsInstance(result, ModelInstanceIndex)
def test_model_directives(self):
model_dir = os.path.dirname(FindResourceOrThrow(
"drake/multibody/parsing/test/"
"process_model_directives_test/package.xml"))
plant = MultibodyPlant(time_step=0.01)
parser = Parser(plant=plant)
parser.package_map().PopulateFromFolder(model_dir)
directives_file = model_dir + "/add_scoped_top.yaml"
directives = LoadModelDirectives(directives_file)
added_models = ProcessModelDirectives(
directives=directives, plant=plant, parser=parser)
# Check for an instance.
model_names = [model.model_name for model in added_models]
self.assertIn("extra_model", model_names)
plant.GetModelInstanceByName("extra_model")
# Test that other bound symbols exist.
ModelInstanceInfo.model_name
ModelInstanceInfo.model_path
ModelInstanceInfo.parent_frame_name
ModelInstanceInfo.child_frame_name
ModelInstanceInfo.X_PC
ModelInstanceInfo.model_instance
AddFrame.name
AddFrame.X_PF
frame = GetScopedFrameByName(plant, "world")
self.assertIsNotNone(GetScopedFrameName(plant, frame))
def test_model_directives_doc(self):
"""Check that the warning note in the docstring was added."""
self.assertIn("Note:\n", ProcessModelDirectives.__doc__)
|
the-stack_0_15906 | import signal
import socket
import subprocess
import time
from eth_utils import (
to_text,
)
import requests
def wait_for_socket(ipc_path, timeout=60):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
def wait_for_http(endpoint_uri, timeout=60):
start = time.time()
while time.time() < start + timeout:
try:
requests.get(endpoint_uri)
except requests.ConnectionError:
time.sleep(0.01)
else:
break
def get_process(command_list, terminates=False):
proc = subprocess.Popen(
command_list,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if terminates:
wait_for_popen(proc, 30)
try:
yield proc
finally:
kill_proc_gracefully(proc)
output, errors = proc.communicate()
print(
"Parity Process Exited:\n"
"stdout:{0}\n\n"
"stderr:{1}\n\n".format(
to_text(output),
to_text(errors),
)
)
def wait_for_popen(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
wait_for_popen(proc, 13)
if proc.poll() is None:
proc.terminate()
wait_for_popen(proc, 5)
if proc.poll() is None:
proc.kill()
wait_for_popen(proc, 2)
|
the-stack_0_15907 | import math
import statistics as stats
import numpy as np
from Modulos.Utils import Truncate
def FuncionAcumuladaExponencial(x, valor_lambda):
    '''Returns the value of the cumulative distribution function of the exponential distribution evaluated at x
    Parameters: x: value at which to evaluate the function
                valor_lambda: lambda value computed for the series'''
return 1-math.exp(-valor_lambda*x)
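# Worked check (illustrative, not from the original file): with valor_lambda = 0.5,
# FuncionAcumuladaExponencial(2, 0.5) = 1 - exp(-0.5*2) = 1 - exp(-1) ≈ 0.6321.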
def FuncionDensidadNormal(x, media, desviacion_estandar):
    '''Returns the value of the probability density function of the normal distribution evaluated at x
    Parameters: x: value at which to evaluate the function
                media: mean computed for the series
                desviacion_estandar: standard deviation computed for the series'''
return (math.exp(-0.5*((x-media)/desviacion_estandar)**2))/(desviacion_estandar*math.sqrt(2*math.pi))
def FuncionAcumuladaUniforme(a, b, x):
    '''Returns the value of the cumulative distribution function of the uniform distribution evaluated at x
    Parameters: x: value at which to evaluate the function
                a: lower bound
                b: upper bound'''
return (x-a)/(b-a)
def ProbabilidadAcumuladaExponencial(desde, hasta, valor_lambda):
    '''Returns the probability accumulated over an interval for the exponential distribution
    Parameters: desde: start value of the interval
                hasta: end value of the interval
                valor_lambda: lambda value computed for the series'''
return FuncionAcumuladaExponencial(hasta, valor_lambda) - FuncionAcumuladaExponencial(desde, valor_lambda)
def ProbabilidadAcumuladaUniforme(desde, hasta, a, b):
    '''Returns the probability accumulated over an interval for the uniform distribution
    Parameters: desde: start value of the interval
                hasta: end value of the interval
                a: lower bound
                b: upper bound'''
return FuncionAcumuladaUniforme(a, b, hasta) - FuncionAcumuladaUniforme(a, b, desde)
def FrecuenciasEsperadas(tamaño_muestra, intervalos, tipoDistribucion, media, desviacion_estandar, a, b):
    '''Computes the expected frequency for each interval according to the chosen distribution
    Parameters: tamaño_muestra: integer, the number of elements in the series
                intervalos: Dict<str, extremes>; dictionary keyed by the string representation of each interval
                tipoDistribucion: integer identifying the distribution chosen as the null hypothesis (0=uniform, 1=exponential, 2=normal)
                media: mean computed for the series
                desviacion_estandar: standard deviation computed for the series
                a: minimum value of the series
                b: maximum value of the series
    Return: list of expected frequencies'''
frec_esp_arr = []
if tipoDistribucion == 1:
valor_lambda = Truncate(1/media, 7)
for i in intervalos:
intervalo = intervalos[i]
desde, hasta = intervalo[0], intervalo[1]
if tipoDistribucion == 0:
prob = ProbabilidadAcumuladaUniforme(desde, hasta, a, b)
frec_esp = round(prob*tamaño_muestra)
elif tipoDistribucion == 1:
prob = ProbabilidadAcumuladaExponencial(desde, hasta, valor_lambda)
frec_esp = Truncate(prob*tamaño_muestra, 4)
elif tipoDistribucion == 2:
marca_clase = (desde+hasta)/2
prob = FuncionDensidadNormal(marca_clase, media, desviacion_estandar) * (hasta-desde)
frec_esp = Truncate(prob*tamaño_muestra, 4)
frec_esp_arr.append(frec_esp)
return frec_esp_arr
def testFrecuenciasEsperadasExponencial():
arr = [0.10, 0.25, 1.53, 2.83, 3.50, 4.14, 5.65, 6.96, 7.19, 8.25,1.20, 5.24, 4.75, 3.96, 2.21, 3.15, 2.53, 1.16, 0.32, 0.90, 0.87, 1.34, 1.87, 2.91, 0.71, 1.69, 0.69, 0.55, 0.43, 0.26]
intervalos = {'0 - 1': [0, 1], '1 - 2': [1, 2], '2 - 3': [2, 3], '3 - 4': [3, 4], '4 - 5': [4, 5], '5 - 6': [5, 6], '6 - 7': [6, 7], '7 - 8': [7, 8], '8 - 9': [8, 9], '9 - 10': [9, 10]}
tipoDistribucion = 1
tamaño_muestra = 30
a, b = min(arr), max(arr)
media = stats.mean(arr)
print('Media:', media)
desviacion_estandar = np.std(arr, ddof=1)
frec_esp = FrecuenciasEsperadas(tamaño_muestra, intervalos, tipoDistribucion, media, desviacion_estandar, a, b)
print(frec_esp)
def testFrecuenciasEsperadasNormal():
arr = [1.56, 2.21, 3.15, 4.61, 4.18, 5.20, 6.94, 7.71, 5.15, 6.76, 7.28, 4.23, 3.21, 2.75, 4.69, 5.86, 6.25, 4.27, 4.91, 4.78, 2.46, 3.97, 5.71, 6.19, 4.20, 3.48, 5.83, 6.36, 5.90, 5.43]
intervalos = {'0 - 1': [0, 1], '1 - 2': [1, 2], '2 - 3': [2, 3], '3 - 4': [3, 4], '4 - 5': [4, 5], '5 - 6': [5, 6], '6 - 7': [6, 7], '7 - 8': [7, 8], '8 - 9': [8, 9], '9 - 10': [9, 10]}
media = stats.mean(arr)
print('Media:', media)
desviacion_estandar = np.std(arr, ddof=1)
print('Desv estandar: ', desviacion_estandar)
tipoDistribucion = 2
tamaño_muestra = 30
a, b = min(arr), max(arr)
frec_esp = FrecuenciasEsperadas(tamaño_muestra, intervalos, tipoDistribucion, media, desviacion_estandar, a, b)
print(frec_esp)
def testFrecuenciasEsperadasUniforme():
arr = [0.15, 0.22, 0.41, 0.65, 0.84, 0.81, 0.62, 0.45, 0.32, 0.07, 0.11, 0.29, 0.58, 0.73, 0.93, 0.97, 0.79, 0.55, 0.35, 0.09, 0.99, 0.51, 0.35, 0.02, 0.19, 0.24, 0.98, 0.10, 0.31, 0.17]
intervalos = {'0.0 - 0.2': [0.0, 0.2], '0.2 - 0.4': [0.2, 0.4], '0.4 - 0.6': [0.4, 0.6], '0.6 - 0.8': [0.6, 0.8], '0.8 - 1.0': [0.8, 1.0]}
media = stats.mean(arr)
desviacion_estandar = np.std(arr, ddof=1)
tipoDistribucion = 0
tamaño_muestra = 30
a, b = min(arr), max(arr)
print(a,b)
frec_esp = FrecuenciasEsperadas(tamaño_muestra, intervalos, tipoDistribucion, media, desviacion_estandar, a, b)
print(frec_esp)
|
the-stack_0_15909 | # From the OpenCV library import imread function only.
from cv2 import imread
# Reading the image using imread() function
image = imread("../0_assets/road.jpg")
# Extracting RGB values.
# Here we have randomly choose a pixel.
# The [100, 100] represents pixel position in X and Y.
# Keep note that you have to be in bounds with respect to image size.
(B, G, R) = image[100, 100]
# Display the pixel values
print("R = {}, G = {}, B = {}".format(R, G, B))
# We can also extract a single color channel of a pixel.
# The channel index is the third subscript of the image array.
# OpenCV stores pixels in BGR order, so index 0 is the Blue channel.
B = image[100, 100, 0]
print("B = {}".format(B))
|
the-stack_0_15910 | import os
import numpy as np
from scipy import fftpack
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from astropy.stats import SigmaClip
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.convolution import Gaussian2DKernel
from astropy.coordinates import SkyCoord
from astropy.wcs.utils import skycoord_to_pixel
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy import units as u
from photutils import source_properties
from photutils import detect_sources
from photutils import Background2D, MedianBackground
import huntsman_dust.util_plot as util_plot
def image_load(image_path):
"""Returns image, header and wcs objects.
Args:
        image_path(str, required): Path to a particular FITS file
    Returns:
        image(array): This is the image data
        header(table): This is the header object
        wcs: World Coordinate System object
"""
hdulist = fits.open(image_path)
image = hdulist[0].data
header = hdulist[0].header
wcs = WCS(header)
return image, header, wcs
def background_2D(image,
sigma,
iters,
box_size,
filter_size,
plt_grid):
"""2D background estimation.
This function creates a 2D background estimate by dividing the image into
a grid, defined by box_size.
Args:
image(array, required): This is the image data
sigma(float, required): Sigma level
iters(int, required): Number of iterations
box_size(int, required): Defines the box dimesions, in pixels
filter_size(int, required): Defines the filter reach in pixels
plt_grid(boolean): Overplot grid on image
Returns:
bkg(array): 2D background level
bkgrms(array): RMS background
"""
sigma_clip = SigmaClip(sigma=sigma,
iters=iters)
mask = (image == 0)
bkg_estimator = MedianBackground()
bkg = Background2D(image,
box_size=box_size,
filter_size=filter_size,
sigma_clip=sigma_clip,
bkg_estimator=bkg_estimator,
mask=mask,
edge_method=u'pad')
# print('Background Median: ' + str(bkg.background_median))
# print('Background RMS median: ' + str(bkg.background_rms_median))
if plt_grid is True:
plt.imshow(bkg.background,
origin='lower',
cmap='Greys')
bkg.plot_meshes(outlines=True,
color='#1f77b4')
bkgrms = bkg.background_rms
return bkg, bkgrms
def find_objects(image,
threshold,
FWHM,
npixels):
"""Find sources in image by a segmentation process.
This function detects sources a given sigma above a threshold,
only if it has more that npixels that are interconnected.
Args:
image(array, required): This is the image data
threshold(array, required): This is the threshold above which
detection occurs
FWHM(int, required): Full Width Half Maximum of 2D circular
gaussian kernel used to filter the
image prior to thresholding. Input is
in terms of pixels.
npixels(int, required): The minimum number of pixels to define
a sources
Returns:
segm: The segmentation image
"""
sigma = FWHM * gaussian_fwhm_to_sigma
kernel = Gaussian2DKernel(sigma,
x_size=3,
y_size=3)
kernel.normalize()
segm = detect_sources(image,
threshold,
npixels=npixels,
filter_kernel=kernel)
return segm
def ds9_region(image_path,
image,
segm,
wcs,
ds9_region):
""""Creates ds9 region file.
This function creates a ds9 region file to display the sources
detected by the segmentation function. This file is written to
the same directory the fits files are in.
Args:
        image_path(str, required): Path to a particular FITS file
        image(array, required): This is the image data
        segm: The segmentation image
        wcs: World Coordinate System object
ds9_region(boolean, opt): If true, creates region file
"""
if ds9_region is True:
data_path = os.path.splitext(image_path)
region_path = str(data_path[0]) + '_ds9region'
scale = proj_plane_pixel_scales(wcs)
image_scale = scale[0]
reg = source_properties(image, segm, wcs=wcs)
with open(region_path+'.reg', 'w') as f:
f.write('# Region file format: DS9 version 7.6\n\n')
f.write('global color=#ff7733\n')
f.write('global width=2\n')
f.write('fk5\n\n')
for i in range(0, len(reg.id)):
x = reg[i].sky_centroid_icrs.ra.to(u.deg)
y = reg[i].sky_centroid_icrs.dec
r = image_scale*reg[i].equivalent_radius
f.write('circle('+str(x.value)+','+str(y.value)+',' +
str(r.value)+')'+' # Source Number:' +
str(reg[i].id)+'\n')
def mask_galaxy(image,
wcs,
Ra,
Dec,
name,
radius):
"""Masks galaxy at Ra, Dec within a radius given in arcminutes
Creates a circular mask centered at a given Ra, Dec. The radius
is given in arcmins. The wcs object is used to convert these inputs
to pixel locations. A pixel scale is also determined. If the object
name is suppled, SESAME is used to find object center. If no active
internet connection is available, center location must be manually
entered, in degrees. If no center coordinates are supplied, (0, 0)
is the default center.
Args:
image(array, required): Image data
        wcs: World Coordinate System object
name(str, optional): Name of galaxy or object
Ra(str): Right Ascention
Dec(str): Declination
Radius(float, required): Radius to be masked, in arcminutes
Returns:
masked_img(array): Image which has been masked
mask(boolean array): Mask of the given object"""
# Radius must be given in arcminutes
# Dimentions of the image
dim = (image.shape)
y, x = dim[0], dim[1]
# Finds the center of an object by inputting its name into SESAME
# This requires an active internet connection
# a, b are the coordinates of the center given in pixels
try:
center = SkyCoord.from_name(name)
except Exception:
print("No active internet connection. Manually enter Ra, Dec.")
Ra = Ra
Dec = Dec
center = SkyCoord(Ra, Dec, unit="deg")
c_pix = skycoord_to_pixel(center, wcs)
a, b = c_pix[0], c_pix[1]
print(center)
radius = radius*u.arcmin
    # Finds the pixel scale using the WCS object. The default units can be found by
    # unit = header['CUNIT1']; they are degrees by convention.
    # Degrees are converted to arcmins and the radius is computed in pixels.
scale = proj_plane_pixel_scales(wcs)
pix_scale = scale[0]*u.deg.to(u.arcmin)
print('Image Scale: ' + str(pix_scale)+' arcmin/pix')
rad_pix = (radius/pix_scale).value
# Indexes each pixel and checks if its is >= radius from center
Y, X = np.ogrid[:y, :x]
dist_from_center = np.sqrt((X - a)**2 + (Y - b)**2)
mask = dist_from_center <= rad_pix
return mask
def plt_fits(image,
wcs,
figure,
title,
cmap,
norm):
"""Plots FITs images with axis given in Ra, Dec.
Args:
image(array): Image data
wcs: World Coordinte System object
figure(optional): Figure Number
title(str, optional): Title of the figure
cmap(str, optiona): Color map
norm: Image normalizatuion
"""
util_plot.util_plot()
fig = plt.figure(num=figure)
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(image, origin='lower', cmap=cmap, norm=norm)
ax.coords[0].set_axislabel('RA')
ax.coords[1].set_axislabel('DEC')
ax.set_title(title)
def plt_image(image,
figure,
title,
xlabel,
ylabel,
cmap,
norm):
"""Plots FITs images with axis given in Ra, Dec.
Args:
image(array): Image data
wcs: World Coordinte System object
figure(optional): Figure Number
title(str, optional): Title of the figure
cmap(str, optiona): Color map
norm: Image normalizatuion
"""
util_plot.util_plot()
plt.figure(num=figure)
plt.imshow(image, origin='lower', cmap=cmap, norm=norm)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
def fits_write(image, header, img_path, name=None):
"""Writes an 2D data array to a fits file.
Writes a 2D array to a fits file in the same directory as the oringinal
image. It appends the image header to this new fits file.
Args:
image(array): The image data to be written to a fits file
header(hdu.header): The header information to be appended
img_path(str): Path to source file
name(str): Name of new fits file. Ex: mask.fits
"""
hdu = fits.PrimaryHDU()
hdu.data = image.astype(float)
hdu.header = header
data_path, file = os.path.split(img_path)
file_path = os.path.join(data_path, name + "."+'fits')
hdu.writeto(file_path, overwrite=True)
def azimuthalAverage(image, center=None):
"""
Calculate the azimuthally averaged radial profile.
image - The 2D image
center - The [x,y] pixel coordinates used as the center. The default is
None, which then uses the center of the image (including
             fractional pixels).
Contributed by Jessica R. Lu
"""
# Calculate the indices from the image
y, x = np.indices(image.shape)
if not center:
center = np.array([(y.max()-y.min())/2.0, (x.max()-x.min())/2.0])
r = np.hypot(x - center[1], y - center[0])
# Get sorted radii
ind = np.argsort(r.flat)
r_sorted = r.flat[ind]
i_sorted = image.flat[ind]
# Get the integer part of the radii (bin size = 1)
r_int = r_sorted.astype(int)
# Find all pixels that fall within each radial bin.
deltar = r_int[1:] - r_int[:-1] # Assumes all radii represented
rind = np.where(deltar)[0] # location of changed radius
nr = rind[1:] - rind[:-1] # number of radius bin
# Cumulative sum to figure out sums for each radius bin
csim = np.cumsum(i_sorted, dtype=float)
tbin = csim[rind[1:]] - csim[rind[:-1]]
radial_prof = tbin / nr
return radial_prof
def p_spec(image):
"""Performs 2D FFT on image and averages radially."""
image = image.astype(float)
psd2D = np.abs(fftpack.fftshift(fftpack.fft2(image)))**2
psd1D = azimuthalAverage(psd2D)
return psd1D
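if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module; the image size and
    # parameter values below are illustrative assumptions): estimate the 2D
    # background of a synthetic noise image and compute its radially averaged
    # power spectrum using the functions defined above.
    synthetic = np.random.normal(loc=100.0, scale=5.0, size=(200, 200))
    bkg, bkgrms = background_2D(synthetic, sigma=3.0, iters=5, box_size=50,
                                filter_size=3, plt_grid=False)
    # For pure noise the segmentation image may be None (no sources detected).
    segm = find_objects(synthetic, bkg.background + 3.0 * bkgrms, FWHM=3, npixels=5)
    psd = p_spec(synthetic)
    plt.figure()
    plt.loglog(psd)
    plt.xlabel('Spatial frequency bin')
    plt.ylabel('Azimuthally averaged power')
    plt.show()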
|
the-stack_0_15912 | import typing
from abc import ABC, abstractmethod
from .utils import image as image_utils
class CameraBase(ABC):
@abstractmethod
def read(self) -> typing.Tuple[bool, typing.Any]:
...
@abstractmethod
def release(self) -> None:
...
class CvCamera(CameraBase):
def __init__(self, width: int, cam_id=0):
try:
import cv2
except ModuleNotFoundError:
raise ModuleNotFoundError(
"OpenCV could not be found. Please see instructions on how to configure your system."
)
self.__camera = cv2.VideoCapture(cam_id)
self.__width = width
def read(self):
ok, frame = self.__camera.read()
if not ok:
return ok, None
return ok, image_utils.resize(frame, self.__width)
def release(self):
self.__camera.release()
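# Hedged usage sketch (not part of the original module; runs from a caller that
# imports this package, since the relative import above prevents direct
# execution as a script). The width of 640 px and camera id 0 are assumptions:
#
#     camera = CvCamera(width=640, cam_id=0)
#     ok, frame = camera.read()
#     if ok:
#         print("Captured frame with shape:", frame.shape)
#     camera.release()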
|
the-stack_0_15913 | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import unittest
from unittest.mock import MagicMock, patch
from git.git_repository import GitRepository
from manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin
from system.config_file import ConfigFile
class TestComponentOpenSearchDashboardsMin(unittest.TestCase):
@patch("subprocess.check_output")
def test_branches(self, mock: MagicMock) -> None:
mock.return_value = "\n".join(["main", "1.x", "1.21", "20.1", "something", "else"]).encode()
self.assertEqual(ComponentOpenSearchDashboardsMin.branches(), ["main", "1.x", "1.21", "20.1"])
mock.assert_called_with(
"git ls-remote https://github.com/opensearch-project/OpenSearch-Dashboards.git refs/heads/* | cut -f2 | cut -d/ -f3",
shell=True,
)
@patch("os.makedirs")
@patch.object(GitRepository, "__checkout__")
def test_checkout(self, *mocks: MagicMock) -> None:
component = ComponentOpenSearchDashboardsMin.checkout("path")
self.assertEqual(component.name, "OpenSearch-Dashboards")
self.assertFalse(component.snapshot)
@patch.object(ConfigFile, "from_file")
def test_version(self, mock_config: MagicMock) -> None:
mock_config.return_value = ConfigFile('{"version":"2.1"}')
component = ComponentOpenSearchDashboardsMin(MagicMock(working_directory="path"))
self.assertEqual(component.version, "2.1")
@patch.object(ConfigFile, "from_file")
def test_properties(self, mock_config: MagicMock) -> None:
mock_config.return_value = ConfigFile('{"version":"2.1"}')
component = ComponentOpenSearchDashboardsMin(MagicMock(working_directory="path"))
self.assertEqual(component.properties.get_value("version"), "2.1")
@patch.object(ConfigFile, "from_file")
def test_to_dict(self, mock_config: MagicMock) -> None:
mock_config.return_value = ConfigFile('{"version":"2.1"}')
repo = MagicMock(ref="ref", url="repo")
component = ComponentOpenSearchDashboardsMin(repo)
self.assertEqual(
component.to_dict(),
{"name": "OpenSearch-Dashboards", "ref": "ref", "repository": "repo"},
)
|
the-stack_0_15914 | from myClasses import *
import openpyxl
from openpyxl import load_workbook
import mdToArray
import os
import shutil
from openpyxl.styles import Font
class ArrayToExcel:
def __init__(self):
self.book = None
self.books=[]
self.templateBookPath=None
self.templateSheetName = None
self.startRow = None
self.startCol = None
self.baseFont=None
# self.templateWb=None
return
def reset(self):
self.book=None
self.books=[]
def setBook(self, book: Book):
self.book = book
def addBook(self,book:Book):
self.books.append(book)
def readTemplate(self, path, templateSheetName: str, startRow: int, startCol: int):
# self.wb = load_workbook(filename=path, keep_vba=True)
self.templateBookPath=path
self.templateSheetName = templateSheetName
self.startCol = startCol
self.startRow = startRow
# cant't copy sheet from book A to book B.
# self.templateWb=load_workbook(filename=self.templateBookPath, keep_vba=True,read_only=False)
def generateBook(self,outputPath:str,font:str,size):
outputDir=outputPath[0:outputPath.rfind("\\")]
if not os.path.exists(outputPath):
if not os.path.exists(outputDir):
os.makedirs(outputDir)
shutil.copy(self.templateBookPath, outputPath)
# wb=load_workbook(filename=self.templateBookPath, keep_vba=True,read_only=False)
wb=load_workbook(outputPath,keep_vba=True,read_only=False)
for sheet in self.book.sheets: # type:Sheet
print(sheet.sheetName)
print(sheet.data)
#delete before created sheet
if sheet.sheetName in wb.sheetnames:
std=wb.get_sheet_by_name(sheet.sheetName)
wb.remove_sheet(std)
ws=None
if self.templateSheetName in wb.sheetnames:
ws = wb.copy_worksheet(
wb.get_sheet_by_name(self.templateSheetName))
else:
ws=wb.create_sheet()
ws.title = sheet.sheetName
# rootFont=ws.cell(self.startRow+1, self.startCol+1).font
# self.baseFont=Font(name=rootFont.name,sz=rootFont.sz)
self.baseFont=Font(name=font,size=size)
for r, row in enumerate(sheet.data):
for c, column in enumerate(row):
if column != "":
self.__setVal(ws, r+1, c+1, column)
std=wb.get_sheet_by_name('template')
wb.remove_sheet(std)
wb.save(outputPath)
wb.close()
return
def generateBooks(self,outputPath:str,font:str,size):
outputDir=outputPath[0:outputPath.rfind("\\")]
if not os.path.exists(outputPath):
if not os.path.exists(outputDir):
os.makedirs(outputDir)
shutil.copy(self.templateBookPath, outputPath)
# wb=load_workbook(filename=self.templateBookPath, keep_vba=True,read_only=False)
wb=load_workbook(outputPath,keep_vba=True,read_only=False)
for book in self.books:
for sheet in book.sheets: # type:Sheet
print(sheet.sheetName)
print(sheet.data)
#delete before created sheet
if sheet.sheetName in wb.sheetnames:
std=wb.get_sheet_by_name(sheet.sheetName)
wb.remove_sheet(std)
ws=None
if self.templateSheetName in wb.sheetnames:
ws = wb.copy_worksheet(
wb.get_sheet_by_name(self.templateSheetName))
else:
ws=wb.create_sheet()
ws.title = sheet.sheetName
# rootFont=ws.cell(self.startRow+1, self.startCol+1).font
# self.baseFont=Font(name=rootFont.name,sz=rootFont.sz)
self.baseFont=Font(name=font,size=size)
for r, row in enumerate(sheet.data):
for c, column in enumerate(row):
if column != "":
self.__setVal(ws, r+1, c+1, column)
wb.save(outputPath)
wb.close()
return
def __setVal(self, ws: openpyxl.worksheet, row, col, val):
cell=ws.cell(row=row+self.startRow, column=col+self.startCol)
cell.font=self.baseFont
cell.value=val
return
if __name__ == "__main__":
# mte = mdToArray.MdToArray()
# mte.read("mdDocs/sample.md")
# mte.compile()
import pickle
# with open("book.pickle","wb")as f:
# pickle.dump(mte.book, f)
with open("book.pickle", "rb")as f:
book = pickle.load(f)
ate = ArrayToExcel()
ate.setBook(book=book)
ate.readTemplate("format.xlsm", "template", 3, 3)
ate.generateBook()
|
the-stack_0_15916 | import salabim as sim
left = -1
right = +1
def sidename(side):
return "l" if side == left else "r"
def shortname(ship):
s = ""
for c in ship.name():
if c != ".":
s = s + c
return s
def shipcolor(side):
if side == left:
return "blue"
else:
return "red"
def ship_polygon(ship):
return (ship.side * (ship.length - 2), 0, ship.side * 3, 0, ship.side * 2, 3, ship.side * (ship.length - 2), 3)
def lock_water_rectangle(t):
if lock.mode() == "Switch":
y = sim.interpolate(t, lock.mode_time(), lock.scheduled_time(), ylevel[lock.side], ylevel[-lock.side])
else:
y = ylevel[lock.side]
return (xdoor[left], -waterdepth, xdoor[right], y)
def lock_door_left_rectangle(t):
if lock.mode() == "Switch" or lock.side == right:
y = ylevel[right] + 2
else:
y = ylevel[left] - waterdepth
return (xdoor[left] - 1, -waterdepth, xdoor[left] + 1, y)
def lock_door_right_rectangle(t):
if lock.mode() == "Switch" or lock.side == left:
y = ylevel[right] + 2
else:
y = ylevel[right] - waterdepth
return (xdoor[right] - 1, -waterdepth, xdoor[right] + 1, y)
def animation_pre_tick(self, t):
if lock.mode() == "Switch":
y = sim.interpolate(t, lock.mode_time(), lock.scheduled_time(), ylevel[lock.side], ylevel[-lock.side])
else:
y = ylevel[lock.side]
lockqueue.animate(x=xdoor[-lock.side], y=y, direction="w" if lock.side == left else "e")
def do_animation():
global ylevel, xdoor, waterdepth
lockheight = 5
waterdepth = 2
ylevel = {left: 0, right: lockheight}
xdoor = {left: -0.5 * locklength, right: 0.5 * locklength}
xbound = {left: -1.2 * locklength, right: 1.2 * locklength}
sim.Environment.animation_pre_tick = animation_pre_tick
env.animation_parameters(
x0=xbound[left], y0=-waterdepth, x1=xbound[right], modelname="Lock", speed=8, background_color="20%gray"
)
for side in [left, right]:
wait[side].animate(x=xdoor[side], y=10 + ylevel[side], direction="n")
sim.Animate(rectangle0=(xbound[left], ylevel[left] - waterdepth, xdoor[left], ylevel[left]), fillcolor0="aqua")
sim.Animate(rectangle0=(xdoor[right], ylevel[right] - waterdepth, xbound[right], ylevel[right]), fillcolor0="aqua")
a = sim.Animate(rectangle0=(0, 0, 0, 0), fillcolor0="aqua")
a.rectangle = lock_water_rectangle
a = sim.Animate(rectangle0=(0, 0, 0, 0))
a.rectangle = lock_door_left_rectangle
a = sim.Animate(rectangle0=(0, 0, 0, 0))
a.rectangle = lock_door_right_rectangle
a = sim.Animate(text="", x0=10, y0=650, screen_coordinates=True, fontsize0=15, font="narrow", anchor="w")
a.text = lambda t: "mean waiting left : {:5.1f} (n={})".format(
wait[left].length_of_stay.mean(), wait[left].length_of_stay.number_of_entries()
)
a = sim.Animate(text="", x0=10, y0=630, screen_coordinates=True, fontsize0=15, font="narrow", anchor="w")
a.text = lambda t: "mean waiting right: {:5.1f} (n={})".format(
wait[right].length_of_stay.mean(), wait[right].length_of_stay.number_of_entries()
)
a = sim.Animate(text="xx=12.34", x0=10, y0=610, screen_coordinates=True, fontsize0=15, font="narrow", anchor="w")
a.text = lambda t: " nr waiting left : {:3d}".format(wait[left].length())
a = sim.Animate(text="xx=12.34", x0=10, y0=590, screen_coordinates=True, fontsize0=15, font="narrow", anchor="w")
a.text = lambda t: " nr waiting right: {:3d}".format(wait[right].length())
sim.AnimateSlider(
x=520,
y=0,
width=100,
height=20,
vmin=16,
vmax=60,
resolution=4,
v=iat,
label="iat",
action=set_iat,
xy_anchor="nw",
)
sim.AnimateSlider(
x=660,
y=0,
width=100,
height=20,
vmin=10,
vmax=60,
resolution=5,
v=meanlength,
label="mean length",
action=set_meanlength,
xy_anchor="nw",
)
def set_iat(val):
global iat
iat = float(val)
def set_meanlength(val):
global meanlength
meanlength = float(val)
class Shipgenerator(sim.Component):
def process(self):
while True:
yield self.hold(sim.Exponential(iat).sample())
ship = Ship(name=sidename(self.side) + "ship.")
ship.side = self.side
ship.length = meanlength * sim.Uniform(2.0 / 3, 4.0 / 3).sample()
if lock.mode() == "Idle":
lock.activate()
class Ship(sim.Component):
def animation_objects(self, q):
size_x = self.length
size_y = 5
if self.side == left:
anchor = "se"
else:
anchor = "sw"
an1 = sim.Animate(polygon0=ship_polygon(self), fillcolor0=shipcolor(self.side), anchor=anchor, linewidth0=0)
an2 = sim.Animate(
text=shortname(self), textcolor0="white", anchor=anchor, fontsize0=2.4, offsetx0=self.side * 5, offsety0=0.7
)
return (size_x, size_y, an1, an2)
def process(self):
self.enter(wait[self.side])
if lock.ispassive():
lock.activate()
yield self.request((lockmeters[self.side], self.length), key_in[self.side])
self.leave(wait[self.side])
self.enter(lockqueue)
yield self.hold(intime)
self.release(key_in[self.side])
yield self.request(key_out)
self.leave(lockqueue)
yield self.hold(outtime)
self.release(key_out)
class Lock(sim.Component):
def process(self):
yield self.request(key_in[left])
yield self.request(key_in[right])
yield self.request(key_out)
while True:
if len(key_in[self.side].requesters()) == 0:
if len(key_in[-self.side].requesters()) == 0:
yield self.passivate()
self.release(key_in[self.side])
yield self.request((key_in[self.side], 1, 1000))
lockmeters[self.side].release()
yield self.hold(switchtime, mode="Switch")
self.side = -self.side
self.release(key_out)
yield self.request((key_out, 1, 1000), mode=None)
env = sim.Environment()
locklength = 60
switchtime = 10
intime = 2
outtime = 2
meanlength = 30
iat = 30
lockmeters = {}
key_in = {}
wait = {}
lockqueue = sim.Queue("lockqueue")
key_out = sim.Resource(name=" key_out")
for side in (left, right):
wait[side] = sim.Queue(name=sidename(side) + "Wait")
lockmeters[side] = sim.Resource(capacity=locklength, name=sidename(side) + " lock meters", anonymous=True)
key_in[side] = sim.Resource(name=sidename(side) + " key in")
shipgenerator = Shipgenerator(name=sidename(side) + "Shipgenerator")
shipgenerator.side = side
lock = Lock(name="lock")
lock.side = left
do_animation()
env.run()
|
the-stack_0_15917 | #!/usr/bin/env python3
from math import exp, pi
import os
import random
import torch
import unittest
import gpytorch
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.priors import SmoothedBoxPrior
from gpytorch.distributions import MultivariateNormal
from torch.utils.data import TensorDataset, DataLoader
# Simple training data: let's try to learn a sine function,
# but with KISS-GP let's use 1,000 training examples.
def make_data():
train_x = torch.linspace(0, 1, 1000)
train_y = torch.sin(train_x * (4 * pi)) + torch.randn(train_x.size()) * 0.2
test_x = torch.linspace(0.02, 1, 51)
test_y = torch.sin(test_x * (4 * pi))
return train_x, train_y, test_x, test_y
class GPRegressionModel(gpytorch.models.ApproximateGP):
def __init__(self, grid_size=20, grid_bounds=[(-0.1, 1.1)]):
variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(
num_inducing_points=int(pow(grid_size, len(grid_bounds)))
)
variational_strategy = gpytorch.variational.GridInterpolationVariationalStrategy(
self, grid_size=grid_size, grid_bounds=grid_bounds, variational_distribution=variational_distribution
)
super(GPRegressionModel, self).__init__(variational_strategy)
self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-10, 10))
self.covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-3), exp(6), sigma=0.1)))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
class TestKISSGPVariationalRegression(unittest.TestCase):
def setUp(self):
if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
self.rng_state = torch.get_rng_state()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
random.seed(0)
def tearDown(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)
def test_kissgp_gp_mean_abs_error(self):
train_x, train_y, test_x, test_y = make_data()
train_dataset = TensorDataset(train_x, train_y)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=64)
model = GPRegressionModel()
likelihood = GaussianLikelihood()
mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=len(train_y))
        # We use Adam here to jointly optimize the model and likelihood parameters
optimizer = torch.optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.01)
# Our loss object
# We're using the VariationalELBO object
mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=train_y.size(0))
# The training loop
def train(n_epochs=15):
# We use a Learning rate scheduler from PyTorch to lower the learning rate during optimization
# We're going to drop the learning rate by 1/10 after 3/4 of training
# This helps the model converge to a minimum
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[0.75 * n_epochs], gamma=0.1)
for _ in range(n_epochs):
scheduler.step()
for x_batch, y_batch in train_loader:
x_batch = x_batch.float()
y_batch = y_batch.float()
optimizer.zero_grad()
output = model(x_batch)
loss = -mll(output, y_batch)
loss.backward()
optimizer.step()
train()
for _, param in model.named_parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
for param in likelihood.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
# Test the model
model.eval()
likelihood.eval()
test_preds = likelihood(model(test_x)).mean
mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
self.assertLess(mean_abs_error.squeeze().item(), 0.1)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_15918 |
class RandomKitchenSinks():
def __init__(self, gamma, n_components, random_state=None):
""" Parameters:
gamma: float
Parameter of the rbf kernel to be approximated exp(-gamma * x^2)
n_components: int
Number of components (output dimensionality) used to approximate the kernel
"""
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, Y=None):
import sklearn.kernel_approximation
self.n_components = int(self.n_components)
self.gamma = float(self.gamma)
self.preprocessor = sklearn.kernel_approximation.RBFSampler(
self.gamma, self.n_components, self.random_state)
self.preprocessor.fit(X)
return self
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
def get_model(name, config, random_state):
list_param = {"random_state": random_state}
for k in config:
if k.startswith("feature_preprocessor:kitchen_sinks:"):
param_name = k.split(":")[2]
list_param[param_name] = config[k]
model = RandomKitchenSinks(**list_param)
return (name, model)
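if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): the config keys below
    # follow the "feature_preprocessor:kitchen_sinks:<param>" convention parsed
    # by get_model() and are illustrative assumptions.
    import numpy as np
    config = {
        "feature_preprocessor:kitchen_sinks:gamma": 1.0,
        "feature_preprocessor:kitchen_sinks:n_components": 100,
    }
    name, model = get_model("kitchen_sinks", config, random_state=0)
    X = np.random.rand(50, 5)
    X_transformed = model.fit(X).transform(X)
    print(name, X_transformed.shape)  # e.g. "kitchen_sinks (50, 100)"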
|
the-stack_0_15922 | from conans import ConanFile, CMake, tools
import os
class OpenEXRConan(ConanFile):
name = "openexr"
version = "2.4.0"
description = "OpenEXR is a high dynamic-range (HDR) image file format developed by Industrial Light & " \
"Magic for use in computer imaging applications."
topics = ("conan", "openexr", "hdr", "image", "picture")
license = "BSD-3-Clause"
homepage = "https://github.com/openexr/openexr"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
generators = "cmake", "cmake_find_package"
exports_sources = "CMakeLists.txt"
_source_subfolder = "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
self.options.remove("fPIC")
def requirements(self):
self.requires("zlib/1.2.11")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("openexr-{}".format(self.version), self._source_subfolder)
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["PYILMBASE_ENABLE"] = False
cmake.definitions["OPENEXR_VIEWERS_ENABLE"] = False
cmake.definitions["OPENEXR_BUILD_BOTH_STATIC_SHARED"] = False
cmake.definitions["OPENEXR_BUILD_UTILS"] = False
cmake.definitions["BUILD_TESTING"] = False
cmake.configure()
return cmake
def _patch_files(self):
for lib in ("OpenEXR", "IlmBase"):
if self.settings.os == "Windows":
tools.replace_in_file(os.path.join(self._source_subfolder, lib, "config", "LibraryDefine.cmake"),
"${CMAKE_COMMAND} -E chdir ${CMAKE_INSTALL_FULL_LIBDIR}",
"${CMAKE_COMMAND} -E chdir ${CMAKE_INSTALL_FULL_BINDIR}")
if self.settings.build_type == "Debug":
tools.replace_in_file(os.path.join(self._source_subfolder, lib, "config", "LibraryDefine.cmake"),
"set(verlibname ${CMAKE_SHARED_LIBRARY_PREFIX}${libname}${@LIB@_LIB_SUFFIX}${CMAKE_SHARED_LIBRARY_SUFFIX})".replace("@LIB@", lib.upper()),
"set(verlibname ${CMAKE_SHARED_LIBRARY_PREFIX}${libname}${@LIB@_LIB_SUFFIX}_d${CMAKE_SHARED_LIBRARY_SUFFIX})".replace("@LIB@", lib.upper()))
tools.replace_in_file(os.path.join(self._source_subfolder, lib, "config", "LibraryDefine.cmake"),
"set(baselibname ${CMAKE_SHARED_LIBRARY_PREFIX}${libname}${CMAKE_SHARED_LIBRARY_SUFFIX})",
"set(baselibname ${CMAKE_SHARED_LIBRARY_PREFIX}${libname}_d${CMAKE_SHARED_LIBRARY_SUFFIX})")
def build(self):
self._patch_files()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE.md", src=self._source_subfolder, dst="licenses")
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "OpenEXR"
self.cpp_info.names["cmake_find_package_multi"] = "OpenEXR"
parsed_version = self.version.split(".")
lib_suffix = "-{}_{}".format(parsed_version[0], parsed_version[1])
if self.settings.build_type == "Debug":
lib_suffix += "_d"
self.cpp_info.libs = ["IlmImf{}".format(lib_suffix),
"IlmImfUtil{}".format(lib_suffix),
"IlmThread{}".format(lib_suffix),
"Iex{}".format(lib_suffix),
"IexMath{}".format(lib_suffix),
"Imath{}".format(lib_suffix),
"Half{}".format(lib_suffix)]
self.cpp_info.includedirs = [os.path.join("include", "OpenEXR"), "include"]
if self.options.shared and self.settings.os == "Windows":
self.cpp_info.defines.append("OPENEXR_DLL")
if self.settings.os == "Linux":
self.cpp_info.system_libs.append("pthread")
|
the-stack_0_15924 | """SCons.Tool.386asm
Tool specification for the 386ASM assembler for the Phar Lap ETS embedded
operating system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
from SCons.Tool.PharLapCommon import addPharLapPaths
import SCons.Util
as_module = __import__('as', globals(), locals(), [])
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
as_module.generate(env)
env['AS'] = '386asm'
env['ASFLAGS'] = SCons.Util.CLVar('')
env['ASPPFLAGS'] = '$ASFLAGS'
env['ASCOM'] = '$AS $ASFLAGS $SOURCES -o $TARGET'
env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS $SOURCES -o $TARGET'
addPharLapPaths(env)
def exists(env):
return env.Detect('386asm')
|
the-stack_0_15925 |
import offline
import gen
import matplotlib.pyplot as plt
import networkx as nx
import sys
def DrawDenseGraph(graph):
"""
BRIEF When the graph is dense, circular is the way to go
"""
nx_graph = NetworkXGraph(graph)
nx.draw_circular(nx_graph)
plt.show()
complement = nx.complement(nx_graph)
nx.draw_circular(complement)
plt.show()
print('{0:<10} = {1}'.format('e(G)' , nx_graph.size()))
print('{0:<10} = {1}'.format('e(~G)', complement.size()))
sys.stdout.flush()
def DrawSparseGraph(graph):
"""
BRIEF Use spring for drawing a sparse graph
"""
nx_graph = NetworkXGraph(graph)
nx.draw_spring(nx_graph)
plt.show()
def NetworkXGraph(graph):
"""
BRIEF We'll always use this code to create a NetworkX graph
"""
nx_graph = nx.Graph()
for name in graph.nodes:
nx_graph.add_node(name)
for edge in graph.edges:
nx_graph.add_edge(*edge)
return nx_graph
if __name__ == '__main__':
"""
BRIEF Main execution - draw the superfoods graph
"""
graph = offline.Graph(gen.Read(gen.SUPERFOOD_FILE))
graph.SetEdges(offline.Euclidean, .5)
DrawDenseGraph(graph)
graph.SetEdges(offline.Euclidean, .3)
DrawSparseGraph(graph)
|
the-stack_0_15926 | import json
import uuid
import logging
import copy
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
class ImportEscherMapUtil:
@staticmethod
def validate_eschermap_params(params, expected, opt_param=set()):
"""
Validates that required parameters are present.
Warns if unexpected parameters appear
"""
expected = set(expected)
opt_param = set(opt_param)
pkeys = set(params)
if expected - pkeys:
raise ValueError("Required keys {} not in supplied parameters"
.format(", ".join(expected - pkeys)))
defined_param = expected | opt_param
for param in params:
if param not in defined_param:
logging.warning("Unexpected parameter {} supplied".format(param))
def _save_escher_map(self, escher_data, workspace_id, escher_map_name):
"""
save KBaseFBA.EscherMap to workspace
"""
logging.info('start saving KBaseFBA.EscherMap')
if not isinstance(workspace_id, int):
logging.warning('Invalid workspace ID: {}'.format(workspace_id))
try:
workspace_id = self.dfu.ws_name_to_id(workspace_id)
except Exception:
raise ValueError('Cannot convert {} to valid workspace id'.format(workspace_id))
info = self.dfu.save_objects({'id': workspace_id,
'objects': [{'type': 'KBaseFBA.EscherMap',
'data': escher_data,
'name': escher_map_name}]})[0]
return "%s/%s/%s" % (info[6], info[0], info[4])
def _refactor_escher_data(self, escher_data):
"""
refactor escher data to better fit KBaseFBA.EscherMap object
"""
logging.info('start refactoring escher data')
refactored_escher_data = copy.deepcopy(escher_data)
if refactored_escher_data == escher_data:
logging.warning('No changes in escher data')
return refactored_escher_data
def __init__(self, config):
self.callback_url = config['SDK_CALLBACK_URL']
self.token = config['KB_AUTH_TOKEN']
self.dfu = DataFileUtil(self.callback_url)
logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',
level=logging.INFO)
def import_eschermap_from_staging(self, params):
"""
import_attribute_mapping_from_staging: import a JSON file as KBaseFBA.EscherMap
required params:
staging_file_subdir_path - subdirectory file path
e.g.
for file: /data/bulk/user_name/file_name
staging_file_subdir_path is file_name
for file: /data/bulk/user_name/subdir_1/subdir_2/file_name
staging_file_subdir_path is subdir_1/subdir_2/file_name
escher_map_name: output KBaseFBA.EscherMap object name
workspace_id: workspace ID
return:
obj_ref: return object reference
"""
self.validate_eschermap_params(params, ['staging_file_subdir_path', 'escher_map_name',
'workspace_id'])
download_staging_file_params = {
'staging_file_subdir_path': params.get('staging_file_subdir_path')
}
scratch_file_path = self.dfu.download_staging_file(
download_staging_file_params).get('copy_file_path')
try:
with open(scratch_file_path) as f:
escher_data = json.load(f)
except Exception:
raise ValueError('Failed to parse JSON file.')
escher_data = self._refactor_escher_data(escher_data)
obj_ref = self._save_escher_map(escher_data,
params['workspace_id'],
params['escher_map_name'])
returnVal = {'obj_ref': obj_ref}
return returnVal
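    # Illustrative call (assumption, not part of the original module):
    #   params = {
    #       'staging_file_subdir_path': 'subdir_1/subdir_2/my_map.json',
    #       'escher_map_name': 'my_escher_map',
    #       'workspace_id': 12345,
    #   }
    #   importer.import_eschermap_from_staging(params)  # -> {'obj_ref': '12345/2/1'}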
def generate_report(self, obj_ref, params):
"""
generate_report: generate summary report
obj_ref: generated workspace object references.
"""
logging.info('start generating report')
upload_message = 'Import Finished\n'
get_objects_params = {'object_refs': [obj_ref],
'ignore_errors': False}
object_data = self.dfu.get_objects(get_objects_params)
upload_message += "Imported Escher Map Name: "
upload_message += str(object_data.get('data')[0].get('info')[1]) + '\n'
upload_message += 'Imported File: {}\n'.format(params['staging_file_subdir_path'])
report_params = {'message': upload_message,
'objects_created': [{'ref': obj_ref,
'description': 'Imported Escher Map'}],
'workspace_id': params['workspace_id'],
'report_object_name': 'kb_upload_methods_report_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
|
the-stack_0_15928 | from datetime import datetime, timedelta, timezone
import unittest
import paniot
from . import mixin
class IotApiTest(mixin.AioMixin, unittest.IsolatedAsyncioTestCase):
async def test_01(self):
resp = await self.api.device(pagelength=0)
self.assertEqual(resp.status, 400)
async def test_02(self):
with self.assertRaises(paniot.ArgsError) as e:
resp = await self.api.device_details()
self.assertEqual(str(e.exception),
'deviceid or ip required')
async def test_03(self):
with self.assertRaises(paniot.ArgsError) as e:
resp = await self.api.device_details(
ip='x',
deviceid='x')
self.assertEqual(str(e.exception),
'deviceid and ip cannot be used at the same time')
async def test_04(self):
resp = await self.api.device_details(ip='x')
self.assertEqual(resp.status, 404)
async def test_05(self):
resp = await self.api.device_details(deviceid='x')
self.assertEqual(resp.status, 404)
async def test_06(self):
resp = await self.api.device(pagelength=1)
self.assertEqual(resp.status, 200)
x = await resp.json()
self.assertEqual(x['total'], 1)
self.assertEqual(len(x['devices']), 1)
key = 'number_of_caution_alerts'
self.assertNotIn(key, x['devices'][0],
'%s key requires detail' % key)
deviceid = x['devices'][0]['deviceid']
ip = x['devices'][0]['ip_address']
resp = await self.api.device_details(deviceid=deviceid)
self.assertEqual(resp.status, 200)
x = await resp.json()
self.assertEqual(x['deviceid'], deviceid)
resp = await self.api.device_details(ip=ip)
self.assertEqual(resp.status, 200)
x = await resp.json()
self.assertEqual(x['devices'][0]['ip_address'], ip)
async def test_07(self):
resp = await self.api.device(detail=True)
self.assertEqual(resp.status, 200)
x = await resp.json()
self.assertEqual(x['total'], len(x['devices']))
key = 'number_of_caution_alerts'
self.assertIn(key, x['devices'][0],
'%s key missing for detail' % key)
async def test_08(self):
d = datetime.now(tz=timezone.utc) + timedelta(seconds=10)
stime = d.strftime('%Y-%m-%dT%H:%M:%SZ')
resp = await self.api.device(stime=stime)
self.assertEqual(resp.status, 200)
x = await resp.json()
t = await resp.text()
msg = 'devices in future stime %s: ' % stime
msg += t
self.assertEqual(x['total'], 0, msg)
self.assertEqual(len(x['devices']), 0, msg)
async def test_09(self):
total = 0
async for ok, x in self.api.devices_all():
self.assertTrue(ok)
total += 1
if total > 1050:
break
|
the-stack_0_15929 | from __future__ import unicode_literals
from utils import CanadianScraper, CanadianPerson as Person
import re
import os
import subprocess
from pupa.scrape import Organization
from six.moves.urllib.request import urlopen
COUNCIL_PAGE = 'http://www.community.gov.yk.ca/pdf/loc_govdir.pdf'
class YukonMunicipalitiesPersonScraper(CanadianScraper):
def scrape(self):
response = urlopen(COUNCIL_PAGE).read()
pdf = open('/tmp/yt.pdf', 'w')
pdf.write(response)
pdf.close()
data = subprocess.check_output(['pdftotext', '-layout', '/tmp/yt.pdf', '-'])
data = re.split(r'\n\s*\n', data)
for municipality in data:
if 'Councillors' not in municipality:
continue
lines = municipality.split('\n')
if 'Page' in lines[0]:
lines.pop(0)
if not lines[0].strip():
lines.pop(0)
col1end = re.search(r'\s{2,}(\w)', lines[0].strip()).end()
col2end = re.search(r':\s{2,}(\w)', lines[0].strip()).end()
if 'Council' in lines[1]:
address = lines[2][:col1end - 1].strip() + ' ' + lines[3][:col1end - 1].strip()
district = lines[0][:col1end - 1].strip() + ' ' + lines[1][:col1end - 1].strip()
else:
address = lines[1][:col1end - 1].strip() + ' ' + lines[2][:col1end - 1].strip()
district = lines[0][:col1end - 1].strip()
organization = Organization(name=district + ' Council', classification='legislature', jurisdiction_id=self.jurisdiction.jurisdiction_id)
organization.add_source(COUNCIL_PAGE)
yield organization
phone = re.findall(r'(?<=Phone: )\(?(\d{3}[\)-] ?\d{3}-\d{4})', municipality)[0].replace(') ', '-')
email = re.findall(r'(?<=E-mail:) (\S*)', municipality)[0]
fax = None
if 'Fax' in municipality:
fax = re.findall(r'(?<=Fax: )\(?(\d{3}[\)-] ?\d{3}-\d{4})', municipality)[0].replace(') ', '-')
website = None
if 'Website' in municipality:
website = re.findall(r'((http:\/\/|www.)(\S*))', municipality)[0][0]
councillor_or_mayor = False
for line in lines:
if 'Mayor:' in line:
councillor_or_mayor = True
role = 'Mayor'
continue
if 'Councillors' in line:
councillor_or_mayor = True
role = 'Councillor'
continue
if councillor_or_mayor:
councillor = line[col1end - 1:col2end - 1].strip()
if not councillor:
continue
p = Person(primary_org='legislature', name=councillor, district=district)
p.add_source(COUNCIL_PAGE)
membership = p.add_membership(organization, role=role, district=district)
membership.add_contact_detail('address', address, 'legislature')
membership.add_contact_detail('voice', phone, 'legislature')
membership.add_contact_detail('email', email)
if fax:
membership.add_contact_detail('fax', fax, 'legislature')
if website:
p.add_link(website)
yield p
os.system('rm /tmp/yt.pdf')
|
the-stack_0_15930 | import tensorflow as tf
tf.compat.v1.disable_v2_behavior()
from src.datasets.newsgroups import NewsGroups
from src.models.cnn1 import getCNN1
from src.models.predict import predict_mcdropout
import tensorflow as tf
def build():
# config
RANDOM_STATE = 1
VOCAB_SIZE = 20000
MAX_SEQUENCE_LENGTH = 500
NUM_SPLITS = 5
# get data
newsGroups = NewsGroups()
X_train_set, y_train_set, X_test_set, y_test_set, X_val, y_val = newsGroups.getRankedDataSplits(
vocab_size=VOCAB_SIZE,
max_sequence_length=MAX_SEQUENCE_LENGTH,
n_splits=NUM_SPLITS,
test_size=4500,
random_state=RANDOM_STATE
)
# training
models_n = []
for i in range(NUM_SPLITS):
model = tf.keras.models.load_model(f'models/newsGroups/CNN1_BL_{i}')
models_n.append(model)
# predict
dfs = [predict_mcdropout(models_n[i], X_val, y_val) for i in range(NUM_SPLITS)]
#save df
name = 'CNN1_MCD'
i = 0
for df in dfs:
df.to_pickle(f"pickle/newsGroups/{name}_{i}.pkl")
i = i+1 |
the-stack_0_15931 | import re
from dndme.commands import Command
from dndme.gametime import Date
class AdjustDate(Command):
keywords = ['date']
help_text = """{keyword}
{divider}
Summary: Query, set, or adjust the in-game date using the calendar
specified at startup.
Usage:
{keyword}
{keyword} <day> <month> [<year>]
{keyword} [+|-]<days>
Examples:
{keyword}
{keyword} 20 July
{keyword} 20 July 1969
{keyword} +7
{keyword} -10
"""
def get_suggestions(self, words):
calendar = self.game.calendar
if len(words) == 3:
return [month['name']
for month in calendar.cal_data['months'].values()]
def do_command(self, *args):
calendar = self.game.calendar
data = ' '.join(args)
if not data:
print(f"The date is {calendar}")
return
m_adjustment = re.match('([+-]\d+)', data)
if m_adjustment:
days = int(m_adjustment.groups()[0])
calendar.adjust_date(days)
print(f"The date is now {calendar}")
self.game.changed = True
return
m_set = re.match('(\d+) (\w+) *(\d*)', data)
if m_set:
day, month, year = m_set.groups()
day = int(day)
year = int(year) if year else calendar.date.year
calendar.set_date(Date(day, month, year))
print(f"The date is now {calendar}")
self.game.changed = True
return
print(f"Invalid date: {data}") |
the-stack_0_15932 | import copy
from itertools import zip_longest
from typing import Any, Callable, Dict, List, Optional
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities import rank_zero_info
class PrintTableMetricsCallback(Callback):
"""
Prints a table with the metrics in columns on every epoch end
Example::
from pl_bolts.callbacks import PrintTableMetricsCallback
callback = PrintTableMetricsCallback()
Pass into trainer like so:
.. code-block:: python
trainer = pl.Trainer(callbacks=[callback])
trainer.fit(...)
# ------------------------------
# at the end of every epoch it will print
# ------------------------------
# loss│train_loss│val_loss│epoch
# ──────────────────────────────
# 2.2541470527648926│2.2541470527648926│2.2158432006835938│0
"""
def __init__(self) -> None:
self.metrics: List = []
def on_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
metrics_dict = copy.copy(trainer.callback_metrics)
self.metrics.append(metrics_dict)
rank_zero_info(dicts_to_table(self.metrics))
def dicts_to_table(
dicts: List[Dict],
keys: Optional[List[str]] = None,
pads: Optional[List[str]] = None,
fcodes: Optional[List[str]] = None,
convert_headers: Optional[Dict[str, Callable]] = None,
header_names: Optional[List[str]] = None,
skip_none_lines: bool = False,
replace_values: Optional[Dict[str, Any]] = None
) -> str:
"""
Generate ascii table from dictionary
Taken from (https://stackoverflow.com/questions/40056747/print-a-list-of-dictionaries-in-table-form)
Args:
dicts: input dictionary list; empty lists make keys OR header_names mandatory
keys: order list of keys to generate columns for; no key/dict-key should
suffix with '____' else adjust code-suffix
pads: indicate padding direction and size, eg <10 to right pad alias left-align
fcodes: formating codes for respective column type, eg .3f
convert_headers: apply converters(dict) on column keys k, eg timestamps
header_names: supply for custom column headers instead of keys
skip_none_lines: skip line if contains None
replace_values: specify per column keys k a map from seen value to new value;
new value must comply with the columns fcode; CAUTION: modifies input (due speed)
Example:
>>> a = {'a': 1, 'b': 2}
>>> b = {'a': 3, 'b': 4}
>>> print(dicts_to_table([a, b]))
a│b
───
1│2
3│4
"""
# optional arg prelude
if keys is None:
if len(dicts) > 0:
keys = dicts[0].keys() # type: ignore[assignment]
elif header_names is not None:
keys = header_names
else:
raise ValueError('keys or header_names mandatory on empty input list')
if pads is None:
pads = [''] * len(keys) # type: ignore[arg-type]
elif len(pads) != len(keys): # type: ignore[arg-type]
raise ValueError(f'bad pad length {len(pads)}, expected: {len(keys)}') # type: ignore[arg-type]
if fcodes is None:
fcodes = [''] * len(keys) # type: ignore[arg-type]
    elif len(fcodes) != len(keys):  # type: ignore[arg-type]
raise ValueError(f'bad fcodes length {len(fcodes)}, expected: {len(keys)}') # type: ignore[arg-type]
if convert_headers is None:
convert_headers = {}
if header_names is None:
header_names = keys
if replace_values is None:
replace_values = {}
# build header
headline = '│'.join(f"{v:{pad}}" for v, pad in zip_longest(header_names, pads)) # type: ignore[arg-type]
underline = '─' * len(headline)
# suffix special keys to apply converters to later on
marked_keys = [h + '____' if h in convert_headers else h for h in keys] # type: ignore[union-attr]
marked_values = {}
s = '│'.join(f"{{{h}:{pad}{fcode}}}" for h, pad, fcode in zip_longest(marked_keys, pads, fcodes))
lines = [headline, underline]
for d in dicts:
none_keys = [k for k, v in d.items() if v is None]
if skip_none_lines and none_keys:
continue
elif replace_values:
for k in d.keys():
if k in replace_values and d[k] in replace_values[k]:
d[k] = replace_values[k][d[k]]
if d[k] is None:
raise ValueError(f"bad or no mapping for key '{k}' is None. Use skip or change replace mapping.")
elif none_keys:
raise ValueError(f'keys {none_keys} are None in {d}. Do skip or use replace mapping.')
for h in convert_headers:
if h in keys: # type: ignore[operator]
converter = convert_headers[h]
marked_values[h + '____'] = converter(d)
line = s.format(**d, **marked_values)
lines.append(line)
return '\n'.join(lines)
|
the-stack_0_15934 | # Prepares spreadsheet summarizing email schedulings.
#
# summarize_all_time() prepares two tabs:
# "Summary All-Time" tab: Aggregates counts by day and by 'summary_all_time_group_by_fields', a comma-separated list of payload fields. For example,
# 'event_state, event_url, event_title, event_type, event_start_timestamp_local'
#
# "Summary By Week" tab: Total scheduling count by week
#
# forecast() prepares one tab, all schedulings on date 'ds', one row per
# recipient on date 'ds', up to 'limit' recipients.
#
# Other inputs:
# 'mailing_name', e.g. 'event_invite'
# 'output_sheet', e.g. 'https://docs.google.com/spreadsheets/d/1KcZIW6piCZ60GR68KTN_UJB5wpfIh8Idc2b2E-7enFs'
import asyncio
import datetime
import pandas as pd
class BsdTriggeredEmailForecast:
FROM_SCHEDULINGS_JOIN_PAYLOADS = """
FROM "{schema}"."triggered_email_schedulings"
JOIN "{schema}"."triggered_email_payloads_{mailing_name}"
ON "triggered_email_payloads_{mailing_name}".ds = "triggered_email_schedulings".ds
AND (("triggered_email_payloads_{mailing_name}".cons_id IS NULL AND "triggered_email_schedulings".cons_id IS NULL)
OR "triggered_email_payloads_{mailing_name}".cons_id = "triggered_email_schedulings".cons_id)
AND "triggered_email_payloads_{mailing_name}".email = "triggered_email_schedulings".email
AND (("triggered_email_payloads_{mailing_name}".secondary_id IS NULL AND "triggered_email_schedulings".secondary_id IS NULL)
OR "triggered_email_payloads_{mailing_name}".secondary_id = "triggered_email_schedulings".secondary_id)
"""
GET_SCHEDULINGS_SQL = """
SELECT
"triggered_email_schedulings".email
, {output_fields}
FROM_AND_JOIN_GOES_HERE
WHERE "triggered_email_schedulings".ds = '{ds}'
AND "triggered_email_schedulings".mailing_name = '{mailing_name}'
ORDER BY "triggered_email_schedulings".email
, "triggered_email_schedulings".secondary_id
, "triggered_email_schedulings".scheduled_at
LIMIT {limit}
;
""".replace(
"FROM_AND_JOIN_GOES_HERE", FROM_SCHEDULINGS_JOIN_PAYLOADS
)
GET_SUMMARY_ALL_TIME_SQL = """
SELECT
"triggered_email_schedulings".ds
, {summary_all_time_group_by_fields}
, COUNT(*) AS cons_count
FROM_AND_JOIN_GOES_HERE
WHERE "triggered_email_schedulings".mailing_name = '{mailing_name}'
GROUP BY "triggered_email_schedulings".ds, {summary_all_time_group_by_fields}
ORDER BY 1 DESC, cons_count DESC
;
""".replace(
"FROM_AND_JOIN_GOES_HERE", FROM_SCHEDULINGS_JOIN_PAYLOADS
)
GET_SUMMARY_BY_WEEK_SQL = """
SELECT
DATE_TRUNC('w', "triggered_email_schedulings".ds) AS week_begin
, COUNT(*) AS cons_count
FROM_AND_JOIN_GOES_HERE
WHERE "triggered_email_schedulings".mailing_name = '{mailing_name}'
GROUP BY DATE_TRUNC('w', "triggered_email_schedulings".ds)
ORDER BY 1 DESC
;
""".replace(
"FROM_AND_JOIN_GOES_HERE", FROM_SCHEDULINGS_JOIN_PAYLOADS
)
TAB_NAME_SUMMARY_ALL_TIME = "Summary All-Time"
TAB_NAME_SUMMARY_BY_WEEK = "Summary By Week"
def __init__(self, civis, schema, caliban):
self.civis = civis
self.schema = schema
self.caliban = caliban
def forecast(self, ds, mailing_name, output_sheet, output_fields, tab_name, limit):
schedulings = self.get_schedulings(ds, mailing_name, output_fields, limit)
if schedulings.empty:
return
self.caliban.export_to_worksheets(output_sheet, tab_name, schedulings)
def summarize_all_time(
self, mailing_name, output_sheet, summary_all_time_group_by_fields
):
if not summary_all_time_group_by_fields:
return
summary = self.get_summary_all_time(
mailing_name, summary_all_time_group_by_fields
)
self.caliban.export_to_worksheets(
output_sheet, self.TAB_NAME_SUMMARY_ALL_TIME, summary
)
summary_by_week = self.get_summary_by_week(mailing_name)
self.caliban.export_to_worksheets(
output_sheet, self.TAB_NAME_SUMMARY_BY_WEEK, summary_by_week
)
def get_schedulings(self, ds, mailing_name, output_fields, limit):
query = self.GET_SCHEDULINGS_SQL.format(
ds=ds,
mailing_name=mailing_name,
schema=self.schema,
output_fields=output_fields,
limit=limit,
)
print("get_schedulings query:")
print(query)
df = asyncio.run(self.civis.read_civis_sql(query))
print(f"Got {len(df)} schedulings for {mailing_name} on {ds}:")
print(df)
return df
def get_summary_all_time(self, mailing_name, summary_all_time_group_by_fields):
query = self.GET_SUMMARY_ALL_TIME_SQL.format(
mailing_name=mailing_name,
schema=self.schema,
summary_all_time_group_by_fields=summary_all_time_group_by_fields,
)
print("get_summary_all_time query:")
print(query)
df = asyncio.run(self.civis.read_civis_sql(query))
print(f"Got summary all-time for {mailing_name}:")
print(df)
return df
def get_summary_by_week(self, mailing_name):
query = self.GET_SUMMARY_BY_WEEK_SQL.format(
mailing_name=mailing_name, schema=self.schema
)
print("get_summary_by_week query:")
print(query)
df = asyncio.run(self.civis.read_civis_sql(query))
print(f"Got summary by week for {mailing_name}:")
print(df)
return df
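# Usage sketch: the `civis` / `caliban` helpers and all values below are assumed to be
# supplied by the caller; names, dates and URLs are placeholders, not real data.
# forecaster = BsdTriggeredEmailForecast(civis, schema="email", caliban=caliban)
# forecaster.forecast(ds="2021-01-01", mailing_name="event_invite",
#                     output_sheet="https://docs.google.com/spreadsheets/d/...",
#                     output_fields='"triggered_email_schedulings".scheduled_at',
#                     tab_name="Forecast", limit=1000)
# forecaster.summarize_all_time("event_invite", output_sheet,
#                               "event_state, event_title")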
|
the-stack_0_15935 | """
Authors: Pratik Bhatu.
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
from argparse import RawTextHelpFormatter
import os
import os.path
import sys
import json
from RandomForests.convert_pickle_to_graphviz import convert_pickle_to_graphviz
from RandomForests.parse_graphviz_to_ezpc_input import parse_graphviz_to_ezpc_input
from RandomForests.patch_ezpc_code_params import patch_ezpc_code_params
def parse_args():
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)
parser.add_argument(
"--task",
required=False,
type=str,
choices=["cla", "reg"],
help="""Choose cla for classificatin.
Choose reg for regression.
""",
)
parser.add_argument(
"--no_features",
required=False,
type=int,
help="Number of features in the dataset.",
)
parser.add_argument(
"--model_type",
required=False,
type=str,
choices=["tree", "forest"],
help="""Choose tree for decision tree.
Choose forest for random forest.
""",
)
parser.add_argument(
"--pickle",
required=False,
type=str,
help="Path to the pickle file",
)
parser.add_argument(
"--scale",
required=False,
type=int,
default=10,
help="Scaling factor for float -> fixedpt.",
)
parser.add_argument(
"--bitlen",
required=False,
type=int,
default=64,
choices=[32, 64],
help="Bit length to compile for.",
)
parser.add_argument(
"--role",
required=True,
type=str,
choices=["server", "client"],
default="server",
help="Pickle file owner is server, data owner is client",
)
parser.add_argument(
"--config",
required=False,
type=str,
help="Path to the client config file",
)
args = parser.parse_args()
return args
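# Example invocations (sketch): the script and file names below are placeholders;
# the flags themselves mirror the argparse definitions above.
#   python compile_tree.py --role server --task cla --model_type forest --no_features 10 --pickle model.pkl
#   python compile_tree.py --role client --config ezpc_build_dir/client.json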
if __name__ == "__main__":
args = parse_args()
if args.role == "server":
if args.pickle is None:
print("Path to pickle file not specified. See --help for options")
if args.model_type is None:
print("Model type not specified. See --help for options")
if args.no_features is None:
print("Number of features not specified. See --help for options.")
if args.task is None:
print("Task is not specified. See --help for options.")
if None in [args.pickle, args.model_type, args.no_features, args.task]:
sys.exit()
else:
if args.config is None:
print(
"Path to the client config file not specified. See --help for options"
)
sys.exit()
# args.task, args.model_type, args.no_features, args.pickle, args.scale, args.bitlen, args.config
if args.role == "server":
if not os.path.isfile(args.pickle):
sys.exit("Pickle file (" + args.pickle + ") specified does not exist")
pickle_dir = os.path.dirname(os.path.abspath(args.pickle))
build_dir = os.path.join(pickle_dir, "ezpc_build_dir")
os.system("rm -rf {build_dir}".format(build_dir=build_dir))
os.mkdir(build_dir)
# Dumps tree0, tree1, ..treeN.txt
no_of_estim = convert_pickle_to_graphviz(
args.pickle, args.task, args.model_type, build_dir
)
max_tree_depth = -1
for i in range(0, no_of_estim):
tree_file_path = os.path.join(build_dir, "tree" + str(i) + ".txt")
max_depth = parse_graphviz_to_ezpc_input(
tree_file_path, args.task, args.scale
)
max_tree_depth = max(max_tree_depth, max_depth)
print("Parsed all trees in Random Forest")
no_features = args.no_features
scale = args.scale
bitlen = args.bitlen
client_json = {
"no_of_trees": no_of_estim,
"depth": max_tree_depth,
"no_of_features": no_features,
"scale": scale,
"bitlen": bitlen,
}
json_path = os.path.join(build_dir, "client.json")
with open(json_path, "w") as f:
json.dump(client_json, f)
else:
if not os.path.isfile(args.config):
sys.exit("Config file (" + args.config + ") specified does not exist")
with open(args.config) as f:
client_json = json.load(f)
no_of_estim = client_json["no_of_trees"]
max_tree_depth = client_json["depth"]
no_features = client_json["no_of_features"]
scale = client_json["scale"]
bitlen = client_json["bitlen"]
config_dir = os.path.dirname(os.path.abspath(args.config))
build_dir = os.path.join(config_dir, "ezpc_build_dir")
os.system("rm -rf {build_dir}".format(build_dir=build_dir))
os.mkdir(build_dir)
ezpc_file_name = "random_forest.ezpc"
output_path = os.path.join(build_dir, ezpc_file_name)
patch_ezpc_code_params(no_of_estim, max_tree_depth, no_features, scale, output_path)
athos_dir = os.path.dirname(os.path.abspath(__file__))
ezpc_dir = os.path.join(athos_dir, "../EzPC/EzPC/")
os.system('cp "{ezpc}" "{ezpc_dir}"'.format(ezpc=output_path, ezpc_dir=ezpc_dir))
os.chdir(ezpc_dir)
ezpc_args = ""
ezpc_args = "--bitlen {bl} --codegen {target} ".format(bl=bitlen, target="ABY")
output_name = "random_forest0.cpp"
os.system(
'eval `opam config env`; ./ezpc.sh "{}" '.format(ezpc_file_name) + ezpc_args
)
os.system("./compile_aby.sh {}".format(output_name))
output_binary_path = os.path.join(build_dir, "random_forest")
os.system(
'mv "{bin}" "{op_bin}"'.format(bin="random_forest0", op_bin=output_binary_path)
)
print("\n\n")
print("Compiled binary: " + output_binary_path)
if args.role == "server":
model_weights = "weight_sf_" + str(scale) + ".inp"
weights_path = os.path.join(build_dir, model_weights)
print("Model weights dumped in " + weights_path)
print("Send client.json to the client machine. Path: ", json_path)
print("\n\n")
|
the-stack_0_15936 | """
Module containing all the utilities to compute and integrate the
recipe water footprint into the recommender system.
"""
import pandas as pd
from configuration import load_configuration
class WaterFootprintUtils:
"""
    Class that represents utilities for the water footprint
reduction. This class provides a method for computing
the user score based on his reviews and orders.
It also provides a method for reducing the given
recommendations for the user.
"""
def __init__(self):
config = load_configuration()
self.orders = pd.read_pickle(config["path_orders"])
self.recipes = pd.read_pickle(config["path_recipes"])
self.user_scores = pd.read_pickle(config["path_user_scores"])
self.classes = ["A", "B", "C", "D", "E"]
def __get_recipe_class_to_recommend(self, user_score):
"""
Get the recipe categories to recommend based on the user score.
        The categories are lower than or equal to the user score.
:param user_score: the score of the user.
:return: a list containing the categories of the recipe to
recommend.
"""
return self.classes[:self.classes.index(user_score)+1]
def __get_recipe_class(self, recipe_id):
"""
Get the category of the recipe from its id.
:param recipe_id: the id of the recipe.
:return: the category of the recipe if exists.
"""
category = self.recipes.query(f"id == {recipe_id}")["category"].tolist()
return category[0] if category else None
def __get_user_score(self, user_id):
"""
Get the score of the user based on his reviews.
User orders are summed and weighted based on their
categories. Then based on the result the user score
is found.
:param user_id: the id of the user.
:return: the user score.
"""
score = self.user_scores.query(f"user_id == {user_id}")["score"].tolist()
return score[0] if score else None
def __get_recipe_category(self, recipe_id):
"""
Return the category of the recipe row from the
dataframe based on the recipe id.
:param recipe_id: the id of the recipe.
:return: the category of the recipe at the provided id.
"""
recipe = self.recipes.query(f"id == {recipe_id}")["category"].tolist()
return recipe[0] if recipe else "F"
def get_recommendations_correct(self, recommendations, user_id, algo_type):
"""
Get the correct recipe recommendations from a list of
recommendations ids based on the user score and the
type of the algorithm.
:param recommendations: a list containing all the recommended recipes.
:param user_id: the id of the user.
:param algo_type: the type of the algorithm
(Content Based or Collaborative Filtering)
:return: a list containing all the recipes filtered by
water footprint.
"""
user_score = self.__get_user_score(user_id)
class_to_rec = self.__get_recipe_class_to_recommend(user_score)
return (
[
rec
for rec in recommendations
if self.recipes["category"][rec] in class_to_rec
]
if algo_type == "cb"
else [
recipe_id
for recipe_id in recommendations
# if self.recipes.query(f"id == {recipe_id}")["category"].tolist()[0] in class_to_rec
if self.__get_recipe_category(recipe_id) in class_to_rec
]
)
if __name__ == "__main__":
wf = WaterFootprintUtils()
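    # Illustrative call (sketch): the ids below are placeholders, not values taken
    # from the pickled datasets.
    # filtered = wf.get_recommendations_correct(
    #     recommendations=[101, 202, 303], user_id=42, algo_type="cb")
    # print(filtered)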
|
the-stack_0_15938 | import time
import json
import logging
import threading
from queue import Queue
from accounts import data_get_all, check_session, login
from connections import get, post
from utils import load_account, create_path, intervals
logging.basicConfig(
format='[%(asctime)s][%(levelname)s]: %(message)s',
level=logging.DEBUG, datefmt='%d/%b/%Y:%H:%M:%S'
)
# Logging logging.INFO only so it doesnt floaded with logging.DEBUG
for logs in logging.Logger.manager.loggerDict:
logging.getLogger(logs).setLevel(logging.INFO)
class Stacher:
def __init__(self, email, password, save_path=None, exclude=[]):
self.email = email
self.password = password
self.path = save_path
self.exclude = []
for gameworld in exclude:
self.exclude.append(gameworld.lower())
self.account = self.check_account()
self.start()
def start(self):
avatar_pool = {}
while True:
logging.info('check avatar.')
lobby_details = data_get_all(self.account)
avatars = [avatar for caches in lobby_details['cache']
if 'Collection:Avatar:' in caches['name']
for avatar in caches['data']['cache']
]
for avatar in avatars:
if avatar['data']['consumersId'] not in avatar_pool:
if avatar['data']['worldName'].lower() not in self.exclude:
av = self.account.build_avatar(
avatar['data']['worldName'],
avatar['data']['consumersId'],
self.get_ranking,
self.path
)
avatar_pool[avatar['data']['consumersId']] = av
# starting avatar
for gi in avatar_pool:
try:
avatar_pool[gi].start()
except Exception as e:
logging.debug(f'{e}')
continue
# sleeping
interval = intervals(10)
logging.info(f'Stacher sleeping:{interval//60}:{interval%60}')
time.sleep(interval)
def check_account(self):
try:
account = load_account()
if self.test_login(account):
account = login(self.email, self.password)
logging.info(f'Welcome!!! {account.details["avatarName"]}')
else:
logging.info(f'Welcome back!! {account.details["avatarName"]}')
except FileNotFoundError:
account = login(self.email, self.password)
logging.info(f'Welcome!!! {account.details["avatarName"]}')
finally:
return account
@staticmethod
def test_login(account):
return 'error' in check_session(account, state='lobby')
@staticmethod
def stacher_thread(task, ranking_type,
ranking_subtype, avatar, url):
while True:
start, end, results = task.get()
if start is None:
break
try:
data = {
'controller': 'ranking',
'action': 'getRanking',
'params': {
'start': start,
'end': end,
'rankingType': ranking_type,
'rankingSubtype': ranking_subtype
},
'session': avatar.session_gameworld
}
r = post(url+f'c=ranking&a=getRanking&t{(time.time()*1000):.0f}',
headers=avatar.headers_gameworld,
json=data,
cookies=avatar.cookies_gameworld,
timeout=60
)
results.extend(r.json()['response']['results'])
except Exception as e:
logging.debug(f'{e}')
finally:
task.task_done()
@staticmethod
def get_ranking(avatar, ranking_type,
ranking_subtype, table_name):
# get total player
url = avatar.lobby_api
data = {
'controller': 'cache',
'action': 'get',
'params': {
'names': [f'GameWorld:{avatar.gameworld_id}']
},
'session': avatar.session_lobby
}
r = post(url,
headers=avatar.headers_lobby,
json=data,
cookies=avatar.cookies_lobby,
timeout=60
)
total_player = int(r.json()['cache'][0]['data']['playersRegistered'])
# prepare thread
url = avatar.gameworld_api
start, end = 0, 9
results = []
threads = []
task = Queue()
for _ in range(2):
worker = threading.Thread(target=Stacher.stacher_thread,
args=(task, ranking_type,
ranking_subtype, avatar, url
)
)
worker.start()
threads.append(worker)
# dispatch thread
for _ in range((total_player//10)+1):
task.put((start, end, results))
time.sleep(0.1)
start, end = start+10, end+10
# threading done
task.join()
for _ in range(2):
task.put((None, None, None))
for t in threads:
t.join()
# save results
path = create_path(avatar.gameworld.lower(),
avatar.gameworld_id,
avatar.path
)
try:
            with open(path, 'r') as f:
                cache = json.load(f)
try:
cache[table_name]
except KeyError:
cache[table_name] = {}
except FileNotFoundError:
cache = {}
cache[table_name] = {}
result = (line for line in results)
data = (
{
x['playerId']: {
'name': x['name'],
'data': [{
'timestamp': time.time(),
'points': x['points']
}]
}
} for x in result
)
for x in data:
for pid in x:
if pid in cache[table_name]:
cache[table_name][pid]['data'].extend(x[pid]['data'])
else:
cache[table_name][pid] = x[pid]
with open(path, 'w') as f:
f.write(json.dumps(cache, indent=4))
logging.info(f'{table_name} on {avatar.gameworld} done.')
|
the-stack_0_15941 | """ANTLR3 exception hierarchy"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from .constants import INVALID_TOKEN_TYPE
class BacktrackingFailed(Exception):
"""@brief Raised to signal failed backtrack attempt"""
pass
class RecognitionException(Exception):
"""@brief The root of the ANTLR exception hierarchy.
To avoid English-only error messages and to generally make things
as flexible as possible, these exceptions are not created with strings,
but rather the information necessary to generate an error. Then
the various reporting methods in Parser and Lexer can be overridden
to generate a localized error message. For example, MismatchedToken
exceptions are built with the expected token type.
So, don't expect getMessage() to return anything.
Note that as of Java 1.4, you can access the stack trace, which means
that you can compute the complete trace of rules from the start symbol.
This gives you considerable context information with which to generate
useful error messages.
ANTLR generates code that throws exceptions upon recognition error and
also generates code to catch these exceptions in each rule. If you
want to quit upon first error, you can turn off the automatic error
handling mechanism using rulecatch action, but you still need to
override methods mismatch and recoverFromMismatchSet.
In general, the recognition exceptions can track where in a grammar a
problem occurred and/or what was the expected input. While the parser
knows its state (such as current input symbol and line info) that
state can change before the exception is reported so current token index
is computed and stored at exception time. From this info, you can
perhaps print an entire line of input not just a single token, for example.
Better to just say the recognizer had a problem and then let the parser
figure out a fancy report.
"""
def __init__(self, input=None):
Exception.__init__(self)
# What input stream did the error occur in?
self.input = None
# What is index of token/char were we looking at when the error
# occurred?
self.index = None
# The current Token when an error occurred. Since not all streams
# can retrieve the ith Token, we have to track the Token object.
# For parsers. Even when it's a tree parser, token might be set.
self.token = None
# If this is a tree parser exception, node is set to the node with
# the problem.
self.node = None
# The current char when an error occurred. For lexers.
self.c = None
# Track the line at which the error occurred in case this is
# generated from a lexer. We need to track this since the
# unexpected char doesn't carry the line info.
self.line = None
self.charPositionInLine = None
        # If you are parsing a tree node stream, you will encounter some
# imaginary nodes w/o line/col info. We now search backwards looking
# for most recent token with line/col info, but notify getErrorHeader()
# that info is approximate.
self.approximateLineInfo = False
if input is not None:
self.input = input
self.index = input.index()
# late import to avoid cyclic dependencies
from .streams import TokenStream, CharStream
from .tree import TreeNodeStream
if isinstance(self.input, TokenStream):
self.token = self.input.LT(1)
self.line = self.token.line
self.charPositionInLine = self.token.charPositionInLine
if isinstance(self.input, TreeNodeStream):
self.extractInformationFromTreeNodeStream(self.input)
else:
if isinstance(self.input, CharStream):
self.c = self.input.LT(1)
self.line = self.input.line
self.charPositionInLine = self.input.charPositionInLine
else:
self.c = self.input.LA(1)
def extractInformationFromTreeNodeStream(self, nodes):
from antlr3.tree import Tree, CommonTree
from antlr3.tokens import CommonToken
self.node = nodes.LT(1)
adaptor = nodes.adaptor
payload = adaptor.getToken(self.node)
if payload is not None:
self.token = payload
if payload.line <= 0:
# imaginary node; no line/pos info; scan backwards
i = -1
priorNode = nodes.LT(i)
while priorNode is not None:
priorPayload = adaptor.getToken(priorNode)
if priorPayload is not None and priorPayload.line > 0:
# we found the most recent real line / pos info
self.line = priorPayload.line
self.charPositionInLine = priorPayload.charPositionInLine
self.approximateLineInfo = True
break
i -= 1
priorNode = nodes.LT(i)
else: # node created from real token
self.line = payload.line
self.charPositionInLine = payload.charPositionInLine
elif isinstance(self.node, Tree):
self.line = self.node.line
self.charPositionInLine = self.node.charPositionInLine
if isinstance(self.node, CommonTree):
self.token = self.node.token
else:
type = adaptor.getType(self.node)
text = adaptor.getText(self.node)
self.token = CommonToken(type=type, text=text)
def getUnexpectedType(self):
"""Return the token type or char of the unexpected input element"""
from antlr3.streams import TokenStream
from antlr3.tree import TreeNodeStream
if isinstance(self.input, TokenStream):
return self.token.type
elif isinstance(self.input, TreeNodeStream):
adaptor = self.input.treeAdaptor
return adaptor.getType(self.node)
else:
return self.c
unexpectedType = property(getUnexpectedType)
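# Typical handling pattern (sketch): the rule and method names below are illustrative
# of ANTLR3-generated recognizers and are not defined in this module.
#   try:
#       parser.someRule()
#   except RecognitionException as exc:
#       parser.reportError(exc)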
class MismatchedTokenException(RecognitionException):
"""@brief A mismatched char or Token or tree node."""
def __init__(self, expecting, input):
RecognitionException.__init__(self, input)
self.expecting = expecting
def __str__(self):
#return "MismatchedTokenException("+self.expecting+")"
return "MismatchedTokenException(%r!=%r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
class UnwantedTokenException(MismatchedTokenException):
"""An extra token while parsing a TokenStream"""
def getUnexpectedToken(self):
return self.token
def __str__(self):
exp = ", expected %s" % self.expecting
if self.expecting == INVALID_TOKEN_TYPE:
exp = ""
if self.token is None:
return "UnwantedTokenException(found=%s%s)" % (None, exp)
return "UnwantedTokenException(found=%s%s)" % (self.token.text, exp)
__repr__ = __str__
class MissingTokenException(MismatchedTokenException):
"""
We were expecting a token but it's not found. The current token
is actually what we wanted next.
"""
def __init__(self, expecting, input, inserted):
MismatchedTokenException.__init__(self, expecting, input)
self.inserted = inserted
def getMissingType(self):
return self.expecting
def __str__(self):
if self.inserted is not None and self.token is not None:
return "MissingTokenException(inserted %r at %r)" % (
self.inserted, self.token.text)
if self.token is not None:
return "MissingTokenException(at %r)" % self.token.text
return "MissingTokenException"
__repr__ = __str__
class MismatchedRangeException(RecognitionException):
"""@brief The next token does not match a range of expected types."""
def __init__(self, a, b, input):
RecognitionException.__init__(self, input)
self.a = a
self.b = b
def __str__(self):
return "MismatchedRangeException(%r not in [%r..%r])" % (
self.getUnexpectedType(), self.a, self.b
)
__repr__ = __str__
class MismatchedSetException(RecognitionException):
"""@brief The next token does not match a set of expected types."""
def __init__(self, expecting, input):
RecognitionException.__init__(self, input)
self.expecting = expecting
def __str__(self):
return "MismatchedSetException(%r not in %r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
class MismatchedNotSetException(MismatchedSetException):
"""@brief Used for remote debugger deserialization"""
def __str__(self):
return "MismatchedNotSetException(%r!=%r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
class NoViableAltException(RecognitionException):
"""@brief Unable to decide which alternative to choose."""
def __init__(
self, grammarDecisionDescription, decisionNumber, stateNumber, input
):
RecognitionException.__init__(self, input)
self.grammarDecisionDescription = grammarDecisionDescription
self.decisionNumber = decisionNumber
self.stateNumber = stateNumber
def __str__(self):
return "NoViableAltException(%r!=[%r])" % (
self.unexpectedType, self.grammarDecisionDescription
)
__repr__ = __str__
class EarlyExitException(RecognitionException):
"""@brief The recognizer did not match anything for a (..)+ loop."""
def __init__(self, decisionNumber, input):
RecognitionException.__init__(self, input)
self.decisionNumber = decisionNumber
class FailedPredicateException(RecognitionException):
"""@brief A semantic predicate failed during validation.
Validation of predicates
occurs when normally parsing the alternative just like matching a token.
Disambiguating predicate evaluation occurs when we hoist a predicate into
a prediction decision.
"""
def __init__(self, input, ruleName, predicateText):
RecognitionException.__init__(self, input)
self.ruleName = ruleName
self.predicateText = predicateText
def __str__(self):
return "FailedPredicateException(" + self.ruleName + ",{" + self.predicateText + "}?)"
__repr__ = __str__
class MismatchedTreeNodeException(RecognitionException):
"""@brief The next tree mode does not match the expected type."""
def __init__(self, expecting, input):
RecognitionException.__init__(self, input)
self.expecting = expecting
def __str__(self):
return "MismatchedTreeNodeException(%r!=%r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
|
the-stack_0_15942 | import argparse
import tskit
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
'Quick hack: add an extra mutations column calculated by re-laying'
' mutations using parsimony. The modified'
'csv is output to stdout, so do e.g. `python add_RF.py file.csv > new.csv`'
)
)
parser.add_argument("csv_file")
parser.add_argument("-c", "--column_containing_paths", default=-1,
help=(
"The column in the CSV containing the paths (can be negative, for pos from "
"end). The value in this column is the path to the .trees file"
)
)
args = parser.parse_args()
with open(args.csv_file, "rt") as f:
new_fields = ["", ""]
for line_num, line in enumerate(f):
fields = line.strip().split(",")
try:
ts = tskit.load(fields[args.column_containing_paths])
tables = ts.dump_tables()
tables.mutations.clear()
parsimony_muts = 0
tree_iter = ts.trees()
tree = next(tree_iter)
anc_states = []
for v in ts.variants():
while v.site.position >= tree.interval.right:
tree = next(tree_iter)
anc_state, muts = tree.map_mutations(v.genotypes, v.alleles)
anc_states.append(anc_state)
for m in muts:
tables.mutations.append(
m.replace(parent=tskit.NULL, site=v.site.id))
parsimony_muts += len(muts)
tables.compute_mutation_parents()
tables.sites.packset_ancestral_state(anc_states)
ts = tables.tree_sequence()
new_fields[0] = str(parsimony_muts)
new_fields[1] = str(ts.nbytes)
except FileNotFoundError:
new_fields = ["", ""] if line_num>0 else ["parsimony_muts", "parsimony_nbytes"]
# Add elements before the c'th one
for f in new_fields:
fields.insert(args.column_containing_paths, f)
print(",".join(fields))
|
the-stack_0_15944 | """Common verify functions for rsvp"""
# Python
import re
import logging
# Genie
from genie.utils import Dq
from genie.utils.timeout import Timeout
from genie.metaparser.util.exceptions import SchemaEmptyParserError
log = logging.getLogger(__name__)
def verify_lsp_neighbor(
device,
ipv4_address,
expected_status="Up",
max_time=60,
check_interval=10,
lsp_state_flag=False
):
""" Verify lsp state is up for neighbor
Args:
device ('obj'): device to use
ipv4_address ('str'): IPv4 address to check neighbor node
expected_status ('str'): Expected neighbor lsp status
max_time ('int'): Maximum time to keep checking
check_interval ('int'): How often to check
lsp_state_flag ('bool'): Flag for verifying Lsp state
Returns:
True/False
Raises:
N/A
"""
# Parse IPv4 address
ipv4_address = ipv4_address.split("/")[0]
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
try:
output = device.parse("show rsvp neighbor detail")
except SchemaEmptyParserError:
log.info('Parser is empty')
timeout.sleep()
continue
# Example RSVP Neighbor Detail Dictionary
# {
# "rsvp-neighbor-information": {
# "rsvp-neighbor-count": str,
# "rsvp-neighbor": [
# {
# "rsvp-neighbor-address": str,
# "rsvp-neighbor-status": str,
# ...
# }
# ]
# }
# }
# Get RSVP neighbor list
for neighbor in output.q.get_values("rsvp-neighbor"):
if neighbor.get("rsvp-neighbor-address") == ipv4_address:
# Case when user wants to check the Lsp status of neighbor
if (lsp_state_flag and
neighbor.get("rsvp-neighbor-status") == expected_status):
return True
break
timeout.sleep()
return False
def verify_rsvp_neighbor(device, expected_ipaddress, max_time=30, check_interval=10):
"""
Verify there is a neighbor
Args:
device (`obj`): Device object
expected_ipaddress (`str`): The IP address that is expected in the output
max_time (`int`): Max time, default: 30
check_interval (`int`): Check interval, default: 10
Returns:
result (`bool`): Verified result
"""
# {'rsvp-neighbor-information':
# { 'rsvp-neighbor-count': '4',
# 'rsvp-neighbor': [
# {'rsvp-neighbor-address': '59.128.3.252',
# 'neighbor-idle': '39:15',
# 'neighbor-up-count': '0',
# 'neighbor-down-count': '0',
# 'last-changed-time': '39:15',
# 'hello-interval': '9',
# 'hellos-sent': '262',
# 'hellos-received': '0',
# 'messages-received': '0'},
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
try:
out = device.parse("show rsvp neighbor")
except SchemaEmptyParserError:
timeout.sleep()
continue
if expected_ipaddress in out.q.get_values("rsvp-neighbor-address"):
return True
timeout.sleep()
return False
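# Example call (sketch): `uut` below stands for an already-connected Genie device object.
#   verify_rsvp_neighbor(uut, expected_ipaddress='10.0.0.1', max_time=30, check_interval=10)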
def verify_rsvp_session_state(device, expected_state, session_name=None,
session_type="Transit", max_time=60, check_interval=10):
""" Verify RSVP session state
Args:
device (obj): device object
expected_state (str): Expected state
session_name (str, optional): Session name. Defaults to None.
session_type (str): Session type. Defaults to "Transit"
max_time (int, optional): Maximum timeout time. Defaults to 60.
check_interval (int, optional): Check interval. Defaults to 10.
"""
#'rsvp-session-information': {
# 'rsvp-session-data': [{
# 'session-type': 'Transit',
# 'count': '30',
# 'rsvp-session': [{
# 'destination-address': '10.49.194.125',
# 'source-address': '10.49.194.127',
# 'lsp-state': 'Up',
# 'route-count': '0',
# 'rsb-count': '1',
# 'resv-style': 'FF',
# 'label-in': '46',
# 'label-out': '44',
# 'name': 'test_lsp_01'
# },
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
try:
out = device.parse('show rsvp session')
except SchemaEmptyParserError:
timeout.sleep()
continue
for session in out.q.get_values('rsvp-session-data'):
if session.get('session-type') == session_type:
session_data = Dq(session).get_values('rsvp-session')
for data in session_data:
if session_name and session_name != data.get('name'):
continue
if data.get('lsp-state').lower() != expected_state.lower():
continue
return True
timeout.sleep()
return False
def verify_rsvp_session_state(device, expected_state, session_name=None,
session_type="Transit", max_time=60, check_interval=10):
""" Verify RSVP session state
Args:
device (obj): device object
expected_state (str): Expected state
session_name (str, optional): Session name. Defaults to None.
session_type (str): Which session to look into. Defaults to "Transit"
max_time (int, optional): Maximum timeout time. Defaults to 60.
check_interval (int, optional): Check interval. Defaults to 10.
"""
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
try:
out = device.parse('show rsvp session')
except SchemaEmptyParserError:
timeout.sleep()
continue
# Example dictionary
# {
# "rsvp-session-information": {
# "rsvp-session-data": [{
# "session-type": str,
# "rsvp-session": [{
# "lsp-state": str,
# "name": str,
# }],
# }]
# }
# }
for session in out.q.get_values('rsvp-session-data'):
if session.get('session-type') == session_type:
session_data = Dq(session).get_values('rsvp-session')
for data in session_data:
                if session_name and session_name != data.get('name'):
continue
if data.get('lsp-state').lower() != expected_state.lower():
continue
return True
timeout.sleep()
return False |
the-stack_0_15946 | # References:
# https://developers.google.com/gmail/api/quickstart/python
# https://developers.google.com/gmail/api/guides/sending
# https://www.thepythoncode.com/article/use-gmail-api-in-python
from __future__ import print_function
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from email.mime.text import MIMEText
import base64
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/gmail.send']
def create_message(sender, to, subject, message_test):
message = MIMEText(message_test)
message['to'] = to
message['from'] = sender
message['subject'] = subject
return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}
class Gmail():
def __init__(self):
creds = None
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.json', 'w') as token:
token.write(creds.to_json())
self.service = build('gmail', 'v1', credentials=creds)
def send_message(self, message):
try:
message = (self.service.users().messages().send(userId='me', body=message).execute())
print('Message Id: {}'.format(message['id']))
return message
except Exception as e:
print(e)
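# Minimal usage sketch (addresses and message text are placeholders):
# if __name__ == '__main__':
#     gmail = Gmail()
#     msg = create_message('[email protected]', '[email protected]',
#                          'Test subject', 'Hello from the Gmail API')
#     gmail.send_message(msg)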
|
the-stack_0_15948 | from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
import pyqtgraph
import PyQt5.QtWidgets as qtg  # QWidget/QVBoxLayout live in QtWidgets under PyQt5
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
matplotlib.use('Qt5Agg')
class MatplotlibWidget(qtg.QWidget):
"""
Implements a Matplotlib figure inside a QWidget.
Use getFigure() and redraw() to interact with matplotlib.
Example::
mw = MatplotlibWidget()
subplot = mw.getFigure().add_subplot(111)
subplot.plot(x,y)
mw.draw()
"""
def __init__(self, size=(5.0, 4.0), dpi=100):
qtg.QWidget.__init__(self)
self.fig = Figure(size, dpi=dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.vbox = qtg.QVBoxLayout()
self.vbox.addWidget(self.toolbar)
self.vbox.addWidget(self.canvas)
self.setLayout(self.vbox)
def getFigure(self):
return self.fig
def draw(self):
self.canvas.draw()
# Create the data
fs = 10e3
N = 1e5
amp = 2 * np.sqrt(2)
noise_power = 0.01 * fs / 2
time = np.arange(N) / float(fs)
mod = 500*np.cos(2*np.pi*0.25*time)
carrier = amp * np.sin(2*np.pi*3e3*time + mod)
noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
noise *= np.exp(-time/5)
x = carrier + noise
f, t, Sxx = signal.spectrogram(x, fs)
# Interpret image data as row-major instead of col-major
pyqtgraph.setConfigOptions(imageAxisOrder='row-major')
pyqtgraph.mkQApp()
win = pyqtgraph.GraphicsLayoutWidget()
# A plot area (ViewBox + axes) for displaying the image
p1 = win.addPlot()
# Item for displaying image data
img = pyqtgraph.ImageItem()
p1.addItem(img)
# Add a histogram with which to control the gradient of the image
hist = pyqtgraph.HistogramLUTItem()
# Link the histogram to the image
hist.setImageItem(img)
# If you don't add the histogram to the window, it stays invisible, but I find it useful.
win.addItem(hist)
# Show the window
win.show()
# Fit the min and max levels of the histogram to the data available
hist.setLevels(np.min(Sxx), np.max(Sxx))
# This gradient is roughly comparable to the gradient used by Matplotlib
# You can adjust it and then save it using hist.gradient.saveState()
hist.gradient.restoreState(
{'mode': 'rgb',
'ticks': [(0.5, (0, 182, 188, 255)),
(1.0, (246, 111, 0, 255)),
(0.0, (75, 0, 113, 255))]})
# Sxx contains the amplitude for each pixel
img.setImage(Sxx)
# Scale the X and Y Axis to time and frequency (standard is pixels)
img.scale(t[-1]/np.size(Sxx, axis=1),
f[-1]/np.size(Sxx, axis=0))
# Limit panning/zooming to the spectrogram
p1.setLimits(xMin=0, xMax=t[-1], yMin=0, yMax=f[-1])
# Add labels to the axis
p1.setLabel('bottom', "Time", units='s')
# If you include the units, Pyqtgraph automatically scales the axis and adjusts the SI prefix (in this case kHz)
p1.setLabel('left', "Frequency", units='Hz')
# Plotting with Matplotlib in comparison
plt.pcolormesh(t, f, Sxx)
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.colorbar()
plt.show()
pyqtgraph.Qt.QtGui.QApplication.instance().exec_()
|
the-stack_0_15949 | from .dataset import DataSet, DataSetMode, RawDataSet
from calamari_ocr.ocr.data_processing import DataPreprocessor
from calamari_ocr.ocr.text_processing import TextProcessor
from calamari_ocr.ocr.augmentation import DataAugmenter
from typing import Generator, Tuple, List, Any
import numpy as np
import multiprocessing
from collections import namedtuple
import queue
from calamari_ocr.utils.multiprocessing import tqdm_wrapper
from abc import ABC, abstractmethod
import logging
from .queue_helper import MaxElementsQueuer
from ..augmentation.dataaugmentationparams import DataAugmentationAmount, DataAugmentationAmountReference
logger = logging.getLogger(__name__)
class OrderedQueueTask:
def __init__(self, input_queue, output_queue, context=multiprocessing.get_context()):
self.input_queue = input_queue
self.output_queue = output_queue
self.context = context
self.p = self.context.Process(daemon=True, target=self.run)
def start(self):
self.p.start()
def stop(self):
self.p.terminate()
def join(self):
self.p.join()
def run(self) -> None:
data = []
current_idx = 0
while True:
while True:
try:
data.append(self.input_queue.get(timeout=0.1))
except queue.Empty:
continue
except KeyboardInterrupt:
return
break
data.sort(key=lambda data: data[0])
while len(data) > 0 and data[0][0] <= current_idx:
try:
self.output_queue.put(data[0], timeout=0.1)
self.output_queue.task_done()
del data[0]
current_idx += 1
except queue.Full:
continue
except KeyboardInterrupt:
return
DataProcessingTaskData = namedtuple("DataProcessingTaskData", [
"skip_invalid_gt",
"data_aug_params",
"text_processor",
"data_processor",
"data_augmenter",
"generate_only_non_augmented",
])
class DataProcessingTask:
def __init__(self, params, input_queue: multiprocessing.JoinableQueue, output_queue: multiprocessing.JoinableQueue, context=multiprocessing.get_context()):
self.params = params
self.input_queue = input_queue
self.output_queue = output_queue
self.p = context.Process(daemon=True, target=self.run)
def start(self):
self.p.start()
def stop(self):
self.p.terminate()
def join(self):
self.p.join()
def run(self) -> None:
while True:
try:
data = self.input_queue.get(timeout=0.1)
except queue.Empty:
continue
except KeyboardInterrupt:
# allow keyboard interrupt
return
out = self.apply_single(*data)
if out:
while True:
try:
self.output_queue.put(out, timeout=0.1)
break
except queue.Full:
continue
except KeyboardInterrupt:
return
self.output_queue.task_done()
def apply_single(self, idx, sample_id, line, text):
#if not dataset.is_sample_valid(sample, line, text):
# if not skip_invalid_gt:
# print("ERROR: invalid sample {}".format(sample))
# return None
if self.params.data_processor and line is not None:
line, params = self.params.data_processor.apply([line], 1, False)[0]
else:
params = None
if self.params.text_processor and text is not None:
text = self.params.text_processor.apply([text], 1, False)[0]
# data augmentation
if not self.params.data_aug_params.no_augs() \
and line is not None \
and not self.params.generate_only_non_augmented.value \
and self.params.data_augmenter \
and np.random.rand() <= self.params.data_aug_params.to_rel():
line, text = self.params.data_augmenter.augment_single(line, text)
return idx, sample_id, line, text, params
class InputDataset(ABC):
def __init__(self,
mode: DataSetMode,
):
self.mode = mode
self._generate_only_non_augmented = multiprocessing.Value('b', False)
self.initialized = False
def __enter__(self):
if self.initialized:
raise AssertionError("Input dataset already initialized.")
logger.debug("InputDataset {} entered".format(self))
self.initialized = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.initialized = False
logger.debug("InputDataset {} exited".format(self))
def check_initialized(self):
if not self.initialized:
raise AssertionError("InputDataset is not initialised. Call 'with InputDataset() as input_dataset:'. "
"After the scope is closed the threads will be closed, too, for cleaning up.")
@abstractmethod
def __len__(self):
return 0
@abstractmethod
def epoch_size(self):
return len(self)
@property
def generate_only_non_augmented(self):
return self._generate_only_non_augmented.value
@generate_only_non_augmented.setter
def generate_only_non_augmented(self, value):
self._generate_only_non_augmented.value = value
@abstractmethod
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
@abstractmethod
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
class RawInputDataset(InputDataset):
def __init__(self,
mode: DataSetMode,
raw_datas, raw_texts, raw_params,
):
super().__init__(mode)
self.preloaded_datas, self.preloaded_texts, self.preloaded_params = raw_datas, raw_texts, raw_params
def __len__(self):
if self._generate_only_non_augmented.value:
return len(self.preloaded_params)
return len(self.preloaded_datas)
def epoch_size(self):
return len(self)
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
for text in self.preloaded_texts:
yield text
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
for epoch in range(epochs):
if self.mode == DataSetMode.TRAIN:
# only train here, pred and eval are covered by else block
                # train mode won't generate parameters
if self._generate_only_non_augmented.value:
# preloaded datas are ordered: first original data, then data augmented, however,
# preloaded params store the 'length' of the non augmented data
                    # thus, only original data is yielded
for data, text, params in zip(self.preloaded_datas, self.preloaded_texts, self.preloaded_params):
yield data, text, None
else:
# yield all data, however no params
for data, text in zip(self.preloaded_datas, self.preloaded_texts):
yield data, text, None
else:
# all other modes generate everything we got, but does not support data augmentation
for data, text, params in zip(self.preloaded_datas, self.preloaded_texts, self.preloaded_params):
yield data, text, params
class StreamingInputDataset(InputDataset):
def __init__(self,
dataset: DataSet,
data_preprocessor: DataPreprocessor,
text_preprocessor: TextProcessor,
data_augmenter: DataAugmenter = None,
data_augmentation_factor: float = 0,
skip_invalid_gt=True,
processes=4):
super().__init__(dataset.mode)
self.dataset = dataset
self.data_processor = data_preprocessor
self.text_processor = text_preprocessor
self.skip_invalid_gt = skip_invalid_gt
self.data_augmenter = data_augmenter
self.data_augmentation_params = DataAugmentationAmount.from_factor(data_augmentation_factor)
self.mp_context = multiprocessing.get_context('spawn')
self.processes = max(1, processes)
if data_augmenter and dataset.mode != DataSetMode.TRAIN and dataset.mode != DataSetMode.PRED_AND_EVAL:
# no pred_and_eval bc it's augmentation
raise Exception('Data augmentation is only supported for training, but got {} dataset instead'.format(dataset.mode))
if not self.data_augmentation_params.no_augs() and self.data_augmenter is None:
raise Exception('Requested data augmentation, but no data augmented provided. Use e. g. SimpleDataAugmenter')
self.data_input_queue = None
self.unordered_output_queue = None
self.data_processing_tasks = []
self.data_generator = None
self.ordered_output_queue = None
self.data_ordering = None
def __enter__(self):
super().__enter__()
# create all tasks and queues
self.max_queuer = MaxElementsQueuer(self.processes * 4, ctx=self.mp_context)
self.data_input_queue = self.max_queuer.input_queue
self.ordered_output_queue = self.max_queuer.output_queue
self.unordered_output_queue = self.mp_context.JoinableQueue()
self.data_processing_tasks = [
DataProcessingTask(
DataProcessingTaskData(
self.skip_invalid_gt,
self.data_augmentation_params,
self.text_processor,
self.data_processor,
self.data_augmenter,
self._generate_only_non_augmented,
),
self.data_input_queue,
self.unordered_output_queue,
) for _ in range(self.processes)
]
self.data_generator = self.dataset.create_generator(self.mp_context, self.data_input_queue)
self.data_generator.start()
self.data_ordering = OrderedQueueTask(self.unordered_output_queue, self.ordered_output_queue, self.mp_context)
self.data_ordering.start()
for p in self.data_processing_tasks:
p.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# stop all tasks
self.data_generator.stop()
for p in self.data_processing_tasks:
p.stop()
self.data_ordering.stop()
self.data_input_queue = None
self.unordered_output_queue = None
self.data_processing_tasks = []
self.data_generator = None
self.ordered_output_queue = None
self.data_ordering = None
super().__exit__(exc_type, exc_val, exc_tb)
def __len__(self):
return len(self.dataset.samples())
def epoch_size(self):
if self._generate_only_non_augmented.value:
return len(self)
return self.data_augmentation_params.epoch_size(len(self))
def to_raw_input_dataset(self, processes=1, progress_bar=False, text_only=False) -> RawInputDataset:
print("Preloading dataset type {} with size {}".format(self.dataset.mode, len(self)))
prev = self._generate_only_non_augmented.value
self._generate_only_non_augmented.value = True
datas, texts, params = zip(*list(tqdm_wrapper(self.generator(epochs=1, text_only=text_only),
desc="Preloading data", total=len(self.dataset),
progress_bar=progress_bar)))
preloaded_datas, preloaded_texts, preloaded_params = datas, texts, params
self._generate_only_non_augmented.value = prev
if not self.data_augmentation_params.no_augs() and (self.dataset.mode == DataSetMode.TRAIN or self.dataset.mode == DataSetMode.PRED_AND_EVAL):
abs_n_augs = self.data_augmentation_params.to_abs()
preloaded_datas, preloaded_texts \
= self.data_augmenter.augment_datas(list(datas), list(texts), n_augmentations=abs_n_augs,
processes=processes, progress_bar=progress_bar)
return RawInputDataset(self.mode, preloaded_datas, preloaded_texts, preloaded_params)
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
for _, text, _ in self.generator(epochs=1, text_only=True):
if self.text_processor:
text = self.text_processor.apply([text], 1, False)[0]
yield text
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
self.data_generator.request(epochs, text_only)
for epoch in range(epochs):
for iter in range(len(self.dataset)):
while True:
try:
global_id, id, line, text, params = self.ordered_output_queue.get(timeout=0.1)
yield line, text, params
except queue.Empty:
# test data ordering thread was canceled
if not self.data_ordering.p.is_alive() and self.ordered_output_queue.empty():
return
continue
except KeyboardInterrupt:
return
break
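# Usage sketch: the dataset and preprocessor objects below are assumed to be constructed
# elsewhere (e.g. via calamari's dataset and processor factories).
# with StreamingInputDataset(dataset, data_preprocessor, text_preprocessor) as input_dataset:
#     for line, text, params in input_dataset.generator(epochs=1):
#         pass  # feed the preprocessed samples to training / prediction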
|
the-stack_0_15952 | import os
import time
import argparse
import math
from numpy import finfo
import pdb
import torch
from distributed import apply_gradient_allreduce
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from model import Tacotron2
from data_utils import TextMelLoader, TextMelCollate
from loss_function import Tacotron2Loss
from logger import Tacotron2Logger
from hparams import create_hparams
from text import text_to_sequence, sequence_to_text
from tqdm import tqdm
def reduce_tensor(tensor, n_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= n_gpus
return rt
def init_distributed(hparams, n_gpus, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(
backend=hparams.dist_backend, init_method=hparams.dist_url,
world_size=n_gpus, rank=rank, group_name=group_name)
print("Done initializing distributed")
def prepare_dataloaders(hparams):
# Get data, data loaders and collate function ready
trainset = TextMelLoader(hparams.training_files, hparams)
valset = TextMelLoader(hparams.validation_files, hparams)
collate_fn = TextMelCollate(hparams.n_frames_per_step)
if hparams.distributed_run:
train_sampler = DistributedSampler(trainset)
shuffle = False
else:
train_sampler = None
shuffle = True
train_loader = DataLoader(trainset, num_workers=1, shuffle=shuffle,
sampler=train_sampler,
batch_size=hparams.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
return train_loader, valset, collate_fn
def prepare_directories_and_logger(output_directory, log_directory, rank):
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
logger = Tacotron2Logger(os.path.join(output_directory, log_directory))
else:
logger = None
return logger
def load_model(hparams):
model = Tacotron2(hparams).cuda()
if hparams.fp16_run:
model.decoder.attention_layer.score_mask_value = finfo('float16').min
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
return model
def warm_start_model(checkpoint_path, model, ignore_layers):
assert os.path.isfile(checkpoint_path)
print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model_dict = checkpoint_dict['state_dict']
if len(ignore_layers) > 0:
model_dict = {k: v for k, v in model_dict.items()
if k not in ignore_layers}
dummy_dict = model.state_dict()
dummy_dict.update(model_dict)
model_dict = dummy_dict
model.load_state_dict(model_dict)
return model
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Loading checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
learning_rate = checkpoint_dict['learning_rate']
iteration = checkpoint_dict['iteration']
print("Loaded checkpoint '{}' from iteration {}" .format(
checkpoint_path, iteration))
return model, optimizer, learning_rate, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
torch.save({'iteration': iteration,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def validate(model, criterion, valset, iteration, batch_size, n_gpus,
collate_fn, logger, distributed_run, rank):
"""Handles all the validation scoring and printing"""
model.eval()
with torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=1,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
val_loss = 0.0
for i, batch in enumerate(val_loader):
x, y = model.parse_batch(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_val_loss = loss.item()
val_loss += reduced_val_loss
val_loss = val_loss / (i + 1)
model.train()
if rank == 0:
print("Validation loss {}: {:9f} ".format(iteration, val_loss))
logger.log_validation(val_loss, model, y, y_pred, iteration)
def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
rank, group_name, hparams):
"""Training and validation logging results to tensorboard and stdout
Params
------
output_directory (string): directory to save checkpoints
log_directory (string) directory to save tensorboard logs
checkpoint_path(string): checkpoint path
n_gpus (int): number of gpus
rank (int): rank of current gpu
hparams (object): comma separated list of "name=value" pairs.
"""
if hparams.distributed_run:
init_distributed(hparams, n_gpus, rank, group_name)
torch.manual_seed(hparams.seed)
torch.cuda.manual_seed(hparams.seed)
model = load_model(hparams)
learning_rate = hparams.learning_rate
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
weight_decay=hparams.weight_decay)
if hparams.fp16_run:
from apex import amp
model, optimizer = amp.initialize(
model, optimizer, opt_level='O2')
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
criterion = Tacotron2Loss()
logger = prepare_directories_and_logger(
output_directory, log_directory, rank)
train_loader, valset, collate_fn = prepare_dataloaders(hparams)
# Load checkpoint if one exists
iteration = 0
epoch_offset = 0
if checkpoint_path is not None:
if warm_start:
model = warm_start_model(
checkpoint_path, model, hparams.ignore_layers)
else:
model, optimizer, _learning_rate, iteration = load_checkpoint(
checkpoint_path, model, optimizer)
if hparams.use_saved_learning_rate:
learning_rate = _learning_rate
iteration += 1 # next iteration is iteration + 1
epoch_offset = max(0, int(iteration / len(train_loader)))
model.train()
is_overflow = False
    # ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, hparams.epochs):
print("Epoch: {}".format(epoch), flush=True)
for i, batch in enumerate(train_loader):
start = time.perf_counter()
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
model.zero_grad()
#itext_padded, iinput_lengths, imel_padded, igate_padded, ioutput_lengths = batch
#pdb.set_trace()
#print(sequence_to_text(itext_padded[0].tolist()))
#print('.')
x, y = model.parse_batch(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if hparams.distributed_run:
reduced_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_loss = loss.item()
if hparams.fp16_run:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if hparams.fp16_run:
grad_norm = torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), hparams.grad_clip_thresh)
is_overflow = math.isnan(grad_norm)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(), hparams.grad_clip_thresh)
optimizer.step()
if not is_overflow and rank == 0:
duration = time.perf_counter() - start
print("Train loss {} {:.6f} Grad Norm {:.6f} {:.2f}s/it".format(
iteration, reduced_loss, grad_norm, duration), flush=True)
logger.log_training(
reduced_loss, grad_norm, learning_rate, duration, iteration)
if not is_overflow and (iteration % hparams.iters_per_checkpoint == 0):
validate(model, criterion, valset, iteration,
hparams.batch_size, n_gpus, collate_fn, logger,
hparams.distributed_run, rank)
if rank == 0:
checkpoint_path = os.path.join(
output_directory, "checkpoint_{}".format(iteration))
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_directory', type=str,
help='directory to save checkpoints')
parser.add_argument('-l', '--log_directory', type=str,
help='directory to save tensorboard logs')
parser.add_argument('-c', '--checkpoint_path', type=str, default=None,
required=False, help='checkpoint path')
parser.add_argument('--warm_start', action='store_true',
help='load model weights only, ignore specified layers')
parser.add_argument('--n_gpus', type=int, default=1,
required=False, help='number of gpus')
parser.add_argument('--rank', type=int, default=0,
required=False, help='rank of current gpu')
parser.add_argument('--group_name', type=str, default='group_name',
required=False, help='Distributed group name')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
hparams = create_hparams(args.hparams)
torch.backends.cudnn.enabled = hparams.cudnn_enabled
torch.backends.cudnn.benchmark = hparams.cudnn_benchmark
print("FP16 Run:", hparams.fp16_run)
print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling)
print("Distributed Run:", hparams.distributed_run)
print("cuDNN Enabled:", hparams.cudnn_enabled)
print("cuDNN Benchmark:", hparams.cudnn_benchmark)
train(args.output_directory, args.log_directory, args.checkpoint_path,
args.warm_start, args.n_gpus, args.rank, args.group_name, hparams)
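# --- Hedged usage sketch (not part of the original script) ---
# A typical single-GPU invocation; the directory names and hyperparameter
# overrides below are hypothetical examples, not values required by the script:
#   python train.py -o checkpoints -l logs --hparams=batch_size=32,epochs=500
# For multi-GPU runs, one process per GPU would be launched with matching
# --n_gpus/--rank values and distributed_run=True set in the hparams.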
|
the-stack_0_15953 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import six
from aiida.tools.dbimporters.baseclasses import (DbImporter, DbSearchResults,
UpfEntry)
class NnincDbImporter(DbImporter):
"""
Database importer for NNIN/C Pseudopotential Virtual Vault.
"""
def _str_clause(self, key, alias, values):
"""
Returns part of HTTP GET query for querying string fields.
"""
if not isinstance(values, six.string_types):
raise ValueError("incorrect value for keyword '{}' -- only "
'strings and integers are accepted'.format(alias))
return '{}={}'.format(key, values)
_keywords = {'xc_approximation': ['frmxcprox', _str_clause],
'xc_type': ['frmxctype', _str_clause],
'pseudopotential_class': ['frmspclass', _str_clause],
'element': ['element', None]}
def __init__(self, **kwargs):
self._query_url = 'http://nninc.cnf.cornell.edu/dd_search.php'
self.setup_db(**kwargs)
def query_get(self, **kwargs):
"""
Forms a HTTP GET query for querying the NNIN/C Pseudopotential
Virtual Vault.
:return: a string with HTTP GET statement.
"""
get_parts = []
for key in self._keywords.keys():
if key in kwargs.keys():
values = kwargs.pop(key)
if self._keywords[key][1] is not None:
get_parts.append(
self._keywords[key][1](self,
self._keywords[key][0],
key,
values))
if kwargs.keys():
raise NotImplementedError("search keyword(s) '"
"', '".join(kwargs.keys()) + \
"' is(are) not implemented for NNIN/C")
return self._query_url + '?' + '&'.join(get_parts)
def query(self, **kwargs):
"""
Performs a query on the NNIN/C Pseudopotential Virtual Vault using
``keyword = value`` pairs, specified in ``kwargs``.
:return: an instance of
:py:class:`aiida.tools.dbimporters.plugins.nninc.NnincSearchResults`.
"""
from six.moves import urllib
import re
query = self.query_get(**kwargs)
        response = urllib.request.urlopen(query).read().decode('utf-8')
        results = re.findall(r"psp_files/([^']+)\.UPF", response)
elements = kwargs.get('element', None)
if elements and not isinstance(elements, list):
elements = [elements]
if elements:
results_now = set()
for psp in results:
for element in elements:
if psp.startswith('{}.'.format(element)):
results_now = results_now | set([psp])
results = list(results_now)
return NnincSearchResults([{'id': x} for x in results])
def setup_db(self, query_url=None, **kwargs):
"""
Changes the database connection details.
"""
if query_url:
self._query_url = query_url
if kwargs.keys():
raise NotImplementedError( \
"unknown database connection parameter(s): '" + \
"', '".join(kwargs.keys()) + \
"', available parameters: 'query_url'")
def get_supported_keywords(self):
"""
Returns the list of all supported query keywords.
:return: list of strings
"""
return self._keywords.keys()
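# Hedged usage sketch (not part of the original module): the keyword values
# below are illustrative, and query() needs network access to the NNIN/C vault.
def _nninc_query_example():
    importer = NnincDbImporter()
    # query_get() turns the non-'element' keywords into GET parameters, e.g.
    # http://nninc.cnf.cornell.edu/dd_search.php?frmxcprox=GGA
    print(importer.query_get(xc_approximation='GGA'))
    # query() fetches the result page and filters by element client-side.
    results = importer.query(xc_approximation='GGA', element='Si')
    return len(results)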
class NnincSearchResults(DbSearchResults):
"""
Results of the search, performed on NNIN/C Pseudopotential Virtual
Vault.
"""
_base_url = 'http://nninc.cnf.cornell.edu/psp_files/'
def __init__(self, results):
super(NnincSearchResults, self).__init__(results)
self._return_class = NnincEntry
def __len__(self):
return len(self._results)
def _get_source_dict(self, result_dict):
"""
Returns a dictionary, which is passed as kwargs to the created
DbEntry instance, describing the source of the entry.
:param result_dict: dictionary, describing an entry in the results.
"""
return {'id': result_dict['id']}
def _get_url(self, result_dict):
"""
Returns an URL of an entry CIF file.
:param result_dict: dictionary, describing an entry in the results.
"""
return self._base_url + result_dict['id'] + '.UPF'
class NnincEntry(UpfEntry):
"""
Represents an entry from NNIN/C Pseudopotential Virtual Vault.
"""
def __init__(self, uri, **kwargs):
"""
Creates an instance of
:py:class:`aiida.tools.dbimporters.plugins.nninc.NnincEntry`, related
to the supplied URI.
"""
super(NnincEntry, self).__init__(db_name='NNIN/C Pseudopotential Virtual Vault',
db_uri='http://nninc.cnf.cornell.edu',
uri=uri,
**kwargs)
|
the-stack_0_15956 | """
-------------------------------------------------------
helper
a couple of helper functions
-------------------------------------------------------
Author: Dallas Fraser
ID: 110242560
Email: [email protected]
Version: 2014-09-10
-------------------------------------------------------
"""
import networkx as nx
def make_co_cricket():
'''
make_co_cricket
assembles a co-cricket
Parameters:
None
Returns:
g: the co-cricket (Graph)
'''
g = make_diamond()
g.add_node(4)
return g
def make_kite():
'''
make_kite
assembles a kite (co-chair)
Parameters:
None
Returns:
kite: the kite (Graph)
'''
kite = make_diamond()
kite.add_node(4)
kite.add_edge(2, 4)
return kite
def make_claw():
'''
make_claw
assembles a claw
Parameters:
None
Returns:
claw: the claw (Graph)
'''
claw = nx.Graph()
for x in range(0, 4):
# add four vertices
claw.add_node(x)
hub = 0 #0-vertex is the hub of claw
for x in range(1, 4):
claw.add_edge(hub, x)
return claw
def make_co_claw():
'''
make_co_claw
assembles a co-claw
Parameters:
None
Returns:
co_claw: the co_claw (Graph)
'''
return nx.complement(make_claw())
def make_cycle(n):
'''
make_cycle
assembles a cycle with n vertices
Parameters:
n: the number of vertices in cycle (int)
Returns:
cycle: the cycle (Graph)
'''
cycle = nx.Graph()
for vertex in range(0,n):
# add all the vertices
cycle.add_node(vertex)
for vertex in range(0,n):
# add all the edges
cycle.add_edge(vertex, (vertex+1) % n)
cycle.add_edge(vertex, (vertex-1) % n)
return cycle
def make_co_cycle(n):
'''
a function the creates an complement of a cycle of size n
Parameters:
n: the size of the anti cycle
Returns:
co_cycle: a networkx graph (networkx)
'''
return nx.complement(make_cycle(n))
def make_wheel(n):
'''
make_wheel
assembles a wheel with n vertices
Parameters:
n: the number of vertices in the wheel (int)
Returns:
wheel: the wheel (networkx)
'''
wheel = make_cycle(n-1)
wheel.add_node(n-1)
for edge in range(0,n-1):
wheel.add_edge(edge,n-1)
return wheel
def join(G, H):
'''
join
a function which (complete) joins one graph G to graph H
Parameters:
G: Graph with at least one vertice (Graph)
H: Graph with at least one vertice (Graph)
Returns:
F: The join of G and H (Graph)
'''
    # add all of G's nodes and edges
F = nx.Graph()
F.add_nodes_from(G.nodes())
F.add_edges_from(G.edges())
shift = G.number_of_nodes()
# add all nodes of H
    for vertex in H.nodes():
        F.add_node(vertex + shift)
    # add all of H's edges, with node labels shifted past G's
    for e1, e2 in H.edges():
        F.add_edge(e1 + shift, e2 + shift)
# join the two sets of nodes
for v1 in G.nodes():
for v2 in H.nodes():
F.add_edge(v1,v2+shift)
return F
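# Hedged usage sketch (not part of the original module): joining a 5-cycle with
# a single vertex should yield the wheel W6; the function name is illustrative.
def _join_example():
    g = make_cycle(5)
    h = nx.Graph()
    h.add_node(0)
    w6 = join(g, h)
    return nx.is_isomorphic(w6, nx.wheel_graph(6))  # expected: True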
def make_diamond():
'''
make_diamond
assembles a diamond
Parameters:
None
Returns:
diamond: the diamond graph (networkx)
'''
diamond = nx.Graph()
for x in range(0, 4):
# add four vertices
diamond.add_node(x)
diamond.add_edge(0, 1)
diamond.add_edge(0, 2)
diamond.add_edge(0, 3)
diamond.add_edge(1, 2)
diamond.add_edge(1, 3)
return diamond
def make_co_diamond():
'''
make_co_diamond
assembles a co-diamond
Parameters:
None
Returns:
co_diamond: the co-diamond graph (networkx)
'''
return nx.complement(make_diamond())
def make_cok4():
'''
make_coK4
assembles a co-K4
Parameters:
None
Returns:
g: the co-K4 graph (networkx)
'''
g = nx.Graph()
g.add_node(0)
g.add_node(1)
g.add_node(2)
g.add_node(3)
return g
def text_to_networkx(lines):
'''
text_to_networkx
a function that takes the lines from a text file and puts into a format for
networkx graph
Parameters:
lines: a list of lines from the text file (list)
Returns:
graph: a networkx graph
'''
# try:
graph = nx.Graph()
index = 0
nodes = []
for line in lines:
# add all the nodes
entries = line.split(":")
if len(entries) == 2:
try:
node = int(entries[0])
except:
node = None
if node is None:
node = index
graph.add_node(node)
nodes.append(node)
index += 1
index = 0
for line in lines:
# add all the edges
entries = line.split(":")
if (len(entries) > 1):
entries[1] = entries[1].replace(" ", "")
edges = entries[1].split(",")
for edge in edges:
if edge != '':
graph.add_edge(nodes[index], int(edge))
index += 1
return graph
def networkx_to_text(G):
'''
a function that converts a graph G to text
Parameters:
G: the graph (networkx)
Returns:
text: the graph text (string)
'''
text = ""
for node in G.nodes():
text += str(node) + ":"
n = []
for neighbor in G.neighbors(node):
n.append(str(neighbor))
text += ",".join(n)
text += "\n"
return text
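# Hedged usage sketch (not part of the original module): the text format is one
# "node:comma-separated-neighbours" entry per line, and the two converters are
# intended to round-trip, e.g. for the claw graph.
def _text_round_trip_example():
    lines = ["0:1,2,3", "1:0", "2:0", "3:0"]
    g = text_to_networkx(lines)
    return networkx_to_text(g)  # expected: "0:1,2,3\n1:0\n2:0\n3:0\n"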
def make_clique(n):
'''
makes a clique of size n
Parameters:
n: the size of the clique (int)
Returns:
clique: the graph (networkx)
'''
clique = nx.Graph()
for v in range(0, n):
clique.add_node(v)
end = len(clique.nodes())
for target in clique.nodes():
for source in range(target+1, end):
clique.add_edge(target, source)
return clique
def make_2K2():
'''
a function which assembles a 2K2
Parameters:
None
Returns:
g: 2K2 graph (network)
'''
return nx.complement(make_cycle(4))
def make_co_twin_c5():
'''
a function to assemble a co-Twin-C5
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = make_cycle(5)
g.add_node(5)
g.add_edge(5, 0)
g.add_edge(5, 2)
g.add_edge(5, 1)
return g
def make_co_twin_house():
'''
a function to assemble a co-Twin-House
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = make_diamond()
g.add_node(4)
g.add_node(5)
g.add_edge(2, 4)
g.add_edge(3, 5)
return g
def make_co_p2_p3():
'''
a function to assemble a co p2-p3 graph
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = make_diamond()
g.add_node(4)
g.add_edge(2, 4)
g.add_edge(3, 4)
return g
def make_co_A():
'''
a function to assemble a co-A graph
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = nx.Graph()
    for i in range(0, 6):
g.add_node(i)
g.add_edge(0, 1)
g.add_edge(0, 3)
g.add_edge(0, 4)
g.add_edge(1, 2)
g.add_edge(1, 4)
g.add_edge(1, 5)
g.add_edge(2, 5)
g.add_edge(3, 4)
g.add_edge(4, 5)
return g
def make_co_R():
'''
a method to assemble a co-R graph
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = make_diamond()
g.add_node(4)
g.add_node(5)
g.add_edge(0, 4)
g.add_edge(1, 4)
g.add_edge(2, 4)
g.add_edge(3, 5)
return g
def make_bridge():
'''
a method to assemble a bridge graph
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = make_co_R()
g.add_edge(0, 5)
g.add_edge(1, 5)
return g
def forbidden_line_subgraphs():
'''
a method to assemble all 9 of the forbidden subgraphs
of line graphs
Parameters:
None
Returns:
graphs: a list of graphs (networkx)
'''
graphs = []
graphs.append(make_claw()) # claw
graphs.append(make_wheel(6)) # W5
graphs.append(make_bridge()) # Bridge
graphs.append(make_co_R()) # Co-R
graphs.append(make_co_A()) # Co-A
graphs.append(make_co_p2_p3())
graphs.append(make_co_twin_house())
graphs.append(make_co_twin_c5())
k5_e = make_clique(5)
k5_e.remove_edge(3, 4)
graphs.append(k5_e)
return graphs
import unittest
import os
class tester(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testMakeClique(self):
edges = [(0, 1), (0, 2), (1, 2)]
nodes = [0, 1, 2]
clique = make_clique(3)
self.assertEqual(edges, clique.edges(), 'Make Clique: failed on edges')
self.assertEqual(nodes, clique.nodes(), 'Make Clique: failed on nodes')
edges = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
nodes = [0, 1, 2, 3]
clique = make_clique(4)
self.assertEqual(edges, clique.edges(), 'Make Clique: failed on edges')
self.assertEqual(nodes, clique.nodes(), 'Make Clique: failed on nodes')
def testMakeDiamond(self):
g = make_diamond()
edges = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]
vertices = [0, 1, 2, 3]
self.assertEqual(edges, g.edges(), "Make Diamond: failed on edges")
self.assertEqual(vertices, g.nodes(),
"Make Diamond: failed on vertices")
def testMakeCoDiamond(self):
g = make_co_diamond()
edges = [(2, 3)]
vertices = [0, 1, 2, 3]
self.assertEqual(edges, g.edges(),
"Make Co-Diamond: failed on edges")
self.assertEqual(vertices, g.nodes(),
"Make Co-Diamond: failed on vertices")
def testMakeClaw(self):
g = make_claw()
edges = [(0, 1), (0, 2), (0, 3)]
vertices =[0, 1, 2, 3]
self.assertEqual(edges, g.edges(), "Make Claw: failed on edges")
self.assertEqual(vertices, g.nodes(), "Make Claw: failed on vertices")
def testMakeCoClaw(self):
g = make_co_claw()
edges = [(1, 2), (1, 3), (2, 3)]
vertices =[0, 1, 2, 3]
self.assertEqual(edges, g.edges(), "Make Co-Claw: failed on edges")
self.assertEqual(vertices, g.nodes(),
"Make Co-Claw: failed on vertices")
def testMakeCycle(self):
g = make_cycle(3)
edges = [(0,1), (0,2), (1,2)]
vertices = [0, 1, 2]
self.assertEqual(edges, g.edges(), "Make Cycle: failed on edges")
self.assertEqual(vertices, g.nodes(), "Make Cycle: failed on vertices")
def testJoin(self):
# wheel test
g = make_cycle(5)
h = nx.Graph()
h.add_node(0)
f = join(g, h)
expect = nx.wheel_graph(6) # expect a wheel
self.assertEqual(expect.nodes(), f.nodes(),
" Join: nodes failed on wheel test")
self.assertEqual(nx.is_isomorphic(f, expect), True,
" Join: edges failed on wheel test")
# join of two trianges = K6
g = nx.complete_graph(3)
h = nx.complete_graph(3)
f = join(g, h)
expect = nx.complete_graph(6)
self.assertEqual(expect.nodes(), f.nodes(),
"Join: nodes failed for K6 test")
self.assertEqual(nx.is_isomorphic(f, expect), True,
" Join: edges failed on wheel K6 test")
def testWheel(self):
# w5
w = make_wheel(5)
g = make_cycle(4)
        g.add_node(4)
g.add_edge(0,4)
g.add_edge(1,4)
g.add_edge(2,4)
g.add_edge(3,4)
self.assertEqual(w.edges(), g.edges(), "Make wheel: Failed for W5 test")
def testTextToNetworkx(self):
directory = os.getcwd()
while "inducer" in directory:
directory = os.path.dirname(directory)
claw = make_claw()
c7 = make_cycle(7)
co_claw = make_co_claw()
tests = {'test1.txt': claw, 'test2.txt': c7, 'test3.txt': co_claw}
for f, expect in tests.items():
filepath = os.path.join(directory, "tests", f)
            with open(filepath) as test_file:
                content = test_file.read()
lines = content.replace("\r", "")
lines = lines.split("\n")
result = text_to_networkx(lines)
                self.assertEqual(expect.nodes(), result.nodes(),
                                 "Text to Networkx Failed Nodes: %s" % f)
                self.assertEqual(expect.edges(), result.edges(),
                                 "Text to Networkx Failed Edges: %s" % f)
def testNetworkxToText(self):
g = make_claw()
text = networkx_to_text(g)
self.assertEqual("0:1,2,3\n1:0\n2:0\n3:0\n", text)
g = make_diamond()
text = networkx_to_text(g)
self.assertEqual("0:1,2,3\n1:0,2,3\n2:0,1\n3:0,1\n", text)
def testMakeCoK4(self):
cok4 = make_cok4()
self.assertEqual(cok4.nodes(), [0, 1, 2, 3])
self.assertEqual(cok4.edges(), [])
def testMake2K2(self):
g = make_2K2()
expect = [0, 1, 2, 3]
self.assertEqual(g.nodes(), expect)
expect = [(0, 2), (1, 3)]
self.assertEqual(g.edges(), expect)
def testMakeCoTwinC5(self):
result = make_co_twin_c5()
self.assertEqual(len(result.nodes()), 6)
expect = [(0, 1), (0, 4), (0, 5), (1, 2), (1, 5),
(2, 3), (2, 5), (3, 4)]
self.assertEqual(expect, result.edges())
def testMakeCoTwinHouse(self):
result = make_co_twin_house()
self.assertEqual(len(result.nodes()), 6)
expect = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 4), (3, 5)]
self.assertEqual(expect, result.edges())
def testMakeCoP2P3(self):
result = make_co_p2_p3()
self.assertEqual(len(result.nodes()), 5)
expect = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 4), (3, 4)]
self.assertEqual(expect, result.edges())
def testMakeCoA(self):
result = make_co_A()
self.assertEqual(len(result.nodes()), 6)
expect = [(0, 1), (0, 3), (0, 4), (1, 2), (1, 4),
(1, 5), (2, 5), (3, 4), (4, 5)]
self.assertEqual(result.edges(), expect)
def testMakeCoR(self):
result = make_co_R()
self.assertEqual(len(result.nodes()), 6)
expect = [(0, 1), (0, 2), (0, 3), (0, 4), (1, 2),
(1, 3), (1, 4), (2, 4), (3, 5)]
self.assertEqual(result.edges(), expect)
def testMakeBridge(self):
result = make_bridge()
self.assertEqual(len(result.nodes()), 6)
expect = [(0, 1), (0, 2), (0, 3), (0, 4),(0, 5), (1, 2),
(1, 3), (1, 4),(1, 5), (2, 4), (3, 5)]
self.assertEqual(result.edges(), expect)
def testForbiddenLineSubgraphs(self):
result = forbidden_line_subgraphs()
self.assertEqual(len(result), 9)
|
the-stack_0_15961 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for cinder.volume.rpcapi
"""
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder.openstack.common import jsonutils
from cinder.openstack.common import rpc
from cinder import test
from cinder.volume import rpcapi as volume_rpcapi
CONF = cfg.CONF
class VolumeRpcAPITestCase(test.TestCase):
def setUp(self):
super(VolumeRpcAPITestCase, self).setUp()
self.context = context.get_admin_context()
vol = {}
vol['host'] = 'fake_host'
vol['availability_zone'] = CONF.storage_availability_zone
vol['status'] = "available"
vol['attach_status'] = "detached"
vol['metadata'] = {"test_key": "test_val"}
volume = db.volume_create(self.context, vol)
        snapshot_dict = {
'volume_id': 'fake_id',
'status': "creating",
'progress': '0%',
'volume_size': 0,
'display_name': 'fake_name',
'display_description': 'fake_description'}
        snapshot = db.snapshot_create(self.context, snapshot_dict)
self.fake_volume = jsonutils.to_primitive(volume)
self.fake_volume_metadata = volume["volume_metadata"]
self.fake_snapshot = jsonutils.to_primitive(snapshot)
def test_serialized_volume_has_id(self):
self.assertIn('id', self.fake_volume)
def _test_volume_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
if 'rpcapi_class' in kwargs:
rpcapi_class = kwargs['rpcapi_class']
del kwargs['rpcapi_class']
else:
rpcapi_class = volume_rpcapi.VolumeAPI
rpcapi = rpcapi_class()
expected_retval = 'foo' if method == 'call' else None
expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
if 'request_spec' in kwargs:
spec = jsonutils.to_primitive(kwargs['request_spec'])
kwargs['request_spec'] = spec
expected_msg = rpcapi.make_msg(method, **kwargs)
if 'volume' in expected_msg['args']:
volume = expected_msg['args']['volume']
del expected_msg['args']['volume']
expected_msg['args']['volume_id'] = volume['id']
if 'snapshot' in expected_msg['args']:
snapshot = expected_msg['args']['snapshot']
del expected_msg['args']['snapshot']
expected_msg['args']['snapshot_id'] = snapshot['id']
if 'host' in expected_msg['args']:
del expected_msg['args']['host']
if 'dest_host' in expected_msg['args']:
dest_host = expected_msg['args']['dest_host']
dest_host_dict = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
del expected_msg['args']['dest_host']
expected_msg['args']['host'] = dest_host_dict
if 'new_volume' in expected_msg['args']:
volume = expected_msg['args']['new_volume']
del expected_msg['args']['new_volume']
expected_msg['args']['new_volume_id'] = volume['id']
expected_msg['version'] = expected_version
if 'host' in kwargs:
host = kwargs['host']
else:
host = kwargs['volume']['host']
expected_topic = '%s:%s' % (CONF.volume_topic, host)
self.fake_args = None
self.fake_kwargs = None
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
self.stubs.Set(rpc, rpc_method, _fake_rpc_method)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
expected_args = [ctxt, expected_topic, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
def test_create_volume(self):
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume,
host='fake_host1',
request_spec='fake_request_spec',
filter_properties='fake_properties',
allow_reschedule=True,
snapshot_id='fake_snapshot_id',
image_id='fake_image_id',
source_volid='fake_src_id',
version='1.4')
def test_create_volume_serialization(self):
request_spec = {"metadata": self.fake_volume_metadata}
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume,
host='fake_host1',
request_spec=request_spec,
filter_properties='fake_properties',
allow_reschedule=True,
snapshot_id='fake_snapshot_id',
image_id='fake_image_id',
source_volid='fake_src_id',
version='1.4')
def test_delete_volume(self):
self._test_volume_api('delete_volume',
rpc_method='cast',
volume=self.fake_volume)
def test_create_snapshot(self):
self._test_volume_api('create_snapshot',
rpc_method='cast',
volume=self.fake_volume,
snapshot=self.fake_snapshot)
def test_delete_snapshot(self):
self._test_volume_api('delete_snapshot',
rpc_method='cast',
snapshot=self.fake_snapshot,
host='fake_host')
def test_attach_volume_to_instance(self):
self._test_volume_api('attach_volume',
rpc_method='call',
volume=self.fake_volume,
instance_uuid='fake_uuid',
host_name=None,
mountpoint='fake_mountpoint',
mode='ro',
version='1.11')
def test_attach_volume_to_host(self):
self._test_volume_api('attach_volume',
rpc_method='call',
volume=self.fake_volume,
instance_uuid=None,
host_name='fake_host',
mountpoint='fake_mountpoint',
mode='rw',
version='1.11')
def test_detach_volume(self):
self._test_volume_api('detach_volume',
rpc_method='call',
volume=self.fake_volume)
def test_copy_volume_to_image(self):
self._test_volume_api('copy_volume_to_image',
rpc_method='cast',
volume=self.fake_volume,
image_meta={'id': 'fake_image_id',
'container_format': 'fake_type',
'disk_format': 'fake_type'},
version='1.3')
def test_initialize_connection(self):
self._test_volume_api('initialize_connection',
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector')
def test_terminate_connection(self):
self._test_volume_api('terminate_connection',
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector',
force=False)
def test_accept_transfer(self):
self._test_volume_api('accept_transfer',
rpc_method='cast',
volume=self.fake_volume,
new_user='e5565fd0-06c8-11e3-'
'8ffd-0800200c9b77',
new_project='e4465fd0-06c8-11e3'
'-8ffd-0800200c9a66',
version='1.9')
def test_extend_volume(self):
self._test_volume_api('extend_volume',
rpc_method='cast',
volume=self.fake_volume,
new_size=1,
version='1.6')
def test_migrate_volume(self):
class FakeHost(object):
def __init__(self):
self.host = 'host'
self.capabilities = {}
dest_host = FakeHost()
self._test_volume_api('migrate_volume',
rpc_method='cast',
volume=self.fake_volume,
dest_host=dest_host,
force_host_copy=True,
version='1.8')
def test_migrate_volume_completion(self):
self._test_volume_api('migrate_volume_completion',
rpc_method='call',
volume=self.fake_volume,
new_volume=self.fake_volume,
error=False,
version='1.10')
|
the-stack_0_15967 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Dataset utils for the Learned Interpreters framework."""
import collections
import dataclasses
from typing import Any, Optional
from absl import logging
import jax
import jax.numpy as jnp
import six
import tensorflow as tf
import tensorflow_datasets as tfds
import tree
from ipagnn.datasets import datasets # pylint: disable=unused-import
@dataclasses.dataclass
class DatasetInfo:
dataset: Any = None
generator: Any = None
environment: Any = None
info: Optional[Any] = None # info_lib.LearnedInterpretersDatasetInfo
set_task: Any = None # Callable[[TaskFn, int], Any] = None
def _default_padding_value(dtype):
"""Gets the default value for the given dtype for padding.
Args:
dtype: A tensorflow dtype.
Returns:
A default (zero) value for the given type.
"""
if dtype == tf.string:
return ' '
elif dtype == tf.int64:
return tf.constant(0, dtype=tf.int64)
elif dtype == tf.int32:
return tf.constant(0, dtype=tf.int32)
elif dtype == tf.float32:
return tf.constant(0.0, dtype=tf.float32)
elif dtype == tf.float64:
return tf.constant(0.0, dtype=tf.float64)
elif dtype == tf.bool:
return tf.constant(False, dtype=tf.bool)
else:
raise ValueError('Unexpected type.', dtype)
def verify_reasonable_dataset(dataset_name, info, config):
"""Verifies that the dataset configs are at least reasonable.
For example, if the max_length is set too low such that every example would be
filtered out, we catch that here.
This lets us fail fast if we accidentally put in configs that will lead to all
examples being filtered out, rather than silently succeeding but never making
progress.
Args:
dataset_name: The name of the dataset being loaded.
info: The dataset info object.
config: The config for the model.
"""
if dataset_name.startswith('control_flow_programs'):
# TODO(dbieber): Move this logic into the dataset definition.
length = info.program_generator_config.length
tokens_per_statement = info.program_encoder.tokens_per_statement
assert (
not config.dataset.max_length
or config.dataset.max_length >= tokens_per_statement * length)
def cannot_set_task(**kwargs):
"""Use this as the set_task fn when no curriculum is permitted."""
del kwargs # Unused.
raise ValueError('The task cannot be changed. This is probably because the '
'data is being loaded from disk, rather than generated '
'at training time.')
def get_split(config):
"""Select the default split according to the config.
Args:
config: (ml_collections.ConfigDict) The experimental config.
Returns:
The TFDS split for the experimental setup indicated by the config.
"""
splits = {
'train': 'train[:70%]',
'valid': 'train[70%:90%]',
'test': 'train[90%:]',
}
if config.dataset.split == 'default':
split_name = 'valid' if config.runner.mode.startswith('eval') else 'train'
split = splits[split_name]
elif config.dataset.split in splits:
split = splits[config.dataset.split]
else:
raise ValueError('Unexpected split.')
return split
def get_dataset(data_dir, config, dataset_name=None):
"""The training dataset for the code model for fault localization.
Args:
data_dir: The data directory to use with tfds.load.
config: The config for the model.
dataset_name: If set, use this dataset name in place of the one from the
config.
Returns:
train_dataset: The tf.data.Dataset with batched examples.
info: The DatasetInfo object containing the feature connectors and other
info about the dataset.
"""
dataset_name = dataset_name or config.dataset.name
split = get_split(config)
version = (
None if config.dataset.version == 'default' else config.dataset.version)
# If in interact mode, use an interactive dataset.
if config.runner.mode == 'interact':
dbuilder = tfds.builder(
dataset_name, data_dir=data_dir, version=version)
unused_split_generators = dbuilder._split_generators(dl_manager=None) # pylint: disable=protected-access
info = dbuilder.info
info._builder.set_representation(config.dataset.representation) # pylint: disable=protected-access
assert config.dataset.batch_size == 1
dataset = make_interactive_dataset(info, config)
if config.dataset.batch:
dataset = apply_batching(dataset, info, config)
set_task = cannot_set_task
return DatasetInfo(
dataset=dataset,
info=info,
set_task=set_task
)
# Load the dataset.
if config.dataset.in_memory:
dbuilder = tfds.builder(
dataset_name, data_dir=data_dir, version=version)
unused_split_generators = dbuilder._split_generators(dl_manager=None) # pylint: disable=protected-access
dataset, set_task = dbuilder.as_in_memory_dataset(split='all')
info = dbuilder.info
else:
name = dataset_name
if version is not None:
name = f'{name}:{version}'
dataset, info = tfds.load(
name=name, split=split,
data_dir=data_dir,
# batch_size=config.dataset.batch_size,
with_info=True)
set_task = cannot_set_task
info._builder.set_representation(config.dataset.representation) # pylint: disable=protected-access
verify_reasonable_dataset(dataset_name, info, config)
dataset = dataset.repeat()
dataset = apply_filtering(dataset, info, config)
if config.dataset.batch:
dataset = apply_batching(dataset, info, config)
return DatasetInfo(
dataset=dataset,
info=info,
set_task=set_task,
)
def apply_filtering(dataset, info, config):
del info # Unused.
# TODO(dbieber): Reinstate filtering, but refactor it.
# if config.dataset.max_length:
# dataset = dataset.filter(
# lambda x: x[info._builder.key('length')] <= config.dataset.max_length) # pylint: disable=protected-access
if config.dataset.max_examples:
dataset = dataset.take(config.dataset.max_examples)
return dataset
def apply_sharding(generator, stack_fn, shape_fn):
"""Shards a dataset with a device dimension.
Args:
generator: Yields pytrees of numpy arrays.
stack_fn: Applied to each example before stacking.
shape_fn: Applied to each example to determine which examples to group.
Examples with the same shape are grouped.
Returns:
A new generator where each leaf now has a leading device axis.
"""
def generator_fn():
used_shapes = set()
examples_by_shapes = collections.defaultdict(list)
for example in generator:
shapes = shape_fn(example)
if shapes not in used_shapes and shapes not in examples_by_shapes:
logging.info('New shape started: %s', shapes)
examples_by_shapes[shapes].append(example)
if len(examples_by_shapes[shapes]) == jax.local_device_count():
stacked_examples = tree.map_structure(
lambda *x: jnp.stack(x, axis=0),
*[stack_fn(example) for example in examples_by_shapes[shapes]]
)
yield stacked_examples, examples_by_shapes[shapes]
examples_by_shapes[shapes] = []
if shapes not in used_shapes:
logging.info('New shape finished: %s', shapes)
used_shapes.add(shapes)
return generator_fn()
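# Hedged usage sketch (not part of the original module): sharding a generator of
# feature dicts across the local devices; the 'tokens' field name is a
# hypothetical example, not something this module requires.  Each yielded item
# is a (stacked_examples, list_of_raw_examples) pair.
def _apply_sharding_example(example_generator):
  return apply_sharding(
      example_generator,
      stack_fn=lambda example: example,  # stack the whole example pytree
      shape_fn=lambda example: example['tokens'].shape)  # group by token shape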
def apply_batching(dataset, info, config):
"""Applies standard batching to the dataset."""
del info # Unused.
padded_shapes = tree.map_structure(
lambda items: [None] * len(items),
tf.compat.v1.data.get_output_shapes(dataset))
padding_values = tree.map_structure(
_default_padding_value,
tf.compat.v1.data.get_output_types(dataset))
dataset = dataset.padded_batch(
config.dataset.batch_size, padded_shapes, padding_values,
drop_remainder=True)
return dataset
def dataset_from_generator(generator_fn, info, config):
"""Creates a dataset from a given generator fn."""
del config # Unused.
dtype = info.features.dtype
shape = info.features.shape
dataset = tf.data.Dataset.from_generator(generator_fn, dtype, shape)
return dataset
def _example_from_string(code, info):
example_dict = info._builder.generate_example_from_string(code) # pylint: disable=protected-access
encoded_example = info.features.encode_example(example_dict)
decoded_example = info.features.decode_example(encoded_example)
return decoded_example
def make_interactive_dataset(info, config):
"""Makes a dataset from interactively provided examples."""
logging.info('Generating dataset interactively. batch_size=%d',
config.dataset.batch_size)
def generator_fn():
while True:
example_str = six.moves.input()
if not example_str:
break
try:
yield _example_from_string(example_str, info)
except Exception as e: # pylint: disable=broad-except
logging.info('Encountered error in _example_from_string: %s', e)
return dataset_from_generator(generator_fn, info, config)
|
the-stack_0_15969 | from django.shortcuts import render
from django.core.paginator import Paginator
from .models import Question
from stack_overclone.error_views import page_not_found_view
def index_view(request):
query_set = Question.objects.get_new()
page = paginate(query_set, request)
data = {
'questions': page.object_list,
'page': page
}
return render(request, 'index.html', context=data)
def hot_questions_view(request):
query_set = Question.objects.get_top_rated()
page = paginate(query_set, request)
data = {
'questions': page.object_list,
'page': page
}
return render(request, 'hot_questions.html', context=data)
def questions_by_tag_view(request, tag):
query_set = Question.objects.get_by_tag(tag)
if query_set.count() == 0:
return page_not_found_view(request, 'No such tag')
page = paginate(query_set, request)
data = {
'tag': tag,
'questions': page.object_list,
'page': page
}
return render(request, 'questions_by_tag.html', context=data)
def question_and_answers_view(request, question_id):
try:
question = Question.objects.get(id=question_id)
except Exception:
return page_not_found_view(request, 'No such question')
query_set = question.get_answers()
page = paginate(query_set, request)
data = {
'question': question,
'answers': page.object_list,
'page': page
}
return render(request, 'question_and_answers.html', context=data)
def ask_question_view(request):
return render(request, 'ask_question.html')
def paginate(query_set, request, per_page=20):
paginator = Paginator(query_set, per_page)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return page
from django import template
register = template.Library()
@register.filter(name='add')
def add(value, arg):
return int(value) + int(arg)
|
the-stack_0_15970 | import random
import math
lower = int(input("Enter lower bound: - "))
upper = int (input("Enter higher bound: - "))
x = random.randint(lower, upper)
allowedGuess = round(math.log(upper - lower + 1, 2))
print("\n\tYou only have ", allowedGuess, " chances to guess the integer!\n")
guessCount = 0
while guessCount < allowedGuess:
guessCount += 1
guess = int (input("Guess a number:- "))
if x == guess:
if x == 1:
print("Congratulations you guessed the number in 1 try")
else:
print("Congratulations you guessed the number in ", guessCount, " tries")
break
elif x > guess:
print("You guessed too small!")
elif x < guess:
print("You guessed too high!")
if guessCount >= allowedGuess:
print("\nThe number is %d" % x)
print("\tBetter luck next time!") |
the-stack_0_15971 | #!/usr/bin/python3
import getopt
import os
import sys
import json
usage = """USAGE: $ python3 save_codes.py https://atcoder.jp/contests/abc244/tasks/abc244_a bin/out/v220314.py
Options:
-h --help print this help and exit
"""
opt_list = {"help": "h"}
json_path = "./bin/codes.json"
def kill(s, status):
if s != "":
print("[\033[31m!\033[m]", s, file=sys.stderr)
print(usage, end="", file=sys.stderr)
sys.exit(status)
try:
opts, args = getopt.getopt(sys.argv[1:], "".join(opt_list.values()), list(opt_list.keys()))
except getopt.GetoptError as e:
kill(e, 2)
for o, v in opts:
for opt_long, opt_short in opt_list.items():
# Shorten
if opt_long in o:
o = "-" + opt_short
break
print(f"[\033[34m#\033[m] o: {o}, v: {v}", file=sys.stderr)
if o == "-h":
kill("", 0)
if len(args) != 2:
kill(f"len(args): {len(args)}", 2)
url, src_path = args
if not url:
kill("url is empty", 2)
if not src_path:
kill("src_path is empty", 2)
print("[\033[34m#\033[m] url:", url, file=sys.stderr)
src_suffix = src_path.split(".")[-1]
f = open(src_path, "r")
lines = f.read().splitlines()
while lines[-1] == "":
lines.pop()
if not os.path.isfile(json_path):
# Create json file
with open(json_path, "w") as f:
json.dump({}, f)
# Input
with open(json_path, "r") as f:
codes_json = json.load(f)
# {
# "https://atcoder.jp/contests/abc244/tasks/abc244_a": {
# "bin/out/v220314.py": [
# "n = int(input())",
# "s = input()",
# "print(s[-1])",
# ]
# }
# }
# Add
di = codes_json.get(url, {})
di[src_path] = lines
codes_json[url] = di
# Output
# Options: ensure_ascii=False, indent=2, sort_keys=True
with open(json_path, "w") as f:
json.dump(codes_json, f, ensure_ascii=False, indent=2)
print(f"[\033[32m+\033[m] {src_path} -> {json_path}", file=sys.stderr)
|
the-stack_0_15972 | """
@brief test log(time=3s)
"""
import sys
import os
import unittest
import warnings
import pandas
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.ensae_teaching_cs.data import google_trends, twitter_zip
class TestDataWeb(unittest.TestCase):
def test_google_trends_macron(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_google_trends_macron")
text = google_trends(local=True, filename=False)
assert text is not None
name = google_trends(local=True, filename=True)
assert name.endswith("macron.csv")
try:
text2 = google_trends(
local=False, cache_folder=temp, filename=False)
except ConnectionResetError as e:
warnings.warn("Cannot check remote marathon.txt.\n" + str(e))
return
assert text2 is not None
self.assertEqual(len(text), len(text2))
self.maxDiff = None
self.assertEqual(text, text2)
def test_twitter_zip(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_twitter_zip")
try:
twitter_zip(local=True, filename=False,
unzip=False, cache_folder=temp)
assert False
except ValueError:
pass
name = twitter_zip(local=True, filename=True, as_df=False, unzip=False)
assert name.endswith("tweets_macron_sijetaispresident_201609.zip")
try:
text2 = twitter_zip(
local=False, cache_folder=temp, filename=False, unzip=True, as_df=True)
except ConnectionResetError as e:
warnings.warn("Cannot check remote.\n" + str(e))
return
assert isinstance(text2, pandas.DataFrame)
fLOG(text2.columns)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_15973 | # -*- coding: utf-8 -*-
"""Test database functionality."""
import os
import shutil
import sqlite3
import tempfile
import unittest
from contextlib import closing
from datetime import datetime
from dateutil.tz import tzutc
from mock import patch
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.types import (
INTEGER,
TEXT,
)
from pic2map.db import (
Database,
LocationDB,
transform_metadata_to_row,
)
class DatabaseTest(unittest.TestCase):
"""Database wrapper test cases."""
def test_get_table_metadata(self):
"""Table metadata can be retrieved using index notation."""
with tempfile.NamedTemporaryFile() as db_file:
with closing(sqlite3.connect(db_file.name)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE messages (id INTEGER, message TEXT)')
database = Database(db_file.name)
table = database['messages']
schema = {column.name: type(column.type)
for column in table.columns}
self.assertDictEqual(
schema,
{'id': INTEGER, 'message': TEXT})
def test_get_unknown_table_metadata(self):
"""NoSuchTableError raised when table name is not found."""
with tempfile.NamedTemporaryFile() as db_file:
with closing(sqlite3.connect(db_file.name)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE messages (id INTEGER, message TEXT)')
database = Database(db_file.name)
with self.assertRaises(NoSuchTableError):
database['unknown']
def test_type_error_on_wrong_table_name(self):
"""TypeError raised when table name is not a string."""
with tempfile.NamedTemporaryFile() as db_file:
with closing(sqlite3.connect(db_file.name)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE messages (id INTEGER, message TEXT)')
database = Database(db_file.name)
with self.assertRaises(TypeError):
database[0]
def test_context_manager(self):
"""Connection is opened/closed when used as a context manager."""
database = Database(':memory:')
# Connection is None when database object is created
self.assertIsNone(database.connection)
with database:
# Connection is not closed inside the context
self.assertFalse(database.connection.closed)
# Connection is closed outside the context
self.assertTrue(database.connection.closed)
class LocationDBTest(unittest.TestCase):
"""Location database tests."""
def setUp(self):
"""Create temporary directory."""
self.directory = tempfile.mkdtemp()
self.base_directory_patcher = patch('pic2map.db.BaseDirectory')
base_directory = self.base_directory_patcher.start()
base_directory.save_data_path.return_value = self.directory
def tearDown(self):
"""Remove temporary directory."""
self.base_directory_patcher.stop()
shutil.rmtree(self.directory)
def test_database_exists(self):
"""Database not create if exists."""
filename = os.path.join(self.directory, 'location.db')
with closing(sqlite3.connect(filename)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE location (column_1 TEXT, column_2 TEXT)')
location_db = LocationDB()
self.assertListEqual(
location_db.location_table.columns.keys(),
['column_1', 'column_2'],
)
def test_create_database(self):
"""Create database file."""
LocationDB()
filename = os.path.join(self.directory, 'location.db')
self.assertTrue(os.path.isfile(filename))
def test_insert(self):
"""Insert records in database."""
rows = [
{
'filename': 'a.jpg',
'latitude': 1.2,
'longitude': 2.1,
'datetime': datetime(2015, 1, 1, 12, 34, 56)
},
{
'filename': 'b.jpg',
'latitude': 3.4,
'longitude': 4.3,
'datetime': datetime(2015, 1, 1, 12, 34, 56)
},
]
with LocationDB() as location_db:
location_db.insert(rows)
filename = os.path.join(self.directory, 'location.db')
with closing(sqlite3.connect(filename)) as connection:
with closing(connection.cursor()) as cursor:
result = cursor.execute('SELECT COUNT(*) FROM location')
self.assertListEqual(result.fetchall(), [(2,)])
def test_select_all(self):
"""Select all rows from location table."""
filename = os.path.join(self.directory, 'location.db')
with closing(sqlite3.connect(filename)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE location (name TEXT)')
cursor.execute(
'INSERT INTO location VALUES ("Hello world!")')
connection.commit()
with LocationDB() as location_db:
result = location_db.select_all()
rows = result.fetchall()
self.assertEqual(len(rows), 1)
row = rows[0]
self.assertSequenceEqual(row, (u'Hello world!',))
def test_remove(self):
"""Delete rows for files under a given directory."""
file_count = 10
filename = os.path.join(self.directory, 'location.db')
with closing(sqlite3.connect(filename)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE location (filename TEXT)')
for directory in ['a', 'b']:
for index in range(file_count):
cursor.execute(
'INSERT INTO location VALUES ("{}/{}.jpg")'
.format(directory, index))
connection.commit()
with LocationDB() as location_db:
result = location_db.delete('a')
self.assertEqual(result.rowcount, file_count)
def test_count(self):
"""Count rows in database."""
file_count = 10
filename = os.path.join(self.directory, 'location.db')
with closing(sqlite3.connect(filename)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE location (filename TEXT)')
for index in range(file_count):
cursor.execute(
'INSERT INTO location VALUES ("{}.jpg")'.format(index))
connection.commit()
with LocationDB() as location_db:
result = location_db.count()
self.assertEqual(result, file_count)
class TransformMetadataToRowTest(unittest.TestCase):
"""EXIF metadata to database row transformation tests."""
def test_transform_metadata(self):
"""Transform metadata to row."""
metadata = {
'SourceFile': 'a.jpg',
'EXIF:GPSLatitude': 1.2,
'EXIF:GPSLatitudeRef': 'N',
'EXIF:GPSLongitude': 2.1,
'EXIF:GPSLongitudeRef': 'E',
'EXIF:GPSDateStamp': '2015:01:01',
'EXIF:GPSTimeStamp': '12:34:56',
}
expected_row = {
'filename': 'a.jpg',
'latitude': 1.2,
'longitude': 2.1,
'datetime': datetime(2015, 1, 1, 12, 34, 56, tzinfo=tzutc()),
}
row = transform_metadata_to_row(metadata)
self.assertEqual(row, expected_row)
def test_transform_metadata_negative(self):
"""Transform metadata with negative latitude/longitude to row."""
metadata = {
'SourceFile': 'a.jpg',
'EXIF:GPSLatitude': 1.2,
'EXIF:GPSLatitudeRef': 'S',
'EXIF:GPSLongitude': 2.1,
'EXIF:GPSLongitudeRef': 'W',
'EXIF:GPSDateStamp': '2015:01:01',
'EXIF:GPSTimeStamp': '12:34:56',
}
expected_row = {
'filename': 'a.jpg',
'latitude': -1.2,
'longitude': -2.1,
'datetime': datetime(2015, 1, 1, 12, 34, 56, tzinfo=tzutc()),
}
row = transform_metadata_to_row(metadata)
self.assertEqual(row, expected_row)
def test_transform_metadata_no_datetime(self):
"""Transform metadata to row."""
metadata = {
'SourceFile': 'a.jpg',
'EXIF:GPSLatitude': 1.2,
'EXIF:GPSLatitudeRef': 'N',
'EXIF:GPSLongitude': 2.1,
'EXIF:GPSLongitudeRef': 'E',
}
expected_row = {
'filename': 'a.jpg',
'latitude': 1.2,
'longitude': 2.1,
'datetime': None,
}
row = transform_metadata_to_row(metadata)
self.assertEqual(row, expected_row)
|
the-stack_0_15974 | import _plotly_utils.basevalidators
class FillcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name='fillcolor', parent_name='scatterpolargl', **kwargs
):
super(FillcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
the-stack_0_15976 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import sys
import urllib.request
import urllib.parse
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError, LineBotApiError
)
from linebot.models import (
CarouselColumn, CarouselTemplate, FollowEvent,
LocationMessage, MessageEvent, TemplateSendMessage,
TextMessage, TextSendMessage, UnfollowEvent, URITemplateAction
)
# TODO: add a menu button for sending location information
# TODO: reference static files relatively via a web server
# get api_key, channel_secret and channel_access_token from environment variable
GNAVI_API_KEY = os.getenv('GNAVI_API_KEY')
CHANNEL_SECRET = os.getenv('LINE_CHANNEL_SECRET')
CHANNEL_ACCESS_TOKEN = os.getenv('LINE_CHANNEL_ACCESS_TOKEN')
BOT_SERVER_URL = os.getenv('BOT_SERVER_URL')
os.environ['http_proxy'] = os.getenv('FIXIE_URL')
os.environ['https_proxy'] = os.getenv('FIXIE_URL')
if GNAVI_API_KEY is None:
print('Specify GNAVI_API_KEY as environment variable.')
sys.exit(1)
if CHANNEL_SECRET is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if CHANNEL_ACCESS_TOKEN is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
if BOT_SERVER_URL is None:
print('Specify BOT_SERVER_URL as environment variable.')
sys.exit(1)
if os.getenv('FIXIE_URL') is None:
print('Specify FIXIE_URL as environment variable.')
sys.exit(1)
# instantiation
# TODO: move instance creation out of the global scope into a factory method
# TODO: decide whether to create a globally accessible api_caller or one per call
app = Flask(__name__)
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(CHANNEL_SECRET)
RESTSEARCH_URL = "https://api.gnavi.co.jp/RestSearchAPI/v3/"
DEF_ERR_MESSAGE = """
申し訳ありません、データを取得できませんでした。
少し時間を空けて、もう一度試してみてください。
"""
NO_HIT_ERR_MESSAGE = "お近くにぐるなびに登録されている喫茶店はないようです" + chr(0x100017)
LINK_TEXT = "ぐるなびで見る"
FOLLOWED_RESPONSE = "フォローありがとうございます。位置情報を送っていただくことで、お近くの喫茶店をお伝えします" + chr(0x100059)
def call_restsearch(latitude, longitude):
query = {
"keyid": GNAVI_API_KEY,
"latitude": latitude,
"longitude": longitude,
        # TODO: generate category_s dynamically
"category_s": "RSFST18008,RSFST18009,RSFST18010,RSFST18011,RSFST18012"
        # TODO: support changing hit_per_page and offset (e.g., make them configurable, or fall back to the browser when there are too many hits)
        # TODO: make the search range selectable through user actions
# "range": search_range
}
params = urllib.parse.urlencode(query, safe=",")
response = urllib.request.urlopen(RESTSEARCH_URL + "?" + params).read()
result = json.loads(response)
if "error" in result:
if "message" in result:
raise Exception("{}".format(result["message"]))
else:
raise Exception(DEF_ERR_MESSAGE)
total_hit_count = result.get("total_hit_count", 0)
if total_hit_count < 1:
raise Exception(NO_HIT_ERR_MESSAGE)
return result
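# Hedged usage sketch (not part of the original module): the coordinates are
# illustrative (near Tokyo Station); requires a valid GNAVI_API_KEY and network
# access, and returns just the shop names from the raw API response.
def _restsearch_example():
    result = call_restsearch(35.681236, 139.767125)
    return [rest.get("name", "") for rest in result.get("rest", [])]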
@app.route("/callback", methods=['POST'])
def callback():
signature = request.headers['X-Line-Signature']
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
except LineBotApiError as e:
app.logger.exception(f'LineBotApiError: {e.status_code} {e.message}', e)
raise e
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_text_message(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=event.message.text)
)
# TODO: add proper exception handling
@handler.add(MessageEvent, message=LocationMessage)
def handle_location_message(event):
user_lat = event.message.latitude
user_longit = event.message.longitude
cafe_search_result = call_restsearch(user_lat, user_longit)
print("cafe_search_result is: {}".format(cafe_search_result))
response_json_list = []
# process result
for (count, rest) in enumerate(cafe_search_result.get("rest")):
        # TODO: filter the results by holiday and opening hours
access = rest.get("access", {})
access_walk = "徒歩 {}分".format(access.get("walk", ""))
holiday = "定休日: {}".format(rest.get("holiday", ""))
        image_url = rest.get("image_url", {})
        image1 = image_url.get("shop_image1", "")
        if image1 == "":
            # fall back to the placeholder thumbnail served by this bot
            image1 = BOT_SERVER_URL + "/static/thumbnail_template.jpg"
name = rest.get("name", "")
opentime = "営業時間: {}".format(rest.get("opentime", ""))
# pr = rest.get("pr", "")
# pr_short = pr.get("pr_short", "")
url = rest.get("url", "")
result_text = opentime + "\n" + holiday + "\n" + access_walk + "\n"
if len(result_text) > 60:
result_text = result_text[:56] + "..."
result_dict = {
"thumbnail_image_url": image1,
"title": name,
# "text": pr_short + "\n" + opentime + "\n" + holiday + "\n"
# + access_walk + "\n",
"text": result_text,
"actions": {
"label": "ぐるなびで見る",
"uri": url
}
}
response_json_list.append(result_dict)
print("response_json_list is: {}".format(response_json_list))
columns = [
CarouselColumn(
thumbnail_image_url=column["thumbnail_image_url"],
title=column["title"],
text=column["text"],
actions=[
URITemplateAction(
label=column["actions"]["label"],
uri=column["actions"]["uri"],
)
]
)
for column in response_json_list
]
    # TODO: implement a link to Google Maps
messages = TemplateSendMessage(
alt_text="喫茶店の情報をお伝えしました",
template=CarouselTemplate(columns=columns),
)
print("messages is: {}".format(messages))
line_bot_api.reply_message(
event.reply_token,
messages=messages
)
@handler.add(FollowEvent)
def handle_follow(event):
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=FOLLOWED_RESPONSE)
)
@handler.add(UnfollowEvent)
def handle_unfollow():
app.logger.info("Got Unfollow event")
if __name__ == "__main__":
# arg_parser = ArgumentParser(
# usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
# )
# arg_parser.add_argument('-p', '--port', type=int, default=8000, help='port')
# arg_parser.add_argument('-d', '--debug', default=False, help='debug')
# options = arg_parser.parse_args()
#
# app.run(debug=options.debug, port=options.port)
port = int(os.getenv("PORT", 5000))
app.run(host="0.0.0.0", port=port)
|
the-stack_0_15977 | from .droppedKey import DroppedKey
from .items import *
class Seashell(DroppedKey):
# Thanks to patches, a seashell is just a dropped key as far as the randomizer is concerned.
def configure(self, options):
if not options.seashells:
self.OPTIONS = [SEASHELL]
class SeashellMansion(DroppedKey):
MULTIWORLD = False
|
the-stack_0_15978 | import os, time, json, sys
# Allow imports from a non-standard path; do not remove.
csfp = os.path.abspath(os.path.join(os.path.dirname(__file__), 'experiment_replication'))
if csfp not in sys.path:
sys.path.insert(0, csfp)
import torch
from multitasking_transformers.heads import SubwordClassificationHead
from multitasking_transformers.multitaskers.util import get_model_path
from transformers import BertConfig, BertForTokenClassification, BertModel
from tokenizers import BertWordPieceTokenizer, Encoding
from pprint import pprint
from experiment_replication.raw_datasets.language import get_language
text = """Admission Date: [**2109-7-21**] Discharge Date: [**2109-8-13**]
Date of Birth: [**2053-6-5**] Sex: F
Service: [**Doctor Last Name 1181**] MEDICINE
HISTORY OF PRESENT ILLNESS: This is a 56-year-old white
female with a history of right frontal craniotomy on [**2109-7-1**], for a dysembryoplastic angioneural epithelial lesion
with features of an oligodendroglioma who was started on
Dilantin postoperatively for seizure prophylaxis and was
subsequently developed eye discharge and was seen by an
optometrist who treated it with sulfate ophthalmic drops.
The patient then developed oral sores and rash in the chest
the night before admission which rapidly spread to the face,
trunk, and upper extremities within the last 24 hours. The
patient was unable to eat secondary to mouth pain. She had
fevers, weakness, and diarrhea. There were no genital
the morning of [**7-20**].
PAST MEDICAL HISTORY: 1. Hypercholesterolemia. 2. Benign
right frontal cystic tumor status post right frontal
craniotomy on [**2109-7-1**].
"""
batch_size = 25
# Defines the maximum number of subwords per sequence during chunking.
# Smaller values give faster per-instance computation; larger values are faster for long chunks of text.
max_sequence_length = 512
def visualize(data_generator):
from spacy import displacy
from spacy.gold import biluo_tags_from_offsets
from spacy.tokens import Span
language = get_language()
ner = language.create_pipe("ner")
# language.add_pipe(ner, last=True)
docs = []
print(data_generator)
for text, annotation in data_generator:
doc = language(text)
for label in annotation['entity_labels']:
ner.add_label(label)
spans = []
for key in annotation['entities']:
for start, stop, label in annotation['entities'][key]:
span = doc.char_span(start, stop, label=label)
if span is None:
continue
spans.append(span)
doc.ents = spans
docs.append(doc)
displacy.serve(docs, style="ent")
device='cpu'
clinical_ner_tasks = ['i2b2_2010','n2c2_2018', 'i2b2_2012', 'i2b2_2014', 'quaero_2014']
model_path = get_model_path('mt_clinical_bert_8_tasks')
tokenizer = BertWordPieceTokenizer(os.path.join(model_path, 'vocab.txt'), lowercase=True, add_special_tokens=False)
#initialize finetuned stacked transformer
bert = BertModel.from_pretrained(model_path)
bert.eval()
heads = {}
#initialize pre-trained heads
for task in clinical_ner_tasks:
config = json.load(open(os.path.join(model_path, f"SubwordClassificationHead_{task}.json"), 'rb'))
heads[task] = SubwordClassificationHead(task, labels=config['labels'],
hidden_size=config['hidden_size'],
hidden_dropout_prob=config['hidden_dropout_prob'])
heads[task].from_pretrained(model_path)
encoding = tokenizer.encode(text)
def prepare_encoding(encoding: Encoding):
"""
    Given an arbitrarily long text (>512 subwords), chunk it into windows that fit the BERT context size.
:param encoding:
:return:
"""
def chunk_encoding(tensor : torch.Tensor):
chunks = tensor.split(max_sequence_length)
batch = torch.zeros(size=(len(chunks), max_sequence_length), dtype=torch.long)
#we don't include special tokens during prediction (empirically, doesn't look like it hurts!)
for index, chunk in enumerate(chunks):
batch[index][0:len(chunk)] = torch.clone(chunk)
# batch[index][0] = tokenizer.cls_token
# batch[index][chunk.shape[0] + 1] = tokenizer.sep_token
return batch, [len(chunk) for chunk in chunks]
input_ids, num_tokens_in_instance = chunk_encoding(torch.tensor(encoding.ids, dtype=torch.long))
attention_mask, _ = chunk_encoding(torch.tensor(encoding.attention_mask, dtype=torch.long))
token_type_ids, _ = chunk_encoding(torch.tensor(encoding.type_ids, dtype=torch.long))
return (input_ids, attention_mask, token_type_ids),\
[encoding.offsets[i:i+max_sequence_length] for i in range(0, len(encoding.offsets) ,max_sequence_length)],\
num_tokens_in_instance
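# Illustrative sketch of the chunking arithmetic above (assumed numbers, not
# derived from the note in `text`): an encoding with 1,200 subword ids is
# split into ceil(1200 / 512) = 3 rows. Rows 0 and 1 are full (512 tokens
# each), row 2 holds the remaining 176 tokens and is zero-padded to length
# 512, so num_tokens_in_instance would be [512, 512, 176] and input_ids would
# have shape (3, 512). The offsets are sliced the same way, so predictions can
# later be mapped back to character positions in the original text.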
(input_ids, attention_mask, token_type_ids), offsets, num_tokens_in_instance = prepare_encoding(encoding)
token_representations = bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0]
head_annotations = []
#Get head predictions
for task, head in heads.items():
print(f"Predicting head: {head}")
batch_subword_scores = head(token_representations)[0]
batch_predicted_labels = batch_subword_scores.max(2)[1].tolist()
# print(len(batch_predicted_labels))
spans = []
for idx, (predicted_labels, sequence_offsets) in enumerate(zip(batch_predicted_labels, offsets)):
#print(predicted_labels)
#merge multiple spans together into final annotation.
predicted_labels = list(map(lambda x : x[2:] if '-' in x else x.replace('BERT_TOKEN', 'O'),
[head.config['labels'][label_key] for label_key in predicted_labels]))
sequence_offsets = sequence_offsets
predicted_labels = predicted_labels
# print(sequence_offsets)
# print(predicted_labels)
# print(f"Num tokens in instance: {num_tokens_in_instance[idx]}")
i = 0
prev_label = 'O'
#Group together tokens tagged with entities (post-processing heuristic)
while i < num_tokens_in_instance[idx]:
if predicted_labels[i] == 'O':
i += 1
continue
label_start = i
while i+1 != num_tokens_in_instance[idx] and predicted_labels[i] == predicted_labels[i+1]:
i+=1
label_end = i
spans.append((sequence_offsets[label_start:label_end+1][0][0],
sequence_offsets[label_start:label_end+1][-1][1],
predicted_labels[i]))
i+=1
# print(task)
# print(spans)
annotation = {'entities':{f"T{i}": [span] for i, span in enumerate(spans)},
'entity_labels': list(map(lambda x : x[2:] if '-' in x else x, head.config['labels']))}
head_annotations.append( tuple((str(encoding.original_str), annotation)))
visualize(head_annotations)
|
the-stack_0_15980 | # Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants as lib_const
from neutron_lib import context
from neutron_lib import rpc as n_rpc
from neutron_lib.services.qos import constants as qos_consts
from oslo_utils import uuidutils
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3.extensions.qos import fip as fip_qos
from neutron.agent.l3 import l3_agent_extension_api as l3_ext_api
from neutron.agent.l3 import router_info as l3router
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.tests import base
from neutron.tests.unit.agent.l3 import test_agent
_uuid = uuidutils.generate_uuid
TEST_QOS_FIP = "3.3.3.3"
TEST_FIP = "1.1.1.1"
TEST_FIP2 = "2.2.2.2"
HOSTNAME = 'myhost'
class QosExtensionBaseTestCase(test_agent.BasicRouterOperationsFramework):
def setUp(self):
super(QosExtensionBaseTestCase, self).setUp()
self.fip_qos_ext = fip_qos.FipQosAgentExtension()
self.context = context.get_admin_context()
self.connection = mock.Mock()
self.policy = policy.QosPolicy(context=None,
name='test1', id=_uuid())
self.ingress_rule = (
rule.QosBandwidthLimitRule(context=None, id=_uuid(),
qos_policy_id=self.policy.id,
max_kbps=1111,
max_burst_kbps=2222,
direction=lib_const.INGRESS_DIRECTION))
self.egress_rule = (
rule.QosBandwidthLimitRule(context=None, id=_uuid(),
qos_policy_id=self.policy.id,
max_kbps=3333,
max_burst_kbps=4444,
direction=lib_const.EGRESS_DIRECTION))
self.policy.rules = [self.ingress_rule, self.egress_rule]
self.new_ingress_rule = (
rule.QosBandwidthLimitRule(context=None, id=_uuid(),
qos_policy_id=self.policy.id,
max_kbps=5555,
max_burst_kbps=6666,
direction=lib_const.INGRESS_DIRECTION))
self.ingress_rule_only_has_max_kbps = (
rule.QosBandwidthLimitRule(context=None, id=_uuid(),
qos_policy_id=self.policy.id,
max_kbps=5555,
max_burst_kbps=0,
direction=lib_const.INGRESS_DIRECTION))
self.policy2 = policy.QosPolicy(context=None,
name='test2', id=_uuid())
self.policy2.rules = [self.ingress_rule]
self.policy3 = policy.QosPolicy(context=None,
name='test3', id=_uuid())
self.policy3.rules = [self.egress_rule]
self.policy4 = policy.QosPolicy(context=None,
name='test4', id=_uuid())
self.dscp = rule.QosDscpMarkingRule(context=None, id=_uuid(),
qos_policy_id=self.policy4.id,
dscp_mark=32)
self.dscp.obj_reset_changes()
self.policy4.rules = [self.dscp]
self.qos_policies = {self.policy.id: self.policy,
self.policy2.id: self.policy2,
self.policy3.id: self.policy3,
self.policy4.id: self.policy4}
self.agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.ex_gw_port = {'id': _uuid()}
self.fip = {'id': _uuid(),
'floating_ip_address': TEST_QOS_FIP,
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME,
'qos_policy_id': self.policy.id}
self.router_id = _uuid()
self.router = {'id': self.router_id,
'gw_port': self.ex_gw_port,
'ha': False,
'distributed': False,
lib_const.FLOATINGIP_KEY: [self.fip]}
self.router_info = l3router.RouterInfo(self.agent, self.router_id,
self.router, **self.ri_kwargs)
self.router_info.ex_gw_port = self.ex_gw_port
self.agent.router_info[self.router_id] = self.router_info
def _mock_get_router_info(router_id):
return self.router_info
self.get_router_info = mock.patch(
'neutron.agent.l3.l3_agent_extension_api.'
'L3AgentExtensionAPI.get_router_info').start()
self.get_router_info.side_effect = _mock_get_router_info
self.agent_api = l3_ext_api.L3AgentExtensionAPI(None, None)
self.fip_qos_ext.consume_api(self.agent_api)
class FipQosExtensionInitializeTestCase(QosExtensionBaseTestCase):
@mock.patch.object(registry, 'register')
@mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback')
def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock):
with mock.patch.object(n_rpc, 'Connection',
return_value=self.connection) as create_connection:
self.fip_qos_ext.initialize(
self.connection, lib_const.L3_AGENT_MODE)
create_connection.assert_has_calls([mock.call()])
self.connection.create_consumer.assert_has_calls(
[mock.call(
resources_rpc.resource_type_versioned_topic(
resources.QOS_POLICY),
[rpc_mock()],
fanout=True)]
)
subscribe_mock.assert_called_with(mock.ANY, resources.QOS_POLICY)
class FipQosExtensionTestCase(QosExtensionBaseTestCase):
def setUp(self):
super(FipQosExtensionTestCase, self).setUp()
self.fip_qos_ext.initialize(
self.connection, lib_const.L3_AGENT_MODE)
self._set_pull_mock()
def _set_pull_mock(self):
def _pull_mock(context, resource_type, resource_id):
return self.qos_policies[resource_id]
self.pull = mock.patch(
'neutron.api.rpc.handlers.resources_rpc.'
'ResourcesPullRpcApi.pull').start()
self.pull.side_effect = _pull_mock
def _test_new_fip_add(self, func):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
func(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
def test_add_router(self):
self._test_new_fip_add(self.fip_qos_ext.add_router)
def test_update_router(self):
self._test_new_fip_add(self.fip_qos_ext.update_router)
def test_update_router_fip_policy_changed(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
            # the floating IP's policy has been changed to one
            # which only has a single egress rule
self.fip[qos_consts.QOS_POLICY_ID] = self.policy3.id
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.clear_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP)])
def test_update_router_fip_policy_changed_to_none(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
            # the floating IP's qos_policy binding has been removed
self.fip[qos_consts.QOS_POLICY_ID] = None
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.clear_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP)],
any_order=True)
def test__process_update_policy(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
            # the rules of the floating IP's policy have been changed
self.fip_qos_ext._policy_rules_modified = mock.Mock(
return_value=True)
self.policy.rules = [self.new_ingress_rule, self.egress_rule]
self.fip_qos_ext._process_update_policy(self.policy)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 5555, 6666)])
def _test_qos_policy_scenarios(self, fip_removed=True,
qos_rules_removed=False):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
if fip_removed:
                # the floating IP was disassociated, so it no longer belongs
                # to this router
self.router[lib_const.FLOATINGIP_KEY] = []
if qos_rules_removed:
self.policy.rules = []
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.clear_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP)],
any_order=True)
def test_delete_router(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
self.fip_qos_ext.delete_router(self.context, self.router)
self.assertIsNone(
self.fip_qos_ext.fip_qos_map.router_floating_ips.get(
self.router_id))
self.assertIsNone(
self.fip_qos_ext.fip_qos_map.ingress_ratelimits.get(
TEST_QOS_FIP))
self.assertIsNone(
self.fip_qos_ext.fip_qos_map.egress_ratelimits.get(
TEST_QOS_FIP))
self.assertIsNone(
self.fip_qos_ext.fip_qos_map.get_resource_policy(
TEST_QOS_FIP))
def test_update_router_fip_removed(self):
self._test_qos_policy_scenarios()
def test_fip_qos_changed_to_none(self):
self._test_qos_policy_scenarios(qos_rules_removed=True)
def _test_only_one_direction_rule(self, func, policy, direction):
tc_wrapper = mock.Mock()
with mock.patch.object(
self.fip_qos_ext.resource_rpc, 'pull',
return_value=policy):
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
func(self.context, self.router)
if direction == lib_const.INGRESS_DIRECTION:
calls = [mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222)]
else:
calls = [mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)]
tc_wrapper.set_ip_rate_limit.assert_has_calls(calls)
def test_add_router_only_ingress(self):
self._test_only_one_direction_rule(self.fip_qos_ext.add_router,
self.policy2,
lib_const.INGRESS_DIRECTION)
def test_add_router_only_egress(self):
self._test_only_one_direction_rule(self.fip_qos_ext.add_router,
self.policy3,
lib_const.EGRESS_DIRECTION)
def test_update_router_only_ingress(self):
self._test_only_one_direction_rule(self.fip_qos_ext.update_router,
self.policy2,
lib_const.INGRESS_DIRECTION)
def test_update_router_only_egress(self):
self._test_only_one_direction_rule(self.fip_qos_ext.update_router,
self.policy3,
lib_const.EGRESS_DIRECTION)
def test_rule_only_has_max_kbps(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
            # the policy's ingress rule now only sets a max_kbps value
self.policy.rules = [self.ingress_rule_only_has_max_kbps,
self.egress_rule]
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 5555, 0)])
def test_qos_policy_has_no_bandwidth_limit_rule(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip['qos_policy_id'] = self.policy4.id
self.fip_qos_ext.add_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_not_called()
def _test_process_ip_rates(self, with_cache):
rates = {'egress': {'rate': 333, 'burst': 444},
'ingress': {'rate': 111, 'burst': 222}}
fip = '123.123.123.123'
device = mock.Mock()
tc_wrapper = mock.Mock()
with mock.patch.object(
self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper) as get_tc_wrapper:
with mock.patch.object(
self.fip_qos_ext, 'process_ip_rate_limit') as process_ip:
self.fip_qos_ext.process_ip_rates(
fip, device, rates, with_cache=with_cache)
if with_cache:
self.assertEqual(2, process_ip.call_count)
else:
self.assertEqual(2, get_tc_wrapper.call_count)
self.assertEqual(
2, tc_wrapper.set_ip_rate_limit.call_count)
def test_process_ip_rates_with_cache(self):
self._test_process_ip_rates(with_cache=True)
def test_process_ip_rates_without_cache(self):
self._test_process_ip_rates(with_cache=False)
class RouterFipRateLimitMapsTestCase(base.BaseTestCase):
def setUp(self):
super(RouterFipRateLimitMapsTestCase, self).setUp()
self.policy_map = fip_qos.RouterFipRateLimitMaps()
def test_find_fip_router_id(self):
router_id = _uuid()
self.policy_map.router_floating_ips[router_id] = set([TEST_FIP,
TEST_FIP2])
self.assertIsNone(self.policy_map.find_fip_router_id("8.8.8.8"))
self.assertEqual(router_id,
self.policy_map.find_fip_router_id(TEST_FIP))
def test_get_router_floating_ips(self):
router_id = _uuid()
test_ips = [TEST_FIP, TEST_FIP2]
self.policy_map.router_floating_ips[router_id] = set([TEST_FIP,
TEST_FIP2])
get_ips = self.policy_map.get_router_floating_ips(router_id)
self.assertEqual(len(test_ips), len(get_ips))
def test_remove_fip_ratelimit_cache(self):
fip = "1.1.1.1"
self.policy_map.set_fip_ratelimit_cache(
"ingress", fip, 100, 200)
self.policy_map.set_fip_ratelimit_cache(
"egress", fip, 100, 200)
self.policy_map.remove_fip_ratelimit_cache("ingress", fip)
self.assertIsNone(self.policy_map.ingress_ratelimits.get(fip))
self.policy_map.remove_fip_ratelimit_cache("egress", fip)
self.assertIsNone(self.policy_map.egress_ratelimits.get(fip))
def test_set_fip_ratelimit_cache(self):
fip = "1.1.1.1"
self.policy_map.set_fip_ratelimit_cache(
"ingress", fip, 100, 200)
self.policy_map.set_fip_ratelimit_cache(
"egress", fip, 300, 400)
in_rate, in_burst = self.policy_map.get_fip_ratelimit_cache(
"ingress", fip)
self.assertEqual(100, in_rate)
self.assertEqual(200, in_burst)
e_rate, e_burst = self.policy_map.get_fip_ratelimit_cache(
"egress", fip)
self.assertEqual(300, e_rate)
self.assertEqual(400, e_burst)
|
the-stack_0_15981 | import math
from itertools import combinations_with_replacement
from operator import attrgetter
from typing import Dict, List, Type
from locust import User
def weight_users(user_classes: List[Type[User]], user_count: int) -> Dict[str, int]:
"""
Compute the desired state of users using the weight of each user class.
    :param user_classes: the list of user classes
    :param user_count: total number of users
    :return: a mapping from user class name to the number of users to run
"""
assert user_count >= 0
if len(user_classes) == 0:
return {}
user_classes = sorted(user_classes, key=attrgetter("__name__"))
user_classes_count = {user_class.__name__: 0 for user_class in user_classes}
# If the number of users is less than the number of user classes, at most one user of each user class
# is chosen. User classes with higher weight are chosen first.
if user_count <= len(user_classes):
user_classes_count.update(
{
user_class.__name__: 1
for user_class in sorted(user_classes, key=attrgetter("weight"), reverse=True)[:user_count]
}
)
return user_classes_count
# If the number of users is greater than or equal to the number of user classes, at least one user of each
    # user class will be chosen. The greater the number of users, the better the actual distribution
# of users will match the desired one (as dictated by the weight attributes).
weights = list(map(attrgetter("weight"), user_classes))
relative_weights = [weight / sum(weights) for weight in weights]
user_classes_count = {
user_class.__name__: round(relative_weight * user_count) or 1
for user_class, relative_weight in zip(user_classes, relative_weights)
}
if sum(user_classes_count.values()) == user_count:
return user_classes_count
else:
user_classes_count = _find_ideal_users_to_add_or_remove(
user_classes, user_count - sum(user_classes_count.values()), user_classes_count
)
assert sum(user_classes_count.values()) == user_count
return user_classes_count
def _find_ideal_users_to_add_or_remove(
user_classes: List[Type[User]], user_count_to_add_or_remove: int, user_classes_count: Dict[str, int]
) -> Dict[str, int]:
sign = -1 if user_count_to_add_or_remove < 0 else 1
user_count_to_add_or_remove = abs(user_count_to_add_or_remove)
assert user_count_to_add_or_remove <= len(user_classes), user_count_to_add_or_remove
# Formula for combination with replacement
# (https://www.tutorialspoint.com/statistics/combination_with_replacement.htm)
number_of_combinations = math.factorial(len(user_classes) + user_count_to_add_or_remove - 1) / (
math.factorial(user_count_to_add_or_remove) * math.factorial(len(user_classes) - 1)
)
# If the number of combinations with replacement is above this threshold, we simply add/remove
# users for the first "number_of_users_to_add_or_remove" users. Otherwise, computing the best
# distribution is too expensive in terms of computation.
max_number_of_combinations_threshold = 1000
if number_of_combinations <= max_number_of_combinations_threshold:
user_classes_count_candidates: Dict[float, Dict[str, int]] = {}
for user_classes_combination in combinations_with_replacement(user_classes, user_count_to_add_or_remove):
# Copy in order to not mutate `user_classes_count` for the parent scope
user_classes_count_candidate = user_classes_count.copy()
for user_class in user_classes_combination:
user_classes_count_candidate[user_class.__name__] += sign
distance = distance_from_desired_distribution(user_classes, user_classes_count_candidate)
if distance not in user_classes_count_candidates:
user_classes_count_candidates[distance] = user_classes_count_candidate.copy()
return user_classes_count_candidates[min(user_classes_count_candidates.keys())]
else:
# Copy in order to not mutate `user_classes_count` for the parent scope
user_classes_count_candidate = user_classes_count.copy()
for user_class in user_classes[:user_count_to_add_or_remove]:
user_classes_count_candidate[user_class.__name__] += sign
return user_classes_count_candidate
def distance_from_desired_distribution(user_classes: List[Type[User]], user_classes_count: Dict[str, int]) -> float:
actual_ratio_of_user_class = {
user_class: user_class_count / sum(user_classes_count.values())
for user_class, user_class_count in user_classes_count.items()
}
expected_ratio_of_user_class = {
user_class.__name__: user_class.weight / sum(map(attrgetter("weight"), user_classes))
for user_class in user_classes
}
differences = [
actual_ratio_of_user_class[user_class] - expected_ratio
for user_class, expected_ratio in expected_ratio_of_user_class.items()
]
return math.sqrt(math.fsum(map(lambda x: x ** 2, differences)))
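def _example_weight_users():  # pragma: no cover - illustrative sketch only
    """Usage sketch with hypothetical user classes (not part of locust)."""
    # Plain subclasses that only carry a weight attribute.
    class Browser(User):
        weight = 3
    class Buyer(User):
        weight = 1
    # With 10 users and a 3:1 weight ratio the ideal split is 7.5 / 2.5,
    # which rounds to {"Browser": 8, "Buyer": 2} while keeping the total at
    # exactly 10.
    return weight_users([Browser, Buyer], 10)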
|
the-stack_0_15982 | import cv2
import numpy as np
import os
import re
import argparse
def list_files(path):
    pattern = re.compile(r'(-?\d+),(-?\d+)\.png')
res = list()
rg = list() #[xmin ymin xmax ymax]
for (dirpath, dirnames, filenames) in os.walk(path):
for filename in filenames:
m = pattern.match(filename)
if m is not None:
x = int(m.group(1))
y = int(m.group(2))
p = os.path.join(dirpath, filename)
res.append((x,y,p))
if len(rg) == 0:
rg.append(x)
rg.append(y)
rg.append(x)
rg.append(y)
else:
if rg[0] > x:
rg[0] = x
if rg[1] > y:
rg[1] = y
if rg[2] < x:
rg[2] = x
if rg[3] < y:
rg[3] = y
rg = (rg[0], rg[1], rg[2] + 1, rg[3] + 1)
return (res, rg)
def merge(res, rg):
st = np.array((256, 256), dtype=np.int32)
rg = np.array(rg, dtype=np.int32)
sz = (rg[2:4] - rg[0:2]) * st
img = np.zeros((sz[1], sz[0], 4), dtype=np.uint8)
st = np.array((st[0], st[1], st[0], st[1]), dtype=np.int32)
sz = np.array((rg[0], rg[1], rg[0], rg[1]), dtype=np.int32)
for (x, z, path) in res:
if x < rg[0] or z < rg[1] or x >= rg[2] or z >= rg[3]:
continue
tg = np.array((x, z, x + 1, z + 1), dtype=np.int32)
tg = (tg - sz) * st
part = cv2.imread(path, flags=cv2.IMREAD_UNCHANGED)
if part is None:
continue
img[tg[1]:tg[3],tg[0]:tg[2],:] = part[:,:,:]
return img
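# Illustrative usage sketch (assumed file layout and script name): given a
# directory of 256x256 tiles named "<x>,<y>.png", e.g. tiles/0,0.png through
# tiles/2,1.png, and assuming this file is saved as merge_tiles.py, the tiles
# can be stitched with either the full detected range or an explicit one:
#
#     python merge_tiles.py tiles -o map.png -r max
#     python merge_tiles.py tiles -o map.png -r "0 0 2 1"
#
# The -r argument is four space-separated tile coordinates
# "xmin ymin xmax ymax" (inclusive), or "max" for the full range found on disk.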
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', type=str)
parser.add_argument('-o', '--output_file', type=str)
    parser.add_argument('-r', '--range', type=str, default='max')  # "xmin ymin xmax ymax" (inclusive) or "max"
args = parser.parse_args()
(res, rg) = list_files(args.input_dir)
if not (args.range == 'max'):
sp = args.range.split(' ')
p1 = sp[0:2]
xmin = int(p1[0])
ymin = int(p1[1])
p2 = sp[2:4]
xmax = int(p2[0]) + 1
ymax = int(p2[1]) + 1
rg = (xmin, ymin, xmax, ymax)
h = merge(res, rg)
cv2.imwrite(args.output_file, h)
pass |
the-stack_0_15983 | """
<Program Name>
download.py
<Started>
February 21, 2012. Based on previous version by Geremy Condra.
<Author>
Konstantin Andrianov
Vladimir Diaz <[email protected]>
<Copyright>
See LICENSE for licensing information.
<Purpose>
Download metadata and target files and check their validity. The hash and
length of a downloaded file has to match the hash and length supplied by the
metadata of that file. The downloaded file is technically a file-like object
that will automatically destroys itself once closed. Note that the file-like
object, 'tuf.util.TempFile', is returned by the '_download_file()' function.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import socket
import logging
import timeit
import ssl
import tuf
import tuf.conf
import tuf.hash
import tuf.util
import tuf.formats
import tuf._vendor.six as six
# 'ssl.match_hostname' was added in Python 3.2. The vendored version is needed
# for Python 2.6 and 2.7.
try:
from ssl import match_hostname, CertificateError
except ImportError:
from tuf._vendor.ssl_match_hostname import match_hostname, CertificateError
# See 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger('tuf.download')
def safe_download(url, required_length):
"""
<Purpose>
Given the 'url' and 'required_length' of the desired file, open a connection
to 'url', download it, and return the contents of the file. Also ensure
the length of the downloaded file matches 'required_length' exactly.
tuf.download.unsafe_download() may be called if an upper download limit is
preferred.
'tuf.util.TempFile', the file-like object returned, is used instead of
regular tempfile object because of additional functionality provided, such
as handling compressed metadata and automatically closing files after
moving to final destination.
<Arguments>
url:
A URL string that represents the location of the file.
required_length:
An integer value representing the length of the file. This is an exact
limit.
<Side Effects>
A 'tuf.util.TempFile' object is created on disk to store the contents of
'url'.
<Exceptions>
tuf.DownloadLengthMismatchError, if there was a mismatch of observed vs
expected lengths while downloading the file.
tuf.FormatError, if any of the arguments are improperly formatted.
Any other unforeseen runtime exception.
<Returns>
A 'tuf.util.TempFile' file-like object that points to the contents of 'url'.
"""
return _download_file(url, required_length, STRICT_REQUIRED_LENGTH=True)
def unsafe_download(url, required_length):
"""
<Purpose>
Given the 'url' and 'required_length' of the desired file, open a connection
to 'url', download it, and return the contents of the file. Also ensure
the length of the downloaded file is up to 'required_length', and no larger.
tuf.download.safe_download() may be called if an exact download limit is
preferred.
'tuf.util.TempFile', the file-like object returned, is used instead of
regular tempfile object because of additional functionality provided, such
as handling compressed metadata and automatically closing files after
moving to final destination.
<Arguments>
url:
A URL string that represents the location of the file.
required_length:
An integer value representing the length of the file. This is an upper
limit.
<Side Effects>
A 'tuf.util.TempFile' object is created on disk to store the contents of
'url'.
<Exceptions>
tuf.DownloadLengthMismatchError, if there was a mismatch of observed vs
expected lengths while downloading the file.
tuf.FormatError, if any of the arguments are improperly formatted.
Any other unforeseen runtime exception.
<Returns>
A 'tuf.util.TempFile' file-like object that points to the contents of 'url'.
"""
return _download_file(url, required_length, STRICT_REQUIRED_LENGTH=False)
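# Illustrative sketch (not part of TUF): the two helpers above differ only in
# how strictly the downloaded size is checked. Assuming metadata that pins a
# target file to exactly 1024 bytes, and an upper bound of 16384 bytes for
# unsigned timestamp metadata, typical calls would look like:
#
#   target_file = safe_download('http://example.com/targets/file.txt', 1024)
#   timestamp_file = unsafe_download('http://example.com/metadata/timestamp.json', 16384)
#
# Both return a 'tuf.util.TempFile'. safe_download() raises
# tuf.DownloadLengthMismatchError unless exactly 1024 bytes are received,
# while unsafe_download() only treats 16384 as an upper limit.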
def _download_file(url, required_length, STRICT_REQUIRED_LENGTH=True):
"""
<Purpose>
Given the url, hashes and length of the desired file, this function
opens a connection to 'url' and downloads the file while ensuring its
length and hashes match 'required_hashes' and 'required_length'.
tuf.util.TempFile is used instead of regular tempfile object because of
additional functionality provided by 'tuf.util.TempFile'.
<Arguments>
url:
A URL string that represents the location of the file.
required_length:
An integer value representing the length of the file.
STRICT_REQUIRED_LENGTH:
A Boolean indicator used to signal whether we should perform strict
checking of required_length. True by default. We explicitly set this to
False when we know that we want to turn this off for downloading the
timestamp metadata, which has no signed required_length.
<Side Effects>
A 'tuf.util.TempFile' object is created on disk to store the contents of
'url'.
<Exceptions>
tuf.DownloadLengthMismatchError, if there was a mismatch of observed vs
expected lengths while downloading the file.
tuf.FormatError, if any of the arguments are improperly formatted.
Any other unforeseen runtime exception.
<Returns>
A 'tuf.util.TempFile' file-like object that points to the contents of 'url'.
"""
# Do all of the arguments have the appropriate format?
# Raise 'tuf.FormatError' if there is a mismatch.
tuf.formats.URL_SCHEMA.check_match(url)
tuf.formats.LENGTH_SCHEMA.check_match(required_length)
# 'url.replace()' is for compatibility with Windows-based systems because
# they might put back-slashes in place of forward-slashes. This converts it
# to the common format.
url = url.replace('\\', '/')
logger.info('Downloading: '+str(url))
# This is the temporary file that we will return to contain the contents of
# the downloaded file.
temp_file = tuf.util.TempFile()
try:
# Open the connection to the remote file.
connection = _open_connection(url)
# We ask the server about how big it thinks this file should be.
reported_length = _get_content_length(connection)
# Then, we check whether the required length matches the reported length.
_check_content_length(reported_length, required_length,
STRICT_REQUIRED_LENGTH)
# Download the contents of the URL, up to the required length, to a
# temporary file, and get the total number of downloaded bytes.
total_downloaded = _download_fixed_amount_of_data(connection, temp_file,
required_length)
# Does the total number of downloaded bytes match the required length?
_check_downloaded_length(total_downloaded, required_length,
STRICT_REQUIRED_LENGTH=STRICT_REQUIRED_LENGTH)
except:
# Close 'temp_file'. Any written data is lost.
temp_file.close_temp_file()
logger.exception('Could not download URL: '+str(url))
raise
else:
return temp_file
def _download_fixed_amount_of_data(connection, temp_file, required_length):
"""
<Purpose>
    This is a helper function, where the download really happens. The while
    loop reads data from the connection one fixed-size chunk at a time, or
    less, until 'required_length' is reached.
<Arguments>
connection:
The object that the _open_connection returns for communicating with the
server about the contents of a URL.
temp_file:
A temporary file where the contents at the URL specified by the
'connection' object will be stored.
required_length:
The number of bytes that we must download for the file. This is almost
always specified by the TUF metadata for the data file in question
(except in the case of timestamp metadata, in which case we would fix a
reasonable upper bound).
<Side Effects>
Data from the server will be written to 'temp_file'.
<Exceptions>
Runtime or network exceptions will be raised without question.
<Returns>
total_downloaded:
The total number of bytes downloaded for the desired file.
"""
# Tolerate servers with a slow start by ignoring their delivery speed for
# 'tuf.conf.SLOW_START_GRACE_PERIOD' seconds. Set 'seconds_spent_receiving'
# to negative SLOW_START_GRACE_PERIOD seconds, and begin checking the average
# download speed once it is positive.
grace_period = -tuf.conf.SLOW_START_GRACE_PERIOD
# Keep track of total bytes downloaded.
number_of_bytes_received = 0
start_time = timeit.default_timer()
try:
while True:
# We download a fixed chunk of data in every round. This is so that we
# can defend against slow retrieval attacks. Furthermore, we do not wish
# to download an extremely large file in one shot.
data = b''
read_amount = min(tuf.conf.CHUNK_SIZE,
required_length - number_of_bytes_received)
#logger.debug('Reading next chunk...')
try:
data = connection.read(read_amount)
# Python 3.2 returns 'IOError' if the remote file object has timed out.
except (socket.error, IOError):
pass
number_of_bytes_received = number_of_bytes_received + len(data)
# Data successfully read from the connection. Store it.
temp_file.write(data)
if number_of_bytes_received == required_length:
break
stop_time = timeit.default_timer()
seconds_spent_receiving = stop_time - start_time
if (seconds_spent_receiving + grace_period) < 0:
#logger.debug('Ignoring average download speed for another: '+\
#str(-seconds_spent_receiving) + ' seconds')
continue
# Measure the average download speed.
average_download_speed = number_of_bytes_received / seconds_spent_receiving
# If the average download speed is below a certain threshold, we flag
# this as a possible slow-retrieval attack.
if average_download_speed < tuf.conf.MIN_AVERAGE_DOWNLOAD_SPEED:
break
else:
logger.debug('Good average download speed: '+\
str(average_download_speed) + ' bytes per second')
# We might have no more data to read. Check number of bytes downloaded.
if not data:
message = 'Downloaded '+str(number_of_bytes_received)+'/'+ \
str(required_length)+' bytes.'
logger.debug(message)
# Finally, we signal that the download is complete.
break
except:
raise
else:
# This else block returns and skips closing the connection in the finally
# block, so close the connection here.
connection.close()
return number_of_bytes_received
finally:
# Whatever happens, make sure that we always close the connection.
connection.close()
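# Worked example of the slow-retrieval defense above (illustrative numbers,
# not TUF's defaults): with tuf.conf.SLOW_START_GRACE_PERIOD = 5 and
# tuf.conf.MIN_AVERAGE_DOWNLOAD_SPEED = 50 (bytes per second), the average
# speed is ignored for the first 5 seconds of the transfer. After that, if
# only 200 bytes have arrived after 10 seconds, the average is 20 bytes per
# second, which falls below the threshold and makes the read loop stop early
# instead of letting a malicious server trickle data indefinitely.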
def _get_request(url):
"""
Wraps the URL to retrieve to protects against "creative"
interpretation of the RFC: http://bugs.python.org/issue8732
https://github.com/pypa/pip/blob/d0fa66ecc03ab20b7411b35f7c7b423f31f77761/pip/download.py#L147
"""
return six.moves.urllib.request.Request(url, headers={'Accept-encoding': 'identity'})
def _get_opener(scheme=None):
"""
Build a urllib2 opener based on whether the user now wants SSL.
https://github.com/pypa/pip/blob/d0fa66ecc03ab20b7411b35f7c7b423f31f77761/pip/download.py#L178
"""
if scheme == "https":
assert os.path.isfile(tuf.conf.ssl_certificates)
# If we are going over https, use an opener which will provide SSL
# certificate verification.
https_handler = VerifiedHTTPSHandler()
opener = six.moves.urllib.request.build_opener(https_handler)
# strip out HTTPHandler to prevent MITM spoof
for handler in opener.handlers:
if isinstance(handler, six.moves.urllib.request.HTTPHandler):
opener.handlers.remove(handler)
else:
# Otherwise, use the default opener.
opener = six.moves.urllib.request.build_opener()
return opener
def _open_connection(url):
"""
<Purpose>
Helper function that opens a connection to the url. urllib2 supports http,
ftp, and file. In python (2.6+) where the ssl module is available, urllib2
also supports https.
TODO: Determine whether this follows http redirects and decide if we like
that. For example, would we not want to allow redirection from ssl to
non-ssl urls?
<Arguments>
url:
URL string (e.g., 'http://...' or 'ftp://...' or 'file://...')
<Exceptions>
None.
<Side Effects>
Opens a connection to a remote server.
<Returns>
File-like object.
"""
  # urllib2.Request produces a Request object that allows for finer control
  # of the requesting process. The Request object allows adding headers or
  # data to the HTTP request. For instance, the request method
  # add_header(key, val) can be used to change/spoof 'User-Agent' from the
  # default Python-urllib/x.y to 'Mozilla/4.0 (compatible; MSIE 7.0; Windows
  # NT 5.1)'. This can be useful if servers do not recognize connections that
  # originate from Python-urllib/x.y.
parsed_url = six.moves.urllib.parse.urlparse(url)
opener = _get_opener(scheme=parsed_url.scheme)
request = _get_request(url)
return opener.open(request, timeout = tuf.conf.SOCKET_TIMEOUT)
def _get_content_length(connection):
"""
<Purpose>
A helper function that gets the purported file length from server.
<Arguments>
connection:
The object that the _open_connection function returns for communicating
with the server about the contents of a URL.
<Side Effects>
No known side effects.
<Exceptions>
Runtime exceptions will be suppressed but logged.
<Returns>
reported_length:
The total number of bytes reported by server. If the process fails, we
return None; otherwise we would return a nonnegative integer.
"""
try:
# What is the length of this document according to the HTTP spec?
reported_length = connection.info().get('Content-Length')
# Try casting it as a decimal number.
reported_length = int(reported_length, 10)
# Make sure that it is a nonnegative integer.
assert reported_length > -1
except:
message = \
'Could not get content length about ' + str(connection) + ' from server.'
logger.exception(message)
reported_length = None
finally:
return reported_length
def _check_content_length(reported_length, required_length, strict_length=True):
"""
<Purpose>
A helper function that checks whether the length reported by server is
equal to the length we expected.
<Arguments>
reported_length:
The total number of bytes reported by the server.
required_length:
The total number of bytes obtained from (possibly default) metadata.
strict_length:
Boolean that indicates whether the required length of the file is an
exact match, or an upper limit (e.g., downloading a Timestamp file).
<Side Effects>
No known side effects.
<Exceptions>
No known exceptions.
<Returns>
None.
"""
logger.debug('The server reported a length of '+repr(reported_length)+' bytes.')
comparison_result = None
try:
if reported_length < required_length:
comparison_result = 'less than'
elif reported_length > required_length:
comparison_result = 'greater than'
else:
comparison_result = 'equal to'
except:
logger.exception('Could not check reported and required lengths.')
if strict_length:
message = 'The reported length is '+comparison_result+' the required '+\
'length of '+repr(required_length)+' bytes.'
logger.debug(message)
else:
message = 'The reported length is '+comparison_result+' the upper limit '+\
'of '+repr(required_length)+' bytes.'
logger.debug(message)
def _check_downloaded_length(total_downloaded, required_length,
STRICT_REQUIRED_LENGTH=True):
"""
<Purpose>
A helper function which checks whether the total number of downloaded bytes
matches our expectation.
<Arguments>
total_downloaded:
The total number of bytes supposedly downloaded for the file in question.
required_length:
The total number of bytes expected of the file as seen from its metadata.
The Timestamp role is always downloaded without a known file length, and
the Root role when the client cannot download any of the required
top-level roles. In both cases, 'required_length' is actually an upper
limit on the length of the downloaded file.
STRICT_REQUIRED_LENGTH:
A Boolean indicator used to signal whether we should perform strict
checking of required_length. True by default. We explicitly set this to
False when we know that we want to turn this off for downloading the
timestamp metadata, which has no signed required_length.
<Side Effects>
None.
<Exceptions>
tuf.DownloadLengthMismatchError, if STRICT_REQUIRED_LENGTH is True and
total_downloaded is not equal required_length.
<Returns>
None.
"""
if total_downloaded == required_length:
logger.info('Downloaded '+str(total_downloaded)+' bytes out of the '+\
'expected '+str(required_length)+ ' bytes.')
else:
difference_in_bytes = abs(total_downloaded - required_length)
# What we downloaded is not equal to the required length, but did we ask
# for strict checking of required length?
if STRICT_REQUIRED_LENGTH:
message = 'Downloaded '+str(total_downloaded)+' bytes, but expected '+\
str(required_length)+' bytes. There is a difference of '+\
str(difference_in_bytes)+' bytes.'
# This must be due to a programming error, and must never happen!
logger.error(message)
raise tuf.DownloadLengthMismatchError(required_length, total_downloaded)
else:
message = 'Downloaded '+str(total_downloaded)+' bytes out of an upper '+\
'limit of '+str(required_length)+' bytes.'
# We specifically disabled strict checking of required length, but we
# will log a warning anyway. This is useful when we wish to download the
# Timestamp or Root metadata, for which we have no signed metadata; so,
# we must guess a reasonable required_length for it.
logger.info(message)
class VerifiedHTTPSConnection(six.moves.http_client.HTTPSConnection):
"""
A connection that wraps connections with ssl certificate verification.
https://github.com/pypa/pip/blob/d0fa66ecc03ab20b7411b35f7c7b423f31f77761/pip/download.py#L72
"""
def connect(self):
self.connection_kwargs = {}
# for > py2.5
if hasattr(self, 'timeout'):
self.connection_kwargs.update(timeout = self.timeout)
# for >= py2.7
if hasattr(self, 'source_address'):
self.connection_kwargs.update(source_address = self.source_address)
sock = socket.create_connection((self.host, self.port), **self.connection_kwargs)
# for >= py2.7
if getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
# set location of certificate authorities
assert os.path.isfile( tuf.conf.ssl_certificates )
cert_path = tuf.conf.ssl_certificates
# TODO: Disallow SSLv2.
# http://docs.python.org/dev/library/ssl.html#protocol-versions
# TODO: Select the right ciphers.
# http://docs.python.org/dev/library/ssl.html#cipher-selection
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=cert_path)
match_hostname(self.sock.getpeercert(), self.host)
class VerifiedHTTPSHandler(six.moves.urllib.request.HTTPSHandler):
"""
A HTTPSHandler that uses our own VerifiedHTTPSConnection.
https://github.com/pypa/pip/blob/d0fa66ecc03ab20b7411b35f7c7b423f31f77761/pip/download.py#L109
"""
def __init__(self, connection_class = VerifiedHTTPSConnection):
self.specialized_conn_class = connection_class
six.moves.urllib.request.HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(self.specialized_conn_class, req)
|
the-stack_0_15986 | from cline import CommandLineArguments, Task
from examples.example02.arguments import NumberArgs
class SubtractTask(Task[NumberArgs]):
@classmethod
def make_args(cls, args: CommandLineArguments) -> NumberArgs:
"""
Makes and returns strongly-typed arguments for this task based on the
parsed command line arguments `args`.
Arguments:
args: Parsed command line arguments
Raises:
CannotMakeArguments: If the given arguments are not relevant to this
task
Returns:
Task arguments
"""
# Asserts that the "sub" flag is present and truthy.
args.assert_true("sub")
# If "a" or "b" aren't set or aren't integers then "get_integer" will
# raise `CannotMakeArguments`:
return NumberArgs(
a=args.get_integer("a"),
b=args.get_integer("b"),
)
def invoke(self) -> int:
"""
Invokes the task.
Reads arguments from `self.args`. Writes output to `self.out`.
Returns the shell exit code.
"""
# Since the arguments are strongly-typed, we don't need to worry about
# parsing integers and handing failures therein:
result = self.args.a - self.args.b
self.out.write(f"{result}\n")
return 0
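# Illustrative sketch (hypothetical CLI wiring, not part of example02): once
# this task is registered with the example's command-line entry point, and
# assuming the "sub" flag and the "a"/"b" values map to flags of the same
# name, an invocation along the lines of
#
#     example02 --sub --a 7 --b 3
#
# would be routed to SubtractTask, print "4", and exit with code 0; a
# non-integer "a" or "b" makes make_args() raise CannotMakeArguments, so the
# task is not selected.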
|
the-stack_0_15987 | from __future__ import print_function, division
from sympy.core import S, C
from sympy.core.compatibility import u
from sympy.core.exprtools import factor_terms
from sympy.core.function import (Function, Derivative, ArgumentIndexError,
AppliedUndef)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.core.expr import Expr
from sympy.core import Add, Mul
from sympy.core.relational import Eq
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.trigonometric import atan2
###############################################################################
######################### REAL and IMAGINARY PARTS ############################
###############################################################################
class re(Function):
"""Returns real part of expression. This function performs only
    elementary analysis and so it will fail to decompose more complicated
    expressions properly. If a completely simplified result is needed
    then use Basic.as_real_imag() or perform complex expansion on an
    instance of this function.
    Examples
    ========
>>> from sympy import re, im, I, E
>>> from sympy.abc import x, y
>>> re(2*E)
2*E
>>> re(2*I + 17)
17
>>> re(2*I)
0
>>> re(im(x) + x*I + 2)
2
See Also
========
im
"""
is_real = True
    unbranched = True  # implicitly works on the projection to C
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
elif arg.is_real:
return arg
elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
return S.Zero
elif arg.is_Function and arg.func is conjugate:
return re(arg.args[0])
else:
included, reverted, excluded = [], [], []
args = Add.make_args(arg)
for term in args:
coeff = term.as_coefficient(S.ImaginaryUnit)
if coeff is not None:
if not coeff.is_real:
reverted.append(coeff)
elif not term.has(S.ImaginaryUnit) and term.is_real:
excluded.append(term)
else:
# Try to do some advanced expansion. If
# impossible, don't try to do re(arg) again
# (because this is what we are trying to do now).
real_imag = term.as_real_imag(ignore=arg)
if real_imag:
excluded.append(real_imag[0])
else:
included.append(term)
if len(args) != len(included):
a, b, c = map(lambda xs: Add(*xs),
[included, reverted, excluded])
return cls(a) - im(b) + c
def as_real_imag(self, deep=True, **hints):
"""
Returns the real number with a zero complex part.
"""
return (self, S.Zero)
def _eval_derivative(self, x):
if x.is_real or self.args[0].is_real:
return re(Derivative(self.args[0], x, evaluate=True))
if x.is_imaginary or self.args[0].is_imaginary:
return -S.ImaginaryUnit \
* im(Derivative(self.args[0], x, evaluate=True))
def _eval_rewrite_as_im(self, arg):
return self.args[0] - im(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
def _sage_(self):
import sage.all as sage
return sage.real_part(self.args[0]._sage_())
class im(Function):
"""
    Returns the imaginary part of an expression. This function performs only
    elementary analysis and so it will fail to decompose more complicated
    expressions properly. If a completely simplified result is needed then
    use Basic.as_real_imag() or perform complex expansion on an instance of
    this function.
Examples
========
>>> from sympy import re, im, E, I
>>> from sympy.abc import x, y
>>> im(2*E)
0
>>> re(2*I + 17)
17
>>> im(x*I)
re(x)
>>> im(re(x) + y)
im(y)
See Also
========
re
"""
is_real = True
    unbranched = True  # implicitly works on the projection to C
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
elif arg.is_real:
return S.Zero
elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
return -S.ImaginaryUnit * arg
elif arg.is_Function and arg.func is conjugate:
return -im(arg.args[0])
else:
included, reverted, excluded = [], [], []
args = Add.make_args(arg)
for term in args:
coeff = term.as_coefficient(S.ImaginaryUnit)
if coeff is not None:
if not coeff.is_real:
reverted.append(coeff)
else:
excluded.append(coeff)
elif term.has(S.ImaginaryUnit) or not term.is_real:
# Try to do some advanced expansion. If
# impossible, don't try to do im(arg) again
# (because this is what we are trying to do now).
real_imag = term.as_real_imag(ignore=arg)
if real_imag:
excluded.append(real_imag[1])
else:
included.append(term)
if len(args) != len(included):
a, b, c = map(lambda xs: Add(*xs),
[included, reverted, excluded])
return cls(a) + re(b) + c
def as_real_imag(self, deep=True, **hints):
"""
Return the imaginary part with a zero real part.
Examples
========
>>> from sympy.functions import im
>>> from sympy import I
>>> im(2 + 3*I).as_real_imag()
(3, 0)
"""
return (self, S.Zero)
def _eval_derivative(self, x):
if x.is_real or self.args[0].is_real:
return im(Derivative(self.args[0], x, evaluate=True))
if x.is_imaginary or self.args[0].is_imaginary:
return -S.ImaginaryUnit \
* re(Derivative(self.args[0], x, evaluate=True))
def _sage_(self):
import sage.all as sage
return sage.imag_part(self.args[0]._sage_())
def _eval_rewrite_as_re(self, arg):
return self.args[0] - re(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
###############################################################################
############### SIGN, ABSOLUTE VALUE, ARGUMENT and CONJUGATION ################
###############################################################################
class sign(Function):
"""
Returns the complex sign of an expression:
    If the expression is real the sign will be:
* 1 if expression is positive
* 0 if expression is equal to zero
* -1 if expression is negative
    If the expression is imaginary the sign will be:
* I if im(expression) is positive
* -I if im(expression) is negative
Otherwise an unevaluated expression will be returned. When evaluated, the
result (in general) will be ``cos(arg(expr)) + I*sin(arg(expr))``.
Examples
========
>>> from sympy.functions import sign
>>> from sympy.core.numbers import I
>>> sign(-1)
-1
>>> sign(0)
0
>>> sign(-3*I)
-I
>>> sign(1 + I)
sign(1 + I)
>>> _.evalf()
0.707106781186548 + 0.707106781186548*I
See Also
========
Abs, conjugate
"""
is_finite = True
is_complex = True
def doit(self):
if self.args[0].is_nonzero:
return self.args[0] / Abs(self.args[0])
return self
@classmethod
def eval(cls, arg):
# handle what we can
if arg.is_Mul:
c, args = arg.as_coeff_mul()
unk = []
s = sign(c)
for a in args:
if a.is_negative:
s = -s
elif a.is_positive:
pass
else:
ai = im(a)
if a.is_imaginary and ai.is_comparable: # i.e. a = I*real
s *= S.ImaginaryUnit
if ai.is_negative:
# can't use sign(ai) here since ai might not be
# a Number
s = -s
else:
unk.append(a)
if c is S.One and len(unk) == len(args):
return None
return s * cls(arg._new_rawargs(*unk))
if arg is S.NaN:
return S.NaN
if arg.is_zero: # it may be an Expr that is zero
return S.Zero
if arg.is_positive:
return S.One
if arg.is_negative:
return S.NegativeOne
if arg.is_Function:
if arg.func is sign:
return arg
if arg.is_imaginary:
if arg.is_Pow and arg.exp is S.Half:
# we catch this because non-trivial sqrt args are not expanded
# e.g. sqrt(1-sqrt(2)) --x--> to I*sqrt(sqrt(2) - 1)
return S.ImaginaryUnit
arg2 = -S.ImaginaryUnit * arg
if arg2.is_positive:
return S.ImaginaryUnit
if arg2.is_negative:
return -S.ImaginaryUnit
def _eval_Abs(self):
if self.args[0].is_nonzero:
return S.One
def _eval_conjugate(self):
return sign(conjugate(self.args[0]))
def _eval_derivative(self, x):
if self.args[0].is_real:
from sympy.functions.special.delta_functions import DiracDelta
return 2 * Derivative(self.args[0], x, evaluate=True) \
* DiracDelta(self.args[0])
elif self.args[0].is_imaginary:
from sympy.functions.special.delta_functions import DiracDelta
return 2 * Derivative(self.args[0], x, evaluate=True) \
* DiracDelta(-S.ImaginaryUnit * self.args[0])
def _eval_is_nonnegative(self):
if self.args[0].is_nonnegative:
return True
def _eval_is_nonpositive(self):
if self.args[0].is_nonpositive:
return True
def _eval_is_imaginary(self):
return self.args[0].is_imaginary
def _eval_is_integer(self):
return self.args[0].is_real
def _eval_is_zero(self):
return self.args[0].is_zero
def _eval_power(self, other):
if (
self.args[0].is_real and
self.args[0].is_nonzero and
other.is_integer and
other.is_even
):
return S.One
def _sage_(self):
import sage.all as sage
return sage.sgn(self.args[0]._sage_())
def _eval_rewrite_as_Piecewise(self, arg):
if arg.is_real:
return Piecewise((1, arg > 0), (-1, arg < 0), (0, True))
def _eval_rewrite_as_Heaviside(self, arg):
if arg.is_real:
return C.Heaviside(arg)*2-1
def _eval_simplify(self, ratio, measure):
return self.func(self.args[0].factor())
class Abs(Function):
"""
Return the absolute value of the argument.
This is an extension of the built-in function abs() to accept symbolic
values. If you pass a SymPy expression to the built-in abs(), it will
pass it automatically to Abs().
Examples
========
>>> from sympy import Abs, Symbol, S
>>> Abs(-1)
1
>>> x = Symbol('x', real=True)
>>> Abs(-x)
Abs(x)
>>> Abs(x**2)
x**2
>>> abs(-x) # The Python built-in
Abs(x)
Note that the Python built-in will return either an Expr or int depending on
the argument::
>>> type(abs(-1))
<... 'int'>
>>> type(abs(S.NegativeOne))
<class 'sympy.core.numbers.One'>
Abs will always return a sympy object.
See Also
========
sign, conjugate
"""
is_real = True
is_negative = False
unbranched = True
def fdiff(self, argindex=1):
"""
Get the first derivative of the argument to Abs().
Examples
========
>>> from sympy.abc import x
>>> from sympy.functions import Abs
>>> Abs(-x).fdiff()
sign(x)
"""
if argindex == 1:
return sign(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy.simplify.simplify import signsimp
if hasattr(arg, '_eval_Abs'):
obj = arg._eval_Abs()
if obj is not None:
return obj
if not isinstance(arg, Expr):
raise TypeError("Bad argument type for Abs(): %s" % type(arg))
# handle what we can
arg = signsimp(arg, evaluate=False)
if arg.is_Mul:
known = []
unk = []
for t in arg.args:
tnew = cls(t)
if tnew.func is cls:
unk.append(tnew.args[0])
else:
known.append(tnew)
known = Mul(*known)
unk = cls(Mul(*unk), evaluate=False) if unk else S.One
return known*unk
if arg is S.NaN:
return S.NaN
if arg.is_Pow:
base, exponent = arg.as_base_exp()
if base.is_real:
if exponent.is_integer:
if exponent.is_even:
return arg
if base is S.NegativeOne:
return S.One
if base.func is cls and exponent is S.NegativeOne:
return arg
return Abs(base)**exponent
if base.is_positive == True:
return base**re(exponent)
return (-base)**re(exponent)*exp(-S.Pi*im(exponent))
if isinstance(arg, exp):
return exp(re(arg.args[0]))
if arg.is_zero: # it may be an Expr that is zero
return S.Zero
if arg.is_nonnegative:
return arg
if arg.is_nonpositive:
return -arg
if arg.is_imaginary:
arg2 = -S.ImaginaryUnit * arg
if arg2.is_nonnegative:
return arg2
if arg.is_Add:
if arg.has(S.Infinity, S.NegativeInfinity):
if any(a.is_infinite for a in arg.as_real_imag()):
return S.Infinity
if arg.is_real is None and arg.is_imaginary is None:
if all(a.is_real or a.is_imaginary or (S.ImaginaryUnit*a).is_real for a in arg.args):
from sympy import expand_mul
return sqrt(expand_mul(arg*arg.conjugate()))
if arg.is_real is False and arg.is_imaginary is False:
from sympy import expand_mul
return sqrt(expand_mul(arg*arg.conjugate()))
def _eval_is_integer(self):
if self.args[0].is_real:
return self.args[0].is_integer
def _eval_is_nonzero(self):
return self._args[0].is_nonzero
def _eval_is_positive(self):
return self.is_nonzero
def _eval_is_rational(self):
if self.args[0].is_real:
return self.args[0].is_rational
def _eval_is_even(self):
if self.args[0].is_real:
return self.args[0].is_even
def _eval_is_odd(self):
if self.args[0].is_real:
return self.args[0].is_odd
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
def _eval_power(self, exponent):
if self.args[0].is_real and exponent.is_integer:
if exponent.is_even:
return self.args[0]**exponent
elif exponent is not S.NegativeOne and exponent.is_Integer:
return self.args[0]**(exponent - 1)*self
return
def _eval_nseries(self, x, n, logx):
direction = self.args[0].leadterm(x)[0]
s = self.args[0]._eval_nseries(x, n=n, logx=logx)
when = Eq(direction, 0)
return Piecewise(
((s.subs(direction, 0)), when),
(sign(direction)*s, True),
)
def _sage_(self):
import sage.all as sage
return sage.abs_symbolic(self.args[0]._sage_())
def _eval_derivative(self, x):
if self.args[0].is_real or self.args[0].is_imaginary:
return Derivative(self.args[0], x, evaluate=True) \
* sign(conjugate(self.args[0]))
return (re(self.args[0]) * Derivative(re(self.args[0]), x,
evaluate=True) + im(self.args[0]) * Derivative(im(self.args[0]),
x, evaluate=True)) / Abs(self.args[0])
def _eval_rewrite_as_Heaviside(self, arg):
# Note this only holds for real arg (since Heaviside is not defined
# for complex arguments).
if arg.is_real:
return arg*(C.Heaviside(arg) - C.Heaviside(-arg))
def _eval_rewrite_as_Piecewise(self, arg):
if arg.is_real:
return Piecewise((arg, arg >= 0), (-arg, True))
def _eval_rewrite_as_sign(self, arg):
return arg/C.sign(arg)
class arg(Function):
"""Returns the argument (in radians) of a complex number"""
is_real = True
is_finite = True
@classmethod
def eval(cls, arg):
if not arg.is_Atom:
c, arg_ = factor_terms(arg).as_coeff_Mul()
if arg_.is_Mul:
arg_ = Mul(*[a if (sign(a) not in (-1, 1)) else
sign(a) for a in arg_.args])
arg_ = sign(c)*arg_
else:
arg_ = arg
x, y = re(arg_), im(arg_)
rv = atan2(y, x)
if rv.is_number and not rv.atoms(AppliedUndef):
return rv
if arg_ != arg:
return cls(arg_, evaluate=False)
def _eval_derivative(self, t):
x, y = re(self.args[0]), im(self.args[0])
return (x * Derivative(y, t, evaluate=True) - y *
Derivative(x, t, evaluate=True)) / (x**2 + y**2)
def _eval_rewrite_as_atan2(self, arg):
x, y = re(self.args[0]), im(self.args[0])
return atan2(y, x)
class conjugate(Function):
"""
Changes the sign of the imaginary part of a complex number.
Examples
========
>>> from sympy import conjugate, I
>>> conjugate(1 + I)
1 - I
See Also
========
sign, Abs
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_conjugate()
if obj is not None:
return obj
def _eval_Abs(self):
return Abs(self.args[0], evaluate=True)
def _eval_adjoint(self):
return transpose(self.args[0])
def _eval_conjugate(self):
return self.args[0]
def _eval_derivative(self, x):
if x.is_real:
return conjugate(Derivative(self.args[0], x, evaluate=True))
elif x.is_imaginary:
return -conjugate(Derivative(self.args[0], x, evaluate=True))
def _eval_transpose(self):
return adjoint(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
class transpose(Function):
"""
Linear map transposition.
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_transpose()
if obj is not None:
return obj
def _eval_adjoint(self):
return conjugate(self.args[0])
def _eval_conjugate(self):
return adjoint(self.args[0])
def _eval_transpose(self):
return self.args[0]
class adjoint(Function):
"""
Conjugate transpose or Hermite conjugation.
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_adjoint()
if obj is not None:
return obj
obj = arg._eval_transpose()
if obj is not None:
return conjugate(obj)
def _eval_adjoint(self):
return self.args[0]
def _eval_conjugate(self):
return transpose(self.args[0])
def _eval_transpose(self):
return conjugate(self.args[0])
def _latex(self, printer, exp=None, *args):
arg = printer._print(self.args[0])
tex = r'%s^{\dag}' % arg
if exp:
tex = r'\left(%s\right)^{%s}' % (tex, printer._print(exp))
return tex
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
pform = printer._print(self.args[0], *args)
if printer._use_unicode:
pform = pform**prettyForm(u('\N{DAGGER}'))
else:
pform = pform**prettyForm('+')
return pform
###############################################################################
############### HANDLING OF POLAR NUMBERS #####################################
###############################################################################
class polar_lift(Function):
"""
Lift argument to the Riemann surface of the logarithm, using the
standard branch.
>>> from sympy import Symbol, polar_lift, I
>>> p = Symbol('p', polar=True)
>>> x = Symbol('x')
>>> polar_lift(4)
4*exp_polar(0)
>>> polar_lift(-4)
4*exp_polar(I*pi)
>>> polar_lift(-I)
exp_polar(-I*pi/2)
>>> polar_lift(I + 2)
polar_lift(2 + I)
>>> polar_lift(4*x)
4*polar_lift(x)
>>> polar_lift(4*p)
4*p
See Also
========
sympy.functions.elementary.exponential.exp_polar
periodic_argument
"""
is_polar = True
is_comparable = False # Cannot be evalf'd.
@classmethod
def eval(cls, arg):
from sympy import exp_polar, pi, I, arg as argument
if arg.is_number:
ar = argument(arg)
# In general we want to affirm that something is known,
# e.g. `not ar.has(argument) and not ar.has(atan)`
# but for now we will just be more restrictive and
# see that it has evaluated to one of the known values.
if ar in (0, pi/2, -pi/2, pi):
return exp_polar(I*ar)*abs(arg)
if arg.is_Mul:
args = arg.args
else:
args = [arg]
included = []
excluded = []
positive = []
for arg in args:
if arg.is_polar:
included += [arg]
elif arg.is_positive:
positive += [arg]
else:
excluded += [arg]
if len(excluded) < len(args):
if excluded:
return Mul(*(included + positive))*polar_lift(Mul(*excluded))
elif included:
return Mul(*(included + positive))
else:
return Mul(*positive)*exp_polar(0)
def _eval_evalf(self, prec):
""" Careful! any evalf of polar numbers is flaky """
return self.args[0]._eval_evalf(prec)
def _eval_Abs(self):
return Abs(self.args[0], evaluate=True)
class periodic_argument(Function):
"""
Represent the argument on a quotient of the Riemann surface of the
logarithm. That is, given a period P, always return a value in
(-P/2, P/2], by using exp(P*I) == 1.
>>> from sympy import exp, exp_polar, periodic_argument, unbranched_argument
>>> from sympy import I, pi
>>> unbranched_argument(exp(5*I*pi))
pi
>>> unbranched_argument(exp_polar(5*I*pi))
5*pi
>>> periodic_argument(exp_polar(5*I*pi), 2*pi)
pi
>>> periodic_argument(exp_polar(5*I*pi), 3*pi)
-pi
>>> periodic_argument(exp_polar(5*I*pi), pi)
0
See Also
========
sympy.functions.elementary.exponential.exp_polar
polar_lift : Lift argument to the Riemann surface of the logarithm
principal_branch
"""
@classmethod
def _getunbranched(cls, ar):
from sympy import exp_polar, log, polar_lift
if ar.is_Mul:
args = ar.args
else:
args = [ar]
unbranched = 0
for a in args:
if not a.is_polar:
unbranched += arg(a)
elif a.func is exp_polar:
unbranched += a.exp.as_real_imag()[1]
elif a.is_Pow:
re, im = a.exp.as_real_imag()
unbranched += re*unbranched_argument(
a.base) + im*log(abs(a.base))
elif a.func is polar_lift:
unbranched += arg(a.args[0])
else:
return None
return unbranched
@classmethod
def eval(cls, ar, period):
# Our strategy is to evaluate the argument on the Riemann surface of the
# logarithm, and then reduce.
# NOTE evidently this means it is a rather bad idea to use this with
# period != 2*pi and non-polar numbers.
from sympy import ceiling, oo, atan2, atan, polar_lift, pi, Mul
if not period.is_positive:
return None
if period == oo and isinstance(ar, principal_branch):
return periodic_argument(*ar.args)
if ar.func is polar_lift and period >= 2*pi:
return periodic_argument(ar.args[0], period)
if ar.is_Mul:
newargs = [x for x in ar.args if not x.is_positive]
if len(newargs) != len(ar.args):
return periodic_argument(Mul(*newargs), period)
unbranched = cls._getunbranched(ar)
if unbranched is None:
return None
if unbranched.has(periodic_argument, atan2, arg, atan):
return None
if period == oo:
return unbranched
if period != oo:
n = ceiling(unbranched/period - S(1)/2)*period
if not n.has(ceiling):
return unbranched - n
def _eval_evalf(self, prec):
from sympy import ceiling, oo
z, period = self.args
if period == oo:
unbranched = periodic_argument._getunbranched(z)
if unbranched is None:
return self
return unbranched._eval_evalf(prec)
ub = periodic_argument(z, oo)._eval_evalf(prec)
return (ub - ceiling(ub/period - S(1)/2)*period)._eval_evalf(prec)
def unbranched_argument(arg):
from sympy import oo
return periodic_argument(arg, oo)
class principal_branch(Function):
"""
Represent a polar number reduced to its principal branch on a quotient
of the Riemann surface of the logarithm.
This is a function of two arguments. The first argument is a polar
    number `z`, and the second one a positive real number or infinity, `p`.
The result is "z mod exp_polar(I*p)".
>>> from sympy import exp_polar, principal_branch, oo, I, pi
>>> from sympy.abc import z
>>> principal_branch(z, oo)
z
>>> principal_branch(exp_polar(2*pi*I)*3, 2*pi)
3*exp_polar(0)
>>> principal_branch(exp_polar(2*pi*I)*3*z, 2*pi)
3*principal_branch(z, 2*pi)
See Also
========
sympy.functions.elementary.exponential.exp_polar
polar_lift : Lift argument to the Riemann surface of the logarithm
periodic_argument
"""
is_polar = True
is_comparable = False # cannot always be evalf'd
@classmethod
def eval(self, x, period):
from sympy import oo, exp_polar, I, Mul, polar_lift, Symbol
if isinstance(x, polar_lift):
return principal_branch(x.args[0], period)
if period == oo:
return x
ub = periodic_argument(x, oo)
barg = periodic_argument(x, period)
if ub != barg and not ub.has(periodic_argument) \
and not barg.has(periodic_argument):
pl = polar_lift(x)
def mr(expr):
if not isinstance(expr, Symbol):
return polar_lift(expr)
return expr
pl = pl.replace(polar_lift, mr)
if not pl.has(polar_lift):
res = exp_polar(I*(barg - ub))*pl
if not res.is_polar and not res.has(exp_polar):
res *= exp_polar(0)
return res
if not x.free_symbols:
c, m = x, ()
else:
c, m = x.as_coeff_mul(*x.free_symbols)
others = []
for y in m:
if y.is_positive:
c *= y
else:
others += [y]
m = tuple(others)
arg = periodic_argument(c, period)
if arg.has(periodic_argument):
return None
if arg.is_number and (unbranched_argument(c) != arg or
(arg == 0 and m != () and c != 1)):
if arg == 0:
return abs(c)*principal_branch(Mul(*m), period)
return principal_branch(exp_polar(I*arg)*Mul(*m), period)*abs(c)
if arg.is_number and ((abs(arg) < period/2) == True or arg == period/2) \
and m == ():
return exp_polar(arg*I)*abs(c)
def _eval_evalf(self, prec):
from sympy import exp, pi, I
z, period = self.args
p = periodic_argument(z, period)._eval_evalf(prec)
if abs(p) > pi or p == -pi:
return self # Cannot evalf for this argument.
return (abs(z)*exp(I*p))._eval_evalf(prec)
# /cyclic/
from sympy.core import basic as _
_.abs_ = Abs
del _
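# Illustrative usage sketch (added; not part of the original module). It
# assumes an installed SymPy provides Symbol and I, and simply exercises the
# classes defined above.
if __name__ == "__main__":
    from sympy import Symbol, I
    x = Symbol('x', real=True)
    print(sign(-3))          # -1
    print(Abs(-x))           # Abs(x)
    print(conjugate(1 + I))  # 1 - I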
|
the-stack_0_15990 | # qubit number=3
# total number=70
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
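# Worked examples for the two helpers above (added for clarity; note that
# bitwise_xor reverses the per-bit result before joining):
#   bitwise_xor("011", "010") -> "100"
#   bitwise_dot("011", "101") -> "1"    # (0*1 + 1*0 + 1*1) % 2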
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.h(input_qubit[2]) # number=59
prog.cz(input_qubit[0],input_qubit[2]) # number=60
prog.h(input_qubit[2]) # number=61
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.h(input_qubit[2]) # number=48
prog.cz(input_qubit[0],input_qubit[2]) # number=49
prog.h(input_qubit[2]) # number=50
prog.h(input_qubit[2]) # number=67
prog.cz(input_qubit[0],input_qubit[2]) # number=68
prog.h(input_qubit[2]) # number=69
prog.x(input_qubit[2]) # number=55
prog.cx(input_qubit[0],input_qubit[2]) # number=56
prog.h(input_qubit[2]) # number=64
prog.cz(input_qubit[0],input_qubit[2]) # number=65
prog.h(input_qubit[2]) # number=66
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.h(input_qubit[2]) # number=51
prog.cz(input_qubit[0],input_qubit[2]) # number=52
prog.h(input_qubit[2]) # number=53
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.rx(2.3310617489636263,input_qubit[2]) # number=58
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.x(input_qubit[1]) # number=62
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.rx(-0.9173450548482197,input_qubit[1]) # number=57
prog.cx(input_qubit[2],input_qubit[1]) # number=63
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_noisy350.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_0_15991 | import torch
from pytorch_lightning import LightningModule
from torch import nn
from inferface.config import NetworkLayerSizes, LossNames, FairFaceColumnKeys
class AgeGenderRaceClassifier(LightningModule):
def __init__(self,
input_size: int = NetworkLayerSizes.INPUT.value,
output_size_age: int = NetworkLayerSizes.AGE_9_OUTPUT.value,
output_size_gender: int = NetworkLayerSizes.GENDER_2_OUTPUT.value,
output_size_race: int = NetworkLayerSizes.RACE_7_OUTPUT.value,
lr: float = 1e-3,
dropout: float = 0.4
):
super().__init__()
self.lr = lr
self.dropout = dropout
self.fc_age = nn.Sequential(nn.Linear(input_size, 256),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(256, 64),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(64, output_size_age),
nn.LogSoftmax(dim=1))
self.fc_gender = nn.Sequential(nn.Linear(input_size, 256),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(256, 64),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(64, output_size_gender),
nn.Sigmoid())
self.fc_race = nn.Sequential(nn.Linear(input_size, 256),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(256, 64),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(64, output_size_race),
nn.LogSoftmax(dim=1))
self.criterion_binary = nn.BCELoss()
self.criterion_multioutput = nn.CrossEntropyLoss()
def forward(self, x):
age = self.fc_age(x)
gender = self.fc_gender(x)
race = self.fc_race(x)
return age, gender, race
def _loop(self, batch, batch_idx, stage):
image_path, embedding, age, gender, race = batch[FairFaceColumnKeys.KEY_FILE.value], \
batch[FairFaceColumnKeys.KEY_EMBEDDING.value], \
batch[FairFaceColumnKeys.KEY_AGE.value], \
batch[FairFaceColumnKeys.KEY_GENDER.value], \
batch[FairFaceColumnKeys.KEY_RACE.value]
age_hat, gender_hat, race_hat = self(embedding)
loss_age = self.criterion_multioutput(age_hat, age)
self.log(f"{stage}_{LossNames.LOSS_AGE.value}", loss_age)
loss_gender = self.criterion_binary(gender_hat, gender)
self.log(f"{stage}_{LossNames.LOSS_GENDER.value}", loss_gender)
loss_race = self.criterion_multioutput(race_hat, race)
self.log(f"{stage}_{LossNames.LOSS_RACE.value}", loss_race)
loss = loss_age + loss_gender + loss_race
self.log(f"{stage}_{LossNames.LOSS_TOTAL.value}", loss)
return loss
def training_step(self, batch, batch_idx):
stage = 'train'
return self._loop(batch, batch_idx, stage)
def validation_step(self, batch, batch_idx):
stage = 'val'
return self._loop(batch, batch_idx, stage)
def test_step(self, batch, batch_idx):
stage = 'test'
return self._loop(batch, batch_idx, stage)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
return optimizer
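# Illustrative smoke test (added; assumes the project's config package is
# importable and that the embedding dimension equals NetworkLayerSizes.INPUT.value).
if __name__ == "__main__":
    model = AgeGenderRaceClassifier()
    dummy_embedding = torch.randn(4, NetworkLayerSizes.INPUT.value)
    age_out, gender_out, race_out = model(dummy_embedding)
    print(age_out.shape, gender_out.shape, race_out.shape)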
|
the-stack_0_15992 | from django.shortcuts import render, redirect
from urllib import request
from http import cookiejar
import urllib
import json
from django.shortcuts import HttpResponse
from urllib import parse
from bs4 import BeautifulSoup
import pymysql.cursors
import pymysql
import numpy as np
from main.fdu_cookie import FduCookie
# import tesserocr
# Create your views here.
def index(req):
login_url = 'https://gsas.fudan.edu.cn/sscjcx/index'
code_url = 'https://gsas.fudan.edu.cn/captcha/imageCode'
user_agent = r'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
r'Chrome/27.0.1453.94 Safari/537.36'
head = {'User-Agent': user_agent, 'Connection': 'keep-alive'}
cookie = cookiejar.CookieJar()
handler = request.HTTPCookieProcessor(cookie)
opener = request.build_opener(handler)
req_crawler = request.Request(url=login_url, headers=head)
req_code = request.Request(url=code_url, headers=head)
response = opener.open(req_crawler)
for item in cookie:
cookie_name = item.name
cookie_value = item.value
response_code = opener.open(req_code)
code = response_code.read()
with open('main/static/'+str(cookie_value)+'.png','wb') as code_img:
code_img.write(code)
response.close()
response_code.close()
return render(req, 'base.html', {'string': str(cookie_value)})
def cookie_homepage(req):
return render(req, 'base_cookie.html')
def test_post(req):
if req.method == 'GET':
return HttpResponse('get')
else:
req_data = json.loads(req.body)
username = req_data['username']
password = req_data['password']
varycode = req_data['varycode']
cookie = req_data['crawcookie']
print('username', username)
# print(type(username))
# print(username, password, varycode, cookie)
# img_file = 'main/static/'+str(cookie)+'.png'
# varycode = tesserocr.file_to_text(img_file)
# varycode = str(varycode).strip().strip(b'\x00'.decode())
# print('varycode', varycode)
user_agent = r'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
r'Chrome/27.0.1453.94 Safari/537.36'
head = {'User-Agent': user_agent, 'Connection': 'keep-alive'}
head['Cookie'] = 'cn_com_southsoft_gms='+str(cookie)
post_url = 'https://gsas.fudan.edu.cn/sscjcx/28198B369067E88DAB9FEFE85484DBF4'
try:
post_data = {}
post_data['nd'] = '2021'
post_data['username'] = username
post_data['password'] = password
post_data['validateCode'] = varycode
# print(post_data)
datepostdata = parse.urlencode(post_data).encode('utf-8')
craw_req = request.Request(url=post_url, data=datepostdata, headers=head)
craw_response = urllib.request.urlopen(craw_req)
html = craw_response.read().decode('utf-8')
soup = BeautifulSoup(html, "lxml")
craw_response.close()
result = soup.select("#errorInfo")
if len(result)<1:
# print('success')
table = soup.table
trs = table.find_all('tr')
total_grade_tr = trs[-1]
name_tr = trs[0]
type_tr = trs[1]
# 总成绩
total_grade = total_grade_tr.find_all('td')[-1].get_text()
# 报考类型
st_type = type_tr.find_all('td')[-1].get_text()
st_type = str(st_type).strip().strip(b'\x00'.decode())
# print(st_type)
# 姓名
st_name = name_tr.find_all('td')[-1].get_text()
st_name = str(st_name).strip().strip(b'\x00'.decode())
student_type = 0
# 专硕
if '085211' in st_type:
student_type = 0
# 学硕
elif '081201' in st_type or '081202' in st_type or '081203' in st_type or '083900' in st_type:
student_type = 1
else:
student_type = 2
rep = {'status': 0, 'st_type': student_type, 'total_grade': total_grade, 'st_name': st_name}
if student_type !=2:
# 插入数据库
connect = pymysql.Connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='',
db='student',
charset='utf8'
)
cursor = connect.cursor()
sql = "SELECT * FROM student WHERE number = %d;"
cursor.execute(sql % int(username))
if cursor.rowcount > 0:
pass
else:
sql = "INSERT INTO student(number, type, grade) VALUES (%s, %d, %d);"
insert_data = (str(username), student_type, int(total_grade))
cursor.execute(sql % insert_data)
connect.commit()
if student_type == 0:
sql = "SELECT grade FROM student WHERE type = 0 ORDER BY grade desc;"
cursor.execute(sql)
grade_list = []
for item in cursor.fetchall():
grade_list.append(int(item[0]))
# print(grade_list)
total_grade = int(total_grade)
index = grade_list.index(total_grade)
total = cursor.rowcount
rep['rank'] = str(index+1) + '/' + str(total)
elif student_type == 1:
sql = "SELECT grade FROM student WHERE type = 1 ORDER BY grade desc;"
cursor.execute(sql)
grade_list = []
for item in cursor.fetchall():
grade_list.append(int(item[0]))
# print(grade_list)
total_grade = int(total_grade)
index = grade_list.index(total_grade)
total = cursor.rowcount
rep['rank'] = str(index+1) + '/' + str(total)
cursor.close()
connect.close()
return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json, charset=utf-8")
else:
result = result[0].get_text()
result = str(result).strip().strip(b'\x00'.decode())
print(result)
rep = {'status': 1, 'data': result}
return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json, charset=utf-8")
except urllib.error.URLError as e:
print(e.reason)
rep = {'status': 1, 'data': 'error'}
return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json, charset=utf-8")
def cookie_crow(req):
if req.method == 'GET':
return HttpResponse('get')
else:
req_data = json.loads(req.body)
cookie = req_data['crawcookie']
# print(cookie)
try:
fducookie = FduCookie(cookie)
suffix = fducookie.get_suffix()
# print('suffix', suffix)
if suffix is not None:
student_info = fducookie.get_score(suffix)
# print('student info', student_info)
username = student_info['uid']
st_name = student_info['st_name']
total_grade = student_info['score']
student_type = student_info['type']
rep = {'status': 0, 'st_type': student_type, 'total_grade': total_grade, 'st_name': st_name}
# print(rep)
if student_type !=2:
                    # insert into the database
connect = pymysql.Connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='',
db='student',
charset='utf8'
)
cursor = connect.cursor()
sql = "SELECT * FROM student WHERE number = %d;"
cursor.execute(sql % int(username))
if cursor.rowcount > 0:
pass
else:
sql = "INSERT INTO student(number, type, grade) VALUES (%s, %d, %d);"
insert_data = (str(username), student_type, int(total_grade))
cursor.execute(sql % insert_data)
connect.commit()
if student_type == 0:
sql = "SELECT grade FROM student WHERE type = 0 ORDER BY grade desc;"
cursor.execute(sql)
grade_list = []
for item in cursor.fetchall():
grade_list.append(int(item[0]))
# print(grade_list)
total_grade = int(total_grade)
index = grade_list.index(total_grade)
total = cursor.rowcount
rep['rank'] = str(index+1) + '/' + str(total)
elif student_type == 1:
sql = "SELECT grade FROM student WHERE type = 1 ORDER BY grade desc;"
cursor.execute(sql)
grade_list = []
for item in cursor.fetchall():
grade_list.append(int(item[0]))
# print(grade_list)
total_grade = int(total_grade)
index = grade_list.index(total_grade)
total = cursor.rowcount
rep['rank'] = str(index+1) + '/' + str(total)
cursor.close()
connect.close()
return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json, charset=utf-8")
else:
rep = {'status': 1, 'data': "cookie is invalid"}
return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json, charset=utf-8")
except urllib.error.URLError as e:
print(e.reason)
rep = {'status': 1, 'data': 'error'}
return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json, charset=utf-8")
def rank(req):
stu_type = int(req.GET.get('type'))
# stu_type = int(req.GET.get('type'))
# print('stu_type', stu_type)
connect = pymysql.Connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='zhangzhao1996',
db='student',
charset='utf8'
)
cursor = connect.cursor()
sql = ''
if stu_type == 0:
sql = "SELECT grade FROM student WHERE type = 0 ORDER BY grade desc;"
elif stu_type == 1:
sql = "SELECT grade FROM student WHERE type = 1 ORDER BY grade desc;"
else:
resp = HttpResponse()
resp.status_code = 404
return resp
cursor.execute(sql)
grade_list = []
for item in cursor.fetchall():
grade_list.append(int(item[0]))
# print(grade_list)
cursor.close()
connect.close()
grade_set = np.unique(grade_list)
rank_list = []
# for grade in grade_set[::-1]:
# rank_list.append({'grade': str(grade), 'rank': str(grade_list.index(grade)+1)})
max_grade = 510
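    # The loop below records, for each distinct grade, the 1-based position of
    # its first occurrence in the descending list (its rank); 510 acts as a
    # sentinel larger than any attainable total score.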
for i, grade in enumerate(grade_list):
if max_grade > grade:
max_grade = grade
rank_list.append({'grade': str(grade), 'rank': str(i+1)})
return HttpResponse(json.dumps(rank_list, ensure_ascii=False), content_type="application/json, charset=utf-8")
def ranking(req):
return render(req, 'rank.html')
def rankinglm(req):
return render(req, 'ranklm.html')
def verify_student(req):
if req.method == 'GET':
return HttpResponse('get')
else:
req_data = json.loads(req.body)
uid = req_data['uid']
# print(uid)
connect = pymysql.Connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='',
db='student',
charset='utf8'
)
cursor = connect.cursor()
sql = "SELECT * FROM student WHERE number = %d;"
cursor.execute(sql % int(uid))
if cursor.rowcount > 0:
item = cursor.fetchone()
# print(item)
type = int(item[-2])
score = int(item[-1])
rep = {'status': 0, 'st_type': type, 'score': score}
# print(rep)
cursor.close()
connect.close()
return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json, charset=utf-8")
else:
rep = {'status': 1}
return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json, charset=utf-8")
def verify_homepage(req):
return render(req, 'verify_student.html')
|
the-stack_0_15995 | #!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DIRNAME = os.path.dirname(__file__)
if django.VERSION[1] < 4:
# If the version is NOT django 4 or greater
# then remove the TZ setting.
settings.configure(DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3', }
},
INSTALLED_APPS=('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'downtime',
'eultheme',))
else:
settings.configure(DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3', }
},
INSTALLED_APPS=('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'downtime',
'eultheme',),
USE_TZ=True)
try:
# Django 1.7 needs this, but other versions dont.
django.setup()
except AttributeError:
pass
try:
from django.test.simple import DjangoTestSuiteRunner
test_runner = DjangoTestSuiteRunner(verbosity=1)
except ImportError:
from django.test.runner import DiscoverRunner
test_runner = DiscoverRunner(verbosity=1)
failures = test_runner.run_tests(['eultheme', ])
if failures:
sys.exit(failures)
|
the-stack_0_15998 | """Utility functions for attitude dynamics."""
import numpy as np
def cross_matrix(vector) -> np.ndarray:
"""The cross-product 'tilde' matrix of a 3x1 vector."""
return np.array(
[
[0, -vector[2], vector[1]],
[vector[2], 0, -vector[0]],
[-vector[1], vector[0], 0]
]
)
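# Quick self-check (illustrative addition): the tilde matrix reproduces the
# cross product, i.e. cross_matrix(a) @ b equals np.cross(a, b).
if __name__ == "__main__":
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([-4.0, 0.5, 2.0])
    assert np.allclose(cross_matrix(a) @ b, np.cross(a, b))
    print(cross_matrix(a))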
|
the-stack_0_16000 | # -*- coding: utf-8 -*-
import os
import json
from django.utils import timezone
from nose.tools import * # noqa: F403
from api.citations.utils import render_citation
from osf_tests.factories import UserFactory, PreprintFactory
from tests.base import OsfTestCase
from osf.models import OSFUser
class Node:
_id = '2nthu'
csl = {'publisher': 'Open Science Framework', 'author': [{'given': u'Henrique', 'family': u'Harman'}],
'URL': 'localhost:5000/2nthu', 'issued': {'date-parts': [[2016, 12, 6]]},
'title': u'The study of chocolate in its many forms', 'type': 'webpage', 'id': u'2nthu'}
visible_contributors = ''
class TestCiteprocpy(OsfTestCase):
def setUp(self):
super(TestCiteprocpy, self).setUp()
self.user = UserFactory(fullname='Henrique Harman')
def test_failing_citations(self):
node = Node()
node.visible_contributors = OSFUser.objects.filter(fullname='Henrique Harman')
url_data_path = os.path.join(os.path.dirname(__file__), '../website/static/citeprocpy_test_data.json')
with open(url_data_path) as url_test_data:
data = json.load(url_test_data)['fails']
matches = []
for k, v in data.items():
try:
citeprocpy = render_citation(node, k)
except (TypeError, AttributeError):
citeprocpy = ''
if citeprocpy == v:
matches.append(k)
assert(len(matches) == 0)
def test_passing_citations(self):
node = Node()
node.visible_contributors = OSFUser.objects.filter(fullname='Henrique Harman')
url_data_path = os.path.join(os.path.dirname(__file__), '../website/static/citeprocpy_test_data.json')
with open(url_data_path) as url_test_data:
data = json.load(url_test_data)['passes']
not_matches = []
citation = []
for k, v in data.items():
try:
citeprocpy = render_citation(node, k)
except (TypeError, AttributeError):
citeprocpy = ''
if citeprocpy != v:
not_matches.append(k)
citation.append(citeprocpy)
assert(len(not_matches) == 0)
class TestCiteprocpyMLA(OsfTestCase):
MLA_DATE_FORMAT = '%-d {month} %Y'
# MLA month abreviations here
# http://www.pomfret.ctschool.net/computer_classes/documents/mla-abbreviationsofmonths.pdf
MLA_MONTH_MAP = {
1: 'Jan.',
2: 'Feb.',
3: 'Mar.',
4: 'Apr.',
5: 'May',
6: 'June',
7: 'July',
8: 'Aug.',
9: 'Sept.',
10: 'Oct.',
11: 'Nov.',
12: 'Dec.',
}
def setUp(self):
super(TestCiteprocpyMLA, self).setUp()
self.user = UserFactory(fullname='John Tordoff')
self.second_contrib = UserFactory(fullname='Carson Wentz')
self.third_contrib = UserFactory(fullname='Nick Foles')
self.preprint = PreprintFactory(creator=self.user, title='My Preprint')
date = timezone.now().date()
self.formated_date = date.strftime(self.MLA_DATE_FORMAT).format(month=self.MLA_MONTH_MAP[date.month])
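        # e.g. for 2021-03-05 this yields "5 Mar. 2021": strftime fills in the
        # day and year, then str.format substitutes the MLA month abbreviation.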
def test_render_citations_mla_one_author(self):
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
# test_suffix
self.user.suffix = 'Junior'
self.user.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John, Junior. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
# test_no_middle_names
self.user.suffix = ''
self.user.middle_names = ''
self.user.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
def test_citation_no_repeated_periods(self):
self.preprint.title = 'A Study of Coffee.'
self.preprint.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John. “{}” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
def test_citation_osf_provider(self):
self.preprint.title = 'A Study of Coffee.'
self.preprint.save()
self.preprint.provider.name = 'Open Science Framework'
self.preprint.provider.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John. “{}” {}, {}. Web.'.format(
self.preprint.title,
'OSF Preprints',
self.formated_date)
)
def test_two_authors(self):
self.preprint.add_contributor(self.second_contrib)
self.preprint.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John, and Carson Wentz. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
def test_three_authors(self):
self.preprint.add_contributor(self.second_contrib)
self.preprint.add_contributor(self.third_contrib)
self.preprint.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John, et al. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
# first name suffix
self.user.suffix = 'Jr.'
self.user.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John, Jr., et al. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
|
the-stack_0_16002 | #!/usr/bin/python3
# Tested with Python 3.8.6
#------------------------------------------------------------------------------
# find_bpl_hotspots.py
#------------------------------------------------------------------------------
# Author: Isabel J. Rodriguez
# 2021.01.23
#------------------------------------------------------------------------------
"""
Scrape data from the Bklyn Reach website and generate a csv file containing
relevant information from participating libraries in the BPL system.
INPUTS
------
NONE
Uses the existing Bklyn Reach url: https://www.bklynlibrary.org/reach/
OUTPUTS
-------
Output file:
"bpl_wifi.csv"
Data included:
LIBRARY
ADDRESS
WI-FI PROGRAM
AVAILABILITY
LIBRARY WEBSITE
"""
# Standard Python library imports
import csv
import sys
import time
# Companion scripts
from write_to_csv import write_to_csv
from exception_handler import exception_handler
from soupify_webpage import parse_html
# Geolocator
from geopy.geocoders import Nominatim
def pull_wifi_data():
# fetch html
bpl_reach_url= 'https://www.bklynlibrary.org/reach/'
webpage_soup = parse_html(bpl_reach_url)
# parse html content
containers = webpage_soup.findAll("div", {"class" : "panel-body"})
# containers[0] has all active participating libraries
# containers[1] libraries listed as having a program 'coming soon'
list_active = containers[0].ul.findAll("li")
return list_active
def geolocate_coordinates(street_address=None):
if street_address is not None:
try:
geolocator = Nominatim(user_agent="bpl_wifi")
location = geolocator.geocode(street_address)
print(location.address)
latitude = str(location.latitude)
longitude = str(location.longitude)
except AttributeError:
latitude = 'NaN'
longitude = 'NaN'
return latitude, longitude
def pull_address_data(url=None):
"""
Libraries with active extended wi-fi programs have their websites listed.
Access websites and pull street address and zip code. If an street address
intersection is given e.g.,
"16 Brighton First Rd. at Brighton Beach Ave."
remove the intersection and return e.g., "16 Brighton First Rd."
"""
if url is not None:
webpage_soup = parse_html(url)
street_container = webpage_soup.findAll("div", {"class":"street-block"})
zip_container = webpage_soup.findAll("div", {"class":"addressfield-container-inline locality-block country-US"})
street_address = street_container[0].div.text
zip_code = zip_container[0].findAll("span", {"class":"postal-code"})[0].text
# clean address data
split_address = street_address.split()
stopwords = ['at', '(near', '(Near', '(at', '(@']
# remove street intersection
for stopword in stopwords:
if stopword in split_address:
street_address = split_address[:split_address.index(stopword)]
street_address = ' '.join(street_address)
else:
pass
# addresses with street numbers spelled out decreases accuracy
# replace with number (e.g., first --> 1st)
# this is done on a case-by-case basis but could be generalized
if 'First' in street_address:
street_address = street_address.replace("First", "1st")
else:
pass
if 'Fourth' in street_address:
street_address = street_address.replace("Fourth", "4th")
# grab geolocation data
latitude, longitude = geolocate_coordinates(street_address=street_address + ', Brooklyn')
return street_address, zip_code, latitude, longitude
def store_data(list_active):
"""
Create a dictionary to store information for Brooklyn Public
Libraries participating in the Bklyn Reach extended wi-fi program.
"""
# Bklyn Reach service details
wifi_range = '300 feet'
wifi_availability = '24/7'
wifi_program = 'Bklyn Reach'
city = 'Brooklyn'
state = 'New York'
# create a storage container for BPL data
bp_libraries = {list_active[i].text: {'STREET ADDRESS' : '',
'CITY' : city,
'STATE' : state,
'ZIP CODE' : '',
'LATITUDE' : '',
'LONGITUDE' : '',
'WI-FI PROGRAM': wifi_program,
'AVAILABILITY': wifi_availability,
'WI-FI RANGE' : wifi_range,
'LIBRARY WEBSITE': '' }
for i in range(len(list_active))}
print("Compiling data...")
for i in range (len(list_active)):
nested_dict = bp_libraries[list_active[i].text]
street_address, zip_code, latitude, longitude = pull_address_data(list_active[i].a["href"])
nested_dict['STREET ADDRESS'] = street_address
nested_dict['ZIP CODE'] = zip_code
nested_dict['LATITUDE'] = latitude
nested_dict['LONGITUDE'] = longitude
nested_dict['LIBRARY WEBSITE'] = list_active[i].a["href"]
return bp_libraries
def write_data_to_csv(bp_libraries,
output_filename=None,
output_folder=None):
"""
Pull data from storage dictionary into a list of lists,
and write to csv.
ARGUMENTS
---------
bp_libraries : dict
output_filename : str
e.g., "bpl_wifi.csv"
output_folder : str
RETURNS
-------
None
"""
output = []
# Order and sort data into output container
for key, val in bp_libraries.items():
output.append([key,
val['STREET ADDRESS'],
val['CITY'],
val['STATE'],
val['ZIP CODE'],
val['LATITUDE'],
val['LONGITUDE'],
val['WI-FI PROGRAM'],
val['AVAILABILITY'],
val['LIBRARY WEBSITE']])
output.sort(key=lambda header: header[0])
print("Compilation complete. Writing out to a csv file.")
write_to_csv(output_filename=output_filename,
output_folder=output_folder,
output=output)
@exception_handler
def main(output_filename=None):
"""
    Run the full pipeline: scrape the Bklyn Reach page, compile the library
    data, and write the processed, sorted data to an output csv file.
ARGUMENTS
---------
output_filename : str
e.g., "wifi.csv"
RETURNS
-------
None
"""
list_active = pull_wifi_data()
bp_libraries = store_data(list_active)
write_data_to_csv(bp_libraries,
output_filename=output_filename,
output_folder=output_folder)
if __name__ == "__main__":
date = time.strftime("%m%d%Y")
output_folder = "../output/"
output_filename = "bpl_wifi_{}.csv".format(date)
main(output_filename)
|
the-stack_0_16003 | """
LC 621
You are given a list of tasks that need to be run, in any order, on a server. Each task will take one CPU interval to execute but once a task has finished, it has a cooling period during which it can’t be run again. If the cooling period for all tasks is ‘K’ intervals, find the minimum number of CPU intervals that the server needs to finish all tasks.
If at any time the server can’t execute any task then it must stay idle.
Example 1:
Input: [a, a, a, b, c, c], K=2
Output: 7
Explanation: a -> c -> b -> a -> c -> idle -> a
Example 2:
Input: [a, b, a], K=3
Output: 5
Explanation: a -> b -> idle -> idle -> a
"""
from collections import deque
from heapq import *
def schedule_tasks(tasks, k):
if len(tasks) <= 1:
return len(tasks)
if k == 0:
return len(tasks)
# count
freqs = {}
for task in tasks:
if task in freqs:
freqs[task] += 1
else:
freqs[task] = 1
freqs = list(freqs.values())
max_f = max(freqs)
n_max = freqs.count(max_f)
# only consider the most frequent tasks
    # if gaps are needed, they happen around one of the most frequent tasks
# the tail has n_max tasks
# the front has max_f - 1 groups, k + 1 values in each
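    # Worked example (added): tasks = [a, a, a, b, c, c], k = 2
    #   max_f = 3 (for 'a'), n_max = 1
    #   front: (k + 1) * (max_f - 1) = 3 * 2 = 6 slots, tail: n_max = 1 slot
    #   max(len(tasks), 7) = max(6, 7) = 7, matching the expected output above.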
return max(len(tasks), n_max + (k + 1) * (max_f - 1))
def main():
print("Minimum intervals needed to execute all tasks: " +
str(schedule_tasks(['a', 'a', 'a', 'b', 'c', 'c'], 2)))
print("Minimum intervals needed to execute all tasks: " +
str(schedule_tasks(['a', 'b', 'a'], 3)))
main()
"""
The number of distinct task labels is O(1).
Time O(N)
Space O(1)
"""
|
the-stack_0_16006 | """Fixtures for pywemo."""
import asyncio
import contextlib
from unittest.mock import create_autospec, patch
import pytest
import pywemo
from homeassistant.components.wemo import CONF_DISCOVERY, CONF_STATIC
from homeassistant.components.wemo.const import DOMAIN
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
MOCK_HOST = "127.0.0.1"
MOCK_PORT = 50000
MOCK_NAME = "WemoDeviceName"
MOCK_SERIAL_NUMBER = "WemoSerialNumber"
MOCK_FIRMWARE_VERSION = "WeMo_WW_2.00.XXXXX.PVT-OWRT"
MOCK_INSIGHT_CURRENT_WATTS = 0.01
MOCK_INSIGHT_TODAY_KWH = 3.33
MOCK_INSIGHT_STATE_THRESHOLD_POWER = 8.0
@pytest.fixture(name="pywemo_model")
def pywemo_model_fixture():
"""Fixture containing a pywemo class name used by pywemo_device_fixture."""
return "LightSwitch"
@pytest.fixture(name="pywemo_registry", autouse=True)
async def async_pywemo_registry_fixture():
"""Fixture for SubscriptionRegistry instances."""
registry = create_autospec(pywemo.SubscriptionRegistry, instance=True)
registry.callbacks = {}
registry.semaphore = asyncio.Semaphore(value=0)
def on_func(device, type_filter, callback):
registry.callbacks[device.name] = callback
registry.semaphore.release()
registry.on.side_effect = on_func
registry.is_subscribed.return_value = False
with patch("pywemo.SubscriptionRegistry", return_value=registry):
yield registry
@pytest.fixture(name="pywemo_discovery_responder", autouse=True)
def pywemo_discovery_responder_fixture():
"""Fixture for the DiscoveryResponder instance."""
with patch("pywemo.ssdp.DiscoveryResponder", autospec=True):
yield
@contextlib.contextmanager
def create_pywemo_device(pywemo_registry, pywemo_model):
"""Create a WeMoDevice instance."""
cls = getattr(pywemo, pywemo_model)
device = create_autospec(cls, instance=True)
device.host = MOCK_HOST
device.port = MOCK_PORT
device.name = MOCK_NAME
device.serialnumber = MOCK_SERIAL_NUMBER
device.model_name = pywemo_model.replace("LongPress", "")
device.udn = f"uuid:{device.model_name}-1_0-{device.serialnumber}"
device.firmware_version = MOCK_FIRMWARE_VERSION
device.get_state.return_value = 0 # Default to Off
device.supports_long_press.return_value = cls.supports_long_press()
if issubclass(cls, pywemo.Insight):
device.standby_state = pywemo.StandbyState.OFF
device.current_power_watts = MOCK_INSIGHT_CURRENT_WATTS
device.today_kwh = MOCK_INSIGHT_TODAY_KWH
device.threshold_power_watts = MOCK_INSIGHT_STATE_THRESHOLD_POWER
device.on_for = 1234
device.today_on_time = 5678
device.total_on_time = 9012
if issubclass(cls, pywemo.Maker):
device.has_sensor = 1
device.sensor_state = 1
device.switch_mode = 1
device.switch_state = 0
url = f"http://{MOCK_HOST}:{MOCK_PORT}/setup.xml"
with patch("pywemo.setup_url_for_address", return_value=url), patch(
"pywemo.discovery.device_from_description", return_value=device
):
yield device
@pytest.fixture(name="pywemo_device")
def pywemo_device_fixture(pywemo_registry, pywemo_model):
"""Fixture for WeMoDevice instances."""
with create_pywemo_device(pywemo_registry, pywemo_model) as pywemo_device:
yield pywemo_device
@pytest.fixture(name="wemo_entity_suffix")
def wemo_entity_suffix_fixture():
"""Fixture to select a specific entity for wemo_entity."""
return ""
async def async_create_wemo_entity(hass, pywemo_device, wemo_entity_suffix):
"""Create a hass entity for a wemo device."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_DISCOVERY: False,
CONF_STATIC: [f"{MOCK_HOST}:{MOCK_PORT}"],
},
},
)
await hass.async_block_till_done()
entity_registry = er.async_get(hass)
for entry in entity_registry.entities.values():
if entry.entity_id.endswith(wemo_entity_suffix or pywemo_device.name.lower()):
return entry
return None
@pytest.fixture(name="wemo_entity")
async def async_wemo_entity_fixture(hass, pywemo_device, wemo_entity_suffix):
"""Fixture for a Wemo entity in hass."""
return await async_create_wemo_entity(hass, pywemo_device, wemo_entity_suffix)
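# Sketch of how a test module might consume these fixtures (kept as comments so
# pytest does not collect it from conftest; the override below is an assumption
# for illustration, not part of this module):
#
#   @pytest.fixture(name="pywemo_model")
#   def pywemo_model_fixture():
#       return "Insight"
#
#   async def test_insight_entity(hass, wemo_entity):
#       assert hass.states.get(wemo_entity.entity_id) is not None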
|
the-stack_0_16012 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .certificate_item import CertificateItem
class DeletedCertificateItem(CertificateItem):
"""The deleted certificate item containing metadata about the deleted
certificate.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Certificate identifier.
:type id: str
:param attributes: The certificate management attributes.
:type attributes: ~azure.keyvault.models.CertificateAttributes
:param tags: Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param x509_thumbprint: Thumbprint of the certificate.
:type x509_thumbprint: bytes
:param recovery_id: The url of the recovery object, used to identify and
recover the deleted certificate.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the certificate is scheduled to
be purged, in UTC
:vartype scheduled_purge_date: datetime
:ivar deleted_date: The time when the certificate was deleted, in UTC
:vartype deleted_date: datetime
"""
_validation = {
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'x509_thumbprint': {'key': 'x5t', 'type': 'base64'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(self, *, id: str=None, attributes=None, tags=None, x509_thumbprint: bytes=None, recovery_id: str=None, **kwargs) -> None:
super(DeletedCertificateItem, self).__init__(id=id, attributes=attributes, tags=tags, x509_thumbprint=x509_thumbprint, **kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
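# Illustrative construction (added; the vault URLs are placeholders, not real
# endpoints):
#   item = DeletedCertificateItem(
#       id='https://myvault.vault.azure.net/certificates/mycert',
#       recovery_id='https://myvault.vault.azure.net/deletedcertificates/mycert',
#   )
#   scheduled_purge_date and deleted_date remain server-populated (read-only).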
|
the-stack_0_16013 | from __init__ import *
import sys
sys.path.insert(0, ROOT)
from fractions import Fraction
from compiler import *
from constructs import *
def interpolate(G, U, l, pipe_data, name):
z = pipe_data['z']
y = pipe_data['y']
x = pipe_data['x']
extent = pipe_data['extent']
interior = pipe_data['interior']
ghosts = pipe_data['ghosts']
inner_box = interior[l]['inner_box']
UU = Function(([z, y, x], [extent[l], extent[l], extent[l]]),
Double, str(name))
zz = z/2
yy = y/2
xx = x/2
def z1(xx):
return G(zz , yy+1, xx) + G(zz , yy , xx)
def z2(xx):
return G(zz+1, yy , xx) + G(zz , yy , xx)
def z3(xx):
return G(zz+1, yy+1, xx) + G(zz+1, yy, xx) \
+ G(zz , yy+1, xx) + G(zz , yy, xx)
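    # The eight expressions below are the usual trilinear prolongation
    # stencils: the parity pattern of (z, y, x) on the fine grid selects either
    # a direct copy of the coarse value (all even) or an average of 2, 4, or 8
    # neighbouring coarse values (one, two, or three odd indices), weighted
    # 1/2, 1/4, and 1/8 respectively.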
expr_000 = G(zz, yy, xx)
expr_001 = 0.500 * (G(zz, yy, xx) + G(zz, yy, xx+1))
expr_010 = 0.500 * z1(xx)
expr_011 = 0.250 * (z1(xx) + z1(xx+1))
expr_100 = 0.500 * z2(xx)
expr_101 = 0.250 * (z2(xx) + z2(xx+1))
expr_110 = 0.250 * z3(xx)
expr_111 = 0.125 * (z3(xx) + z3(xx+1))
even_x = Condition(x%2, '==', 0)
even_y = Condition(y%2, '==', 0)
even_z = Condition(z%2, '==', 0)
if U == None:
correct = 0.0
else:
correct = U(z, y, x)
UU.defn = [ correct + \
Select(even_z,
Select(even_y,
Select(even_x,
expr_000,
expr_001),
Select(even_x,
expr_010,
expr_011)),
Select(even_y,
Select(even_x,
expr_100,
expr_101),
Select(even_x,
expr_110,
expr_111))) ]
return UU
|
the-stack_0_16017 |
import sys
if './' not in sys.path: sys.path.append('./')
from screws.freeze.base import FrozenOnly
from objects.CSCG._3d.forms.trace._2tr.discretize.vector.standard import _3dCSCG_2Trace_Discretize_StandardVector
from objects.CSCG._3d.forms.trace._2tr.discretize.vector.boundary_wise import _3dCSCG_2Trace_Discretize_BoundaryWiseVector
from objects.CSCG._3d.forms.trace._2tr.discretize.scalar.standard import _3dCSCG_2Trace_Discretize_StandardScalar
from objects.CSCG._3d.forms.trace._2tr.discretize.scalar.boundary_wise import _3dCSCG_2Trace_Discretize_BoundaryWiseScalar
class _3dCSCG_2Trace_Discretize(FrozenOnly):
""""""
def __init__(self, tf):
self._tf_ = tf
self._standard_vector_ = _3dCSCG_2Trace_Discretize_StandardVector(tf)
self._boundary_wise_vector_ = _3dCSCG_2Trace_Discretize_BoundaryWiseVector(tf)
self._standard_scalar_ = _3dCSCG_2Trace_Discretize_StandardScalar(tf)
self._boundary_wise_scalar_ = _3dCSCG_2Trace_Discretize_BoundaryWiseScalar(tf)
self._freeze_self_()
def __call__(self, update_cochain=True, target='func', **kwargs):
"""
Do the discretization.
:param bool update_cochain: Whether we update the cochain if the trace form.
:param target:
:param kwargs: Keywords arguments to be passed to particular discretization schemes.
:return: The cochain corresponding to the particular discretization scheme.
"""
SELF = self._tf_
if target == 'func':
if SELF.TW.func.body.__class__.__name__ == '_3dCSCG_ScalarField':
if SELF.func.ftype == 'standard':
return self._standard_scalar_(
update_cochain=update_cochain, **kwargs)
else:
raise Exception(f'3dCSCG 2-trace can not (target func) discretize '
f'_3dCSCG_ScalarField of ftype {SELF.func.ftype}.')
elif SELF.TW.func.body.__class__.__name__ == '_3dCSCG_VectorField':
if SELF.func.ftype == 'standard': # we will discretize the norm component of the vector.
return self._standard_vector_(
update_cochain=update_cochain, **kwargs)
else:
raise Exception(f'3dCSCG 2-trace can not (target func) discretize '
f'_3dCSCG_VectorField of ftype {SELF.func.ftype}.')
else:
raise NotImplementedError(f'3dCSCG 2-trace can not (target func) '
f'discretize {SELF.TW.func.body.__class__}.')
elif target == 'BC': # We target at the BC, so we do not update the cochain!
if SELF.TW.BC.body.__class__.__name__ == '_3dCSCG_ScalarField':
if SELF.BC.ftype == 'standard':
return self._standard_scalar_(
update_cochain=False, target='BC', **kwargs)
elif SELF.BC.ftype == 'boundary-wise':
return self._boundary_wise_scalar_(
**kwargs) # must be False update_cochain and 'BC' target.
else:
raise Exception(f'3dCSCG 2-trace can not (target BC) discretize '
f'_3dCSCG_ScalarField of ftype {SELF.BC.ftype}.')
elif SELF.TW.BC.body.__class__.__name__ == '_3dCSCG_VectorField':
if SELF.BC.ftype == 'standard': # we will discretize the norm flux of the vector.
return self._standard_vector_(
update_cochain=False, target='BC', **kwargs)
elif SELF.BC.ftype == 'boundary-wise': # we will discretize the norm flux of the vector.
return self._boundary_wise_vector_(
**kwargs) # must be False update_cochain and 'BC' target.
else:
raise Exception(f'3dCSCG 2-trace can not (target BC) discretize '
f'_3dCSCG_VectorField of ftype {SELF.BC.ftype}.')
else:
raise NotImplementedError(f'3dCSCG 2-trace can not (target BC) '
f'discretize {SELF.TW.BC.body.__class__}.')
else:
raise NotImplementedError(f"target={target} not implemented "
f"for 3d CSCG 2-trace form discretization.")
if __name__ == '__main__':
# mpiexec -n 5 python _3dCSCG\forms\trace\_2_trace\discretize\main.py
from objects.CSCG._3d.master import MeshGenerator, SpaceInvoker, FormCaller
mesh = MeshGenerator('crazy', c=0.)([2,2,2])
space = SpaceInvoker('polynomials')([('Lobatto',5), ('Lobatto',5), ('Lobatto',5)])
FC = FormCaller(mesh, space) |
the-stack_0_16024 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Siconos is a program dedicated to modeling, simulation and control
# of non smooth dynamical systems.
#
# Copyright 2021 INRIA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# -----------------------------------------------------------------------
#
# DiodeBridge : sample of an electrical circuit involving :
# - a linear dynamical system consisting of an LC oscillator (1 µF , 10 mH)
# - a non smooth system (a 1000 Ohm resistor supplied through a 4
# diodes bridge) in parallel with the oscillator
#
# Expected behavior :
#
# The initial state (Vc = 10 V , IL = 0) of the oscillator provides
# an initial energy.
# The period is 2 Pi sqrt(LC) ~ 0.628 ms.
# The non smooth system is a full wave rectifier :
# each phase (positive and negative) of the oscillation allows current to flow
# through the resistor in a constant direction, resulting in an energy loss :
# the oscillation damps.
#
# State variables :
# - the voltage across the capacitor (or inductor)
# - the current through the inductor
#
# Since there is only one dynamical system, the interaction is defined by :
# - complementarity laws between diodes current and
# voltage. Depending on the diode position in the bridge, y stands
# for the reverse voltage across the diode or for the diode
# current (see figure in the template file)
# - a linear time invariant relation between the state variables and
# y and lambda (derived from Kirchhoff laws)
#
# -----------------------------------------------------------------------
t0 = 0.0
T = 5.0e-3 # Total simulation time
h_step = 1.0e-6 # Time step
Lvalue = 1e-2 # inductance
Cvalue = 1e-6 # capacitance
Rvalue = 1e3 # resistance
Vinit = 10.0 # initial voltage
Modeltitle = "DiodeBridge"
withPlot = True
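# Illustrative aside (our addition, not part of the original Siconos sample):
# the period quoted in the header comment is T = 2*pi*sqrt(L*C), which for the
# values above evaluates to about 0.628 ms.
import math
print("LC oscillator period ~ %.3e s" % (2.0 * math.pi * math.sqrt(Lvalue * Cvalue)))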
if (withPlot):
import matplotlib
matplotlib.use('Agg')
from matplotlib.pyplot import subplot, title, plot, grid, savefig
from siconos.kernel import FirstOrderLinearDS, FirstOrderLinearTIR, \
ComplementarityConditionNSL, Interaction,\
NonSmoothDynamicalSystem, EulerMoreauOSI, TimeDiscretisation, LCP, \
TimeStepping
#
# dynamical system
#
init_state = [Vinit, 0]
A = [[0, -1.0/Cvalue],
[1.0/Lvalue, 0 ]]
LSDiodeBridge = FirstOrderLinearDS(init_state, A)
#
# Interactions
#
C = [[0., 0.],
[0, 0.],
[-1., 0.],
[1., 0.]]
D = [[1./Rvalue, 1./Rvalue, -1., 0.],
[1./Rvalue, 1./Rvalue, 0., -1.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]]
B = [[0., 0., -1./Cvalue, 1./Cvalue],
[0., 0., 0., 0. ]]
LTIRDiodeBridge = FirstOrderLinearTIR(C, B)
LTIRDiodeBridge.setDPtr(D)
nslaw = ComplementarityConditionNSL(4)
InterDiodeBridge = Interaction(nslaw, LTIRDiodeBridge)
#
# Model
#
DiodeBridge = NonSmoothDynamicalSystem(t0, T)
DiodeBridge.setTitle(Modeltitle)
# add the dynamical system in the non smooth dynamical system
DiodeBridge.insertDynamicalSystem(LSDiodeBridge)
# link the interaction and the dynamical system
DiodeBridge.link(InterDiodeBridge, LSDiodeBridge)
#
# Simulation
#
# (1) OneStepIntegrators
theta = 0.5
gamma = 0.5
aOSI = EulerMoreauOSI(theta, gamma)
aOSI.setUseGammaForRelation(True)
# (2) Time discretisation
aTiDisc = TimeDiscretisation(t0, h_step)
# (3) Non smooth problem
aLCP = LCP()
# (4) Simulation setup with (1) (2) (3)
aTS = TimeStepping(DiodeBridge, aTiDisc, aOSI, aLCP)
# end of model definition
#
# computation
#
k = 0
h = aTS.timeStep()
print("Timestep : ", h)
# Number of time steps
N = int((T - t0) / h)
print("Number of steps : ", N)
# Get the values to be plotted
# ->saved in a matrix dataPlot
from numpy import zeros
dataPlot = zeros([N, 10])
x = LSDiodeBridge.x()
print("Initial state : ", x)
y = InterDiodeBridge.y(0)
print("First y : ", y)
lambda_ = InterDiodeBridge.lambda_(0)
# For the initial time step:
# time
dataPlot[k, 0] = t0
# inductor voltage
dataPlot[k, 1] = x[0]
# inductor current
dataPlot[k, 2] = x[1]
# diode R1 current
dataPlot[k, 3] = y[0]
# diode R1 voltage
dataPlot[k, 4] = - lambda_[0]
# diode F2 voltage
dataPlot[k, 5] = - lambda_[1]
# diode F1 current
dataPlot[k, 6] = lambda_[2]
k += 1
while (k < N):
aTS.computeOneStep()
#aLCP.display()
dataPlot[k, 0] = aTS.nextTime()
# inductor voltage
dataPlot[k, 1] = x[0]
# inductor current
dataPlot[k, 2] = x[1]
# diode R1 current
dataPlot[k, 3] = y[0]
# diode R1 voltage
dataPlot[k, 4] = - lambda_[0]
# diode F2 voltage
dataPlot[k, 5] = - lambda_[1]
# diode F1 current
dataPlot[k, 6] = lambda_[2]
k += 1
aTS.nextStep()
# comparison with reference file
from siconos.kernel import SimpleMatrix, getMatrix
from numpy.linalg import norm
ref = getMatrix(SimpleMatrix("DiodeBridge.ref"))
error = norm(dataPlot[:,0:6] - ref[:,0:6])
print("error = " , error)
#assert (error < 1e-09)
withRef = True
if (withPlot):
#
# plots
#
subplot(411)
title('inductor voltage')
plot(dataPlot[0:k - 1, 0], dataPlot[0:k - 1, 1])
if (withRef):
plot(ref[0:k - 1, 0], ref[0:k - 1, 1])
grid()
subplot(412)
title('inductor current')
plot(dataPlot[0:k - 1, 0], dataPlot[0:k - 1, 2])
if (withRef):
plot(ref[0:k - 1, 0], ref[0:k - 1, 2])
grid()
subplot(413)
title('diode R1 (blue) and F2 (green) voltage')
plot(dataPlot[0:k - 1, 0], -dataPlot[0:k - 1, 4])
plot(dataPlot[0:k - 1, 0], dataPlot[0:k - 1, 5])
if (withRef):
plot(ref[0:k - 1, 0], -ref[0:k - 1, 4])
plot(ref[0:k - 1, 0], ref[0:k - 1, 5])
grid()
subplot(414)
title('resistor current')
plot(dataPlot[0:k - 1, 0], dataPlot[0:k - 1, 3] + dataPlot[0:k - 1, 6] )
if (withRef):
plot(dataPlot[0:k - 1, 0], ref[0:k - 1, 3] + ref[0:k - 1, 6] )
grid()
savefig("diode_brige_tgs.png")
|
the-stack_0_16025 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
File: network.py
"""
import argparse
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.framework as framework
from paddle.fluid.executor import Executor
from source.utils.utils import str2bool
from source.utils.utils import id_to_text
from source.utils.utils import init_embedding
from source.utils.utils import build_data_feed
from source.utils.utils import load_id2str_dict
from source.inputters.corpus import KnowledgeCorpus
from source.models.knowledge_seq2seq import knowledge_seq2seq
# parse command-line configuration arguments
def model_config():
""" model config """
parser = argparse.ArgumentParser()
    # Data command-line argument group
data_arg = parser.add_argument_group("Data")
data_arg.add_argument("--data_dir", type=str, default="./data/")
data_arg.add_argument("--data_prefix", type=str, default="demo")
data_arg.add_argument("--save_dir", type=str, default="./models/")
data_arg.add_argument("--vocab_path", type=str, default="./data/vocab.txt")
data_arg.add_argument("--embed_file", type=str,
default="./data/sgns.weibo.300d.txt")
    # Network command-line argument group
net_arg = parser.add_argument_group("Network")
    # word embedding dimension
net_arg.add_argument("--embed_size", type=int, default=300)
net_arg.add_argument("--hidden_size", type=int, default=800)
net_arg.add_argument("--bidirectional", type=str2bool, default=True)
    # vocab_size below is overwritten from the loaded vocab at training time
net_arg.add_argument("--vocab_size", type=int, default=30004)
    # filter parameters used when filtering knowledge triples: a single entity name length must be >= min_len and <= max_len
net_arg.add_argument("--min_len", type=int, default=1)
net_arg.add_argument("--max_len", type=int, default=500)
net_arg.add_argument("--num_layers", type=int, default=1)
net_arg.add_argument("--attn", type=str, default='dot',
choices=['none', 'mlp', 'dot', 'general'])
    # Training / Testing command-line argument group
train_arg = parser.add_argument_group("Training")
    # TODO: run_train.sh splits training into stage0 and stage1
train_arg.add_argument("--stage", type=int, default="0")
train_arg.add_argument("--run_type", type=str, default="train")
train_arg.add_argument("--init_model", type=str, default="")
train_arg.add_argument("--init_opt_state", type=str, default="")
train_arg.add_argument("--optimizer", type=str, default="Adam")
train_arg.add_argument("--lr", type=float, default=0.0005)
train_arg.add_argument("--grad_clip", type=float, default=5.0)
train_arg.add_argument("--dropout", type=float, default=0.3)
train_arg.add_argument("--num_epochs", type=int, default=13)
    # if stage0 trains for x epochs, stage1 starts from epoch x+1 by default; x defaults to 5 when not specified.
train_arg.add_argument("--pretrain_epoch", type=int, default=5)
train_arg.add_argument("--use_bow", type=str2bool, default=True)
train_arg.add_argument("--use_posterior", type=str2bool, default=False)
# Geneation
gen_arg = parser.add_argument_group("Generation")
gen_arg.add_argument("--beam_size", type=int, default=10)
gen_arg.add_argument("--max_dec_len", type=int, default=30)
gen_arg.add_argument("--length_average", type=str2bool, default=True)
gen_arg.add_argument("--output", type=str, default="./output/test.result")
gen_arg.add_argument("--model_path", type=str, default="./models/best_model/")
gen_arg.add_argument("--unk_id", type=int, default=1)
gen_arg.add_argument("--bos_id", type=int, default=2)
gen_arg.add_argument("--eos_id", type=int, default=3)
# MISC
misc_arg = parser.add_argument_group("Misc")
misc_arg.add_argument("--use_gpu", type=str2bool, default=True)
misc_arg.add_argument("--log_steps", type=int, default=300)
misc_arg.add_argument("--valid_steps", type=int, default=1000)
misc_arg.add_argument("--batch_size", type=int, default=1)
config = parser.parse_args()
return config
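# Hypothetical CLI sketch (our addition; the flags follow the argparse definitions
# above, the concrete values are only examples):
#   python network.py --run_type train --stage 0 --use_posterior False
#   python network.py --run_type test --model_path ./models/best_model/ --output ./output/test.result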
def trace_fianl_result(final_score, final_ids, final_index, topk=1, EOS=3):
""" trace fianl result """
col_size = final_score.shape[1]
row_size = final_score.shape[0]
found_eos_num = 0
i = row_size - 1
beam_size = col_size
score = final_score[-1]
row_array = [row_size - 1] * beam_size
col_array = [e for e in range(col_size)]
while i >= 0:
for j in range(col_size - 1, -1, -1):
if final_ids[i, j] == EOS:
                replace_idx = beam_size - (found_eos_num % beam_size) - 1
                score[replace_idx] = final_score[i, j]
                found_eos_num += 1
                row_array[replace_idx] = i
                col_array[replace_idx] = j
i -= 1
topk_index = np.argsort(score,)[-topk:]
trace_result = []
trace_score = []
for index in reversed(topk_index):
start_i = row_array[index]
start_j = col_array[index]
ids = []
for k in range(start_i, -1, -1):
ids.append(final_ids[k, start_j])
start_j = final_index[k, start_j]
ids = ids[::-1]
trace_result.append(ids)
trace_score.append(score[index])
return trace_result, trace_score
def load():
""" load model for predict """
config = model_config()
config.vocab_size = len(open(config.vocab_path).readlines())
final_score, final_ids, final_index = knowledge_seq2seq(config)
final_score.persistable = True
final_ids.persistable = True
final_index.persistable = True
main_program = fluid.default_main_program()
if config.use_gpu:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
exe = Executor(place)
exe.run(framework.default_startup_program())
fluid.io.load_params(executor=exe, dirname=config.model_path, main_program=main_program)
processors = KnowledgeCorpus(
data_dir=config.data_dir,
data_prefix=config.data_prefix,
vocab_path=config.vocab_path,
min_len=config.min_len,
max_len=config.max_len)
# load dict
id_dict_array = load_id2str_dict(config.vocab_path)
model_handle = [exe, place, final_score, final_ids, final_index, processors, id_dict_array]
return model_handle
def predict(model_handle, text):
""" predict for text by model_handle """
batch_size = 1
[exe, place, final_score, final_ids, final_index, processors, id_dict_array] = model_handle
data_generator = processors.preprocessing_for_lines([text], batch_size=batch_size)
results = []
for batch_id, data in enumerate(data_generator()):
data_feed, sent_num = build_data_feed(data, place, batch_size=batch_size)
out = exe.run(feed=data_feed,
fetch_list=[final_score.name, final_ids.name, final_index.name])
batch_score = out[0]
batch_ids = out[1]
batch_pre_index = out[2]
batch_score_arr = np.split(batch_score, batch_size, axis=1)
batch_ids_arr = np.split(batch_ids, batch_size, axis=1)
batch_pre_index_arr = np.split(batch_pre_index, batch_size, axis=1)
index = 0
for (score, ids, pre_index) in zip(batch_score_arr, batch_ids_arr, batch_pre_index_arr):
trace_ids, trace_score = trace_fianl_result(score, ids, pre_index, topk=1, EOS=3)
results.append(id_to_text(trace_ids[0][:-1], id_dict_array))
index += 1
if index >= sent_num:
break
return results[0]
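# Hypothetical usage sketch (our addition; the variable names and the input line
# are made up, only load()/predict() come from this file):
#   model_handle = load()                     # builds the graph and loads trained params
#   reply = predict(model_handle, test_line)  # test_line: one preprocessed sample line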
# for stage0 -- train bow loss: init from random and the embedding file (if there is no embedding file, init the embedding layer randomly)
# for stage1 -- train overall loss: load from the stage0 pretrained model / a previously saved checkpoint
def init_model(config, param_name_list, place):
""" init model """
stage = config.stage
if stage == 0:
for name in param_name_list:
t = fluid.global_scope().find_var(name).get_tensor()
init_scale = 0.05
np_t = np.asarray(t)
if str(name) == 'embedding':
np_para = init_embedding(config.embed_file, config.vocab_path,
init_scale, np_t.shape)
else:
np_para = np.random.uniform(-init_scale, init_scale, np_t.shape).astype('float32')
t.set(np_para.astype('float32'), place)
else:
model_init_file = config.init_model
try:
model_init = np.load(model_init_file)
except:
print("load init model failed", model_init_file)
raise Exception("load init model failed")
print("load init model")
for name in param_name_list:
t = fluid.global_scope().find_var(name).get_tensor()
t.set(model_init[str(name)].astype('float32'), place)
# load opt state
opt_state_init_file = config.init_opt_state
if opt_state_init_file != "":
print("begin to load opt state")
opt_state_data = np.load(opt_state_init_file)
for k, v in opt_state_data.items():
t = fluid.global_scope().find_var(str(k)).get_tensor()
t.set(v, place)
print("set opt state finished")
print("init model parameters finshed")
def train_loop(config,
train_generator, valid_generator,
main_program, inference_program,
model_handle, param_name_list, opt_var_name_list):
""" model train loop """
stage = config.stage
[exe, place, bow_loss, kl_loss, nll_loss, final_loss] = model_handle
    # total number of steps
total_step = 0
start_epoch = 0 if stage == 0 else config.pretrain_epoch
end_epoch = config.pretrain_epoch if stage == 0 else config.num_epochs
print("stage"+str(stage)+"--- start epoch/end epoch: ", start_epoch, end_epoch)
best_score = float('inf')
for epoch_idx in range(start_epoch, end_epoch):
total_bow_loss = 0
total_kl_loss = 0
total_nll_loss = 0
total_final_loss = 0
sample_num = 0
for batch_id, data in enumerate(train_generator()):
data_feed = build_data_feed(data, place,
batch_size=config.batch_size,
is_training=True,
bow_max_len=config.max_len,
pretrain_epoch=epoch_idx < config.pretrain_epoch)
if data_feed is None:
break
out = exe.run(main_program, feed=data_feed,
fetch_list=[bow_loss.name, kl_loss.name, nll_loss.name, final_loss.name])
total_step += 1
total_bow_loss += out[0]
total_kl_loss += out[1]
total_nll_loss += out[2]
total_final_loss += out[3]
sample_num += 1
if batch_id > 0 and batch_id % config.log_steps == 0:
print("epoch %d step %d | "
"bow loss %0.6f kl loss %0.6f nll loss %0.6f total loss %0.6f" % \
(epoch_idx, batch_id,
total_bow_loss / sample_num, total_kl_loss / sample_num, \
total_nll_loss / sample_num, total_final_loss / sample_num))
total_bow_loss = 0
total_kl_loss = 0
total_nll_loss = 0
total_final_loss = 0
sample_num = 0
            # during training, run validation every config.valid_steps batches (steps) and save the best model
if batch_id > 0 and batch_id % config.valid_steps == 0:
eval_bow_loss, eval_kl_loss, eval_nll_loss, eval_total_loss = \
vaild_loop(config, valid_generator, inference_program, model_handle)
# save model
if stage != 0:
param_path = config.save_dir + "/" + str(total_step)
fluid.io.save_params(executor=exe, dirname=param_path,
main_program=main_program)
if eval_nll_loss < best_score:
# save to best
best_model_path = config.save_dir + "/best_model"
print("save to best", eval_nll_loss, best_model_path)
fluid.io.save_params(executor=exe, dirname=best_model_path,
main_program=main_program)
best_score = eval_nll_loss
eval_bow_loss, eval_kl_loss, eval_nll_loss, eval_total_loss = \
vaild_loop(config, valid_generator, inference_program, model_handle)
if stage != 0:
param_path = config.save_dir + "/" + str(total_step)
fluid.io.save_params(executor=exe, dirname=param_path,
main_program=main_program)
if eval_nll_loss < best_score:
best_model_path = config.save_dir + "/best_model"
print("save to best", eval_nll_loss, best_model_path)
fluid.io.save_params(executor=exe, dirname=best_model_path,
main_program=main_program)
best_score = eval_nll_loss
if stage == 0:
# save last model and opt_stat to npz for next stage init
save_model_file = config.save_dir + "/model_stage_0"
save_opt_state_file = config.save_dir + "/opt_state_stage_0"
model_stage_0 = {}
for name in param_name_list:
t = np.asarray(fluid.global_scope().find_var(name).get_tensor())
model_stage_0[name] = t
np.savez(save_model_file, **model_stage_0)
opt_state_stage_0 = {}
for name in opt_var_name_list:
t_data = np.asarray(fluid.global_scope().find_var(name).get_tensor())
opt_state_stage_0[name] = t_data
np.savez(save_opt_state_file, **opt_state_stage_0)
def vaild_loop(config, valid_generator, inference_program, model_handle):
""" model vaild loop """
[exe, place, bow_loss, kl_loss, nll_loss, final_loss] = model_handle
valid_num = 0.0
total_valid_bow_loss = 0.0
total_valid_kl_loss = 0.0
total_valid_nll_loss = 0.0
total_valid_final_loss = 0.0
for batch_id, data in enumerate(valid_generator()):
data_feed = build_data_feed(data, place,
batch_size=config.batch_size,
is_training=True,
bow_max_len=config.max_len,
pretrain_epoch=False)
if data_feed is None:
continue
val_fetch_outs = \
exe.run(inference_program,
feed=data_feed,
fetch_list=[bow_loss.name, kl_loss.name, nll_loss.name, final_loss.name])
total_valid_bow_loss += val_fetch_outs[0] * config.batch_size
total_valid_kl_loss += val_fetch_outs[1] * config.batch_size
total_valid_nll_loss += val_fetch_outs[2] * config.batch_size
total_valid_final_loss += val_fetch_outs[3] * config.batch_size
valid_num += config.batch_size
print("valid dataset: bow loss %0.6f kl loss %0.6f nll loss %0.6f total loss %0.6f" % \
(total_valid_bow_loss / valid_num, total_valid_kl_loss / valid_num, \
total_valid_nll_loss / valid_num, total_valid_final_loss / valid_num))
return [total_valid_bow_loss / valid_num, total_valid_kl_loss / valid_num, \
total_valid_nll_loss / valid_num, total_valid_final_loss / valid_num]
def test(config):
""" test """
batch_size = config.batch_size
config.vocab_size = len(open(config.vocab_path).readlines())
final_score, final_ids, final_index = knowledge_seq2seq(config)
final_score.persistable = True
final_ids.persistable = True
final_index.persistable = True
main_program = fluid.default_main_program()
if config.use_gpu:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
exe = Executor(place)
exe.run(framework.default_startup_program())
fluid.io.load_params(executor=exe, dirname=config.model_path,
main_program=main_program)
print("laod params finsihed")
# test data generator
processors = KnowledgeCorpus(
data_dir=config.data_dir,
data_prefix=config.data_prefix,
vocab_path=config.vocab_path,
min_len=config.min_len,
max_len=config.max_len)
test_generator = processors.data_generator(
batch_size=config.batch_size,
phase="test",
shuffle=False)
# load dict
id_dict_array = load_id2str_dict(config.vocab_path)
out_file = config.output
fout = open(out_file, 'w')
for batch_id, data in enumerate(test_generator()):
data_feed, sent_num = build_data_feed(data, place, batch_size=batch_size)
if data_feed is None:
break
out = exe.run(feed=data_feed,
fetch_list=[final_score.name, final_ids.name, final_index.name])
batch_score = out[0]
batch_ids = out[1]
batch_pre_index = out[2]
batch_score_arr = np.split(batch_score, batch_size, axis=1)
batch_ids_arr = np.split(batch_ids, batch_size, axis=1)
batch_pre_index_arr = np.split(batch_pre_index, batch_size, axis=1)
index = 0
for (score, ids, pre_index) in zip(batch_score_arr, batch_ids_arr, batch_pre_index_arr):
trace_ids, trace_score = trace_fianl_result(score, ids, pre_index, topk=1, EOS=3)
fout.write(id_to_text(trace_ids[0][:-1], id_dict_array))
fout.write('\n')
index += 1
if index >= sent_num:
break
fout.close()
def train(config):
""" model training """
config.vocab_size = len(open(config.vocab_path).readlines())
    # build the network: Bi-GRU utterance encoder + Bi-GRU KG encoder + hierarchical GRU decoder
bow_loss, kl_loss, nll_loss, final_loss= knowledge_seq2seq(config)
    # persistable variables are variables that are not deleted at the end of each iteration
bow_loss.persistable = True
kl_loss.persistable = True
nll_loss.persistable = True
final_loss.persistable = True
    # ops and variables added through the fluid.layers API are stored in the default main program
main_program = fluid.default_main_program()
inference_program = fluid.default_main_program().clone(for_test=True)
    # apply gradient clipping to the specified parameters
fluid.clip.set_gradient_clip(
clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=config.grad_clip))
optimizer = fluid.optimizer.Adam(learning_rate=config.lr)
if config.stage == 0:
print("stage 0")
optimizer.minimize(bow_loss)
else:
print("stage 1")
optimizer.minimize(final_loss)
    # training parameters of the optimizer, such as lr
opt_var_name_list = optimizer.get_opti_var_name_list()
if config.use_gpu:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
exe = Executor(place)
    # initialization: default_startup_program returns the default/global startup program (the initialization program).
exe.run(framework.default_startup_program())
    # block(0) is the outermost block of a piece of code
param_list = main_program.block(0).all_parameters()
param_name_list = [p.name for p in param_list]
    # TODO: init contains
init_model(config, param_name_list, place)
processors = KnowledgeCorpus(
data_dir=config.data_dir,
data_prefix=config.data_prefix,
vocab_path=config.vocab_path,
min_len=config.min_len,
max_len=config.max_len)
    # train_generator is a yield-based generator function
    # it performs the following operations:
    # read the stream record file
    # tokenize the loaded text; filter according to max/min len
    # convert the src/tgt/cue text strings (chatpath+knowledge+":"+history / response / KG cue) into id sequences using the vocabulary
    ## pad the sequences and return the padded sequences together with each sequence's original length
train_generator = processors.train_generator(
batch_size=config.batch_size,
phase="train",
shuffle=True)
valid_generator = processors.data_generator(
batch_size=config.batch_size,
phase="dev",
shuffle=False)
model_handle = [exe, place, bow_loss, kl_loss, nll_loss, final_loss]
    # during training, run validation every config.valid_steps batches (steps) and save the best model
train_loop(config,
train_generator, valid_generator,
main_program, inference_program,
model_handle, param_name_list, opt_var_name_list)
if __name__ == "__main__":
    # parse command-line configuration arguments
config = model_config()
    # mode: train / test
run_type = config.run_type
if run_type == "train":
train(config)
elif run_type == "test":
test(config)
|
the-stack_0_16027 | from dataclasses import dataclass
from output.models.ibm_data.instance_invalid.s3_4_2_4.s3_4_2_4ii06_xsd.s3_4_2_4ii06 import C1
__NAMESPACE__ = "http://xstest-tns/schema11_S3_4_2_4"
@dataclass
class Root(C1):
class Meta:
name = "root"
namespace = "http://xstest-tns/schema11_S3_4_2_4"
|
the-stack_0_16028 | #!/usr/bin/env python
""" project creation and deletion check for v3 """
# We just want to see any exception that happens
# don't want the script to die under any cicumstances
# script must try to clean itself up
# pylint: disable=broad-except
# pylint: disable=invalid-name
# pylint: disable=import-error
import argparse
import time
import logging
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ocutil = OCUtil()
commandDelay = 10 # seconds
def runOCcmd(cmd, base_cmd='oc'):
""" log commands through ocutil """
logger.info(base_cmd + " " + cmd)
oc_time = time.time()
oc_result = ocutil.run_user_cmd(cmd, base_cmd=base_cmd, )
logger.info("oc command took %s seconds", str(time.time() - oc_time))
return oc_result
def parse_args():
""" parse the args from the cli """
logger.debug("parse_args()")
parser = argparse.ArgumentParser(description='OpenShift project creation and deletion test')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--namespace', default="ops-project-operation-check",
help='namespace (be careful of using existing namespaces)')
return parser.parse_args()
def send_metrics(status_code_create, status_code_delete):
""" send data to MetricSender"""
logger.debug("send_metrics()")
ms_time = time.time()
ms = MetricSender()
logger.info("Send data to MetricSender")
    # create metric: 1 means the project exists after creation (success); delete metric: 1 means the project still exists (deletion failed)
ms.add_metric({'openshift.master.project.create': status_code_create})
ms.add_metric({'openshift.master.project.delete': status_code_delete})
ms.send_metrics()
logger.info("Data sent to Zagg in %s seconds", str(time.time() - ms_time))
def check_project(config):
""" check create and delete project """
logger.info('check_project()')
logger.debug(config)
project = None
try:
project = runOCcmd("get project {}".format(config.namespace))
logger.debug(project)
except Exception:
pass # don't want exception if project not found
if project:
project_exist = 1 # project exists
else:
        project_exist = 0 # project does not exist
return project_exist
def create_project(config):
" create the project "
try:
runOCcmd("new-project {}".format(config.namespace), base_cmd='oc adm')
time.sleep(commandDelay)
except Exception:
logger.exception('error creating new project')
def delete_project(config):
" delete the project "
try:
runOCcmd("delete project {}".format(config.namespace), base_cmd='oc')
time.sleep(commandDelay)
except Exception:
logger.exception('error delete project')
def main():
""" check the project operation status """
logger.debug("main()")
args = parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
ocutil.namespace = args.namespace
project_exists = check_project(args)
    # project does not exist.
    delete_project_code = 0
    if project_exists == 0:
        logger.info("project does not exist, going to create it")
create_project(args)
create_project_code = check_project(args)
if create_project_code == 0:
# 0 means project creation failed, no project was created
logger.info('project creation failed')
else:
# project creation succeed, then delete the project
delete_project(args)
delete_project_code = check_project(args)
if delete_project_code == 1:
# 1 means project deletion failed, the project still exists
            # give the deletion a second chance: 10 more seconds to check the
            # project while it is in terminating status
delete_project_code = check_project(args)
if delete_project_code == 1:
logger.info('project deletion failed in 20s')
else:
delete_project_code = 0
else:
        # the project already exists, so treat the project creation as failed
create_project_code = 0
#logger.info("{} {}".format(create_project_code, delete_project_code))
if create_project_code == 1 and delete_project_code == 0:
logger.info('creation and deletion succeed, no data was sent to zagg')
send_metrics(create_project_code, delete_project_code)
if __name__ == "__main__":
main()
|
the-stack_0_16029 | #!/usr/bin/python
import os
import sys
import numpy as np
import biotite
import biotite.structure as struc
import biotite.database.rcsb as rcsb
import biotite.structure.io.pdb as pdb
import biotite.structure.io as strucio
def Nano(angstrom):
'''Convert angstrom to nanometer'''
nano = angstrom / 10
return(nano)
def DiameterA(TheFile):
'''
Find the diameter of a protein's structure accuratly, requires lots
of memory and crashes for big structures. Here we broadcast the array
against itself, calculating all pairwise distances between points.
This is a bad idea, because we have N*(N-1) = (1e6)**2 = 1 trillion
pairs! This will raise a MemoryError for N=1 million, as it requires
half a million gigabytes!!
'''
# Get atom coordinates
atom_array = strucio.load_structure(TheFile)
# Remove all non-amino acids atoms
atom_array = atom_array[struc.filter_amino_acids(atom_array)]
# Coordinates as a NumPy array
coord = atom_array.coord
# Calculate all pairwise difference vectors
diff = coord[:, np.newaxis, :] - coord[np.newaxis, :, :]
# Calculate absolute of difference vectors -> square distances
sq_dist = np.sum(diff*diff, axis=-1)
# Get maximum distance
maxdist = np.max(sq_dist)
# Maximum distance is diameter
diameter = np.sqrt(np.max(sq_dist))
return(round(diameter, 3))
def Diameter(TheFile):
'''
Find the diameter of a protein's structure approximately, requires less
memory thus good for big structures
'''
# Get atom coordinates
atom_array = strucio.load_structure(TheFile)
# Remove all non-amino acids atoms
atom_array = atom_array[struc.filter_amino_acids(atom_array)]
# Coordinates as a NumPy array
coord = atom_array.coord
# Find geometric center
center = np.mean(coord, axis=0)
# Find largest distance from center -> diameter
diameter = 2*np.sqrt(np.sum((coord - center)**2, axis=-1)).max()
return(round(diameter, 3))
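# Illustrative aside (our addition, not part of the original script): the memory
# blow-up described in DiameterA can be avoided by scanning the pairwise distances
# in chunks while keeping only the running maximum. Hypothetical helper:
def _max_pairwise_distance_chunked(coord, chunk=1024):
    '''Exact maximum pairwise distance without building the full N x N matrix'''
    best = 0.0
    for start in range(0, len(coord), chunk):
        block = coord[start:start + chunk]                        # (chunk, 3)
        diff = block[:, np.newaxis, :] - coord[np.newaxis, :, :]  # (chunk, N, 3)
        best = max(best, float(np.sqrt(np.sum(diff * diff, axis=-1)).max()))
    return best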
def main():
directory = sys.argv[1]
filelist = os.listdir(directory)
for File in filelist:
try:
diameter = DiameterA('{}/{}'.format(directory, File))
diameternano = round(Nano(diameter), 3)
print('{} = {} A\t{} nm'.format(File, diameter, diameternano))
except:
diameter = Diameter('{}/{}'.format(directory, File))
diameternano = round(Nano(diameter), 3)
print('{} = {} A\t{} nm'.format(File, diameter, diameternano))
if __name__ == '__main__': main()
|
the-stack_0_16033 | # encoding=utf-8
## SOLVED 2013/12/23
## -59231
# Euler discovered the remarkable quadratic formula:
# n² + n + 41
# It turns out that the formula will produce 40 primes for the consecutive
# values n = 0 to 39. However, when n = 40, 402 + 40 + 41 = 40(40 + 1) + 41 is
# divisible by 41, and certainly when n = 41, 41² + 41 + 41 is clearly divisible
# by 41.
# The incredible formula n² − 79n + 1601 was discovered, which produces 80
# primes for the consecutive values n = 0 to 79. The product of the
# coefficients, −79 and 1601, is −126479.
# Considering quadratics of the form:
# n² + an + b, where |a| < 1000 and |b| < 1000
# where |n| is the modulus/absolute value of n
# e.g. |11| = 11 and |−4| = 4
# Find the product of the coefficients, a and b, for the quadratic expression
# that produces the maximum number of primes for consecutive values of n,
# starting with n = 0.
import helpers.prime as prime
def euler():
longest_sequence = 0
product = 0
for a in range(-1000, 1000):
for b in range(-1000, 1000):
length = sequence_length(a, b)
if length > longest_sequence:
longest_sequence = length
product = a * b
return product
def sequence_length(a, b):
def f():
return n ** 2 + a * n + b
n = 0
while f() > 1 and prime.is_prime(f()):
n += 1
return n
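# Illustrative sanity check (our addition, not part of the original solution):
# Euler's classic polynomial n² + n + 41 corresponds to a = 1, b = 41 and should
# give 40 consecutive primes starting from n = 0.
if __name__ == '__main__':
    assert sequence_length(1, 41) == 40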
|
the-stack_0_16034 | # With the "and" operator, if ONE operand is false the whole expression is false
# with the "or" operator, if ONE operand is true the whole expression is true
print("Welcome to the rollercoaster!")
height = int(input("What is your height in cms? "))
bill = 0
if height >= 120:
print("You can ride the rollercoaster")
age = int(input("What is your age? "))
if age < 12:
bill = 5
print("Child tickets are $5")
elif age <= 18:
bill = 7
print("Youth tickets are $7")
elif age >= 45 and age <= 55:
print("Everything is going to be ok. Have a free ride")
else:
bill = 14
print("Adult tickets are $14")
wants_photo = input("Do you want a photo taken? Y or N.")
if wants_photo == "Y":
bill += 3
print(f"Your final bill is $ {bill}")
else:
print("Sorry, your pp isn't grown up to ride the rollercoaster") |
the-stack_0_16035 | #**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
from dataclasses import dataclass
from pathlib import Path
import math
import bpy
from pxr import Sdf, UsdLux, Tf
from ...utils.image import cache_image_file, cache_image_file_path
from ...utils import BLENDER_DATA_DIR
from ...utils import usd as usd_utils
from ...utils import logging
log = logging.Log('export.world')
OBJ_PRIM_NAME = "World"
LIGHT_PRIM_NAME = "World"
@dataclass(init=False, eq=True)
class ShadingData:
type: str
use_scene_lights: bool = True
use_scene_world: bool = True
has_world: bool = False
studiolight: Path = None
studiolight_rotate_z: float = 0.0
studiolight_background_alpha: float = 0.0
studiolight_intensity: float = 1.0
def __init__(self, context: bpy.types.Context, world: bpy.types.World):
shading = context.area.spaces.active.shading
self.type = shading.type
if self.type == 'RENDERED':
self.use_scene_lights = shading.use_scene_lights_render
self.use_scene_world = shading.use_scene_world_render
else:
self.use_scene_lights = shading.use_scene_lights
self.use_scene_world = shading.use_scene_world
if self.use_scene_world:
self.has_world = bool(world)
else:
if shading.selected_studio_light.path:
self.studiolight = Path(shading.selected_studio_light.path)
else:
self.studiolight = BLENDER_DATA_DIR / "studiolights/world" / shading.studio_light
self.studiolight_rotate_z = shading.studiolight_rotate_z
self.studiolight_background_alpha = shading.studiolight_background_alpha
self.studiolight_intensity = shading.studiolight_intensity
@dataclass(init=False, eq=True, repr=True)
class WorldData:
""" Comparable dataclass which holds all environment settings """
color: tuple = (0.05, 0.05, 0.05)
image: str = None
intensity: float = 1.0
rotation: tuple = (0.0, 0.0, 0.0)
transparency: float = 1.0
@property
def clear_color(self):
color = [c * self.intensity for c in self.color]
color.append(self.transparency)
return tuple(color)
@staticmethod
def init_from_world(world: bpy.types.World):
""" Returns WorldData from bpy.types.World """
data = WorldData()
if not world:
return data
if not world.use_nodes:
data.color = tuple(world.color)
return data
output_node = next((node for node in world.node_tree.nodes
if node.bl_idname == 'ShaderNodeOutputWorld' and node.is_active_output),
None)
if not output_node:
return data
from .nodes import ShaderNodeOutputWorld
node_parser = ShaderNodeOutputWorld(world, output_node)
node_item = node_parser.export()
if not node_item:
return data
node_data = node_item.data
if isinstance(node_data, float):
data.color = (node_data, node_data, node_data)
data.transparency = 1.0
return data
if isinstance(node_data, tuple):
data.color = node_data[:3]
data.transparency = node_data[3]
return data
# node_data is dict here
intensity = node_data.get('intensity', 1.0)
if isinstance(intensity, tuple):
intensity = intensity[0]
data.intensity = intensity
color = node_data.get('color')
if color is None:
image = node_data.get('image')
if image:
data.image = cache_image_file(image)
elif isinstance(color, float):
data.color = (color, color, color)
data.transparency = color
elif isinstance(color, tuple):
data.color = color[:3]
data.transparency = color[3]
else: # dict
image = color.get('image')
if image:
data.image = cache_image_file(image)
rotation = node_data.get('rotation')
if isinstance(rotation, tuple):
data.rotation = rotation[:3]
return data
@staticmethod
def init_from_shading(shading: ShadingData, world):
if shading.use_scene_world:
return WorldData.init_from_world(world)
data = WorldData()
data.intensity = shading.studiolight_intensity
data.rotation = (0.0, 0.0, shading.studiolight_rotate_z)
data.image = cache_image_file_path(shading.studiolight)
return data
@staticmethod
def init_from_stage(stage):
data = WorldData()
light_prim = next((prim for prim in stage.TraverseAll() if
prim.GetTypeName() == 'DomeLight'), None)
if light_prim:
data.color = light_prim.GetAttribute('inputs:color').Get()
data.intensity = light_prim.GetAttribute('inputs:intensity').Get()
data.transparency = light_prim.GetAttribute('inputs:transparency').Get()
return data
def sync(root_prim, world: bpy.types.World, shading: ShadingData = None):
if shading:
data = WorldData.init_from_shading(shading, world)
else:
data = WorldData.init_from_world(world)
stage = root_prim.GetStage()
obj_prim = stage.DefinePrim(root_prim.GetPath().AppendChild(OBJ_PRIM_NAME))
usd_light = UsdLux.DomeLight.Define(stage, obj_prim.GetPath().AppendChild(LIGHT_PRIM_NAME))
light_prim = usd_light.GetPrim()
usd_light.OrientToStageUpAxis()
if data.image:
tex_attr = usd_light.CreateTextureFileAttr()
tex_attr.ClearDefault()
usd_utils.add_delegate_variants(obj_prim, {
'GL': lambda: tex_attr.Set(""),
'RPR': lambda: tex_attr.Set(str(data.image))
})
usd_light.CreateColorAttr(data.color)
usd_light.CreateIntensityAttr(data.intensity)
light_prim.CreateAttribute("inputs:transparency", Sdf.ValueTypeNames.Float).Set(data.transparency)
# set correct Dome light rotation
usd_light.AddRotateXOp().Set(180.0)
usd_light.AddRotateYOp().Set(-90.0 + math.degrees(data.rotation[2]))
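# Hypothetical usage sketch (our addition; not part of the add-on's public API):
#   from pxr import Usd, UsdGeom
#   stage = Usd.Stage.CreateInMemory()
#   root = UsdGeom.Xform.Define(stage, '/root').GetPrim()
#   sync(root, bpy.context.scene.world)  # defines /root/World/World as a UsdLux.DomeLight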
def sync_update(root_prim, world: bpy.types.World, shading: ShadingData = None):
stage = root_prim.GetStage()
usd_light = UsdLux.DomeLight.Define(
stage, root_prim.GetPath().AppendChild(OBJ_PRIM_NAME).AppendChild(LIGHT_PRIM_NAME))
# removing prev settings
usd_light.CreateColorAttr().Clear()
usd_light.CreateIntensityAttr().Clear()
if usd_light.GetTextureFileAttr().Get() is not None:
usd_light.GetTextureFileAttr().Block()
usd_light.ClearXformOpOrder()
sync(root_prim, world, shading)
def get_clear_color(root_prim):
light_prim = root_prim.GetChild(OBJ_PRIM_NAME).GetChild(LIGHT_PRIM_NAME)
color = light_prim.GetAttribute('inputs:color').Get()
intensity = light_prim.GetAttribute('inputs:intensity').Get()
transparency = light_prim.GetAttribute('inputs:transparency').Get()
clear_color = [c * intensity for c in color]
clear_color.append(transparency)
return tuple(clear_color)
|
the-stack_0_16040 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG Website - A Django-powered website for Reaction Mechanism Generator
#
# Copyright (c) 2011 Prof. William H. Green ([email protected]) and the
# RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import os.path
import re
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.forms.models import BaseInlineFormSet, inlineformset_factory
from rmgweb.rmg.models import *
from rmgweb.rmg.forms import *
from rmgpy.molecule.molecule import Molecule
from rmgpy.molecule.group import Group
from rmgpy.thermo import *
from rmgpy.kinetics import *
from rmgpy.data.base import Entry
from rmgpy.data.thermo import ThermoDatabase
from rmgpy.data.kinetics import *
from rmgpy.data.rmg import RMGDatabase
from rmgweb.main.tools import *
from rmgweb.database.views import loadDatabase
################################################################################
def index(request):
"""
The RMG simulation homepage.
"""
return render_to_response('rmg.html', context_instance=RequestContext(request))
def convertChemkin(request):
"""
Allows user to upload chemkin and RMG dictionary files to generate a nice looking html output.
"""
chemkin = Chemkin()
path = ''
chemkin.deleteDir()
if request.method == 'POST':
chemkin.createDir()
form = UploadChemkinForm(request.POST, request.FILES, instance=chemkin)
if form.is_valid():
form.save()
path = 'media/rmg/tools/output.html'
# Generate the output HTML file
chemkin.createOutput()
# Go back to the network's main page
return render_to_response('chemkinUpload.html', {'form': form, 'path':path}, context_instance=RequestContext(request))
# Otherwise create the form
else:
form = UploadChemkinForm(instance=chemkin)
return render_to_response('chemkinUpload.html', {'form': form,'path':path}, context_instance=RequestContext(request))
def convertAdjlists(request):
"""
Allows user to upload a dictionary txt file and convert it back into old style adjacency lists in the form of a txt file.
"""
conversion = AdjlistConversion()
path = ''
conversion.deleteDir()
if request.method == 'POST':
conversion.createDir()
form = UploadDictionaryForm(request.POST, request.FILES, instance=conversion)
if form.is_valid():
form.save()
path = 'media/rmg/tools/adjlistConversion/RMG_Dictionary.txt'
# Generate the output HTML file
conversion.createOutput()
# Go back to the network's main page
return render_to_response('dictionaryUpload.html', {'form': form, 'path':path}, context_instance=RequestContext(request))
# Otherwise create the form
else:
form = UploadDictionaryForm(instance=conversion)
return render_to_response('dictionaryUpload.html', {'form': form,'path':path}, context_instance=RequestContext(request))
def compareModels(request):
"""
Allows user to compare 2 RMG models with their chemkin and species dictionaries and generate
a pretty HTML diff file.
"""
diff = Diff()
path = ''
diff.deleteDir()
if request.method == 'POST':
diff.createDir()
form = ModelCompareForm(request.POST, request.FILES, instance=diff)
if form.is_valid():
form.save()
path = 'media/rmg/tools/compare/diff.html'
# Generate the output HTML file
diff.createOutput()
return render_to_response('modelCompare.html', {'form': form, 'path':path}, context_instance=RequestContext(request))
# Otherwise create the form
else:
form = ModelCompareForm(instance=diff)
return render_to_response('modelCompare.html', {'form': form,'path':path}, context_instance=RequestContext(request))
def mergeModels(request):
"""
Merge 2 RMG models with their chemkin and species dictionaries.
Produces a merged chemkin file and species dictionary.
"""
model = Diff()
path = ''
model.deleteDir()
if request.method == 'POST':
model.createDir()
form = ModelCompareForm(request.POST, request.FILES, instance = model)
if form.is_valid():
form.save()
model.merge()
path = 'media/rmg/tools/compare'
#[os.path.join(model.path,'chem.inp'), os.path.join(model.path,'species_dictionary.txt'), os.path.join(model.path,'merging_log.txt')]
return render_to_response('mergeModels.html', {'form': form, 'path':path}, context_instance=RequestContext(request))
else:
form = ModelCompareForm(instance=model)
return render_to_response('mergeModels.html', {'form': form,'path':path}, context_instance=RequestContext(request))
def generateFlux(request):
"""
    Allows user to upload a set of RMG condition files and/or chemkin species concentration output
to generate a flux diagram video.
"""
from generateFluxDiagram import createFluxDiagram
flux = FluxDiagram()
path = ''
flux.deleteDir()
if request.method == 'POST':
flux.createDir()
form = FluxDiagramForm(request.POST, request.FILES,instance=flux)
if form.is_valid():
form.save()
input = os.path.join(flux.path,'input.py')
chemkin = os.path.join(flux.path,'chem.inp')
dict = os.path.join(flux.path,'species_dictionary.txt')
chemkinOutput = ''
if 'ChemkinOutput' in request.FILES:
chemkinOutput = os.path.join(flux.path,'chemkin_output.out')
java = form.cleaned_data['Java']
settings = {}
settings['maximumNodeCount'] = form.cleaned_data['MaxNodes']
settings['maximumEdgeCount'] = form.cleaned_data['MaxEdges']
settings['timeStep'] = form.cleaned_data['TimeStep']
settings['concentrationTolerance'] = form.cleaned_data['ConcentrationTolerance']
settings['speciesRateTolerance'] = form.cleaned_data['SpeciesRateTolerance']
createFluxDiagram(flux.path, input, chemkin, dict, java, settings, chemkinOutput)
# Look at number of subdirectories to determine where the flux diagram videos are
subdirs = [name for name in os.listdir(flux.path) if os.path.isdir(os.path.join(flux.path, name))]
subdirs.remove('species')
return render_to_response('fluxDiagram.html', {'form': form, 'path':subdirs}, context_instance=RequestContext(request))
else:
form = FluxDiagramForm(instance=flux)
return render_to_response('fluxDiagram.html', {'form': form,'path':path}, context_instance=RequestContext(request))
def runPopulateReactions(request):
"""
Allows user to upload chemkin and RMG dictionary files to generate a nice looking html output.
"""
populateReactions = PopulateReactions()
outputPath = ''
chemkinPath = ''
populateReactions.deleteDir()
if request.method == 'POST':
populateReactions.createDir()
form = PopulateReactionsForm(request.POST, request.FILES, instance=populateReactions)
if form.is_valid():
form.save()
outputPath = 'media/rmg/tools/populateReactions/output.html'
chemkinPath = 'media/rmg/tools/populateReactions/chemkin/chem.inp'
# Generate the output HTML file
populateReactions.createOutput()
# Go back to the network's main page
return render_to_response('populateReactionsUpload.html', {'form': form, 'output': outputPath, 'chemkin': chemkinPath}, context_instance=RequestContext(request))
# Otherwise create the form
else:
form = PopulateReactionsForm(instance=populateReactions)
return render_to_response('populateReactionsUpload.html', {'form': form, 'output': outputPath, 'chemkin': chemkinPath}, context_instance=RequestContext(request))
def input(request):
ThermoLibraryFormset = inlineformset_factory(Input, ThermoLibrary, ThermoLibraryForm,
BaseInlineFormSet, extra=1, can_delete=True)
ReactionLibraryFormset = inlineformset_factory(Input, ReactionLibrary, ReactionLibraryForm,
BaseInlineFormSet, extra=1, can_delete=True)
ReactorSpeciesFormset = inlineformset_factory(Input, ReactorSpecies, ReactorSpeciesForm,
BaseInlineFormSet, extra = 1, can_delete=True)
ReactorFormset = inlineformset_factory(Input, Reactor, ReactorForm,
BaseInlineFormSet, extra = 1, can_delete=True)
Input.objects.all().delete()
input = Input()
input.deleteDir()
uploadform = UploadInputForm(instance=input)
form = InputForm(instance=input)
thermolibformset = ThermoLibraryFormset(instance=input)
reactionlibformset = ReactionLibraryFormset(instance=input)
reactorspecformset = ReactorSpeciesFormset(instance=input)
reactorformset = ReactorFormset(instance=input)
upload_error = ''
input_error = ''
if request.method == 'POST':
input.createDir()
# Load an input file into the form by uploading it
if "upload" in request.POST:
uploadform = UploadInputForm(request.POST, request.FILES, instance=input)
if uploadform.is_valid():
uploadform.save()
initial_thermo_libraries, initial_reaction_libraries, initial_reactor_systems, initial_species, initial = input.loadForm(input.loadpath)
# Make the formsets the lengths of the initial data
if initial_thermo_libraries:
ThermoLibraryFormset = inlineformset_factory(Input, ThermoLibrary, ThermoLibraryForm, BaseInlineFormSet,
extra=len(initial_thermo_libraries), can_delete=True)
if initial_reaction_libraries:
ReactionLibraryFormset = inlineformset_factory(Input, ReactionLibrary, ReactionLibraryForm, BaseInlineFormSet,
extra=len(initial_reaction_libraries), can_delete=True)
ReactorSpeciesFormset = inlineformset_factory(Input, ReactorSpecies, ReactorSpeciesForm, BaseInlineFormSet,
extra=len(initial_species), can_delete=True)
ReactorFormset = inlineformset_factory(Input, Reactor, ReactorForm, BaseInlineFormSet,
extra = len(initial_reactor_systems), can_delete=True)
thermolibformset = ThermoLibraryFormset()
reactionlibformset = ReactionLibraryFormset()
reactorspecformset = ReactorSpeciesFormset()
reactorformset = ReactorFormset()
# Load the initial data into the forms
form = InputForm(initial = initial)
for subform, data in zip(thermolibformset.forms, initial_thermo_libraries):
subform.initial = data
for subform, data in zip(reactionlibformset.forms, initial_reaction_libraries):
subform.initial = data
for subform, data in zip(reactorspecformset.forms, initial_species):
subform.initial = data
for subform, data in zip(reactorformset.forms, initial_reactor_systems):
subform.initial = data
else:
upload_error = 'Your input file was invalid. Please try again.'
if "submit" in request.POST:
uploadform = UploadInputForm(request.POST, instance=input)
form = InputForm(request.POST, instance = input)
thermolibformset = ThermoLibraryFormset(request.POST, instance=input)
reactionlibformset = ReactionLibraryFormset(request.POST, instance=input)
reactorspecformset = ReactorSpeciesFormset(request.POST, instance=input)
reactorformset = ReactorFormset(request.POST, instance=input)
if (form.is_valid() and thermolibformset.is_valid() and reactionlibformset.is_valid()
and reactorspecformset.is_valid() and reactorformset.is_valid()):
form.save()
thermolibformset.save()
reactionlibformset.save()
reactorspecformset.save()
reactorformset.save()
posted = Input.objects.all()[0]
input.saveForm(posted, form)
path = 'media/rmg/tools/input/input.py'
return render_to_response('inputResult.html', {'path': path})
else:
# Will need more useful error messages later.
input_error = 'Your form was invalid. Please edit the form and try again.'
return render_to_response('input.html', {'uploadform': uploadform, 'form': form, 'thermolibformset':thermolibformset,
'reactionlibformset':reactionlibformset, 'reactorspecformset':reactorspecformset,
'reactorformset':reactorformset, 'upload_error': upload_error,
'input_error': input_error}, context_instance=RequestContext(request))
def plotKinetics(request):
"""
Allows user to upload chemkin files to generate a plot of reaction kinetics.
"""
from rmgpy.quantity import Quantity
from rmgweb.database.forms import RateEvaluationForm
if request.method == 'POST':
chemkin = Chemkin()
chemkin.createDir()
form = UploadChemkinForm(request.POST, request.FILES, instance=chemkin)
rateForm = RateEvaluationForm(request.POST)
eval = []
if rateForm.is_valid():
temperature = Quantity(rateForm.cleaned_data['temperature'], str(rateForm.cleaned_data['temperature_units'])).value_si
pressure = Quantity(rateForm.cleaned_data['pressure'], str(rateForm.cleaned_data['pressure_units'])).value_si
eval = [temperature, pressure]
kineticsDataList = chemkin.getKinetics()
if form.is_valid():
form.save()
kineticsDataList = chemkin.getKinetics()
return render_to_response('plotKineticsData.html', {'kineticsDataList': kineticsDataList,
'plotWidth': 500,
'plotHeight': 400 + 15 * len(kineticsDataList),
'form': rateForm,
'eval':eval },
context_instance=RequestContext(request))
# Otherwise create the form
else:
chemkin = Chemkin()
chemkin.deleteDir()
form = UploadChemkinForm(instance=chemkin)
return render_to_response('plotKinetics.html', {'form': form}, context_instance=RequestContext(request))
def javaKineticsLibrary(request):
"""
    Allows user to upload chemkin files to generate an RMG-Java kinetics library.
"""
from rmgpy.quantity import Quantity
eval = False
if request.method == 'POST':
chemkin = Chemkin()
chemkin.createDir()
form = UploadChemkinForm(request.POST, request.FILES, instance=chemkin)
if form.is_valid():
form.save()
chemkin.createJavaKineticsLibrary()
eval = True
return render_to_response('javaKineticsLibrary.html', {'form': form,
'eval': eval },
context_instance=RequestContext(request))
# Otherwise create the form
else:
chemkin = Chemkin()
chemkin.deleteDir()
form = UploadChemkinForm(instance=chemkin)
return render_to_response('javaKineticsLibrary.html', {'form': form}, context_instance=RequestContext(request))
def evaluateNASA(request):
"""
    Creates a webpage form for entering a Chemkin-format NASA polynomial and quickly
    obtaining its enthalpy and Cp values.
"""
from rmgpy.chemkin import readThermoEntry
form = NASAForm()
thermo = None
thermoData = None
if request.method == 'POST':
posted = NASAForm(request.POST, error_class=DivErrorList)
initial = request.POST.copy()
if posted.is_valid():
NASA = posted.cleaned_data['NASA']
if NASA != '':
species, thermo, formula = readThermoEntry(str(NASA))
try:
thermoData = thermo.toThermoData()
except:
# if we cannot convert the thermo to thermo data, we will not be able to display the
# H298, S298, and Cp values, but that's ok.
pass
form = NASAForm(initial, error_class=DivErrorList)
return render_to_response('NASA.html', {'form': form, 'thermo':thermo, 'thermoData':thermoData}, context_instance=RequestContext(request))
|
the-stack_0_16042 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v6.resources.types import language_constant
from google.ads.googleads.v6.services.types import language_constant_service
from .transports.base import (
LanguageConstantServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import LanguageConstantServiceGrpcTransport
class LanguageConstantServiceClientMeta(type):
"""Metaclass for the LanguageConstantService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[LanguageConstantServiceTransport]]
_transport_registry["grpc"] = LanguageConstantServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[LanguageConstantServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class LanguageConstantServiceClient(
metaclass=LanguageConstantServiceClientMeta
):
"""Service to fetch language constants."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
LanguageConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
LanguageConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> LanguageConstantServiceTransport:
"""Return the transport used by the client instance.
Returns:
LanguageConstantServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def language_constant_path(criterion_id: str,) -> str:
"""Return a fully-qualified language_constant string."""
return "languageConstants/{criterion_id}".format(
criterion_id=criterion_id,
)
@staticmethod
def parse_language_constant_path(path: str) -> Dict[str, str]:
"""Parse a language_constant path into its component segments."""
m = re.match(r"^languageConstants/(?P<criterion_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
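    # For example, a sketch of the path helpers above (values are placeholders):
    #   common_location_path("my-project", "us-central1")
    #       -> "projects/my-project/locations/us-central1"
    #   parse_common_location_path("projects/my-project/locations/us-central1")
    #       -> {"project": "my-project", "location": "us-central1"}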
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, LanguageConstantServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the language constant service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.LanguageConstantServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, LanguageConstantServiceTransport):
# transport is a LanguageConstantServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = LanguageConstantServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_language_constant(
self,
request: language_constant_service.GetLanguageConstantRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> language_constant.LanguageConstant:
r"""Returns the requested language constant.
Args:
request (:class:`google.ads.googleads.v6.services.types.GetLanguageConstantRequest`):
The request object. Request message for
[LanguageConstantService.GetLanguageConstant][google.ads.googleads.v6.services.LanguageConstantService.GetLanguageConstant].
resource_name (:class:`str`):
Required. Resource name of the
language constant to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.resources.types.LanguageConstant:
A language.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a language_constant_service.GetLanguageConstantRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, language_constant_service.GetLanguageConstantRequest
):
request = language_constant_service.GetLanguageConstantRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_language_constant
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("LanguageConstantServiceClient",)
|
the-stack_0_16043 | import inspect
import os
import sys
from collections import namedtuple
from functools import lru_cache
from dagster import check, seven
from dagster.core.code_pointer import (
CodePointer,
CustomPointer,
FileCodePointer,
ModuleCodePointer,
get_python_file_from_target,
)
from dagster.core.errors import DagsterInvalidSubsetError, DagsterInvariantViolationError
from dagster.core.origin import PipelinePythonOrigin, RepositoryPythonOrigin, SchedulePythonOrigin
from dagster.core.selector import parse_solid_selection
from dagster.serdes import pack_value, unpack_value, whitelist_for_serdes
from dagster.utils.backcompat import experimental
from .pipeline_base import IPipeline
def get_ephemeral_repository_name(pipeline_name):
check.str_param(pipeline_name, "pipeline_name")
return "__repository__{pipeline_name}".format(pipeline_name=pipeline_name)
@whitelist_for_serdes
class ReconstructableRepository(
namedtuple("_ReconstructableRepository", "pointer container_image")
):
def __new__(
cls,
pointer,
container_image=None,
):
return super(ReconstructableRepository, cls).__new__(
cls,
pointer=check.inst_param(pointer, "pointer", CodePointer),
container_image=check.opt_str_param(container_image, "container_image"),
)
@lru_cache(maxsize=1)
def get_definition(self):
return repository_def_from_pointer(self.pointer)
def get_reconstructable_pipeline(self, name):
return ReconstructablePipeline(self, name)
def get_reconstructable_schedule(self, name):
return ReconstructableSchedule(self, name)
@classmethod
def for_file(cls, file, fn_name, working_directory=None, container_image=None):
if not working_directory:
working_directory = os.getcwd()
return cls(FileCodePointer(file, fn_name, working_directory), container_image)
@classmethod
def for_module(cls, module, fn_name, container_image=None):
return cls(ModuleCodePointer(module, fn_name), container_image)
def get_cli_args(self):
return self.pointer.get_cli_args()
def get_python_origin(self):
return RepositoryPythonOrigin(
executable_path=sys.executable,
code_pointer=self.pointer,
container_image=self.container_image,
)
def get_python_origin_id(self):
return self.get_python_origin().get_id()
@whitelist_for_serdes
class ReconstructablePipeline(
namedtuple(
"_ReconstructablePipeline",
"repository pipeline_name solid_selection_str solids_to_execute",
),
IPipeline,
):
def __new__(
cls,
repository,
pipeline_name,
solid_selection_str=None,
solids_to_execute=None,
):
check.opt_set_param(solids_to_execute, "solids_to_execute", of_type=str)
return super(ReconstructablePipeline, cls).__new__(
cls,
repository=check.inst_param(repository, "repository", ReconstructableRepository),
pipeline_name=check.str_param(pipeline_name, "pipeline_name"),
solid_selection_str=check.opt_str_param(solid_selection_str, "solid_selection_str"),
solids_to_execute=solids_to_execute,
)
@property
def solid_selection(self):
return seven.json.loads(self.solid_selection_str) if self.solid_selection_str else None
@lru_cache(maxsize=1)
def get_definition(self):
from dagster.core.definitions.job import JobDefinition
defn = self.repository.get_definition().get_pipeline(self.pipeline_name)
if isinstance(defn, JobDefinition):
return self.repository.get_definition().get_pipeline(self.pipeline_name)
else:
return (
self.repository.get_definition()
.get_pipeline(self.pipeline_name)
.get_pipeline_subset_def(self.solids_to_execute)
)
def _resolve_solid_selection(self, solid_selection):
# resolve a list of solid selection queries to a frozenset of qualified solid names
# e.g. ['foo_solid+'] to {'foo_solid', 'bar_solid'}
check.list_param(solid_selection, "solid_selection", of_type=str)
solids_to_execute = parse_solid_selection(self.get_definition(), solid_selection)
if len(solids_to_execute) == 0:
raise DagsterInvalidSubsetError(
"No qualified solids to execute found for solid_selection={requested}".format(
requested=solid_selection
)
)
return solids_to_execute
def get_reconstructable_repository(self):
return self.repository
def _subset_for_execution(self, solids_to_execute, solid_selection=None):
if solids_to_execute:
pipe = ReconstructablePipeline(
repository=self.repository,
pipeline_name=self.pipeline_name,
solid_selection_str=seven.json.dumps(solid_selection) if solid_selection else None,
solids_to_execute=frozenset(solids_to_execute),
)
else:
pipe = ReconstructablePipeline(
repository=self.repository,
pipeline_name=self.pipeline_name,
)
return pipe
def subset_for_execution(self, solid_selection):
# take a list of solid queries and resolve the queries to names of solids to execute
check.opt_list_param(solid_selection, "solid_selection", of_type=str)
solids_to_execute = (
self._resolve_solid_selection(solid_selection) if solid_selection else None
)
return self._subset_for_execution(solids_to_execute, solid_selection)
def subset_for_execution_from_existing_pipeline(self, solids_to_execute):
# take a frozenset of resolved solid names from an existing pipeline
# so there's no need to parse the selection
check.opt_set_param(solids_to_execute, "solids_to_execute", of_type=str)
return self._subset_for_execution(solids_to_execute)
def describe(self):
return '"{name}" in repository ({repo})'.format(
repo=self.repository.pointer.describe, name=self.pipeline_name
)
@staticmethod
def for_file(python_file, fn_name):
return bootstrap_standalone_recon_pipeline(
FileCodePointer(python_file, fn_name, os.getcwd())
)
@staticmethod
def for_module(module, fn_name):
return bootstrap_standalone_recon_pipeline(ModuleCodePointer(module, fn_name))
def to_dict(self):
return pack_value(self)
@staticmethod
def from_dict(val):
check.dict_param(val, "val")
inst = unpack_value(val)
check.invariant(
isinstance(inst, ReconstructablePipeline),
"Deserialized object is not instance of ReconstructablePipeline, got {type}".format(
type=type(inst)
),
)
return inst
def get_python_origin(self):
return PipelinePythonOrigin(self.pipeline_name, self.repository.get_python_origin())
def get_python_origin_id(self):
return self.get_python_origin().get_id()
@whitelist_for_serdes
class ReconstructableSchedule(
namedtuple(
"_ReconstructableSchedule",
"repository schedule_name",
)
):
def __new__(
cls,
repository,
schedule_name,
):
return super(ReconstructableSchedule, cls).__new__(
cls,
repository=check.inst_param(repository, "repository", ReconstructableRepository),
schedule_name=check.str_param(schedule_name, "schedule_name"),
)
def get_python_origin(self):
return SchedulePythonOrigin(self.schedule_name, self.repository.get_python_origin())
def get_python_origin_id(self):
return self.get_python_origin().get_id()
@lru_cache(maxsize=1)
def get_definition(self):
return self.repository.get_definition().get_schedule_def(self.schedule_name)
def reconstructable(target):
"""
Create a :py:class:`~dagster.core.definitions.reconstructable.ReconstructablePipeline` from a
function that returns a :py:class:`~dagster.PipelineDefinition`, or a function decorated with
:py:func:`@pipeline <dagster.pipeline>`
When your pipeline must cross process boundaries, e.g., for execution on multiple nodes or
in different systems (like ``dagstermill``), Dagster must know how to reconstruct the pipeline
on the other side of the process boundary.
This function implements a very conservative strategy for reconstructing pipelines, so that
its behavior is easy to predict, but as a consequence it is not able to reconstruct certain
kinds of pipelines, such as those defined by lambdas, in nested scopes (e.g., dynamically
within a method call), or in interactive environments such as the Python REPL or Jupyter
notebooks.
If you need to reconstruct pipelines constructed in these ways, you should use
:py:func:`~dagster.core.definitions.reconstructable.build_reconstructable_pipeline` instead,
which allows you to specify your own strategy for reconstructing a pipeline.
Examples:
.. code-block:: python
from dagster import PipelineDefinition, pipeline, reconstructable
@pipeline
def foo_pipeline():
...
reconstructable_foo_pipeline = reconstructable(foo_pipeline)
def make_bar_pipeline():
return PipelineDefinition(...)
        reconstructable_bar_pipeline = reconstructable(make_bar_pipeline)
"""
from dagster.core.definitions import PipelineDefinition
if not seven.is_function_or_decorator_instance_of(target, PipelineDefinition):
raise DagsterInvariantViolationError(
"Reconstructable target should be a function or definition produced "
"by a decorated function, got {type}.".format(type=type(target)),
)
if seven.is_lambda(target):
raise DagsterInvariantViolationError(
"Reconstructable target can not be a lambda. Use a function or "
"decorated function defined at module scope instead, or use "
"build_reconstructable_pipeline."
)
if seven.qualname_differs(target):
raise DagsterInvariantViolationError(
'Reconstructable target "{target.__name__}" has a different '
'__qualname__ "{target.__qualname__}" indicating it is not '
"defined at module scope. Use a function or decorated function "
"defined at module scope instead, or use build_reconstructable_pipeline.".format(
target=target
)
)
try:
if (
hasattr(target, "__module__")
and hasattr(target, "__name__")
and inspect.getmodule(target).__name__ != "__main__"
):
return ReconstructablePipeline.for_module(target.__module__, target.__name__)
except: # pylint: disable=bare-except
pass
python_file = get_python_file_from_target(target)
if not python_file:
raise DagsterInvariantViolationError(
"reconstructable() can not reconstruct pipelines defined in interactive environments "
"like <stdin>, IPython, or Jupyter notebooks. "
"Use a pipeline defined in a module or file instead, or "
"use build_reconstructable_pipeline."
)
pointer = FileCodePointer(
python_file=python_file, fn_name=target.__name__, working_directory=os.getcwd()
)
return bootstrap_standalone_recon_pipeline(pointer)
@experimental
def build_reconstructable_pipeline(
reconstructor_module_name,
reconstructor_function_name,
reconstructable_args=None,
reconstructable_kwargs=None,
):
"""
Create a :py:class:`dagster.core.definitions.reconstructable.ReconstructablePipeline`.
When your pipeline must cross process boundaries, e.g., for execution on multiple nodes or
in different systems (like ``dagstermill``), Dagster must know how to reconstruct the pipeline
on the other side of the process boundary.
This function allows you to use the strategy of your choice for reconstructing pipelines, so
that you can reconstruct certain kinds of pipelines that are not supported by
:py:func:`~dagster.reconstructable`, such as those defined by lambdas, in nested scopes (e.g.,
dynamically within a method call), or in interactive environments such as the Python REPL or
Jupyter notebooks.
If you need to reconstruct pipelines constructed in these ways, use this function instead of
:py:func:`~dagster.reconstructable`.
Args:
reconstructor_module_name (str): The name of the module containing the function to use to
reconstruct the pipeline.
reconstructor_function_name (str): The name of the function to use to reconstruct the
pipeline.
reconstructable_args (Tuple): Args to the function to use to reconstruct the pipeline.
Values of the tuple must be JSON serializable.
reconstructable_kwargs (Dict[str, Any]): Kwargs to the function to use to reconstruct the
pipeline. Values of the dict must be JSON serializable.
Examples:
.. code-block:: python
# module: mymodule
from dagster import PipelineDefinition, pipeline, build_reconstructable_pipeline
class PipelineFactory:
def make_pipeline(*args, **kwargs):
@pipeline
def _pipeline(...):
...
return _pipeline
def reconstruct_pipeline(*args):
factory = PipelineFactory()
return factory.make_pipeline(*args)
factory = PipelineFactory()
foo_pipeline_args = (...,...)
foo_pipeline_kwargs = {...:...}
foo_pipeline = factory.make_pipeline(*foo_pipeline_args, **foo_pipeline_kwargs)
reconstructable_foo_pipeline = build_reconstructable_pipeline(
'mymodule',
'reconstruct_pipeline',
foo_pipeline_args,
foo_pipeline_kwargs,
)
"""
check.str_param(reconstructor_module_name, "reconstructor_module_name")
check.str_param(reconstructor_function_name, "reconstructor_function_name")
reconstructable_args = list(check.opt_tuple_param(reconstructable_args, "reconstructable_args"))
reconstructable_kwargs = list(
(
[key, value]
for key, value in check.opt_dict_param(
reconstructable_kwargs, "reconstructable_kwargs", key_type=str
).items()
)
)
reconstructor_pointer = ModuleCodePointer(
reconstructor_module_name, reconstructor_function_name
)
pointer = CustomPointer(reconstructor_pointer, reconstructable_args, reconstructable_kwargs)
pipeline_def = pipeline_def_from_pointer(pointer)
return ReconstructablePipeline(
repository=ReconstructableRepository(pointer), # creates ephemeral repo
pipeline_name=pipeline_def.name,
)
def bootstrap_standalone_recon_pipeline(pointer):
    # This actually bootstraps the pipeline for the sole purpose of getting the
    # pipeline name. If we changed ReconstructablePipeline to fetch the pipeline
    # on demand in order to get the name, we could avoid this.
pipeline_def = pipeline_def_from_pointer(pointer)
return ReconstructablePipeline(
repository=ReconstructableRepository(pointer), # creates ephemeral repo
pipeline_name=pipeline_def.name,
)
def _check_is_loadable(definition):
from .pipeline import PipelineDefinition
from .repository import RepositoryDefinition
from .graph import GraphDefinition
if not isinstance(definition, (PipelineDefinition, RepositoryDefinition, GraphDefinition)):
raise DagsterInvariantViolationError(
(
"Loadable attributes must be either a PipelineDefinition, GraphDefinition, or a "
"RepositoryDefinition. Got {definition}."
).format(definition=repr(definition))
)
return definition
def load_def_in_module(module_name, attribute):
return def_from_pointer(CodePointer.from_module(module_name, attribute))
def load_def_in_package(package_name, attribute):
return def_from_pointer(CodePointer.from_python_package(package_name, attribute))
def load_def_in_python_file(python_file, attribute, working_directory):
return def_from_pointer(CodePointer.from_python_file(python_file, attribute, working_directory))
def def_from_pointer(pointer):
target = pointer.load_target()
from .pipeline import PipelineDefinition
from .repository import RepositoryDefinition
from .graph import GraphDefinition
if isinstance(
target, (PipelineDefinition, RepositoryDefinition, GraphDefinition)
) or not callable(target):
return _check_is_loadable(target)
    # if it's a function, invoke it - otherwise we are pointing to an
    # artifact in module scope, likely decorator output
if seven.get_args(target):
raise DagsterInvariantViolationError(
"Error invoking function at {target} with no arguments. "
"Reconstructable target must be callable with no arguments".format(
target=pointer.describe()
)
)
return _check_is_loadable(target())
def pipeline_def_from_pointer(pointer):
from .pipeline import PipelineDefinition
target = def_from_pointer(pointer)
if isinstance(target, PipelineDefinition):
return target
raise DagsterInvariantViolationError(
"CodePointer ({str}) must resolve to a PipelineDefinition. "
"Received a {type}".format(str=pointer.describe(), type=type(target))
)
def repository_def_from_target_def(target):
from .pipeline import PipelineDefinition
from .graph import GraphDefinition
from .repository import CachingRepositoryData, RepositoryDefinition
# special case - we can wrap a single pipeline in a repository
if isinstance(target, (PipelineDefinition, GraphDefinition)):
# consider including pipeline name in generated repo name
return RepositoryDefinition(
name=get_ephemeral_repository_name(target.name),
repository_data=CachingRepositoryData.from_list([target]),
)
elif isinstance(target, RepositoryDefinition):
return target
else:
return None
def repository_def_from_pointer(pointer):
target = def_from_pointer(pointer)
repo_def = repository_def_from_target_def(target)
if not repo_def:
raise DagsterInvariantViolationError(
"CodePointer ({str}) must resolve to a "
"RepositoryDefinition or a PipelineDefinition. "
"Received a {type}".format(str=pointer.describe(), type=type(target))
)
return repo_def
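# A minimal sketch of cross-process reconstruction, assuming a module
# "my_pipelines" that defines a @pipeline-decorated function "my_pipeline"
# (both names are hypothetical):
#
#     recon_repo = ReconstructableRepository.for_module("my_pipelines", "my_pipeline")
#     recon_pipeline = recon_repo.get_reconstructable_pipeline("my_pipeline")
#     pipeline_def = recon_pipeline.get_definition()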
|
the-stack_0_16044 | # -*- coding: utf-8 -*-
""" S3 Pivot Table Reports Method
@copyright: 2011-2015 (c) Sahana Software Foundation
@license: MIT
@requires: U{B{I{Python 2.6}} <http://www.python.org>}
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import re
import sys
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from itertools import product
from gluon import current
from gluon.storage import Storage
from gluon.html import *
from gluon.languages import regex_translate
from gluon.sqlhtml import OptionsWidget
from gluon.validators import IS_IN_SET, IS_EMPTY_OR
from s3query import FS
from s3rest import S3Method
from s3utils import s3_flatlist, s3_has_foreign_key, s3_unicode, S3MarkupStripper, s3_represent_value
from s3xml import S3XMLFormat
from s3validators import IS_NUMBER
layer_pattern = re.compile("([a-zA-Z]+)\((.*)\)\Z")
# Compact JSON encoding
DEFAULT = lambda: None
SEPARATORS = (",", ":")
FACT = re.compile(r"([a-zA-Z]+)\(([a-zA-Z0-9_.$:\,~]+)\),*(.*)\Z")
SELECTOR = re.compile(r"^[a-zA-Z0-9_.$:\~]+\Z")
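# For illustration (selectors are hypothetical): FACT matches expressions such as
# "sum(amount)" or the chained form "sum(amount),count(id)", while SELECTOR
# matches a bare field selector such as "organisation_id$name".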
# =============================================================================
class S3Report(S3Method):
""" RESTful method for pivot table reports """
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Page-render entry point for REST interface.
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
if r.http == "GET":
if r.representation == "geojson":
output = self.geojson(r, **attr)
else:
output = self.report(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
def report(self, r, **attr):
"""
Pivot table report page
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
output = {}
resource = self.resource
get_config = resource.get_config
show_filter_form = False
if r.representation in ("html", "iframe"):
filter_widgets = get_config("filter_widgets", None)
if filter_widgets and not self.hide_filter:
# Apply filter defaults (before rendering the data!)
from s3filter import S3FilterForm
show_filter_form = True
S3FilterForm.apply_filter_defaults(r, resource)
widget_id = "pivottable"
# @todo: make configurable:
maxrows = 20
maxcols = 20
# Extract the relevant GET vars
report_vars = ("rows", "cols", "fact", "totals")
get_vars = dict((k, v) for k, v in r.get_vars.iteritems()
if k in report_vars)
# Fall back to report options defaults
report_options = get_config("report_options", {})
defaults = report_options.get("defaults", {})
        if not any(k in get_vars for k in ("rows", "cols", "fact")):
get_vars = defaults
get_vars["chart"] = r.get_vars.get("chart",
defaults.get("chart", None))
get_vars["table"] = r.get_vars.get("table",
defaults.get("table", None))
# Generate the pivot table
if get_vars:
rows = get_vars.get("rows", None)
cols = get_vars.get("cols", None)
layer = get_vars.get("fact", "id")
try:
facts = S3PivotTableFact.parse(layer)
except SyntaxError:
current.log.error(sys.exc_info()[1])
facts = None
if not facts or not any([rows, cols]):
pivottable = None
else:
prefix = resource.prefix_selector
get_vars["rows"] = prefix(rows) if rows else None
get_vars["cols"] = prefix(cols) if cols else None
get_vars["fact"] = ",".join("%s(%s)" % (fact.method, fact.selector) for fact in facts)
pivottable = S3PivotTable(resource, rows, cols, facts)
else:
pivottable = None
# Render as JSON-serializable dict
if pivottable is not None:
pivotdata = pivottable.json(maxrows=maxrows, maxcols=maxcols)
else:
pivotdata = None
if r.representation in ("html", "iframe"):
tablename = resource.tablename
# Filter widgets
if show_filter_form:
advanced = False
for widget in filter_widgets:
if "hidden" in widget.opts and widget.opts.hidden:
advanced = resource.get_config("report_advanced", True)
break
filter_formstyle = get_config("filter_formstyle", None)
filter_form = S3FilterForm(filter_widgets,
formstyle=filter_formstyle,
advanced=advanced,
submit=False,
_class="filter-form",
_id="%s-filter-form" % widget_id)
fresource = current.s3db.resource(tablename)
alias = resource.alias if r.component else None
filter_widgets = filter_form.fields(fresource,
r.get_vars,
alias=alias)
else:
# Render as empty string to avoid the exception in the view
filter_widgets = None
# Generate the report form
ajax_vars = Storage(r.get_vars)
ajax_vars.update(get_vars)
filter_url = r.url(method="",
representation="",
vars=ajax_vars.fromkeys((k for k in ajax_vars
if k not in report_vars)))
ajaxurl = attr.get("ajaxurl", r.url(method="report",
representation="json",
vars=ajax_vars))
output = S3ReportForm(resource).html(pivotdata,
get_vars = get_vars,
filter_widgets = filter_widgets,
ajaxurl = ajaxurl,
filter_url = filter_url,
widget_id = widget_id)
output["title"] = self.crud_string(tablename, "title_report")
output["report_type"] = "pivottable"
# Detect and store theme-specific inner layout
self._view(r, "pivottable.html")
# View
current.response.view = self._view(r, "report.html")
elif r.representation == "json":
output = json.dumps(pivotdata, separators=SEPARATORS)
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
def geojson(self, r, **attr):
"""
Render the pivot table data as a dict ready to be exported as
GeoJSON for display on a Map.
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
resource = self.resource
response = current.response
s3 = response.s3
# Set response headers
response.headers["Content-Type"] = s3.content_type.get("geojson",
"application/json")
if not resource.count():
# No Data
return json.dumps({})
# Extract the relevant GET vars
get_vars = r.get_vars
layer_id = r.get_vars.get("layer", None)
level = get_vars.get("level", "L0")
# Fall back to report options defaults
get_config = resource.get_config
report_options = get_config("report_options", {})
defaults = report_options.get("defaults", {})
# The rows dimension
context = get_config("context")
if context and "location" in context:
# @ToDo: We can add sanity-checking using resource.parse_bbox_query() as a guide if-desired
rows = "(location)$%s" % level
else:
# Fallback to location_id
rows = "location_id$%s" % level
# Fallback we can add if-required
#rows = "site_id$location_id$%s" % level
# Filter out null values
resource.add_filter(FS(rows) != None)
# Set XSLT stylesheet
stylesheet = os.path.join(r.folder, r.XSLT_PATH, "geojson", "export.xsl")
# Do we have any data at this level of aggregation?
fallback_to_points = True # @ToDo: deployment_setting?
output = None
if fallback_to_points:
if resource.count() == 0:
# Show Points
resource.clear_query()
# Apply URL filters (especially BBOX)
resource.build_query(filter=s3.filter, vars=get_vars)
# Extract the Location Data
xmlformat = S3XMLFormat(stylesheet)
include, exclude = xmlformat.get_fields(resource.tablename)
resource.load(fields=include,
skip=exclude,
start=0,
limit=None,
orderby=None,
virtual=False,
cacheable=True)
gis = current.gis
attr_fields = []
style = gis.get_style(layer_id=layer_id,
aggregate=False)
popup_format = style.popup_format
if popup_format:
if "T(" in popup_format:
# i18n
T = current.T
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
style.popup_format = popup_format
# Extract the attr_fields
parts = popup_format.split("{")
# Skip the first part
parts = parts[1:]
for part in parts:
attribute = part.split("}")[0]
attr_fields.append(attribute)
attr_fields = ",".join(attr_fields)
location_data = gis.get_location_data(resource,
attr_fields=attr_fields)
# Export as GeoJSON
current.xml.show_ids = True
output = resource.export_xml(fields=include,
mcomponents=None,
references=[],
stylesheet=stylesheet,
as_json=True,
location_data=location_data,
map_data=dict(style=style),
)
# Transformation error?
if not output:
r.error(400, "XSLT Transformation Error: %s " % current.xml.error)
else:
while resource.count() == 0:
# Try a lower level of aggregation
level = int(level[1:])
if level == 0:
# Nothing we can display
return json.dumps({})
resource.clear_query()
# Apply URL filters (especially BBOX)
resource.build_query(filter=s3.filter, vars=get_vars)
level = "L%s" % (level - 1)
if context and "location" in context:
# @ToDo: We can add sanity-checking using resource.parse_bbox_query() as a guide if-desired
rows = "(location)$%s" % level
else:
# Fallback to location_id
rows = "location_id$%s" % level
# Fallback we can add if-required
#rows = "site_id$location_id$%s" % level
resource.add_filter(FS(rows) != None)
if not output:
# Build the Pivot Table
cols = None
layer = get_vars.get("fact", defaults.get("fact", "count(id)"))
facts = S3PivotTableFact.parse(layer)[:1]
pivottable = S3PivotTable(resource, rows, cols, facts)
# Extract the Location Data
#attr_fields = []
style = current.gis.get_style(layer_id=layer_id,
aggregate=True)
popup_format = style.popup_format
if popup_format:
if"T(" in popup_format:
# i18n
T = current.T
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
style.popup_format = popup_format
# Extract the attr_fields
# No need as defaulted inside S3PivotTable.geojson()
#parts = popup_format.split("{")
## Skip the first part
#parts = parts[1:]
#for part in parts:
# attribute = part.split("}")[0]
# attr_fields.append(attribute)
#attr_fields = ",".join(attr_fields)
ids, location_data = pivottable.geojson(fact=facts[0], level=level)
# Export as GeoJSON
current.xml.show_ids = True
gresource = current.s3db.resource("gis_location", id=ids)
output = gresource.export_xml(fields=[],
mcomponents=None,
references=[],
stylesheet=stylesheet,
as_json=True,
location_data=location_data,
# Tell the client that we are
# displaying aggregated data and
# the level it is aggregated at
map_data=dict(level=int(level[1:]),
style=style),
)
# Transformation error?
if not output:
r.error(400, "XSLT Transformation Error: %s " % current.xml.error)
return output
# -------------------------------------------------------------------------
def widget(self, r, method=None, widget_id=None, visible=True, **attr):
"""
Pivot table report widget
@param r: the S3Request
@param method: the widget method
@param widget_id: the widget ID
@param visible: whether the widget is initially visible
@param attr: controller attributes
"""
output = {}
resource = self.resource
get_config = resource.get_config
# @todo: make configurable:
maxrows = 20
maxcols = 20
# Extract the relevant GET vars
report_vars = ("rows", "cols", "fact", "totals")
get_vars = dict((k, v) for k, v in r.get_vars.iteritems()
if k in report_vars)
# Fall back to report options defaults
report_options = get_config("report_options", {})
defaults = report_options.get("defaults", {})
        if not any(k in get_vars for k in ("rows", "cols", "fact")):
get_vars = defaults
get_vars["chart"] = r.get_vars.get("chart",
defaults.get("chart", None))
get_vars["table"] = r.get_vars.get("table",
defaults.get("table", None))
# Generate the pivot table
if get_vars:
rows = get_vars.get("rows", None)
cols = get_vars.get("cols", None)
layer = get_vars.get("fact", "id")
try:
facts = S3PivotTableFact.parse(layer)
except SyntaxError:
current.log.error(sys.exc_info()[1])
facts = None
if not facts or not any([rows, cols]):
pivottable = None
else:
prefix = resource.prefix_selector
get_vars["rows"] = prefix(rows) if rows else None
get_vars["cols"] = prefix(cols) if cols else None
get_vars["fact"] = ",".join("%s(%s)" % (fact.method, fact.selector) for fact in facts)
if visible:
pivottable = S3PivotTable(resource, rows, cols, facts)
else:
pivottable = None
else:
pivottable = None
# Render as JSON-serializable dict
if pivottable is not None:
pivotdata = pivottable.json(maxrows=maxrows, maxcols=maxcols)
else:
pivotdata = None
if r.representation in ("html", "iframe"):
# Generate the report form
ajax_vars = Storage(r.get_vars)
ajax_vars.update(get_vars)
filter_form = attr.get("filter_form", None)
filter_tab = attr.get("filter_tab", None)
filter_url = r.url(method="",
representation="",
vars=ajax_vars.fromkeys((k for k in ajax_vars
if k not in report_vars)),
)
ajaxurl = attr.get("ajaxurl", r.url(method="report",
representation="json",
vars=ajax_vars))
output = S3ReportForm(resource).html(pivotdata,
get_vars = get_vars,
filter_widgets = None,
ajaxurl = ajaxurl,
filter_url = filter_url,
filter_form = filter_form,
filter_tab = filter_tab,
widget_id = widget_id)
# Detect and store theme-specific inner layout
view = self._view(r, "pivottable.html")
# Render inner layout (outer page layout is set by S3Summary)
output["title"] = None
output = XML(current.response.render(view, output))
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# =============================================================================
class S3ReportForm(object):
""" Helper class to render a report form """
def __init__(self, resource):
self.resource = resource
# -------------------------------------------------------------------------
def html(self,
pivotdata,
filter_widgets=None,
get_vars=None,
ajaxurl=None,
filter_url=None,
filter_form=None,
filter_tab=None,
widget_id=None):
"""
Render the form for the report
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element base ID for the widgets
"""
T = current.T
appname = current.request.application
# Report options
report_options = self.report_options(get_vars = get_vars,
widget_id = widget_id)
# Pivot data
hidden = {"pivotdata": json.dumps(pivotdata, separators=SEPARATORS)}
empty = T("No report specified.")
hide = T("Hide Table")
show = T("Show Table")
throbber = "/%s/static/img/indicator.gif" % appname
# Filter options
if filter_widgets is not None:
filter_options = self._fieldset(T("Filter Options"),
filter_widgets,
_id="%s-filters" % widget_id,
_class="filter-form")
else:
filter_options = ""
# Report form submit element
resource = self.resource
submit = resource.get_config("report_submit", True)
if submit:
_class = "pt-submit"
if submit is True:
label = T("Update Report")
elif isinstance(submit, (list, tuple)):
label = submit[0]
_class = "%s %s" % (submit[1], _class)
else:
label = submit
submit = TAG[""](
INPUT(_type="button",
_value=label,
_class=_class))
else:
submit = ""
# Form
form = FORM(filter_options,
report_options,
submit,
hidden = hidden,
_class = "pt-form",
_id = "%s-pt-form" % widget_id,
)
# View variables
output = {"form": form,
"throbber": throbber,
"hide": hide,
"show": show,
"empty": empty,
"widget_id": widget_id,
}
# Script options
settings = current.deployment_settings
opts = {
#"renderFilter": True,
#"collapseFilter": False,
#"renderOptions": True,
"collapseOptions": settings.get_ui_hide_report_options(),
"renderTable": True,
"collapseTable": False,
"showTotals": self.show_totals,
"ajaxURL": ajaxurl,
"renderChart": True,
"collapseChart": True,
"defaultChart": None,
"exploreChart": True,
"filterURL": filter_url,
"filterTab": filter_tab,
"filterForm": filter_form,
"autoSubmit": settings.get_ui_report_auto_submit(),
"thousandSeparator": settings.get_L10n_thousands_separator(),
"thousandGrouping": settings.get_L10n_thousands_grouping(),
"textAll": str(T("All")),
}
chart_opt = get_vars["chart"]
if chart_opt is not None:
if str(chart_opt).lower() in ("0", "off", "false"):
opts["renderChart"] = False
elif ":" in chart_opt:
opts["collapseChart"] = False
ctype, caxis = chart_opt.split(":", 1)
opts["defaultChart"] = {"type": ctype, "axis": caxis}
table_opt = get_vars["table"]
if table_opt is not None:
table_opt = str(table_opt).lower()
if table_opt in ("0", "off", "false"):
opts["renderTable"] = False
elif table_opt == "collapse":
opts["collapseTable"] = True
# Scripts
s3 = current.response.s3
scripts = s3.scripts
if s3.debug:
# @todo: support CDN
script = "/%s/static/scripts/d3/d3.js" % appname
if script not in scripts:
scripts.append(script)
script = "/%s/static/scripts/d3/nv.d3.js" % appname
if script not in scripts:
scripts.append(script)
script = "/%s/static/scripts/S3/s3.ui.pivottable.js" % appname
if script not in scripts:
scripts.append(script)
else:
script = "/%s/static/scripts/S3/s3.pivotTables.min.js" % appname
if script not in scripts:
scripts.append(script)
script = '''$('#%(widget_id)s').pivottable(%(opts)s)''' % \
dict(widget_id = widget_id,
opts = json.dumps(opts,
separators=SEPARATORS),
)
s3.jquery_ready.append(script)
return output
# -------------------------------------------------------------------------
def report_options(self, get_vars=None, widget_id="pivottable"):
"""
Render the widgets for the report options form
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element base ID for the widgets
"""
T = current.T
SHOW_TOTALS = T("Show totals")
FACT = T("Report of")
ROWS = T("Grouped by")
COLS = T("and")
resource = self.resource
get_config = resource.get_config
options = get_config("report_options")
# Specific formstyle?
settings = current.deployment_settings
formstyle = settings.get_ui_report_formstyle()
# Fall back to inline-variant of current formstyle
if formstyle is None:
formstyle = settings.get_ui_inline_formstyle()
# Helper for labels
label = lambda s, **attr: LABEL("%s:" % s, **attr)
formfields = []
# Layer selector
layer_id = "%s-fact" % widget_id
layer_widget = self.layer_options(options=options,
get_vars=get_vars,
widget_id=layer_id)
formfields.append((layer_id + "-row",
label(FACT, _for=layer_id),
layer_widget,
"",
))
# Rows/Columns selectors
axis_options = self.axis_options
rows_id = "%s-rows" % widget_id
cols_id = "%s-cols" % widget_id
rows_options = axis_options("rows",
options=options,
get_vars=get_vars,
widget_id=rows_id)
cols_options = axis_options("cols",
options=options,
get_vars=get_vars,
widget_id=cols_id)
axis_widget = DIV(rows_options,
label(COLS, _for=cols_id),
cols_options,
_class="pt-axis-options",
)
formfields.append(("%s-axis-row" % widget_id,
label(ROWS, _for=rows_id),
axis_widget,
"",
))
# Show Totals switch
show_totals = True
if get_vars and "totals" in get_vars and \
str(get_vars["totals"]).lower() in ("0", "false", "off"):
show_totals = False
self.show_totals = show_totals
show_totals_id = "%s-totals" % widget_id
totals_widget = INPUT(_type="checkbox",
_id=show_totals_id,
_name="totals",
_class="pt-totals",
value=show_totals
)
formfields.append(("%s-show-totals-row" % widget_id,
label(SHOW_TOTALS, _for=show_totals_id),
totals_widget,
"",
))
try:
widgets = formstyle(FIELDSET(), formfields)
except:
# Old style (should be avoided)
widgets = TAG[""]([formstyle(*formfield) for formfield in formfields])
# Render fieldset
fieldset = self._fieldset(T("Report Options"),
widgets,
_id="%s-options" % widget_id)
return fieldset
# -------------------------------------------------------------------------
def axis_options(self, axis,
options=None,
get_vars=None,
widget_id=None):
"""
Construct an OptionsWidget for rows or cols axis
@param axis: "rows" or "cols"
@param options: the report options
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element ID for the widget
"""
resource = self.resource
prefix = resource.prefix_selector
# Get all selectors
if options and axis in options:
fields = options[axis]
else:
fields = resource.get_config("list_fields")
if not fields:
fields = [f.name for f in resource.readable_fields()]
# Resolve the selectors
pkey = str(resource._id)
resolve_selector = resource.resolve_selector
rfields = []
append = rfields.append
for f in fields:
if isinstance(f, (tuple, list)):
label, selector = f[:2]
else:
label, selector = None, f
rfield = resolve_selector(selector)
if rfield.colname == pkey:
continue
if label:
rfield.label = label
append(rfield)
# Get current value
if get_vars and axis in get_vars:
value = get_vars[axis]
else:
value = ""
if value:
value = prefix(value)
# Dummy field
opts = [(prefix(rfield.selector), rfield.label) for rfield in rfields]
dummy_field = Storage(name=axis, requires=IS_IN_SET(opts))
# Construct widget
return OptionsWidget.widget(dummy_field,
value,
_id=widget_id,
_name=axis,
_class="pt-%s" % axis)
# -------------------------------------------------------------------------
def layer_options(self,
options=None,
get_vars=None,
widget_id=None):
"""
Construct an OptionsWidget for the fact layer
@param options: the report options
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element ID for the widget
"""
resource = self.resource
all_methods = S3PivotTableFact.METHODS
# Get all layers
layers = None
methods = None
if options:
if "methods" in options:
methods = options["methods"]
if "fact" in options:
layers = options["fact"]
if not layers:
layers = resource.get_config("list_fields")
if not layers:
layers = [f.name for f in resource.readable_fields()]
if not methods:
methods = all_methods
# Resolve layer options
T = current.T
RECORDS = T("Records")
mname = S3PivotTableFact._get_method_label
def layer_label(rfield, method):
""" Helper to construct a layer label """
mlabel = mname(method)
flabel = rfield.label if rfield.label != "Id" else RECORDS
# @ToDo: Exclude this string from admin/translate exports
return T("%s (%s)") % (flabel, mlabel)
prefix = resource.prefix_selector
layer_opts = []
for option in layers:
if isinstance(option, tuple):
title, layer = option
else:
title, layer = None, option
try:
facts = S3PivotTableFact.parse(layer)
except SyntaxError:
continue
if len(facts) > 1:
# Multi-fact layer
labels = []
expressions = []
for fact in facts:
if not title:
rfield = resource.resolve_selector(fact.selector)
labels.append(fact.get_label(rfield, layers))
expressions.append("%s(%s)" % (fact.method, fact.selector))
if not title:
title = " / ".join(labels)
layer_opts.append((",".join(expressions), title))
continue
else:
fact = facts[0]
label = fact.label or title
if fact.default_method:
s, m = fact.selector, None
else:
s, m = fact.selector, fact.method
# Resolve the selector
selector = prefix(s)
rfield = resource.resolve_selector(selector)
if not rfield.field and not rfield.virtual:
continue
if m is None and label:
rfield.label = label
if m is None:
# Only field given -> auto-detect aggregation methods
is_amount = None
ftype = rfield.ftype
if ftype == "integer":
is_amount = True
requires = rfield.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
for r in requires:
if isinstance(r, IS_IN_SET) or \
isinstance(r, IS_EMPTY_OR) and \
isinstance(r.other, IS_IN_SET):
is_amount = False
elif ftype == "double":
is_amount = True
elif ftype[:9] == "reference" or \
ftype[:5] == "list:" or \
ftype in ("id", "string", "text"):
is_amount = False
if ftype in ("datetime", "date", "time"):
mopts = ["min", "max", "list"]
elif is_amount is None:
mopts = ["sum", "min", "max", "avg", "count", "list"]
elif is_amount:
mopts = ["sum", "min", "max", "avg"]
else:
mopts = ["count", "list"]
for method in mopts:
if method in methods:
label = layer_label(rfield, method)
layer_opts.append(("%s(%s)" % (method, selector), label))
else:
# Explicit method specified
if label is None:
label = layer_label(rfield, m)
layer_opts.append(("%s(%s)" % (m, selector), label))
# Get current value
if get_vars and "fact" in get_vars:
layer = get_vars["fact"]
else:
layer = ""
if layer:
match = layer_pattern.match(layer)
if match is None:
layer = ""
else:
selector, method = match.group(2), match.group(1)
selector = prefix(selector)
layer = "%s(%s)" % (method, selector)
if len(layer_opts) == 1:
# Field is read-only if there is only 1 option
default = layer_opts[0]
widget = TAG[""](default[1],
INPUT(_type="hidden",
_id=widget_id,
_name=widget_id,
_value=default[0],
_class="pt-fact-single-option"))
else:
# Render Selector
dummy_field = Storage(name="fact",
requires=IS_IN_SET(layer_opts))
widget = OptionsWidget.widget(dummy_field,
layer,
_id=widget_id,
_name="fact",
_class="pt-fact")
return widget
# -------------------------------------------------------------------------
@staticmethod
def _fieldset(title, widgets, **attr):
"""
Helper method to wrap widgets in a FIELDSET container with
show/hide option
@param title: the title for the field set
@param widgets: the widgets
@param attr: HTML attributes for the field set
"""
T = current.T
SHOW = T("Show")
HIDE = T("Hide")
return FIELDSET(LEGEND(title,
BUTTON(SHOW,
_type="button",
_class="toggle-text",
),
BUTTON(HIDE,
_type="button",
_class="toggle-text",
)
),
widgets,
**attr)
# =============================================================================
class S3PivotTableFact(object):
""" Class representing a fact layer """
#: Supported aggregation methods
METHODS = {"list": "List",
"count": "Count",
"min": "Minimum",
"max": "Maximum",
"sum": "Total",
"avg": "Average",
#"std": "Standard Deviation"
}
def __init__(self, method, selector, label=None, default_method=True):
"""
Constructor
@param method: the aggregation method
@param selector: the field selector
@param label: the fact label
@param default_method: using default method (used by parser)
"""
if method is None:
method = "count"
default_method = True
if method not in self.METHODS:
raise SyntaxError("Unsupported aggregation function: %s" % method)
self.method = method
self.selector = selector
self._layer = None
self.label = label
self.resource = None
self.rfield = None
self.column = selector
self.default_method = default_method
# -------------------------------------------------------------------------
@property
def layer(self):
layer = self._layer
if not layer:
layer = self._layer = (self.selector, self.method)
return layer
# -------------------------------------------------------------------------
def compute(self, values, method=DEFAULT, totals=False):
"""
Aggregate a list of values.
            @param values: iterable of values
            @param method: the aggregation method (defaults to self.method)
            @param totals: whether this call aggregates totals ("list" falls back to "count")
"""
if values is None:
return None
if method is DEFAULT:
method = self.method
if totals and method == "list":
method = "count"
if method is None or method == "list":
return values if values else None
        values = [v for v in values if v is not None]
if method == "count":
return len(values)
elif method == "min":
try:
return min(values)
except (TypeError, ValueError):
return None
elif method == "max":
try:
return max(values)
except (TypeError, ValueError):
return None
elif method == "sum":
try:
return sum(values)
except (TypeError, ValueError):
return None
elif method == "avg":
try:
if len(values):
return sum(values) / float(len(values))
else:
return 0.0
except (TypeError, ValueError):
return None
#elif method == "std":
#import numpy
#if not values:
#return 0.0
#try:
#return numpy.std(values)
#except (TypeError, ValueError):
#return None
return None
# -------------------------------------------------------------------------
def aggregate_totals(self, totals):
"""
Aggregate totals for this fact (hyper-aggregation)
@param totals: iterable of totals
"""
if self.method in ("list", "count"):
total = self.compute(totals, method="sum")
else:
total = self.compute(totals)
return total
# -------------------------------------------------------------------------
@classmethod
def parse(cls, fact):
"""
Parse fact expression
@param fact: the fact expression
"""
if isinstance(fact, tuple):
label, fact = fact
else:
label = None
if isinstance(fact, list):
facts = []
for f in fact:
facts.extend(cls.parse(f))
if not facts:
raise SyntaxError("Invalid fact expression: %s" % fact)
return facts
# Parse the fact
other = None
default_method = False
if not fact:
method, parameters = "count", "id"
else:
match = FACT.match(fact)
if match:
method, parameters, other = match.groups()
if other:
other = cls.parse((label, other) if label else other)
elif SELECTOR.match(fact):
method, parameters, other = "count", fact, None
default_method = True
else:
raise SyntaxError("Invalid fact expression: %s" % fact)
# Validate method
if method not in cls.METHODS:
raise SyntaxError("Unsupported aggregation method: %s" % method)
# Extract parameters
parameters = parameters.split(",")
selector = parameters[0]
facts = [cls(method,
selector,
label=label,
default_method=default_method,
),
]
if other:
facts.extend(other)
return facts
# -------------------------------------------------------------------------
@classmethod
def _get_method_label(cls, code):
"""
Get a label for a method
@param code: the method code
@return: the label (lazyT), or None for unsupported methods
"""
methods = cls.METHODS
if code is None:
code = "list"
if code in methods:
return current.T(methods[code])
else:
return None
# -------------------------------------------------------------------------
@staticmethod
def _get_field_label(rfield, fact_options=None):
"""
Get the label for a field
@param rfield: the S3ResourceField
@param fact_options: the corresponding subset of the report
options ("fact", "rows" or "cols")
"""
label = None
if not rfield:
return
resource = rfield.resource
fields = list(fact_options) if fact_options else []
list_fields = resource.get_config("list_fields")
if list_fields:
fields.extend(list_fields)
prefix = resource.prefix_selector
# Search through the field labels in report options
selector = prefix(rfield.selector)
for f in fields:
if type(f) is tuple and \
isinstance(f[1], basestring) and \
prefix(f[1]) == selector:
label = f[0]
break
if not label and rfield:
if rfield.ftype == "id":
label = current.T("Records")
else:
label = rfield.label
return label if label else ""
# -------------------------------------------------------------------------
def get_label(self, rfield, fact_options=None):
"""
Get a label for this fact
@param rfield: the S3ResourceField
@param fact_options: the "fact" list of the report options
"""
label = self.label
if label:
# Already set
return label
if fact_options:
# Lookup the label from the fact options
prefix = rfield.resource.prefix_selector
for fact_option in fact_options:
facts = self.parse(fact_option)
for fact in facts:
if fact.method == self.method and \
prefix(fact.selector) == prefix(self.selector):
label = fact.label
break
if label:
break
if not label:
# Construct a label from the field label and the method name
field_label = self._get_field_label(rfield, fact_options)
method_label = self._get_method_label(self.method)
label = "%s (%s)" % (field_label, method_label)
self.label = label
return label
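# =============================================================================
# Illustrative usage sketch (added for clarity; not part of the original
# module). The selector name "amount" is hypothetical, and the sketch assumes
# the "method(selector)" expression syntax handled by S3PivotTableFact.parse().
def _pivot_table_fact_example():
    """ Minimal sketch: parse a fact expression and aggregate some values """
    facts = S3PivotTableFact.parse("sum(amount)")
    fact = facts[0]
    # fact.method == "sum", fact.selector == "amount"
    # compute() aggregates raw values, skipping None entries:
    return fact.compute([1, None, 2, 3]) # -> 6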
# =============================================================================
class S3PivotTable(object):
""" Class representing a pivot table of a resource """
def __init__(self, resource, rows, cols, facts, strict=True):
"""
Constructor - extracts all unique records, generates a
pivot table from them with the given dimensions and
computes the aggregated values for each cell.
@param resource: the S3Resource
@param rows: field selector for the rows dimension
@param cols: field selector for the columns dimension
@param facts: list of S3PivotTableFacts to compute
@param strict: filter out dimension values which don't match
the resource filter
"""
# Initialize ----------------------------------------------------------
#
if not rows and not cols:
raise SyntaxError("No rows or columns specified for pivot table")
self.resource = resource
self.lfields = None
self.dfields = None
self.rfields = None
self.rows = rows
self.cols = cols
self.facts = facts
# API variables -------------------------------------------------------
#
self.records = None
""" All records in the pivot table as a Storage like:
{
<record_id>: <Row>
}
"""
self.empty = False
""" Empty-flag (True if no records could be found) """
self.numrows = None
""" The number of rows in the pivot table """
self.numcols = None
""" The number of columns in the pivot table """
self.cell = None
""" Array of pivot table cells in [rows[columns]]-order, each
cell is a Storage like:
{
records: <list_of_record_ids>,
(<fact>, <method>): <aggregated_value>, ...per layer
}
"""
self.row = None
""" List of row headers, each header is a Storage like:
{
value: <dimension value>,
records: <list_of_record_ids>,
(<fact>, <method>): <total value>, ...per layer
}
"""
self.col = None
""" List of column headers, each header is a Storage like:
{
value: <dimension value>,
records: <list_of_record_ids>,
(<fact>, <method>): <total value>, ...per layer
}
"""
self.totals = Storage()
""" The grand total values for each layer, as a Storage like:
{
                    (<fact>, <method>): <total value>, ...per layer
}
"""
self.values = {}
# Get the fields ------------------------------------------------------
#
tablename = resource.tablename
# The "report_fields" table setting defines which additional
# fields shall be included in the report base layer. This is
# useful to provide easy access to the record data behind a
# pivot table cell.
fields = current.s3db.get_config(tablename, "report_fields", [])
self._get_fields(fields=fields)
rows = self.rows
cols = self.cols
# Retrieve the records ------------------------------------------------
#
data = resource.select(self.rfields.keys(), limit=None)
drows = data["rows"]
if drows:
key = str(resource.table._id)
records = Storage([(i[key], i) for i in drows])
# Generate the data frame -----------------------------------------
#
gfields = self.gfields
pkey_colname = gfields[self.pkey]
rows_colname = gfields[rows]
cols_colname = gfields[cols]
if strict:
rfields = self.rfields
axes = (rfield
for rfield in (rfields[rows], rfields[cols])
if rfield != None)
axisfilter = resource.axisfilter(axes)
else:
axisfilter = None
dataframe = []
extend = dataframe.extend
#insert = dataframe.append
expand = self._expand
for _id in records:
row = records[_id]
item = {key: _id}
if rows_colname:
item[rows_colname] = row[rows_colname]
if cols_colname:
item[cols_colname] = row[cols_colname]
extend(expand(item, axisfilter=axisfilter))
self.records = records
# Group the records -----------------------------------------------
#
matrix, rnames, cnames = self._pivot(dataframe,
pkey_colname,
rows_colname,
cols_colname)
# Initialize columns and rows -------------------------------------
#
if cols:
self.col = [Storage({"value": v}) for v in cnames]
self.numcols = len(self.col)
else:
self.col = [Storage({"value": None})]
self.numcols = 1
if rows:
self.row = [Storage({"value": v}) for v in rnames]
self.numrows = len(self.row)
else:
self.row = [Storage({"value": None})]
self.numrows = 1
# Add the layers --------------------------------------------------
#
add_layer = self._add_layer
for fact in self.facts:
add_layer(matrix, fact)
else:
# No items to report on -------------------------------------------
#
self.empty = True
# -------------------------------------------------------------------------
# API methods
# -------------------------------------------------------------------------
def __len__(self):
""" Total number of records in the report """
items = self.records
if items is None:
return 0
else:
return len(self.records)
# -------------------------------------------------------------------------
def geojson(self,
fact=None,
level="L0"):
"""
Render the pivot table data as a dict ready to be exported as
GeoJSON for display on a Map.
Called by S3Report.geojson()
            @param fact: the S3PivotTableFact to aggregate; its layer is e.g. ("id", "count")
- we only support methods "count" & "sum"
- @ToDo: Support density: 'per sqkm' and 'per population'
@param level: the aggregation level (defaults to Country)
"""
if fact is None:
fact = self.facts[0]
layer = fact.layer
# The rows dimension
# @ToDo: We can add sanity-checking using resource.parse_bbox_query() if-desired
context = self.resource.get_config("context")
if context and "location" in context:
rows_dim = "(location)$%s" % level
else:
# Fallback to location_id
rows_dim = "location_id$%s" % level
# Fallback we can add if-required
#rows_dim = "site_id$location_id$%s" % level
# The data
attributes = {}
geojsons = {}
if self.empty:
location_ids = []
else:
numeric = lambda x: isinstance(x, (int, long, float))
row_repr = lambda v: s3_unicode(v)
ids = {}
irows = self.row
rows = []
# Group and sort the rows
is_numeric = None
for i in xrange(self.numrows):
irow = irows[i]
total = irow[layer]
if is_numeric is None:
is_numeric = numeric(total)
if not is_numeric:
total = len(irow.records)
header = Storage(value = irow.value,
text = irow.text if "text" in irow
else row_repr(irow.value))
rows.append((i, total, header))
self._sortdim(rows, self.rfields[rows_dim])
# Aggregate the grouped values
db = current.db
gtable = current.s3db.gis_location
query = (gtable.level == level) & (gtable.deleted == False)
for rindex, rtotal, rtitle in rows:
rval = rtitle.value
if rval:
# @ToDo: Handle duplicate names ;)
if rval in ids:
_id = ids[rval]
else:
q = query & (gtable.name == rval)
row = db(q).select(gtable.id,
gtable.parent,
limitby=(0, 1)
).first()
try:
_id = row.id
# Cache
ids[rval] = _id
except:
continue
attribute = dict(name=s3_unicode(rval),
value=rtotal)
attributes[_id] = attribute
location_ids = [ids[r] for r in ids]
query = (gtable.id.belongs(location_ids))
geojsons = current.gis.get_locations(gtable,
query,
join=False,
geojson=True)
# Prepare for export via xml.gis_encode() and geojson/export.xsl
location_data = {}
geojsons = dict(gis_location = geojsons)
location_data["geojsons"] = geojsons
attributes = dict(gis_location = attributes)
location_data["attributes"] = attributes
return location_ids, location_data
# -------------------------------------------------------------------------
def json(self, maxrows=None, maxcols=None):
"""
Render the pivot table data as JSON-serializable dict
            @param maxrows: maximum number of rows (None for all)
            @param maxcols: maximum number of columns (None for all)
{
labels: {
layer:
rows:
cols:
total:
},
method: <aggregation method>,
cells: [rows[cols]],
rows: [rows[index, value, label, total]],
cols: [cols[index, value, label, total]],
total: <grand total>,
filter: [rows selector, cols selector]
}
"""
rfields = self.rfields
resource = self.resource
T = current.T
OTHER = "__other__"
rows_dim = self.rows
cols_dim = self.cols
# The output data
orows = []
rappend = orows.append
ocols = []
cappend = ocols.append
ocells = []
lookups = {}
facts = self.facts
if not self.empty:
# Representation methods for row and column keys
row_repr = self._represent_method(rows_dim)
col_repr = self._represent_method(cols_dim)
# Label for the "Others" row/columns
others = s3_unicode(T("Others"))
# Get the layers (fact.selector, fact.method),
# => used as keys to access the pivot data
layers = [fact.layer for fact in facts]
least = facts[0].method == "min"
# Group and sort the rows (grouping = determine "others")
irows = self.row
rows = []
rtail = (None, None)
for i in xrange(self.numrows):
irow = irows[i]
totals = [irow[layer] for layer in layers]
sort_total = totals[0]
header = {"value": irow.value,
"text": irow.text if "text" in irow
else row_repr(irow.value),
}
rows.append((i, sort_total, totals, header))
if maxrows is not None:
rtail = self._tail(rows, maxrows, least=least, facts=facts)
self._sortdim(rows, rfields[rows_dim])
if rtail[1] is not None:
values = [irows[i]["value"] for i in rtail[0]]
rows.append((OTHER,
rtail[1],
rtail[2],
{"value": values, "text":others},
))
# Group and sort the cols (grouping = determine "others")
icols = self.col
cols = []
ctail = (None, None)
for i in xrange(self.numcols):
icol = icols[i]
totals = [icol[layer] for layer in layers]
sort_total = totals[0]
header = {"value": icol.value,
"text": icol.text if "text" in icol
else col_repr(icol.value),
}
cols.append((i, sort_total, totals, header))
if maxcols is not None:
ctail = self._tail(cols, maxcols, least=least, facts=facts)
self._sortdim(cols, rfields[cols_dim])
if ctail[1] is not None:
values = [icols[i]["value"] for i in ctail[0]]
cols.append((OTHER,
ctail[1],
ctail[2],
{"value": values, "text": others},
))
rothers = rtail[0] or set()
cothers = ctail[0] or set()
# Group and sort the cells accordingly
# @todo: break up into subfunctions
icell = self.cell
cells = {}
for i in xrange(self.numrows):
irow = icell[i]
ridx = (i, OTHER) if rothers and i in rothers else (i,)
for j in xrange(self.numcols):
cell = irow[j]
cidx = (j, OTHER) if cothers and j in cothers else (j,)
cell_records = cell["records"]
for layer_index, layer in enumerate(layers):
# Get cell items for the layer
# => items can be a single numeric value, or a list
items = cell[layer]
# Get cell value for the layer
if isinstance(items, list):
value = len(items)
else:
value = items
for ri in ridx:
if ri not in cells:
orow = cells[ri] = {}
else:
orow = cells[ri]
for ci in cidx:
if ci not in orow:
# Create a new output cell
ocell = orow[ci] = {"values": [],
"items": [],
"records": [],
}
else:
ocell = orow[ci]
if layer_index == 0:
# Extend the list of records
ocell["records"].extend(cell_records)
value_array = ocell["values"]
items_array = ocell["items"]
if len(value_array) <= layer_index:
value_array.append(value)
items_array.append(items)
else:
ovalue = value_array[layer_index]
oitems = items_array[layer_index]
if isinstance(ovalue, list):
ovalue.append(value)
oitems.append(items)
else:
value_array[layer_index] = [ovalue, value]
items_array[layer_index] = [oitems, items]
# Get field representation methods
represents = self._represents(layers)
# Aggregate the grouped values
value_maps = {}
add_columns = True # do this only once
for rindex, rtotal, rtotals, rtitle in rows:
orow = []
# Row value for filter construction
rval = rtitle["value"]
if rindex == OTHER and isinstance(rval, list):
rval = ",".join(s3_unicode(v) for v in rval)
elif rval is not None:
rval = s3_unicode(rval)
# The output row summary
rappend((rindex,
rindex in rothers,
rtotals,
rval,
rtitle["text"],
))
for cindex, ctotal, ctotals, ctitle in cols:
# Get the corresponding cell
cell = cells[rindex][cindex]
value_array = cell["values"]
items_array = cell["items"]
# Initialize the output cell
# @todo: deflate JSON keys
ocell = {"items": [], "values": [], "keys": []}
for layer_index, fact in enumerate(facts):
selector, method = fact.layer
if selector not in lookups:
lookup = lookups[selector] = {}
else:
lookup = lookups[selector]
if selector not in value_maps:
value_map = value_maps[selector] = {}
else:
value_map = value_maps[selector]
# Add the cell value
value = value_array[layer_index]
if type(value) is list:
# "Others" cell with multiple totals
value = fact.aggregate_totals(value)
ocell["values"].append(value)
has_fk, _repr = represents[selector]
rfield = self.rfields[selector]
items = items_array[layer_index]
okeys = None
# Build a lookup table for field values if counting
if method in ("count", "list"):
keys = []
for record_id in cell["records"]:
record = self.records[record_id]
try:
fvalue = record[rfield.colname]
except AttributeError:
continue
if fvalue is None:
continue
if type(fvalue) is not list:
fvalue = [fvalue]
for v in fvalue:
if v is None:
continue
if has_fk:
if v not in keys:
keys.append(v)
if v not in lookup:
lookup[v] = _repr(v)
else:
if v not in value_map:
next_id = len(value_map)
value_map[v] = next_id
keys.append(next_id)
lookup[next_id] = _repr(v)
else:
prev_id = value_map[v]
if prev_id not in keys:
keys.append(prev_id)
# Sort the keys by their representations
keys.sort(key=lambda i: lookup[i])
if method == "list":
items = [lookup[key] for key in keys if key in lookup]
else:
okeys = keys
ocell["items"].append(items)
ocell["keys"].append(okeys)
orow.append(ocell)
if add_columns:
# Column value for filter construction
cval = ctitle["value"]
if cindex == OTHER and isinstance(cval, list):
cval = ",".join(s3_unicode(v) for v in cval)
elif cval is not None:
cval = s3_unicode(cval)
# The output column summary
cappend((cindex,
cindex in cothers,
ctotals,
cval,
ctitle["text"],
))
add_columns = False
ocells.append(orow)
# Lookup labels
report_options = resource.get_config("report_options", {})
if report_options:
fact_options = report_options.get("fact")
else:
fact_options = ()
# @todo: lookup report title before constructing from fact labels
fact_data = []
fact_labels = []
for fact in facts:
rfield = rfields[fact.selector]
fact_label = str(fact.get_label(rfield, fact_options))
fact_data.append((fact.selector, fact.method, fact_label))
fact_labels.append(fact_label)
get_label = S3PivotTableFact._get_field_label
if rows_dim:
rows_label = str(get_label(rfields[rows_dim], report_options.get("rows")))
else:
rows_label = ""
if cols_dim:
cols_label = str(get_label(rfields[cols_dim], report_options.get("cols")))
else:
cols_label = ""
labels = {"total": str(T("Total")),
"none": str(current.messages["NONE"]),
"per": str(T("per")),
"breakdown": str(T("Breakdown")),
# @todo: use report title:
"layer": " / ".join(fact_labels),
"rows": rows_label,
"cols": cols_label,
}
# Compile the output dict
output = {"rows": orows,
"cols": ocols,
"facts": fact_data,
"cells": ocells,
"lookups": lookups,
"total": self._totals(self.totals, [fact]),
"nodata": None if not self.empty else str(T("No data available")),
"labels": labels,
}
# Add axis selectors for filter-URL construction
prefix = resource.prefix_selector
output["filter"] = (prefix(rows_dim) if rows_dim else None,
prefix(cols_dim) if cols_dim else None,
)
return output
# -------------------------------------------------------------------------
def _represents(self, layers):
"""
Get the representation functions per fact field
@param layers: the list of layers, tuples (selector, method)
"""
rfields = self.rfields
represents = {}
values = self.values
for selector, method in layers:
if selector in represents:
continue
# Get the field
rfield = rfields[selector]
f = rfield.field
# Utilize bulk-representation for field values
if method in ("list", "count") and \
f is not None and \
hasattr(f.represent, "bulk"):
all_values = values[(selector, method)]
if all_values:
f.represent.bulk(list(s3_flatlist(all_values)))
# Get the representation method
has_fk = f is not None and s3_has_foreign_key(f)
if has_fk:
represent = lambda v, f=f: s3_unicode(f.represent(v))
else:
m = self._represent_method(selector)
represent = lambda v, m=m: s3_unicode(m(v))
represents[selector] = (has_fk, represent)
return represents
# -------------------------------------------------------------------------
@staticmethod
def _sortdim(items, rfield, index=3):
"""
Sort a dimension (sorts items in-place)
@param items: the items as list of tuples
(index, sort-total, totals, header)
@param rfield: the dimension (S3ResourceField)
@param index: alternative index of the value/text dict
within each item
"""
if not rfield:
return
ftype = rfield.ftype
sortby = "value"
if ftype == "integer":
requires = rfield.requires
if isinstance(requires, (tuple, list)):
requires = requires[0]
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
if isinstance(requires, IS_IN_SET):
sortby = "text"
elif ftype[:9] == "reference":
sortby = "text"
items.sort(key=lambda item: item[index][sortby])
return
# -------------------------------------------------------------------------
@classmethod
def _tail(cls, items, length=10, least=False, facts=None):
"""
Find the top/least <length> items (by total)
@param items: the items as list of tuples
(index, sort-total, totals, header)
@param length: the maximum number of items
@param least: find least rather than top
@param facts: the facts to aggregate the tail totals
"""
try:
if len(items) > length:
l = list(items)
l.sort(lambda x, y: int(y[1]-x[1]))
if least:
l.reverse()
keys = [item[0] for item in l[length-1:]]
totals = []
for i, fact in enumerate(facts):
subtotals = [item[2][i] for item in l[length-1:]]
totals.append(fact.aggregate_totals(subtotals))
return (keys, totals[0], totals)
except (TypeError, ValueError):
pass
return (None, None)
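    # Illustrative example (added note): for per-row sort-totals
    # [9, 7, 5, 3, 1] and length=3, the two highest-ranked rows are kept and
    # the remaining three are folded into a single "Others" tail whose totals
    # are re-aggregated per fact via aggregate_totals().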
# -------------------------------------------------------------------------
@staticmethod
def _totals(values, facts, append=None):
"""
Get the totals of a row/column/report
@param values: the values dictionary
@param facts: the facts
@param append: callback to collect the totals for JSON data
(currently only collects the first layer)
"""
totals = []
number_represent = IS_NUMBER.represent
for fact in facts:
value = values[fact.layer]
#if fact.method == "list":
#value = value and len(value) or 0
if not len(totals) and append is not None:
append(value)
totals.append(s3_unicode(number_represent(value)))
totals = " / ".join(totals)
return totals
# -------------------------------------------------------------------------
# Internal methods
# -------------------------------------------------------------------------
@staticmethod
def _pivot(items, pkey_colname, rows_colname, cols_colname):
"""
2-dimensional pivoting of a list of unique items
@param items: list of unique items as dicts
@param pkey_colname: column name of the primary key
@param rows_colname: column name of the row dimension
@param cols_colname: column name of the column dimension
@return: tuple of (cell matrix, row headers, column headers),
where cell matrix is a 2-dimensional array [rows[columns]]
and row headers and column headers each are lists (in the
same order as the cell matrix)
"""
rvalues = Storage()
cvalues = Storage()
cells = Storage()
# All unique rows values
rindex = 0
cindex = 0
for item in items:
rvalue = item[rows_colname] if rows_colname else None
cvalue = item[cols_colname] if cols_colname else None
if rvalue not in rvalues:
r = rvalues[rvalue] = rindex
rindex += 1
else:
r = rvalues[rvalue]
if cvalue not in cvalues:
c = cvalues[cvalue] = cindex
cindex += 1
else:
c = cvalues[cvalue]
if (r, c) not in cells:
cells[(r, c)] = [item[pkey_colname]]
else:
cells[(r, c)].append(item[pkey_colname])
matrix = []
for r in xrange(len(rvalues)):
row = []
for c in xrange(len(cvalues)):
row.append(cells[(r, c)])
matrix.append(row)
rnames = [None] * len(rvalues)
for k, v in rvalues.items():
rnames[v] = k
cnames = [None] * len(cvalues)
for k, v in cvalues.items():
cnames[v] = k
return matrix, rnames, cnames
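    # Worked example (added for clarity; field names are hypothetical):
    #
    #   items = [{"id": 1, "r": "a", "c": "x"},
    #            {"id": 2, "r": "a", "c": "y"},
    #            {"id": 3, "r": "b", "c": "x"},
    #            {"id": 4, "r": "b", "c": "y"}]
    #   matrix, rnames, cnames = S3PivotTable._pivot(items, "id", "r", "c")
    #
    #   # matrix => [[[1], [2]], [[3], [4]]]
    #   # rnames => ["a", "b"], cnames => ["x", "y"]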
# -------------------------------------------------------------------------
def _add_layer(self, matrix, fact):
"""
Compute an aggregation layer, updates:
- self.cell: the aggregated values per cell
- self.row: the totals per row
- self.col: the totals per column
- self.totals: the overall totals per layer
@param matrix: the cell matrix
            @param fact: the S3PivotTableFact to compute
"""
rows = self.row
cols = self.col
records = self.records
extract = self._extract
resource = self.resource
RECORDS = "records"
VALUES = "values"
table = resource.table
pkey = table._id.name
layer = fact.layer
numcols = len(self.col)
numrows = len(self.row)
# Initialize cells
if self.cell is None:
self.cell = [[Storage()
for i in xrange(numcols)]
for j in xrange(numrows)]
cells = self.cell
all_values = []
for r in xrange(numrows):
# Initialize row header
row = rows[r]
row[RECORDS] = []
row[VALUES] = []
row_records = row[RECORDS]
row_values = row[VALUES]
for c in xrange(numcols):
# Initialize column header
col = cols[c]
if RECORDS not in col:
col[RECORDS] = []
col_records = col[RECORDS]
if VALUES not in col:
col[VALUES] = []
col_values = col[VALUES]
# Get the records
cell = cells[r][c]
if RECORDS in cell and cell[RECORDS] is not None:
ids = cell[RECORDS]
else:
data = matrix[r][c]
if data:
remove = data.remove
while None in data:
remove(None)
ids = data
else:
ids = []
cell[RECORDS] = ids
row_records.extend(ids)
col_records.extend(ids)
# Get the values
if fact.selector is None:
fact.selector = pkey
values = ids
row_values = row_records
col_values = row_records
all_values = records.keys()
else:
values = []
append = values.append
for i in ids:
value = extract(records[i], fact.selector)
if value is None:
continue
append(value)
values = list(s3_flatlist(values))
if fact.method in ("list", "count"):
values = list(set(values))
row_values.extend(values)
col_values.extend(values)
all_values.extend(values)
# Aggregate values
value = fact.compute(values)
cell[layer] = value
# Compute row total
row[layer] = fact.compute(row_values, totals=True)
del row[VALUES]
# Compute column total
for c in xrange(numcols):
col = cols[c]
col[layer] = fact.compute(col[VALUES], totals=True)
del col[VALUES]
# Compute overall total
self.totals[layer] = fact.compute(all_values, totals=True)
self.values[layer] = all_values
return
# -------------------------------------------------------------------------
def _get_fields(self, fields=None):
"""
Determine the fields needed to generate the report
@param fields: fields to include in the report (all fields)
"""
resource = self.resource
table = resource.table
# Lambda to prefix all field selectors
alias = resource.alias
def prefix(s):
if isinstance(s, (tuple, list)):
return prefix(s[-1])
if "." not in s.split("$", 1)[0]:
return "%s.%s" % (alias, s)
elif s[:2] == "~.":
return "%s.%s" % (alias, s[2:])
else:
return s
self.pkey = pkey = prefix(table._id.name)
self.rows = rows = self.rows and prefix(self.rows) or None
self.cols = cols = self.cols and prefix(self.cols) or None
if not fields:
fields = ()
# dfields (data-fields): fields to generate the layers
dfields = [prefix(s) for s in fields]
if rows and rows not in dfields:
dfields.append(rows)
if cols and cols not in dfields:
dfields.append(cols)
if pkey not in dfields:
dfields.append(pkey)
for fact in self.facts:
selector = fact.selector = prefix(fact.selector)
if selector not in dfields:
dfields.append(selector)
self.dfields = dfields
# rfields (resource-fields): dfields resolved into a ResourceFields map
rfields = resource.resolve_selectors(dfields)[0]
rfields = Storage([(f.selector.replace("~", alias), f) for f in rfields])
self.rfields = rfields
# gfields (grouping-fields): fields to group the records by
self.gfields = {pkey: rfields[pkey].colname,
rows: rfields[rows].colname
if rows and rows in rfields else None,
cols: rfields[cols].colname
if cols and cols in rfields else None,
}
return
# -------------------------------------------------------------------------
def _represent_method(self, field):
"""
Get the representation method for a field in the report
@param field: the field selector
"""
rfields = self.rfields
default = lambda value: None
if field and field in rfields:
rfield = rfields[field]
if rfield.field:
def repr_method(value):
return s3_represent_value(rfield.field, value,
strip_markup=True)
elif rfield.virtual:
stripper = S3MarkupStripper()
def repr_method(val):
if val is None:
return "-"
text = s3_unicode(val)
if "<" in text:
stripper.feed(text)
return stripper.stripped() # = totally naked ;)
else:
return text
else:
repr_method = default
else:
repr_method = default
return repr_method
# -------------------------------------------------------------------------
def _extract(self, row, field):
"""
Extract a field value from a DAL row
@param row: the row
@param field: the fieldname (list_fields syntax)
"""
rfields = self.rfields
if field not in rfields:
raise KeyError("Invalid field name: %s" % field)
rfield = rfields[field]
try:
return rfield.extract(row)
except AttributeError:
return None
# -------------------------------------------------------------------------
def _expand(self, row, axisfilter=None):
"""
Expand a data frame row into a list of rows for list:type values
@param row: the row
@param axisfilter: dict of filtered field values by column names
"""
pairs = []
append = pairs.append
for colname in self.gfields.values():
if not colname:
continue
value = row[colname]
if type(value) is list:
if not value:
value = [None]
if axisfilter and colname in axisfilter:
p = [(colname, v) for v in value
if v in axisfilter[colname]]
if not p:
raise RuntimeError("record does not match query")
else:
append(p)
else:
append([(colname, v) for v in value])
else:
append([(colname, value)])
result = [dict(i) for i in product(*pairs)]
return result
# END =========================================================================
|
the-stack_0_16048 | from eventsourcing.example.application import (
close_example_application,
get_example_application,
init_example_application,
)
from eventsourcing.infrastructure.sqlalchemy.manager import SQLAlchemyRecordManager
from eventsourcing.infrastructure.sqlalchemy.records import IntegerSequencedNoIDRecord
from eventsourcing.tests.datastore_tests.test_sqlalchemy import (
SQLAlchemyDatastoreTestCase,
)
class TestExampleApplicationSingleInstanceFunctions(SQLAlchemyDatastoreTestCase):
def setUp(self):
super(TestExampleApplicationSingleInstanceFunctions, self).setUp()
# Setup the database.
self.datastore.setup_connection()
self.datastore.setup_tables()
def tearDown(self):
# Teardown single instance.
close_example_application()
# Teardown the database.
self.datastore.drop_tables()
self.datastore.close_connection()
super(TestExampleApplicationSingleInstanceFunctions, self).tearDown()
def test(self):
self.datastore.setup_connection()
self.datastore.setup_tables()
record_manager = SQLAlchemyRecordManager(
record_class=IntegerSequencedNoIDRecord, session=self.datastore.session
)
# Can't get the single instance before it has been constructed.
with self.assertRaises(AssertionError):
get_example_application()
# Construct single instance.
init_example_application(entity_record_manager=record_manager)
# Can't construct single instance twice.
with self.assertRaises(AssertionError):
init_example_application(entity_record_manager=record_manager)
# Get the single instance.
app1 = get_example_application()
app2 = get_example_application()
self.assertEqual(id(app1), id(app2))
# Close single instance.
close_example_application()
# Can't get the single instance before it has been constructed.
with self.assertRaises(AssertionError):
get_example_application()
# Construct single instance.
init_example_application(entity_record_manager=record_manager)
# Can't construct single instance twice.
with self.assertRaises(AssertionError):
init_example_application(entity_record_manager=record_manager)
# Get the single instance.
app1 = get_example_application()
app2 = get_example_application()
self.assertEqual(id(app1), id(app2))
|
the-stack_0_16050 | input_data = open("day9.input").read().split("\n")
adj_m = [(0, 1), (1, 0), (0, -1), (-1, 0)]
sol = 0
basins = []
for ix, line in enumerate(input_data):
for iy, col in enumerate(line):
adj = []
for a in adj_m:
realx = ix+a[0]
realy = iy+a[1]
if realx >= 0 and realy >= 0 and realx < len(input_data) and realy < len(line):
adj.append(int(input_data[realx][realy]))
if int(col) < min(adj):
sol += int(col) + 1
basins.append([(ix, iy)])
print(sol)
for bx, basin in enumerate(basins):
while True:
check = []
for point in basins[bx]:
for (x, y) in adj_m:
realx = point[0]+x
realy = point[1]+y
                if realx >= 0 and realy >= 0 and realx < len(input_data) and realy < len(input_data[realx]):
if input_data[realx][realy] != "9":
check.append((realx, realy))
check = list(filter(lambda x: x not in basins[bx], set(check)))
if len(check) == 0:
break
basins[bx] += check
basins.sort(key=lambda x: len(x), reverse=True)
print("Aufgabe2")
print(len(basins[0]) * len(basins[1]) * len(basins[2]))
|
the-stack_0_16052 | import uos
import network
from flashbdev import bdev
def wifi():
import ubinascii
ap_if = network.WLAN(network.AP_IF)
essid = b"MicroPython-%s" % ubinascii.hexlify(ap_if.config("mac")[-3:])
ap_if.config(essid=essid, authmode=network.AUTH_WPA_WPA2_PSK, password=b"micropythoN")
def check_bootsec():
buf = bytearray(bdev.SEC_SIZE)
bdev.readblocks(0, buf)
empty = True
for b in buf:
if b != 0xff:
empty = False
break
if empty:
return True
fs_corrupted()
def fs_corrupted():
import time
while 1:
print("""\
The FAT filesystem starting at sector %d with size %d sectors appears to
be corrupted. If you had important data there, you may want to make a flash
snapshot to try to recover it. Otherwise, perform factory reprogramming
of MicroPython firmware (completely erase flash, followed by firmware
programming).
""" % (bdev.START_SEC, bdev.blocks))
time.sleep(3)
def setup():
check_bootsec()
print("Performing initial setup")
# wifi()
uos.VfsFat.mkfs(bdev)
vfs = uos.VfsFat(bdev)
uos.mount(vfs, '/')
with open("boot.py", "w") as f:
f.write("""\
# This file is executed on every boot (including wake-boot from deepsleep)
#import esp
#esp.osdebug(None)
import uos, machine
uos.dupterm(machine.UART(0, 115200), 1)
import gc
#import webrepl
#webrepl.start()
gc.collect()
import prometheus.pnetwork
gc.collect()
prometheus.pnetwork.init_network()
gc.collect()
""")
return vfs
|
the-stack_0_16053 | import os
import json
from COCO_Eval_Utils import coco_eval,coco_eval_specific
from Utils import model_construction,init_optimizer,set_lr,clip_gradient,get_transform,get_sample_image_info,visualize_att,RewardCriterion,get_self_critical_reward
import torch.nn as nn
import torch
from torch.nn.utils.rnn import pack_padded_sequence
import tqdm
import numpy as np
from cider.pyciderevalcap.tokenizer.ptbtokenizer import PTBTokenizer
from cider.pyciderevalcap.ciderD.ciderD import CiderD
class Engine(object):
def __init__(self,model_settings_json,dataset_name,caption_vocab,data_dir=None,device='cpu'):
self.model,self.settings = model_construction(model_settings_json=model_settings_json,caption_vocab=caption_vocab,device=device)
self.device = device
self.data_dir = data_dir
self.dataset_name = dataset_name
self.caption_vocab = caption_vocab
self.tag = 'Model_' + self.settings['model_type'] + '_Dataset_' + dataset_name
self.model.to(self.device)
def load_pretrained_model(self,scst_model=False):
scst_not_found = False
if scst_model:
pretrained_scst_model_path = './CheckPoints/%s/' % self.tag + 'Captioner_scst_cp.pth'
if os.path.exists(pretrained_scst_model_path):
self.model.load_state_dict(torch.load(pretrained_scst_model_path))
print('load pretrained scst weights complete.')
else:
print('pretrained scst weights not found, try to load pretrained xe weights.')
scst_not_found = True
if not(scst_model) or scst_not_found:
pretrained_model_path = './CheckPoints/%s/' % self.tag + 'Captioner_cp.pth'
if os.path.exists(pretrained_model_path):
self.model.load_state_dict(torch.load(pretrained_model_path))
print('load pretrained xe weights complete.')
else:print('model checkpoint not found, training from scratch.')
def load_score_record(self,scst=False):
best_cider = 0.0
scst_score_record_path = './CheckPoints/%s/Captioner_scst_cp_score.json' % (self.tag)
score_record_path = './CheckPoints/%s/Captioner_cp_score.json' % (self.tag)
if scst and os.path.exists(scst_score_record_path):
scst_score_record = json.load(open(scst_score_record_path, 'r'))
best_cider = scst_score_record['cider']
if not scst and os.path.exists(score_record_path):
score_record = json.load(open(score_record_path,'r'))
best_cider = score_record['cider']
if best_cider != 0.0:print('best cider record: %.3f, model checkpoints below the score record will not be saved.' % best_cider)
else: print('best cider record not found.')
return best_cider
def get_model_params(self):
cnn_extractor_params = list(filter(lambda p: p.requires_grad, self.model.encoder.feature_extractor.parameters()))
captioner_params = list(self.model.decoder.parameters())
return cnn_extractor_params,captioner_params
#------------------------------XELoss training---------------------------------#
def training(self, num_epochs, train_dataloader, eval_dataloader, eval_caption_path,
optimizer_type, lr_opts, ss_opts, use_preset_settings, eval_beam_size=-1,
load_pretrained_model=False, overwrite_guarantee=True, cnn_FT_start=False, tqdm_visible=True):
os.makedirs('./CheckPoints/%s' % self.tag, exist_ok=True)
if load_pretrained_model:self.load_pretrained_model(scst_model=False)
else:print('training from scratch')
if overwrite_guarantee:best_cider_record = self.load_score_record(scst=False)
else:best_cider_record = 0.0
if hasattr(self.model,'cnn_fine_tune'):
self.model.cnn_fine_tune(cnn_FT_start)
cnn_extractor_params,captioner_params = self.get_model_params()
#------------Load preset training settings if exists--------------#
optim_type = optimizer_type
lr = lr_opts['learning_rate']
cnn_FT_lr = lr_opts['cnn_FT_learning_rate']
if use_preset_settings:
if self.settings.__contains__('optimizer'):
optim_type = self.settings['optimizer']
print('training under preset optimizer_type:%s' % optim_type)
if self.settings.__contains__('lr'):
lr = self.settings['lr']
print('training under preset learning_rate:%.6f' % lr)
if self.settings.__contains__('cnn_FT_lr'):
cnn_FT_lr = self.settings['cnn_FT_lr']
print('training under preset cnn_FT_learning_rate:%.6f' % cnn_FT_lr)
#-----------------------------------------------------------------#
cnn_extractor_optimizer = init_optimizer(optimizer_type=optim_type,params=cnn_extractor_params,learning_rate=cnn_FT_lr)
captioner_optimizer = init_optimizer(optimizer_type=optim_type,params=captioner_params,learning_rate=lr)
criterion = nn.CrossEntropyLoss().to(self.device)
cider_scores = []
best_cider = 0.0
best_epoch = 0
best_cider_woFT = 0.0
best_epoch_woFT = 0
for epoch in range(1, num_epochs + 1):
print('----------------------Start training for Epoch %d, CNN_fine_tune:%s---------------------' % (epoch, cnn_FT_start))
if epoch > lr_opts['lr_dec_start_epoch'] and lr_opts['lr_dec_start_epoch'] >= 0:
frac = (epoch - lr_opts['lr_dec_start_epoch']) // lr_opts['lr_dec_every']
decay_factor = lr_opts['lr_dec_rate'] ** frac
current_lr = lr * decay_factor
else:
current_lr = lr
if cnn_extractor_optimizer is not None:set_lr(cnn_extractor_optimizer,min(cnn_FT_lr,current_lr))
set_lr(captioner_optimizer, current_lr) # set the decayed rate
if epoch > ss_opts['ss_start_epoch'] and ss_opts['ss_start_epoch'] >= 0:
frac = (epoch - ss_opts['ss_start_epoch']) // ss_opts['ss_inc_every']
ss_prob = min(ss_opts['ss_inc_prob'] * frac, ss_opts['ss_max_prob'])
self.model.ss_prob = ss_prob
else:ss_prob = 0.0
print('| current_lr: %.6f cnn_FT_lr: %.6f current_scheduled_sampling_prob: %.2f |'
% (current_lr,cnn_FT_lr,ss_prob))
print('------------------------------------------------------------------------------------------')
self.training_epoch(dataloader=train_dataloader, optimizers=[cnn_extractor_optimizer,captioner_optimizer], criterion=criterion, tqdm_visible=tqdm_visible)
print('--------------Start evaluating for Epoch %d-----------------' % epoch)
results = self.eval_captions_json_generation(
dataloader=eval_dataloader,
eval_beam_size=eval_beam_size,
tqdm_visible=tqdm_visible
)
cider = coco_eval(results=results, eval_caption_path=eval_caption_path)
cider_scores.append(cider)
if cider > best_cider:
if cider > best_cider_record:
torch.save(self.model.state_dict(), './CheckPoints/%s/Captioner_cp.pth' % (self.tag))
score_record = {'cider':cider}
json.dump(score_record,open('./CheckPoints/%s/Captioner_cp_score.json' % (self.tag),'w'))
best_cider = cider
best_epoch = epoch
if len(cider_scores) >= 5:
last_5 = cider_scores[-4:]
last_5_max = max(last_5)
last_5_min = min(last_5)
if last_5_max != best_cider or abs(last_5_max - last_5_min) <= 0.01:
if not hasattr(self.model,'cnn_fine_tune') or cnn_FT_start:
print('No improvement with CIDEr in the last 5 epochs...Early stopping triggered.')
break
else:
print('No improvement with CIDEr in the last 5 epochs...CNN fine-tune triggered.')
best_cider_woFT = best_cider
best_epoch_woFT = best_epoch
cnn_FT_start = True
self.model.cnn_fine_tune(flag=cnn_FT_start)
self.load_pretrained_model(scst_model=False)
print('load pretrained model from previous best epoch:%d' % best_epoch_woFT)
cnn_extractor_params,_ = self.get_model_params()
cnn_extractor_optimizer = init_optimizer(optimizer_type=optim_type,params=cnn_extractor_params,learning_rate=cnn_FT_lr)
cider_scores = []
if hasattr(self.model,'cnn_fine_tune'):
print('Model of best epoch #:%d with CIDEr score %.3f w/o cnn fine-tune' % (best_epoch_woFT,best_cider_woFT))
print('Model of best epoch #:%d with CIDEr score %.3f w/ cnn fine-tune' % (best_epoch,best_cider))
else:
print('Model of best epoch #:%d with CIDEr score %.3f' % (best_epoch,best_cider))
def training_epoch(self, dataloader, optimizers, criterion, tqdm_visible=True):
self.model.train()
if tqdm_visible:
monitor = tqdm.tqdm(dataloader, desc='Training Process')
else:
monitor = dataloader
for batch_i, (_, imgs, captions, lengths) in enumerate(monitor):
imgs = imgs.to(self.device)
captions = captions.to(self.device)
lengths = [cap_len - 1 for cap_len in lengths]
targets = pack_padded_sequence(input=captions[:, 1:], lengths=lengths, batch_first=True)
self.model.zero_grad()
predictions = self.model(imgs, captions, lengths)
loss = criterion(predictions[0], targets[0])
loss_npy = loss.cpu().detach().numpy()
if tqdm_visible:
monitor.set_postfix(Loss=np.round(loss_npy, decimals=4))
loss.backward()
for optimizer in optimizers:
if optimizer is not None:
clip_gradient(optimizer, grad_clip=0.1)
optimizer.step()
#-------------------------------SCST training-----------------------------------------#
def SCSTtraining(self, num_epochs, train_dataloader, eval_dataloader, eval_caption_path,
optimizer_type, scst_lr, scst_cnn_FT_lr, use_preset_settings, eval_beam_size=-1,
load_pretrained_scst_model=False, overwrite_guarantee=True, cnn_FT_start=True, tqdm_visible=True):
print('SCST training needs the model pretrained.')
self.load_pretrained_model(scst_model=load_pretrained_scst_model)
if overwrite_guarantee:best_scst_cider_record = self.load_score_record(scst=True)
else:best_scst_cider_record = 0.0
if hasattr(self.model,'cnn_fine_tune'):
self.model.cnn_fine_tune(cnn_FT_start)
cnn_extractor_params,captioner_params = self.get_model_params()
#------------Load preset training settings if exists--------------#
optim_type = optimizer_type
lr = scst_lr
cnn_FT_lr = scst_cnn_FT_lr
if use_preset_settings:
if self.settings.__contains__('optimizer'):
optim_type = self.settings['optimizer']
print('training under preset optimizer_type:%s' % optim_type)
if self.settings.__contains__('scst_lr'):
lr = self.settings['scst_lr']
print('training under preset scst learning_rate:%.6f' % lr)
if self.settings.__contains__('scst_cnn_FT_lr'):
cnn_FT_lr = self.settings['scst_cnn_FT_lr']
print('training under preset scst cnn_FT_learning_rate:%.6f' % cnn_FT_lr)
#-----------------------------------------------------------------#
cnn_extractor_optimizer = init_optimizer(optimizer_type=optim_type,params=cnn_extractor_params,learning_rate=cnn_FT_lr)
captioner_optimizer = init_optimizer(optimizer_type=optim_type,params=captioner_params,learning_rate=lr)
criterion = RewardCriterion().to(self.device)
best_cider = 0.0
best_epoch = 0
for epoch in range(1,num_epochs + 1):
print('--------------Start training for Epoch %d, Training_Stage:SCST--------------' % (epoch))
print('| lr: %.6f cnn_FT_lr: %.6f |'
% (lr, cnn_FT_lr))
print('---------------------------------------------------------------------------')
self.SCST_training_epoch(dataloader=train_dataloader,optimizers=[cnn_extractor_optimizer,captioner_optimizer],criterion=criterion,tqdm_visible=tqdm_visible)
print('--------------Start evaluating for Epoch %d-----------------' % epoch)
results = self.eval_captions_json_generation(dataloader=eval_dataloader,eval_beam_size=eval_beam_size,tqdm_visible=tqdm_visible)
cider = coco_eval(results=results,eval_caption_path=eval_caption_path)
if cider > best_cider:
if cider > best_scst_cider_record: #avoid score decreasing
torch.save(self.model.state_dict(), './CheckPoints/%s/Captioner_scst_cp.pth' % (self.tag))
score_record = {'cider':cider}
json.dump(score_record,open('./CheckPoints/%s/Captioner_scst_cp_score.json' % (self.tag),'w'))
best_cider = cider
best_epoch = epoch
print('Model of best epoch #:%d with CIDEr score %.3f in stage:SCST'
% (best_epoch,best_cider))
def SCST_training_epoch(self,dataloader,optimizers,criterion,tqdm_visible=True):
self.model.train()
if tqdm_visible:monitor = tqdm.tqdm(dataloader,desc='Training Process')
else:monitor = dataloader
for batch_i,(imgids,imgs,img_gts) in enumerate(monitor):
imgs = imgs.to(self.device)
self.model.zero_grad()
self.model.eval()
with torch.no_grad():
greedy_res = self.model.sampler(imgs,max_len=20)
self.model.train()
seq_gen,seqLogprobs = self.model.sampler_rl(imgs,max_len=20) #(bsize,max_len)
rewards = get_self_critical_reward(gen_result=seq_gen,greedy_res=greedy_res,ground_truth=img_gts,
img_ids=imgids,caption_vocab = self.caption_vocab,dataset_name=self.dataset_name)
loss = criterion(seqLogprobs,seq_gen,rewards.to(self.device))
loss_npy = loss.cpu().detach().numpy()
if tqdm_visible:
monitor.set_postfix(Loss=np.round(loss_npy,decimals=4))
loss.backward()
for optimizer in optimizers:
if optimizer is not None:
clip_gradient(optimizer,grad_clip=0.25)
optimizer.step()
def eval_captions_json_generation(self,dataloader,eval_beam_size=-1,tqdm_visible=True):
self.model.eval()
result = []
print('Generating captions json for evaluation. Beam Search: %s' % (eval_beam_size!=-1))
if tqdm_visible:monitor = tqdm.tqdm(dataloader, desc='Generating Process')
else:monitor = dataloader
for batch_i, (image_ids, images) in enumerate(monitor):
images = images.to(self.device)
with torch.no_grad():
if eval_beam_size!=-1:
generated_captions = self.model.beam_search_sampler(images=images, beam_size=eval_beam_size)
else:
generated_captions = self.model.sampler(images=images, max_len=20)
captions = generated_captions.cpu().detach().numpy()
for image_idx in range(captions.shape[0]):
sampled_ids = captions[image_idx]
sampled_caption = []
for word_id in sampled_ids:
word = self.caption_vocab.ix2word[word_id]
if word == '<end>':
break
elif word != '<sta>':
sampled_caption.append(word)
sentence = ' '.join(sampled_caption)
tmp = {'image_id': int(image_ids[image_idx]), 'caption': sentence}
result.append(tmp)
return result
def eval(self,dataset,split,eval_scst,eval_dataloader,eval_caption_path,eval_beam_size=-1,output_statics=False,tqdm_visible=True):
self.load_pretrained_model(scst_model=eval_scst)
print('--------------Start evaluating for Dataset %s on %s split-----------------' % (dataset,split))
results = self.eval_captions_json_generation(dataloader=eval_dataloader, eval_beam_size=eval_beam_size,tqdm_visible=tqdm_visible)
if output_statics:coco_eval_specific(results=results,eval_caption_path=eval_caption_path)
else:coco_eval(results=results,eval_caption_path=eval_caption_path)
def test(self,use_scst_model,img_root,img_filename,eval_beam_size=-1):
self.load_pretrained_model(use_scst_model)
self.model.eval()
img_copy,gts = get_sample_image_info(img_root=img_root,img_filename=img_filename)
img = get_transform()(img_copy).unsqueeze(0)
img = img.to(self.device)
caption,additional = self.model.eval_test_image(image=img,caption_vocab=self.caption_vocab,max_len=20,eval_beam_size=eval_beam_size)
sentence = ' '.join(caption)
print('Generated caption:')
print(sentence)
if len(gts)>0:
img_id = list(gts.keys())[0]
res = [{'image_id':img_id,'caption':sentence}]
tokenizer = PTBTokenizer(_source='gts')
_gts = tokenizer.tokenize(gts)
tokenizer = PTBTokenizer(_source='res')
_res = tokenizer.tokenize(res)
ciderD_scorer = CiderD(df='COCO14-val')
ciderD_score,_ = ciderD_scorer.compute_score(gts=_gts,res=_res)
print('CIDEr-D :%.3f' % (ciderD_score))
self.show_additional_rlt(additional,img_copy,caption)
def show_additional_rlt(self,additional,image,caption):
pass
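# Illustrative usage sketch (added for clarity; paths, dataloaders and option
# dicts below are hypothetical placeholders):
#   engine = Engine("model_settings.json", "COCO14", caption_vocab,
#                   data_dir="./data", device="cuda")
#   engine.training(num_epochs=30, train_dataloader=train_dl,
#                   eval_dataloader=val_dl, eval_caption_path="captions.json",
#                   optimizer_type="adam", lr_opts=lr_opts, ss_opts=ss_opts,
#                   use_preset_settings=True)
#   engine.test(use_scst_model=False, img_root="./imgs",
#               img_filename="demo.jpg", eval_beam_size=3)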
|
the-stack_0_16054 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import CTransaction, FromHex, ToHex
from test_framework.util import (
assert_equal,
)
from test_framework.estxconfig import COINBASE_MATURITY
def assert_approx(v, vexp, vspan=0.00001):
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
class WalletGroupTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
self.rpc_timewait = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Mine some coins
self.nodes[0].generate(10+COINBASE_MATURITY)
# Get some addresses from the two nodes
addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
addrs = addr1 + addr2
# Send 1 + 0.5 coin to each address
[self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
[self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
self.nodes[0].generate(1)
self.sync_all()
# For each node, send 0.2 coins back to 0;
# - node[1] should pick one 0.5 UTXO and leave the rest
# - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
# given address, and leave the rest
txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx1 = self.nodes[1].getrawtransaction(txid1, True)
# txid1 should have 1 input and 2 outputs
assert_equal(1, len(tx1["vin"]))
assert_equal(2, len(tx1["vout"]))
# one output should be 0.2, the other should be ~0.3
v = [vout["value"] for vout in tx1["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 0.3, 0.01)
txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx2 = self.nodes[2].getrawtransaction(txid2, True)
# txid2 should have 2 inputs and 2 outputs
assert_equal(2, len(tx2["vin"]))
assert_equal(2, len(tx2["vout"]))
# one output should be 0.2, the other should be ~1.3
v = [vout["value"] for vout in tx2["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 1.3, 0.01)
# Empty out node2's wallet
self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
# Fill node2's wallet with 10000 outputs corresponding to the same
# scriptPubKey
for i in range(5):
raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
tx = FromHex(CTransaction(), raw_tx)
tx.vin = []
tx.vout = [tx.vout[0]] * 2000
funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx))
signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
self.nodes[0].sendrawtransaction(signed_tx['hex'])
self.nodes[0].generate(1)
self.sync_all()
# Check that we can create a transaction that only requires ~100 of our
# utxos, without pulling in all outputs and creating a transaction that
# is way too big.
assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
if __name__ == '__main__':
    WalletGroupTest().main()
|
the-stack_0_16055 | # coding:utf-8
from django.conf import settings
from django.test.signals import setting_changed
try:
from django.utils.module_loading import import_string
except ImportError:
from django.utils.module_loading import import_by_path as import_string
from active_users.keys import AbstractActiveUserEntry
PREFIX = 'ACTIVE_USERS'
DEFAULTS = {
'KEY_EXPIRE': 20,
'KEY_CLASS': 'active_users.keys.ActiveUserEntry',
'EXCLUDE_URL_PATTERNS': []
}
class ActiveUsersSettings(object):
def __init__(self):
for key, default in DEFAULTS.items():
value = getattr(settings, '{0}_{1}'.format(PREFIX, key), default)
self.set_setting(key, value)
assert issubclass(self.KEY_CLASS, AbstractActiveUserEntry)
def set_setting(self, key, value):
setattr(
self, key, import_string(value) if key == 'KEY_CLASS' else value)
active_users_settings = ActiveUsersSettings()
def reload_settings(*args, **kwargs):
if kwargs['setting'].startswith(PREFIX):
key = kwargs['setting'].replace(PREFIX + '_', '')
if key in DEFAULTS:
active_users_settings.set_setting(
key, kwargs['value'] or DEFAULTS[key])
setting_changed.connect(reload_settings)
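# Illustrative Django settings (hypothetical values) that this module picks up:
#
#   ACTIVE_USERS_KEY_EXPIRE = 60
#   ACTIVE_USERS_KEY_CLASS = "myapp.keys.MyActiveUserEntry"
#
# active_users_settings.KEY_EXPIRE would then be 60 and KEY_CLASS the imported
# class; changing either setting at runtime is picked up via reload_settings().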
|
the-stack_0_16057 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import subprocess
from collections import namedtuple
from datetime import datetime, timezone
from threading import Thread
from time import sleep
import requests
from flask import Flask, jsonify, request
app = Flask(__name__)
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
Node = namedtuple('Node', ['endpoint', 'data'])
# A simple manager which tracks all event subscriptions
class Manager:
def __init__(self):
self._events = {}
self._nr_sent_events = 0
def subscribe(self, id_, endpoint, event_name, data):
data = data or {}
logger.info(f'[subscribe] id: "{id_}", endpoint:"{endpoint}", '
f'name: "{event_name}", data: %s', data)
if event_name not in self._events:
self._events[event_name] = {}
# Check whether the id is new
if id_ in self._events[event_name]:
return False
        self._events[event_name][id_] = Node(endpoint, data)
        return True
def unsubscribe(self, id_, event_name):
logger.info(f'[unsubscribe] id: "{id_}", name: "{event_name}"')
if event_name not in self._events:
return False
# Check whether the id exists
if id_ not in self._events[event_name]:
return False
        del self._events[event_name][id_]
        return True
def publish(self, event_name, data):
logger.info(f'[publish] name: "{event_name}", data: %s', data)
if event_name not in self._events:
return False
for node in self._events[event_name].values():
# filter for user (optional)
if 'user' in node.data and 'user' in data:
if node.data['user'] == data['user']:
self._send_event(node, event_name, data)
else:
self._send_event(node, event_name, data)
return True
def _send_event(self, node, event_name, data):
local_time = datetime.now(timezone.utc).astimezone()
requests.post(node.endpoint, json={
'eventType': event_name,
'type': 'com.microservices.python.template',
'specversion': '0.2',
'source': '/my-source',
'id': f'PYTHON-TEMPLATE-{self._nr_sent_events}',
'time': local_time.isoformat(),
'datacontenttype': 'application/json',
'data': data,
})
self._nr_sent_events = self._nr_sent_events + 1
manager = Manager()
@app.route('/events', methods=['POST'])
def subscribe():
    return jsonify({'success': manager.subscribe(
id_=request.json['id'],
endpoint=request.json['endpoint'],
event_name=request.json['event'],
data=request.json.get('data', {}),
)})
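# Illustrative subscription request (id and endpoint are hypothetical):
#
#   POST /events
#   {"id": "listener-1", "endpoint": "http://localhost:9000/hook",
#    "event": "heartbeat", "data": {"user": "max"}}
#
# Matching "heartbeat" events are then POSTed to the given endpoint as
# CloudEvents-style JSON by Manager._send_event().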
@app.route('/events', methods=['DELETE'])
def unsubscribe():
    return jsonify({'success': manager.unsubscribe(
id_=request.json['id'],
event_name=request.json['event'],
)})
@app.route('/publish', methods=['POST'])
def publish():
data = request.json.get('data', {})
if 'user' in request.json:
data['user'] = request.json['user']
    return jsonify({'success': manager.publish(
event_name=request.json['event'],
data=data,
)})
@app.route('/health', methods=['GET'])
def health():
return 'OK'
# Return errors as JSON objects
def app_error(e):
return jsonify({'message': str(e)}), 400
# Calls a callback every period with args
def set_interval(period, callback, **args):
def wrapper():
while True:
sleep(period)
callback(**args)
Thread(target=wrapper).start()
def heartbeat(user):
manager.publish('heartbeat', {
'user': user,
'time': str(datetime.now()),
})
if __name__ == '__main__':
app.register_error_handler(Exception, app_error)
set_interval(3, heartbeat, user='max')
set_interval(5, heartbeat, user='moritz')
app.run(host='0.0.0.0', port=8080)
|
the-stack_0_16059 | from contextlib import suppress
from datetime import datetime
from typing import Optional
from models_library.clusters import ClusterID
from models_library.projects import ProjectID
from models_library.projects_state import RunningState
from models_library.users import UserID
from pydantic import BaseModel, PositiveInt, validator
from simcore_postgres_database.models.comp_pipeline import StateType
from ...utils.db import DB_TO_RUNNING_STATE
class CompRunsAtDB(BaseModel):
run_id: PositiveInt
project_uuid: ProjectID
user_id: UserID
cluster_id: Optional[ClusterID]
iteration: PositiveInt
result: RunningState
created: datetime
modified: datetime
started: Optional[datetime]
ended: Optional[datetime]
@validator("result", pre=True)
@classmethod
def convert_result_from_state_type_enum_if_needed(cls, v):
if isinstance(v, str):
# try to convert to a StateType, if it fails the validations will continue
            # and pydantic will try to convert it to a RunningState later on
with suppress(ValueError):
v = StateType(v)
if isinstance(v, StateType):
return RunningState(DB_TO_RUNNING_STATE[StateType(v)])
return v
class Config:
orm_mode = True
schema_extra = {
"examples": [
# DB model
{
"run_id": 432,
"project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5",
"user_id": 132,
"cluster_id": 0,
"iteration": 42,
"result": "NOT_STARTED",
"created": "2021-03-01 13:07:34.19161",
"modified": "2021-03-01 13:07:34.19161",
},
{
"run_id": 43243,
"project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5",
"user_id": 132,
"cluster_id": 123,
"iteration": 12,
"result": "SUCCESS",
"created": "2021-03-01 13:07:34.19161",
"modified": "2021-03-01 13:07:34.19161",
"started": "2021-03-01 8:07:34.19161",
"ended": "2021-03-01 13:07:34.10",
},
]
}
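# Illustrative usage (added note; variable names hypothetical): with orm_mode
# enabled the model can be built directly from a database row, e.g.
#   run = CompRunsAtDB.from_orm(row)    # row: a comp_runs record
# and the validator above maps a stored StateType result onto RunningState.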
|