filename | text
---|---|
the-stack_0_17708 | import torch
import torch.nn.functional as F
import timm
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from ace import attack_confidence_estimation
def attack_example(file_name, true_label, transform, normalization):
image = Image.open(f'./images/{file_name}.jpg').convert('RGB')
input = transform(image).unsqueeze(0).cuda() # transform and add batch dimension
with torch.no_grad():
output = model(normalization(input))
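# softmax(...).max(1) returns (values, indices): [0] is the top confidence, [1] is the predicted class id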
orig_prediction = torch.nn.functional.softmax(output, dim=1).max(1)
print(f'Ground truth label is {true_label}. The predicted label is {orig_prediction[1].item()} with a confidence of {orig_prediction[0].item()}')
adversarial_example = attack_confidence_estimation(model=model, input=input, label=torch.tensor(true_label), normalization=normalization)
with torch.no_grad():
attacked_prediction = torch.nn.functional.softmax(model(normalization(adversarial_example)), dim=1).max(1)
print(f'After using ACE, the predicted label is still {attacked_prediction[1].item()} with a confidence of {attacked_prediction[0].item()}')
if __name__ == '__main__':
model = timm.create_model('efficientnet_b0', pretrained=True).cuda()
model.eval()
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
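# Pop the Normalize step out of the composed transform (assumed to sit at index 3) so the attack
# works on unnormalized pixels and normalization is applied separately before each forward pass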
normalization = transform.transforms.pop(3)
# A correct prediction example
print('=============== A correct prediction example: ===============')
attack_example(file_name='tank', true_label=847, transform=transform, normalization=normalization)
# An incorrect prediction example
print('=============== An incorrect prediction example: ===============')
attack_example(file_name='binoculars', true_label=447, transform=transform, normalization=normalization) |
the-stack_0_17710 | #! /usr/bin/python3
from __future__ import print_function
import sys
import time
import array
import os
#sys.path.append("shell")
import swapforth
class TetheredJ1a(swapforth.TetheredTarget):
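# The J1a is a 16-bit Forth core, so one cell is 2 bytes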
cellsize = 2
def open_ser(self, port, speed):
try:
import serial
except:
print("This tool needs PySerial, but it was not found")
sys.exit(1)
self.ser = serial.Serial(port, 115200, timeout=None, rtscts=0)
sys.stdout.write("115200...ok")
print("")
def reset(self, fullreset = True):
ser = self.ser
'''
ser.setDTR(1)
if fullreset:
ser.setRTS(1)
ser.setRTS(0)
ser.setDTR(0)
'''
def waitcr():
while ser.read(1) != b'\n':  # read() returns bytes on Python 3
pass
#waitcr()
ser.write(b'\r')
#waitcr()
for c in ' 1 tth !':
ser.write(c.encode('utf-8'))
ser.flush()
time.sleep(0.001)
ser.flushInput()
# print(repr(ser.read(ser.inWaiting())))
ser.write(b'\r')
while 1:
c = ser.read(1)
# print(repr(c))
if c == b'\x1e':
break
def boot(self, bootfile = None):
sys.stdout.write('Contacting... ')
print("")
self.reset()
print('established')
def interrupt(self):
self.reset(False)
def serialize(self):
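# Dump target memory with '0 here dump', parse the hex byte columns, and pad the image to 8 KiB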
l = self.command_response('0 here dump')
lines = l.strip().replace('\r', '').split('\n')
s = []
for l in lines:
l = l.split()
s += [int(b, 16) for b in l[1:17]]
s = array.array('B', s).tobytes().ljust(8192, b'\xff')  # tobytes(): array.tostring() was removed in Python 3.9
return array.array('H', s)
if __name__ == '__main__':
swapforth.main(TetheredJ1a)
|
the-stack_0_17715 | #copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
__all__ = ["ResNet", "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"]
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class ResNet():
def __init__(self, layers=50):
self.params = train_parameters
self.layers = layers
def net(self, input, args, class_dim=1000):
layers = self.layers
supported_layers = [18, 34, 50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 18:
depth = [2, 2, 2, 2]
elif layers == 34 or layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
num_filters = [64, 128, 256, 512]
conv = self.conv_bn_layer(
input=input, num_filters=64, filter_size=7, stride=2, act='relu',name="conv1", data_format=args.data_format)
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max',
data_format=args.data_format)
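# ResNet-50/101/152 are built from bottleneck blocks; ResNet-18/34 fall through to basic blocks below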
if layers >= 50:
for block in range(len(depth)):
for i in range(depth[block]):
if layers in [101, 152] and block == 2:
if i == 0:
conv_name="res"+str(block+2)+"a"
else:
conv_name="res"+str(block+2)+"b"+str(i)
else:
conv_name="res"+str(block+2)+chr(97+i)
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1, name=conv_name, data_format=args.data_format)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True, data_format=args.data_format)
if args.data_format == "NCHW":
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
else:
stdv = 1.0 / math.sqrt(pool.shape[-1] * 1.0)
out = fluid.layers.fc(input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv)))
else:
for block in range(len(depth)):
for i in range(depth[block]):
conv_name="res"+str(block+2)+chr(97+i)
conv = self.basic_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
is_first=block==i==0,
name=conv_name,
data_format=args.data_format)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True, data_format=args.data_format)
if args.data_format == "NCHW":
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
else:
stdv = 1.0 / math.sqrt(pool.shape[-1] * 1.0)
out = fluid.layers.fc(input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv)))
return out
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
groups=1,
act=None,
name=None,
data_format='NCHW'):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False,
name=name + '.conv2d.output.1',
data_format=data_format)
if name == "conv1":
bn_name = "batch_norm_" + name
else:
bn_name = "batch_norm" + name[3:]
return fluid.layers.batch_norm(input=conv,
act=act,
name=bn_name+'.output.1',
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance',
data_layout=data_format)
def shortcut(self, input, ch_out, stride, is_first, name, data_format):
if data_format == 'NCHW':
ch_in = input.shape[1]
else:
ch_in = input.shape[-1]
if ch_in != ch_out or stride != 1 or is_first == True:
return self.conv_bn_layer(input, ch_out, 1, stride, name=name, data_format=data_format)
else:
return input
def bottleneck_block(self, input, num_filters, stride, name, data_format):
conv0 = self.conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu',name=name+"_branch2a", data_format=data_format)
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
act='relu',
name=name+"_branch2b",
data_format=data_format)
conv2 = self.conv_bn_layer(
input=conv1, num_filters=num_filters * 4, filter_size=1, act=None, name=name+"_branch2c", data_format=data_format)
short = self.shortcut(input, num_filters * 4, stride, is_first=False, name=name + "_branch1", data_format=data_format)
return fluid.layers.elementwise_add(x=short, y=conv2, act='relu',name=name+".add.output.5")
def basic_block(self, input, num_filters, stride, is_first, name, data_format):
conv0 = self.conv_bn_layer(input=input, num_filters=num_filters, filter_size=3, act='relu', stride=stride,
name=name+"_branch2a", data_format=data_format)
conv1 = self.conv_bn_layer(input=conv0, num_filters=num_filters, filter_size=3, act=None,
name=name+"_branch2b", data_format=data_format)
short = self.shortcut(input, num_filters, stride, is_first, name=name + "_branch1", data_format=data_format)
return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')
def ResNet18():
model = ResNet(layers=18)
return model
def ResNet34():
model = ResNet(layers=34)
return model
def ResNet50():
model = ResNet(layers=50)
return model
def ResNet101():
model = ResNet(layers=101)
return model
def ResNet152():
model = ResNet(layers=152)
return model
|
the-stack_0_17718 | import paho.mqtt.client as mqtt
import configparser
import logging
import json
class Messaging:
""" This is a wrapper for the mqtt client. """
def __init__(self, config, subscription = None, on_message = None, clientId = None):
global on_connect
self.config = config
defaultHost = None  # use a real None so the missing-host check below can fire
if (clientId):
self.client = mqtt.Client(clientId)
else:
self.client = mqtt.Client()
self.client.enable_logger()
self.client.on_connect = on_connect
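# TLS with the default context, but skip certificate/hostname verification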
self.client.tls_set_context()
self.client.tls_insecure_set(True)
if (subscription):
self.client.user_data_set(subscription)
if (on_message):
self.client.on_message = on_message
username = config.get('username', None)
password = config.get('password', None)
if username is not None:
self.client.username_pw_set(username, password)
port = int(config.get('port', '1883'))
host = config.get('host', defaultHost)
print("Host: ", host, "port: ", port)
if host is None:
raise Exception("Host must be defined in the config file or in the servers section.")
self.client.connect(host, port)
def publish(self, topic, payload, qos = 0, retain = False):
self.client.publish(topic, payload, qos, retain)
def subscribe(self, topic):
self.client.subscribe(topic)
def loop_forever(self):
self.client.loop_forever()
def loop_start(self):
self.client.loop_start()
def on_connect(client, userdata, flags, rc):
if (userdata):
client.subscribe(userdata)
|
the-stack_0_17719 | from utils import *
import numpy as np
import h5py
import os
import pandas as pd
from PIL import Image
from tqdm import tqdm
def resize_images(image_list, im_size):
return_list = []
for im in image_list:
img = Image.open(im)
img = img.resize((im_size, im_size), Image.LANCZOS)  # ANTIALIAS is an alias of LANCZOS and was removed in Pillow 10
np_img = np.array(img)
return_list.append(np_img)
return return_list
def create_image_label_list(img_path, group, im_size, skip, all_labels):
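# Load the first 24 frames of a clip (keeping every 'skip'-th frame), resize them, and return them
# with the clip's label; clips shorter than 24 frames yield empty lists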
label = all_labels['label'].loc[int(group)]
image_list = os.listdir(img_path + os.sep + group)
if len(image_list) < 24:
return [], []
image_list = sorted(image_list[:24:skip])
images = resize_images([img_path + os.sep + group + os.sep + i for i in image_list], im_size)
return images, label
def make_hdf5(img_path, im_size, skip, all_labels, desired_labels, fname='data_hdf5.h5'):
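# Write one HDF5 group per clip containing 'images', 'label' and 'label_id' datasets, restricted to the desired labels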
indices = list(all_labels[all_labels['label'].isin(desired_labels)].index)
hf = h5py.File(fname, 'w')
for group in tqdm(indices):
group = str(group)
images, label = create_image_label_list(img_path, group, im_size, skip, all_labels)
if not images:
print('{} excluded, because of the short length'.format(group))
continue
label_id = desired_labels.index(label)
hfgroup = hf.create_group(group)
hfgroup.create_dataset('images', data=images)
hfgroup.create_dataset('label', data=label)
hfgroup.create_dataset('label_id', data=label_id)
hf.close()
if __name__ == "__main__":
# read config.ini and use the settings
param = get_configs()
data_path = param['data_path']
img_path = param['img_path']
train_labels = pd.read_csv(param['csv_train'], names=['label'], sep=';')
val_labels = pd.read_csv(param['csv_val'], names=['label'], sep=';')
all_labels = pd.read_csv(param['csv_labels'], sep=';')
labels = param['labels']
fn_postfix = str(len(labels))
print('labels are {}, length of {}'.format(labels, fn_postfix))
train_fn = data_path + os.sep + 'train_hdf5' + fn_postfix + '.h5'
val_fn = data_path + os.sep + 'val_hdf5' + fn_postfix + '.h5'
maker_params = {'img_path': img_path, 'im_size': param['im_size'], 'skip': param['skip'], 'desired_labels': labels}
make_hdf5(all_labels=train_labels, fname=train_fn, **maker_params)
make_hdf5(all_labels=val_labels, fname=val_fn, **maker_params) |
the-stack_0_17722 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: [email protected]
"""
Random methods
"""
import pytest
import os
from wpy.path import read_dict
from wpy.randoms import (
random_str
)
from lfsdb import FileStorage
from lfsdb.db import FileStorageError
from lfsdb.db.errors import FSQueryError
from lfsdb.db.cache import CacheTable
from lfsdb.db.client import FileTable
from lfsdb.sockets.db import SocketTable
root = '/tmp'
root = None
db_name = 'wpy_db'
table = 'wpy_table'
fs = FileStorage(root)
file_table = fs.get_db(db_name).get_table(table)
socket_table = SocketTable(db_name, table)
cache_table = CacheTable(db_name, table)
tables = [file_table, socket_table, cache_table]
table_root = os.path.join(fs.root, db_name, table)
def _origin_data(data):
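# Strip the auto-generated bookkeeping fields so stored documents can be compared with the raw input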
for k in ('_id', '_update_time', "_create_time"):
data.pop(k, None)
return data
def _handle_table_test(func):
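# Run the same test body against every backend (file, socket and cache tables), dropping the table before and after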
for table in tables:
table.drop()
func(table)
table.drop()
def test_insert():
_handle_table_test(_test_insert)
def _test_insert(db):
name = random_str(6)
doc = {
"name": name
}
# Check that the inserted document was written to the backing file
_id = db.insert(doc)
if isinstance(db, FileTable):
path = os.path.join(table_root, _id)
data = read_dict(path)
data = _origin_data(data)
assert doc == data
data = db.find_by_id(_id)
data = _origin_data(data)
assert doc == data
doc['_id'] = _id
with pytest.raises(FileStorageError) as excinfo:
db.insert(doc)
assert str(excinfo) == '{}._id {} is exists'.format(table, _id)
db.drop()
assert not os.path.exists(table_root)
def test_find():
_handle_table_test(_test_find)
def _test_find(db):
name = random_str(6)
doc = { "name": name}
db.drop()
db.insert(doc)
db.insert(doc)
doc['age'] = 12
db.insert(doc)
# Empty query condition
docs = db.find()
assert len(docs) == 3
docs = db.find({ "name": name })
assert len(docs) == 3
docs = db.find({ "name": name, "age": 12 })
assert len(docs) == 1
doc = db.find_one({"age": 12}, {})
assert len(doc.keys()) == 5
doc = db.find_one({"age": 12}, {"name": 1})
assert len(doc.keys()) == 2
with pytest.raises(FSQueryError) as exe_info:
doc = db.find_one({"age": 12}, {"name": 1, "age": 0})
assert str(exe_info) == ('Projection cannot have a mix of inclusion'
' and exclusion.')
doc = db.find_one({"age": 12}, {"name": 1, "_id": 0})
assert len(doc.keys()) == 2
db.drop()
def test_update():
_handle_table_test(_test_update)
def _test_update(db):
# TODO: caching
name = random_str(6)
doc = { "name": name}
db.insert(doc)
_id = db.insert(doc)
insert_utime = db.find_by_id(_id).get("_update_time")
db.insert(doc)
count = db.update(doc, {"name": "wxnacy"})
assert count == 3
db.update({"_id": _id}, {"name": "wxn"})
data = db.find_by_id(_id)
update_utime = data.get("_update_time")
# Check that the update timestamp changed
assert insert_utime < update_utime
data = db.find_by_id(_id)
data = _origin_data(data)
assert { "name": "wxn" } == data
db.drop()
def test_delete():
_handle_table_test(_test_delete)
def _test_delete(db):
db.drop()
name = random_str(6)
doc = { "name": name}
db.insert(doc)
_id = db.insert(doc)
db.insert(doc)
assert db.delete({ "_id": _id }) == 1
docs = db.find()
assert len(docs) == 2
count = db.delete(doc)
assert count == 2
db.drop()
def test_sort():
_handle_table_test(_test_sort)
def _test_sort(db):
db.drop()
arr = [{"age": 5, "id": 2}, {"age": 5, "id": 5}, {"age": 3, "id": 4}]
for a in arr:
db.insert(a)
items = db.find(sorter = [('age', 1), ('id', -1)])
for item in items:
item.pop('_id', None)
item.pop('_create_time', None)
item.pop('_update_time', None)
assert items == [{"age": 3, "id": 4},{"age": 5, "id": 5}, {"age": 5, "id": 2}]
db.drop()
socket_table.close()
|
the-stack_0_17723 | #!/usr/bin/env python
import roshelper
import rospy
from geometry_msgs.msg import Vector3
from std_msgs.msg import Float32, Bool
node_name = "Input"
n = roshelper.Node(node_name, anonymous=False)
# A class for the manipulator arm, still needs actual servos,
# motors and stuff
@n.entry_point() #(exp_a=1, exp_b=1, exp_c=1)
class TenderBotInput(object):
button_state = False
last_button_state = False
# ctor, start service
def __init__(self): # (self, exp_a, exp_b, exp_c)
pass
def check_if_publish_button(self):
return self.button_state == self.last_button_state
# Publishes the end effector position
@n.publisher(node_name + "/button", Bool)
def publish_button(self):
msg = Bool()
msg.data = self.button_state
return msg
@n.main_loop(frequency=30)
def run(self):
if self.check_if_publish_button():
self.publish_button()
if __name__ == "__main__":
n.start(spin=True)
|
the-stack_0_17725 | __all__ = [
'RulebaseAgentBase',
'RulebaseAgentRandomAlpha',
'RulebaseAgentWrfWeepRandomAlpha',
'RulebaseAgentWrfWeepNoAction',
'RulebaseAgentWepegWeepRandomAlpha',
'RulebaseAgentGarnet',
]
from .rulebase_policy import *
from fice_nike.agent_base import AgentBase
class RulebaseAgentBase(AgentBase):
_policy = None
def __init__(self, *args):
super().__init__(*args)
def policy(self, frame_data):
data = {
'frame_data': frame_data,
'player': self.player,
}
return self._policy.update(data)
class RulebaseAgentRandomAlpha(RulebaseAgentBase):
def __init__(self, *args):
super().__init__(*args)
self._policy = RulebasePolicyRandomAlpha()
class RulebaseAgentWrfWeepRandomAlpha(RulebaseAgentBase):
def __init__(self, *args):
super().__init__(*args)
self._policy = RulebasePolicyWrfWeepRandomAlpha()
class RulebaseAgentWrfWeepNoAction(RulebaseAgentBase):
def __init__(self, *args):
super().__init__(*args)
self._policy = RulebasePolicyWrfWeepNoAction()
class RulebaseAgentWepegWeepRandomAlpha(RulebaseAgentBase):
def __init__(self, *args):
super().__init__(*args)
self._policy = RulebasePolicyWepegWeepRandomAlpha()
class RulebaseAgentGarnet(RulebaseAgentBase):
def __init__(self, *args):
super().__init__(*args)
self._policy = RulebasePolicyGarnet()
|
the-stack_0_17728 | # -*- coding: utf-8 -*-
from setuptools import find_packages, setup
from setuptools.extension import Extension
from Cython.Build import cythonize
with open("README.md") as f:
readme = f.read()
with open("LICENSE.txt") as f:
license = f.read()
extensions = [
Extension(
"tenforty.ots_2020",
["tenforty/ots/ots_2020.pyx"],
libraries=[],
include_dirs=[],
),
Extension(
"tenforty.ots_2019",
["tenforty/ots/ots_2019.pyx"],
libraries=[],
include_dirs=[],
),
Extension(
"tenforty.ots_2018",
["tenforty/ots/ots_2018.pyx"],
libraries=[],
include_dirs=[],
),
Extension(
"tenforty.ots_2017",
["tenforty/ots/ots_2017.pyx"],
libraries=[],
include_dirs=[],
),
]
setup(
name="tenforty",
version="0.1.0",
description="Compute US federal taxes, and state taxes for some states.",
long_description=readme,
author="Mike Macpherson",
author_email="[email protected]",
url="https://github.com/mmacpherson/tenforty",
license=license,
packages=find_packages(exclude=("tests", "docs")),
# cmdclass=dict(build_ext=build_ext),
ext_modules=cythonize(extensions),
zip_safe=False,
)
|
the-stack_0_17729 | # Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-x',
'--xml_file',
help='A Fast-RTPS XML configuration file',
required=False
)
parser.add_argument(
'-f',
'--demands_file',
help='Filename of the demands configuration file',
required=False,
default=None
)
parser.add_argument(
'-n',
'--number_of_samples',
help='The number of measurements to take for each payload',
required=False,
default='10000'
)
parser.add_argument(
'-s',
'--security',
action='store_true',
help='Enables security (Defaults: disable)',
required=False
)
parser.add_argument(
'-i',
'--interprocess',
action='store_true',
help='Publisher and subscribers in separate processes. Defaults:False',
required=False
)
parser.add_argument(
'-d',
'--data_sharing',
choices=['on', 'off'],
help='Explicitly enable/disable data sharing. (Defaults: Fast-DDS default settings)',
required=False
)
parser.add_argument(
'-l',
'--data_loans',
action='store_true',
help='Enable the use of the loan sample API (Defaults: disable)',
required=False
)
parser.add_argument(
'-r',
'--reliability',
action='store_true',
help='Run with RELIABLE reliability (Defaults: disable)',
required=False
)
parser.add_argument(
'--shared_memory',
choices=['on', 'off'],
help='Explicitly enable/disable shared memory transport. (Defaults: Fast-DDS default settings)',
required=False
)
# Parse arguments
args = parser.parse_args()
xml_file = args.xml_file
security = args.security
interprocess = args.interprocess
if security and not interprocess:
print('Intra-process delivery NOT supported with security')
exit(1) # Exit with error
# Check that samples is positive
if str.isdigit(args.number_of_samples) and int(args.number_of_samples) > 0:
samples = str(args.number_of_samples)
else:
print(
'"number_of_samples" must be a positive integer, NOT {}'.format(
args.number_of_samples
)
)
exit(1) # Exit with error
# Demands files options
demands_options = []
if args.demands_file:
if not os.path.isfile(args.demands_file):
print('Demands file "{}" is NOT a file'.format(args.demands_file))
exit(1) # Exit with error
else:
demands_options = [
'--file',
args.demands_file,
]
# XML options
filename_options = 'default'
xml_options = []
if xml_file:
if not os.path.isfile(xml_file):
print('XML file "{}" is NOT a file'.format(xml_file))
exit(1) # Exit with error
else:
xml_options = ['--xml', xml_file]
# Get QoS from XML
filename_options = xml_file.split('/')[-1].split('\\')[-1]
filename_options = filename_options.split('.')[-2].split('_')[1:]
filename_options = '_'.join(filename_options)
# Data sharing and loans options
# modify output file names
if args.data_sharing and 'on' == args.data_sharing and args.data_loans:
filename_options += '_data_loans_and_sharing'
elif args.data_sharing and 'on' == args.data_sharing:
filename_options += '_data_sharing'
elif args.data_loans:
filename_options += '_data_loans'
# add flags to the command line
data_options = []
if args.data_sharing:
if 'on' == args.data_sharing:
data_options += ['--data_sharing=on']
else:
data_options += ['--data_sharing=off']
if args.data_loans:
data_options += ['--data_loans']
reliability_options = []
if args.reliability:
reliability_options = ['--reliability=reliable']
else:
reliability_options = ['--reliability=besteffort']
if args.shared_memory:
if 'on' == args.shared_memory:
data_options += ['--shared_memory=on']
else:
data_options += ['--shared_memory=off']
# Environment variables
executable = os.environ.get('LATENCY_TEST_BIN')
certs_path = os.environ.get('CERTS_PATH')
# Check that executable exists
if executable:
if not os.path.isfile(executable):
print('LATENCY_TEST_BIN does NOT specify a file')
exit(1) # Exit with error
else:
print('LATENCY_TEST_BIN is NOT set')
exit(1) # Exit with error
# Security
security_options = []
if security is True:
if certs_path:
if os.path.isdir(certs_path):
security_options = ['--security=true', '--certs=' + certs_path]
else:
print('CERTS_PATH does NOT specify a directory')
exit(1) # Exit with error
else:
print('Cannot find CERTS_PATH environment variable')
exit(1) # Exit with error
# Domain
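# Derive a quasi-unique domain id from the PID so concurrent test runs do not interfere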
domain = str(os.getpid() % 230)
domain_options = ['--domain', domain]
if interprocess is True:
# Base of test command for publisher agent
pub_command = [
executable,
'publisher',
'--samples',
samples,
'--export_raw_data',
]
# Base of test command for subscriber agent
sub_command = [
executable,
'subscriber',
]
# Manage security
if security is True:
pub_command.append(
'./measurements_interprocess_{}_security.csv'.format(
filename_options
)
)
pub_command += security_options
sub_command += security_options
else:
pub_command.append(
'./measurements_interprocess_{}.csv'.format(
filename_options
)
)
pub_command += domain_options
pub_command += xml_options
pub_command += demands_options
pub_command += data_options
pub_command += reliability_options
sub_command += domain_options
sub_command += xml_options
sub_command += demands_options
sub_command += data_options
sub_command += reliability_options
print('Publisher command: {}'.format(
' '.join(element for element in pub_command)),
flush=True
)
print('Subscriber command: {}'.format(
' '.join(element for element in sub_command)),
flush=True
)
# Spawn processes
publisher = subprocess.Popen(pub_command)
subscriber = subprocess.Popen(sub_command)
# Wait until finish
subscriber.communicate()
publisher.communicate()
if subscriber.returncode != 0:
exit(subscriber.returncode)
elif publisher.returncode != 0:
exit(publisher.returncode)
else:
# Base of test command to execute
command = [
executable,
'both',
'--samples',
samples,
'--export_raw_data',
]
# Manage security
if security is True:
command.append(
'./measurements_intraprocess_{}_security.csv'.format(
filename_options
)
)
command += security_options
else:
command.append(
'./measurements_intraprocess_{}.csv'.format(
filename_options
)
)
command += domain_options
command += xml_options
command += demands_options
command += data_options
command += reliability_options
print('Executable command: {}'.format(
' '.join(element for element in command)),
flush=True
)
# Spawn process
both = subprocess.Popen(command)
# Wait until finish
both.communicate()
exit(both.returncode)
exit(0)
|
the-stack_0_17731 | import torch
class SamplingResult(object):
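# Holds the outcome of bbox sampling: positive/negative sample indices, their boxes and the matched ground-truth boxes/labels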
def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, gt_polygons, assign_result,
gt_flags):
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.pos_bboxes = bboxes[pos_inds]
self.neg_bboxes = bboxes[neg_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_bboxes.shape[0]
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
if gt_polygons is not None:
self.pos_gt_polygon = gt_polygons[self.pos_assigned_gt_inds, :]
if assign_result.labels is not None:
self.pos_gt_labels = assign_result.labels[pos_inds]
else:
self.pos_gt_labels = None
@property
def bboxes(self):
return torch.cat([self.pos_bboxes, self.neg_bboxes])
|
the-stack_0_17732 | import sys
import traceback
import ujson as json
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError
from cloudaux.aws.sts import boto3_cached_conn
from consoleme.config import config
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.role_updater.schemas import RoleUpdaterRequest
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics"))()
async def update_role(event):
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"event": event,
"message": "Working on event",
}
log.debug(log_data)
if not isinstance(event, list):
raise Exception("The passed event must be a list.")
# Let's normalize all of the policies to JSON if they are not already
for d in event:
for i in d.get("inline_policies", []):
if i.get("policy_document") and isinstance(i.get("policy_document"), dict):
i["policy_document"] = json.dumps(
i["policy_document"], escape_forward_slashes=False
)
if d.get("assume_role_policy_document", {}):
if isinstance(
d.get("assume_role_policy_document", {}).get(
"assume_role_policy_document"
),
dict,
):
d["assume_role_policy_document"][
"assume_role_policy_document"
] = json.dumps(
d["assume_role_policy_document"]["assume_role_policy_document"],
escape_forward_slashes=False,
)
bad_validation = RoleUpdaterRequest().validate(event, many=True)
if bad_validation:
log_data["error"] = bad_validation
log.error(log_data)
return {"error_msg": "invalid schema passed", "detail_error": bad_validation}
event = RoleUpdaterRequest().load(event, many=True)
result = {"success": False}
for d in event:
arn = d["arn"]
aws_session_name = "roleupdater-" + d["requester"]
account_number = await parse_account_id_from_arn(arn)
role_name = await parse_role_name_from_arn(arn)
# TODO: Make configurable
client = boto3_cached_conn(
"iam",
account_number=account_number,
assume_role=config.get("policies.role_name", "ConsoleMe"),
session_name=aws_session_name,
)
inline_policies = d.get("inline_policies", [])
managed_policies = d.get("managed_policies", [])
assume_role_doc = d.get("assume_role_policy_document", {})
tags = d.get("tags", [])
if (
not inline_policies
and not managed_policies
and not assume_role_doc
and not tags
):
result["message"] = f"Invalid request. No response taken on event: {event}"
return result
try:
for policy in inline_policies:
await update_inline_policy(client, role_name, policy)
for policy in managed_policies:
await update_managed_policy(client, role_name, policy)
if assume_role_doc:
await update_assume_role_document(client, role_name, assume_role_doc)
for tag in tags:
await update_tags(client, role_name, tag)
except ClientError as ce:
result["message"] = ce.response["Error"]
result["Traceback"] = traceback.format_exc()
return result
result["success"] = True
return result
async def parse_account_id_from_arn(arn):
return arn.split(":")[4]
async def parse_role_name_from_arn(arn):
return arn.split("/")[-1]
async def update_inline_policy(client, role_name, policy):
log.debug(
{"message": "Updating inline policy", "role_name": role_name, "policy": policy}
)
if policy.get("action") == "attach":
response = await sync_to_async(client.put_role_policy)(
RoleName=role_name,
PolicyName=policy["policy_name"],
PolicyDocument=policy["policy_document"],
)
elif policy.get("action") == "detach":
response = await sync_to_async(client.delete_role_policy)(
RoleName=role_name, PolicyName=policy["policy_name"]
)
else:
raise Exception("Unable to update managed policy")
return response
async def update_managed_policy(client, role_name, policy):
log.debug(
{"message": "Updating managed policy", "role_name": role_name, "policy": policy}
)
if policy.get("action") == "attach":
response = await sync_to_async(client.attach_role_policy)(
PolicyArn=policy["arn"], RoleName=role_name
)
elif policy.get("action") == "detach":
response = await sync_to_async(client.detach_role_policy)(
PolicyArn=policy["arn"], RoleName=role_name
)
else:
raise Exception("Unable to update managed policy.")
return response
async def update_assume_role_document(client, role_name, assume_role_doc):
log.debug(
{
"message": "Updating assume role doc",
"role_name": role_name,
"assume_role_doc": assume_role_doc,
}
)
response = None
if assume_role_doc.get("action", "") in ["create", "update"]:
response = await sync_to_async(client.update_assume_role_policy)(
RoleName=role_name,
PolicyDocument=assume_role_doc["assume_role_policy_document"],
)
return response
# Log or report result?
async def update_tags(client, role_name, tag):
log.debug({"message": "Updating tag", "role_name": role_name, "tag": tag})
if tag.get("action") == "add":
response = await sync_to_async(client.tag_role)(
RoleName=role_name, Tags=[{"Key": tag["key"], "Value": tag["value"]}]
)
elif tag.get("action") == "remove":
response = await sync_to_async(client.untag_role)(
RoleName=role_name, TagKeys=[tag["key"]]
)
else:
raise Exception("Unable to update tags.")
return response
|
the-stack_0_17734 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2020 Snowflake Computing Inc. All right reserved.
#
import glob
import gzip
import os
import sys
import time
from filecmp import cmp
from logging import getLogger
import mock
import pytest
import requests
from snowflake.connector.constants import UTF8
from snowflake.connector.file_transfer_agent import SnowflakeFileTransferAgent
from ..generate_test_files import generate_k_lines_of_n_files
try:
from parameters import (CONNECTION_PARAMETERS_ADMIN)
except ImportError:
CONNECTION_PARAMETERS_ADMIN = {}
logger = getLogger(__name__)
# Mark every test in this module as a gcp test
pytestmark = pytest.mark.gcp
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_put_get_with_gcp(tmpdir, conn_cnx, db_parameters):
"""[gcp] Puts and Gets a small text using gcp."""
# create a data file
fname = str(tmpdir.join('test_put_get_with_gcp_token.txt.gz'))
with gzip.open(fname, 'wb') as f:
original_contents = "123,test1\n456,test2\n"
f.write(original_contents.encode(UTF8))
tmp_dir = str(tmpdir.mkdir('test_put_get_with_gcp_token'))
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password']) as cnx:
with cnx.cursor() as csr:
csr.execute("rm @~/snow32806")
csr.execute(
"create or replace table snow32806 (a int, b string)")
try:
csr.execute(
"put file://{} @%snow32806 auto_compress=true parallel=30".format(
fname))
rec = csr.fetchone()
assert rec[6] == 'UPLOADED'
csr.execute("copy into snow32806")
csr.execute(
"copy into @~/snow32806 from snow32806 "
"file_format=( format_name='common.public.csv' "
"compression='gzip')")
csr.execute(
"get @~/snow32806 file://{} pattern='snow32806.*'".format(
tmp_dir))
rec = csr.fetchone()
assert rec[0].startswith(
'snow32806'), 'A file downloaded by GET'
assert rec[1] == 36, 'Return right file size'
assert rec[2] == 'DOWNLOADED', 'Return DOWNLOADED status'
assert rec[3] == '', 'Return no error message'
finally:
csr.execute("drop table snow32806")
csr.execute("rm @~/snow32806")
files = glob.glob(os.path.join(tmp_dir, 'snow32806*'))
with gzip.open(files[0], 'rb') as fd:
contents = fd.read().decode(UTF8)
assert original_contents == contents, (
'Output is different from the original file')
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_put_copy_many_files_gcp(tmpdir, conn_cnx, db_parameters):
"""[gcp] Puts and Copies many files."""
# generates N files
number_of_files = 10
number_of_lines = 1000
tmp_dir = generate_k_lines_of_n_files(number_of_lines, number_of_files, tmp_dir=str(tmpdir.mkdir('data')))
files = os.path.join(tmp_dir, 'file*')
def run(csr, sql):
sql = sql.format(
files=files,
name=db_parameters['name'])
return csr.execute(sql).fetchall()
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password']) as cnx:
with cnx.cursor() as csr:
run(csr, """
create or replace table {name} (
aa int,
dt date,
ts timestamp,
tsltz timestamp_ltz,
tsntz timestamp_ntz,
tstz timestamp_tz,
pct float,
ratio number(6,2))
""")
try:
all_recs = run(csr, "put file://{files} @%{name}")
assert all([rec[6] == 'UPLOADED' for rec in all_recs])
run(csr, "copy into {name}")
rows = sum([rec[0] for rec in run(csr, "select count(*) from "
"{name}")])
assert rows == number_of_files * number_of_lines, \
'Number of rows'
finally:
run(csr, "drop table if exists {name}")
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_put_copy_duplicated_files_gcp(tmpdir, conn_cnx,
db_parameters):
"""[gcp] Puts and Copies duplicated files."""
# generates N files
number_of_files = 5
number_of_lines = 100
tmp_dir = generate_k_lines_of_n_files(number_of_lines, number_of_files, tmp_dir=str(tmpdir.mkdir('data')))
files = os.path.join(tmp_dir, 'file*')
def run(csr, sql):
sql = sql.format(
files=files,
name=db_parameters['name'])
return csr.execute(sql).fetchall()
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password']) as cnx:
with cnx.cursor() as csr:
run(csr, """
create or replace table {name} (
aa int,
dt date,
ts timestamp,
tsltz timestamp_ltz,
tsntz timestamp_ntz,
tstz timestamp_tz,
pct float,
ratio number(6,2))
""")
try:
success_cnt = 0
skipped_cnt = 0
for rec in run(csr, "put file://{files} @%{name}"):
logger.info('rec=%s', rec)
if rec[6] == 'UPLOADED':
success_cnt += 1
elif rec[6] == 'SKIPPED':
skipped_cnt += 1
assert success_cnt == number_of_files, 'uploaded files'
assert skipped_cnt == 0, 'skipped files'
deleted_cnt = 0
run(csr, "rm @%{name}/file0")
deleted_cnt += 1
run(csr, "rm @%{name}/file1")
deleted_cnt += 1
run(csr, "rm @%{name}/file2")
deleted_cnt += 1
success_cnt = 0
skipped_cnt = 0
for rec in run(csr, "put file://{files} @%{name}"):
logger.info('rec=%s', rec)
if rec[6] == 'UPLOADED':
success_cnt += 1
elif rec[6] == 'SKIPPED':
skipped_cnt += 1
assert success_cnt == number_of_files, \
'uploaded files in the second time'
assert skipped_cnt == 0, \
'skipped files in the second time'
run(csr, "copy into {name}")
rows = 0
for rec in run(csr, "select count(*) from {name}"):
rows += rec[0]
assert rows == number_of_files * number_of_lines, \
'Number of rows'
finally:
run(csr, "drop table if exists {name}")
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_put_get_large_files_gcp(tmpdir, conn_cnx, db_parameters):
"""[gcp] Puts and Gets Large files."""
number_of_files = 3
number_of_lines = 200000
tmp_dir = generate_k_lines_of_n_files(number_of_lines, number_of_files, tmp_dir=str(tmpdir.mkdir('data')))
files = os.path.join(tmp_dir, 'file*')
output_dir = os.path.join(tmp_dir, 'output_dir')
os.makedirs(output_dir)
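# No-op progress callback handed to PUT/GET through _put_callback/_get_callback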
class cb(object):
def __init__(self, filename, filesize, **_):
pass
def __call__(self, bytes_amount):
pass
def run(cnx, sql):
return cnx.cursor().execute(
sql.format(
files=files,
dir=db_parameters['name'],
output_dir=output_dir),
_put_callback_output_stream=sys.stdout,
_get_callback_output_stream=sys.stdout,
_get_callback=cb,
_put_callback=cb).fetchall()
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password']) as cnx:
try:
all_recs = run(cnx, "PUT file://{files} @~/{dir}")
assert all([rec[6] == 'UPLOADED' for rec in all_recs])
for _ in range(60):
for _ in range(100):
all_recs = run(cnx, "LIST @~/{dir}")
if len(all_recs) == number_of_files:
break
# you may not get the files right after PUT command
# due to the nature of gcs blob, which synchronizes
# data eventually.
time.sleep(1)
else:
# wait for another second and retry.
# this could happen if the files are partially available
# but not all.
time.sleep(1)
continue
break # success
else:
pytest.fail(
'cannot list all files. Potentially '
'PUT command missed uploading Files: {}'.format(all_recs))
all_recs = run(cnx, "GET @~/{dir} file://{output_dir}")
assert len(all_recs) == number_of_files
assert all([rec[2] == 'DOWNLOADED' for rec in all_recs])
finally:
run(cnx, "RM @~/{dir}")
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_get_gcp_file_object_http_400_error(tmpdir, conn_cnx, db_parameters):
fname = str(tmpdir.join('test_put_get_with_gcp_token.txt.gz'))
with gzip.open(fname, 'wb') as f:
original_contents = "123,test1\n456,test2\n"
f.write(original_contents.encode(UTF8))
tmp_dir = str(tmpdir.mkdir('test_put_get_with_gcp_token'))
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password']) as cnx:
with cnx.cursor() as csr:
csr.execute("rm @~/snow32807")
csr.execute(
"create or replace table snow32807 (a int, b string)")
try:
from requests import put, get
def mocked_put(*args, **kwargs):
if mocked_put.counter == 0:
mocked_put.counter += 1
exc = requests.exceptions.HTTPError(response=requests.Response())
exc.response.status_code = 400
raise exc
else:
return put(*args, **kwargs)
mocked_put.counter = 0
def mocked_file_agent(*args, **kwargs):
agent = SnowflakeFileTransferAgent(*args, **kwargs)
agent._update_file_metas_with_presigned_url = mock.MagicMock(
wraps=agent._update_file_metas_with_presigned_url
)
mocked_file_agent.agent = agent
return agent
with mock.patch('snowflake.connector.cursor.SnowflakeFileTransferAgent',
side_effect=mocked_file_agent):
with mock.patch('requests.put', side_effect=mocked_put):
csr.execute(
"put file://{} @%snow32807 auto_compress=true parallel=30".format(
fname))
assert mocked_file_agent.agent._update_file_metas_with_presigned_url.call_count == 2
rec = csr.fetchone()
assert rec[6] == 'UPLOADED'
csr.execute("copy into snow32807")
csr.execute(
"copy into @~/snow32807 from snow32807 "
"file_format=( format_name='common.public.csv' "
"compression='gzip')")
def mocked_get(*args, **kwargs):
if mocked_get.counter == 0:
mocked_get.counter += 1
exc = requests.exceptions.HTTPError(response=requests.Response())
exc.response.status_code = 400
raise exc
else:
return get(*args, **kwargs)
mocked_get.counter = 0
def mocked_file_agent(*args, **kwargs):
agent = SnowflakeFileTransferAgent(*args, **kwargs)
agent._update_file_metas_with_presigned_url = mock.MagicMock(
wraps=agent._update_file_metas_with_presigned_url
)
mocked_file_agent.agent = agent
return agent
with mock.patch('snowflake.connector.cursor.SnowflakeFileTransferAgent',
side_effect=mocked_file_agent):
with mock.patch('requests.get', side_effect=mocked_get):
csr.execute(
"get @~/snow32807 file://{} pattern='snow32807.*'".format(
tmp_dir))
assert mocked_file_agent.agent._update_file_metas_with_presigned_url.call_count == 2
rec = csr.fetchone()
assert rec[0].startswith(
'snow32807'), 'A file downloaded by GET'
assert rec[1] == 36, 'Return right file size'
assert rec[2] == 'DOWNLOADED', 'Return DOWNLOADED status'
assert rec[3] == '', 'Return no error message'
finally:
csr.execute("drop table snow32807")
csr.execute("rm @~/snow32807")
files = glob.glob(os.path.join(tmp_dir, 'snow32807*'))
with gzip.open(files[0], 'rb') as fd:
contents = fd.read().decode(UTF8)
assert original_contents == contents, (
'Output is different from the original file')
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_auto_compress_off_gcp(tmpdir, conn_cnx, db_parameters):
"""[gcp] Puts and Gets a small text using gcp with no auto compression."""
fname = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data', 'example.json'))
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password'],
) as cnx:
with cnx.cursor() as cursor:
try:
cursor.execute("create or replace stage teststage")
cursor.execute("put file://{} @teststage auto_compress=false".format(fname))
cursor.execute("get @teststage file://{}".format(str(tmpdir)))
downloaded_file = os.path.join(str(tmpdir), 'example.json')
assert cmp(fname, downloaded_file)
finally:
cursor.execute("drop stage teststage")
|
the-stack_0_17737 | from conch.analysis.formants import FormantTrackFunction
import librosa
from conch.analysis.segments import FileSegment, SignalSegment
def test_formants_praat(base_filenames):
for f in base_filenames:
wavpath = f + '.wav'
func = FormantTrackFunction(time_step=0.01,
window_length=0.025, num_formants=5, max_frequency=5500)
formants = func(wavpath)
sig, sr = librosa.load(wavpath)
formants2 = func(SignalSegment(sig, sr))
# Things are not exact...
# assert formants == formants2
|
the-stack_0_17739 |
# Classic non-recursive binary search
def bin_search(array: list, find: int) -> int:
start, end = 0, len(array) - 1
while start <= end:
# Get center of result
center = int((start + end) / 2)
if find == array[center]:
return center
elif find > array[center]:
start = center + 1
else:
end = center - 1
return -2
# Problem: the first line contains an integer 1 <= n <= 10^5 and an array A[1..n] of n distinct natural numbers
# not exceeding 10^9, given in increasing order; the second line contains an integer 1 <= k <= 10^5
# and k natural numbers b1...bk, each not exceeding 10^9. For every i from 1 to k,
# print the index 1 <= j <= n for which A[j] = bi, or -1 if no such j exists.
# Input:
# 5 1 5 8 12 13
# 5 8 1 23 1 11
# Output
# 3 1 -1 1 -1
def main():
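# First line: n followed by the sorted array; second line: k followed by the queries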
array_len, *array = map(int, input().split())
find_len, *find_array = map(int, input().split())
for find in find_array:
print(bin_search(array, find) + 1, end=" ")
if __name__ == '__main__':
main()
|
the-stack_0_17741 | #!/usr/bin/env python3
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pathlib import Path
import sys
from apt import Cache
from catkin_pkg.packages import find_packages
from ros_buildfarm.argument import add_argument_os_code_name
from ros_buildfarm.argument import add_argument_os_name
from ros_buildfarm.argument import add_argument_output_dir
from ros_buildfarm.argument import add_argument_package_selection_args
from ros_buildfarm.argument import add_argument_rosdistro_name
from ros_buildfarm.argument import add_argument_skip_rosdep_keys
from ros_buildfarm.colcon import locate_packages
from ros_buildfarm.common import get_binary_package_versions
from ros_buildfarm.common import Scope
from rosdep2 import create_default_installer_context
from rosdep2.catkin_support import get_catkin_view
from rosdep2.catkin_support import resolve_for_os
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description='Lists available binary packages and versions which are '
'needed to satisfy rosdep keys for ROS packages in the workspace')
# Positional
add_argument_rosdistro_name(parser)
add_argument_os_name(parser)
add_argument_os_code_name(parser)
add_argument_output_dir(parser)
add_argument_package_selection_args(parser)
add_argument_skip_rosdep_keys(parser)
parser.add_argument(
'--package-root',
nargs='+',
help='The path to the directory containing packages')
args = parser.parse_args(argv)
workspace_root = args.package_root[-1]
os.chdir(workspace_root)
with Scope('SUBSECTION', 'mark packages with IGNORE files'):
all_packages = locate_packages(workspace_root)
selected_packages = all_packages
if args.package_selection_args:
print(
'Using package selection arguments:',
args.package_selection_args)
selected_packages = locate_packages(
workspace_root, extra_args=args.package_selection_args)
to_ignore = all_packages.keys() - selected_packages.keys()
print('Ignoring %d packages' % len(to_ignore))
for package in sorted(to_ignore):
print('-', package)
package_root = all_packages[package]
Path(package_root, 'COLCON_IGNORE').touch()
print('There are %d packages which meet selection criteria' %
len(selected_packages))
with Scope('SUBSECTION', 'Enumerating packages needed to build'):
# find all of the underlay packages
underlay_pkgs = {}
all_underlay_pkg_names = set()
for package_root in args.package_root[0:-1]:
print("Crawling for packages in '%s'" % package_root)
underlay_pkgs.update(find_packages(package_root))
# Check for a colcon index for non-ROS package detection
colcon_index = os.path.join(package_root, 'colcon-core', 'packages')
try:
all_underlay_pkg_names.update(os.listdir(colcon_index))
except FileNotFoundError:
pass
underlay_pkg_names = [pkg.name for pkg in underlay_pkgs.values()]
print('Found the following ROS underlay packages:')
for pkg_name in sorted(underlay_pkg_names):
print(' -', pkg_name)
# get direct build dependencies
package_root = args.package_root[-1]
non_ros_package_paths = set(
d for d in selected_packages.values()
if not os.path.isfile(os.path.join(d, 'package.xml')))
print("Crawling for packages in '%s'" % package_root)
pkgs = find_packages(package_root, exclude_paths=non_ros_package_paths)
pkg_names = [pkg.name for pkg in pkgs.values()]
print('Found the following ROS packages:')
for pkg_name in sorted(pkg_names):
print(' -', pkg_name)
# get build dependencies and map them to binary packages
all_pkgs = set(pkgs.values()).union(underlay_pkgs.values())
for pkg in all_pkgs:
pkg.evaluate_conditions(os.environ)
for pkg in all_pkgs:
for group_depend in pkg.group_depends:
if group_depend.evaluated_condition is not False:
group_depend.extract_group_members(all_pkgs)
dependency_keys_build = get_dependencies(
all_pkgs, 'build', _get_build_and_recursive_run_dependencies,
pkgs.values())
dependency_keys_test = get_dependencies(
all_pkgs, 'run and test', _get_test_and_recursive_run_dependencies,
pkgs.values())
if args.skip_rosdep_keys:
dependency_keys_build.difference_update(args.skip_rosdep_keys)
dependency_keys_test.difference_update(args.skip_rosdep_keys)
# remove all non-ROS packages and packages which are present but
# specifically ignored
every_package_name = all_packages.keys() | all_underlay_pkg_names
dependency_keys_build -= every_package_name
dependency_keys_test -= every_package_name
context = initialize_resolver(
args.rosdistro_name, args.os_name, args.os_code_name)
os_pkg_names_build = resolve_names(dependency_keys_build, **context)
os_pkg_names_test = resolve_names(dependency_keys_test, **context)
os_pkg_names_test -= os_pkg_names_build
with Scope('SUBSECTION', 'Resolving packages versions using apt cache'):
apt_cache = Cache()
os_pkg_versions = get_binary_package_versions(
apt_cache, os_pkg_names_build | os_pkg_names_test)
with open(os.path.join(args.output_dir, 'install_list_build.txt'), 'w') as out_file:
for package in sorted(os_pkg_names_build):
out_file.write('# break docker cache %s=%s\n' % (package, os_pkg_versions[package]))
out_file.write('%s\n' % (package))
with open(os.path.join(args.output_dir, 'install_list_test.txt'), 'w') as out_file:
for package in sorted(os_pkg_names_test):
out_file.write('# break docker cache %s=%s\n' % (package, os_pkg_versions[package]))
out_file.write('%s\n' % (package))
def get_dependencies(pkgs, label, get_dependencies_callback, target_pkgs):
pkg_names = [pkg.name for pkg in pkgs]
depend_names = set([])
for pkg in target_pkgs:
depend_names.update(
[d for d in get_dependencies_callback(pkg, pkgs)
if d not in pkg_names])
print('Identified the following %s dependencies ' % label +
'(ignoring packages available from source):')
for depend_name in sorted(depend_names):
print(' -', depend_name)
return depend_names
def _get_build_and_recursive_run_dependencies(pkg, pkgs):
depends = [
d.name for d in pkg.build_depends + pkg.buildtool_depends
if d.evaluated_condition is not False]
# include recursive run dependencies on other pkgs in the workspace
# if pkg A in the workspace build depends on pkg B in the workspace
# then the recursive run dependencies of pkg B need to be installed
# in order to build the workspace
other_pkgs_by_names = \
dict([(p.name, p) for p in pkgs if p.name != pkg.name])
run_depends_in_pkgs = \
set([d for d in depends if d in other_pkgs_by_names])
while run_depends_in_pkgs:
# pick first element from sorted order to ensure deterministic results
pkg_name = sorted(run_depends_in_pkgs).pop(0)
pkg = other_pkgs_by_names[pkg_name]
other_pkgs_by_names.pop(pkg_name)
run_depends_in_pkgs.remove(pkg_name)
# append run dependencies
run_depends = [
d.name for d in pkg.build_export_depends +
pkg.buildtool_export_depends + pkg.exec_depends
if d.evaluated_condition is not False]
# append group dependencies
run_depends += [
member for group in pkg.group_depends for member in group.members
if group.evaluated_condition is not False]
depends += run_depends
# consider recursive dependencies
run_depends_in_pkgs.update(
[d for d in run_depends if d in other_pkgs_by_names])
return depends
def _get_test_and_recursive_run_dependencies(pkg, pkgs):
depends = [
d.name for d in pkg.build_export_depends +
pkg.buildtool_export_depends + pkg.exec_depends + pkg.test_depends
if d.evaluated_condition is not False]
# include recursive run dependencies on other pkgs in the workspace
# if pkg A in the workspace test depends on pkg B in the workspace
# then the recursive run dependencies of pkg B need to be installed
# in order to test the workspace
other_pkgs_by_names = \
dict([(p.name, p) for p in pkgs if p.name != pkg.name])
run_depends_in_pkgs = \
set([d for d in depends if d in other_pkgs_by_names])
while run_depends_in_pkgs:
# pick first element from sorted order to ensure deterministic results
pkg_name = sorted(run_depends_in_pkgs).pop(0)
pkg = other_pkgs_by_names[pkg_name]
other_pkgs_by_names.pop(pkg_name)
run_depends_in_pkgs.remove(pkg_name)
# append run dependencies
run_depends = [
d.name for d in pkg.build_export_depends +
pkg.buildtool_export_depends + pkg.exec_depends
if d.evaluated_condition is not False]
# append group dependencies
run_depends += [
member for group in pkg.group_depends for member in group.members
if group.evaluated_condition is not False]
depends += run_depends
# consider recursive dependencies
run_depends_in_pkgs.update(
[d for d in run_depends if d in other_pkgs_by_names])
return depends
def initialize_resolver(rosdistro_name, os_name, os_code_name):
# resolve rosdep keys into binary package names
ctx = create_default_installer_context()
try:
installer_key = ctx.get_default_os_installer_key(os_name)
except KeyError:
raise RuntimeError(
"Could not determine the rosdep installer for '%s'" % os_name)
installer = ctx.get_installer(installer_key)
view = get_catkin_view(rosdistro_name, os_name, os_code_name, update=False)
return {
'os_name': os_name,
'os_code_name': os_code_name,
'installer': installer,
'view': view,
}
def resolve_names(rosdep_keys, os_name, os_code_name, view, installer):
debian_pkg_names = set([])
for rosdep_key in sorted(rosdep_keys):
try:
resolved_names = resolve_for_os(
rosdep_key, view, installer, os_name, os_code_name)
except KeyError:
raise RuntimeError(
"Could not resolve the rosdep key '%s'" % rosdep_key)
debian_pkg_names.update(resolved_names)
print('Resolved the dependencies to the following binary packages:')
for debian_pkg_name in sorted(debian_pkg_names):
print(' -', debian_pkg_name)
return debian_pkg_names
if __name__ == '__main__':
sys.exit(main())
|
the-stack_0_17746 | #!/usr/bin/env python3
import sys,os,re
import json
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
N = int(sys.argv[1]) if len(sys.argv) >= 2 else 30
W = sys.argv[2] if len(sys.argv) >= 3 else None
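# Each data line is expected to hold six tab-separated fields; the first field is compared against W when a filter word is given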
re_r = re.compile(r'(.+)\t(.+)\t(.+)\t(.+)\t(.+)\t(.+)')
for line in sys.stdin:
try:
fname = line.strip()
o = fname
f = False
with open(fname,'r') as fp:
w = []
for cnt, l in enumerate(fp):
if cnt > N: break
mo = re_r.search(l)
if mo != None:
c = '\t' if o != '' else ''
o = o + c + mo.group(1)
if W == None or mo.group(1) == W:
f = True
if f: print(o)
except StopIteration:
print('EOF')
|
the-stack_0_17747 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main hook file that is called by Juju.
"""
import json
import httplib
import os
import time
import socket
import subprocess
import sys
import urlparse
from charmhelpers.core import hookenv, host
from kubernetes_installer import KubernetesInstaller
from path import path
from lib.registrator import Registrator
hooks = hookenv.Hooks()
@hooks.hook('api-relation-changed')
def api_relation_changed():
"""
On the relation to the api server, this function determines the appropriate
architecture and the configured version to copy the kubernetes binary files
from the kubernetes-master charm and installs it locally on this machine.
"""
hookenv.log('Starting api-relation-changed')
charm_dir = path(hookenv.charm_dir())
    # Get the package architecture, rather than from the kernel (uname -m).
arch = subprocess.check_output(['dpkg', '--print-architecture']).strip()
kubernetes_bin_dir = path('/opt/kubernetes/bin')
# Get the version of kubernetes to install.
version = subprocess.check_output(['relation-get', 'version']).strip()
print('Relation version: ', version)
if not version:
print('No version present in the relation.')
exit(0)
version_file = charm_dir / '.version'
if version_file.exists():
previous_version = version_file.text()
print('Previous version: ', previous_version)
if version == previous_version:
exit(0)
# Can not download binaries while the service is running, so stop it.
# TODO: Figure out a better way to handle upgraded kubernetes binaries.
for service in ('kubelet', 'proxy'):
if host.service_running(service):
host.service_stop(service)
command = ['relation-get', 'private-address']
# Get the kubernetes-master address.
server = subprocess.check_output(command).strip()
print('Kubernetes master private address: ', server)
installer = KubernetesInstaller(arch, version, server, kubernetes_bin_dir)
installer.download()
installer.install()
# Write the most recently installed version number to the file.
version_file.write_text(version)
relation_changed()
@hooks.hook('etcd-relation-changed',
'network-relation-changed')
def relation_changed():
"""Connect the parts and go :-)
"""
template_data = get_template_data()
# Check required keys
for k in ('etcd_servers', 'kubeapi_server'):
if not template_data.get(k):
print('Missing data for %s %s' % (k, template_data))
return
print('Running with\n%s' % template_data)
# Setup kubernetes supplemental group
setup_kubernetes_group()
# Register upstart managed services
for n in ('kubelet', 'proxy'):
if render_upstart(n, template_data) or not host.service_running(n):
print('Starting %s' % n)
host.service_restart(n)
# Register machine via api
print('Registering machine')
register_machine(template_data['kubeapi_server'])
# Save the marker (for restarts to detect prev install)
template_data.save()
def get_template_data():
rels = hookenv.relations()
template_data = hookenv.Config()
template_data.CONFIG_FILE_NAME = '.unit-state'
overlay_type = get_scoped_rel_attr('network', rels, 'overlay_type')
etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
# kubernetes master isn't ha yet.
if api_servers:
api_info = api_servers.pop()
api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
template_data['overlay_type'] = overlay_type
template_data['kubelet_bind_addr'] = _bind_addr(
hookenv.unit_private_ip())
template_data['proxy_bind_addr'] = _bind_addr(
hookenv.unit_get('public-address'))
template_data['kubeapi_server'] = api_servers
template_data['etcd_servers'] = ','.join([
'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
template_data['identifier'] = os.environ['JUJU_UNIT_NAME'].replace(
'/', '-')
return _encode(template_data)
def _bind_addr(addr):
if addr.replace('.', '').isdigit():
return addr
try:
return socket.gethostbyname(addr)
except socket.error:
raise ValueError('Could not resolve private address')
def _encode(d):
for k, v in d.items():
if isinstance(v, unicode):
d[k] = v.encode('utf8')
return d
def get_scoped_rel_attr(rel_name, rels, attr):
private_ip = hookenv.unit_private_ip()
for r, data in rels.get(rel_name, {}).items():
for unit_id, unit_data in data.items():
if unit_data.get('private-address') != private_ip:
continue
if unit_data.get(attr):
return unit_data.get(attr)
def get_rel_hosts(rel_name, rels, keys=('private-address',)):
hosts = []
for r, data in rels.get(rel_name, {}).items():
for unit_id, unit_data in data.items():
if unit_id == hookenv.local_unit():
continue
values = [unit_data.get(k) for k in keys]
if not all(values):
continue
hosts.append(len(values) == 1 and values[0] or values)
return hosts
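# Hedged illustration (added; not part of the original charm): with relation data
# such as {'etcd': {'etcd:1': {'etcd/0': {'hostname': '10.0.0.2', 'port': '4001'}}}},
# get_rel_hosts('etcd', rels, ('hostname', 'port')) returns [['10.0.0.2', '4001']],
# which get_template_data() joins into the comma-separated etcd_servers URLs.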
def render_upstart(name, data):
tmpl_path = os.path.join(
os.environ.get('CHARM_DIR'), 'files', '%s.upstart.tmpl' % name)
with open(tmpl_path) as fh:
tmpl = fh.read()
rendered = tmpl % data
tgt_path = '/etc/init/%s.conf' % name
if os.path.exists(tgt_path):
with open(tgt_path) as fh:
contents = fh.read()
if contents == rendered:
return False
with open(tgt_path, 'w') as fh:
fh.write(rendered)
return True
def register_machine(apiserver, retry=False):
parsed = urlparse.urlparse(apiserver)
# identity = hookenv.local_unit().replace('/', '-')
private_address = hookenv.unit_private_ip()
with open('/proc/meminfo') as fh:
info = fh.readline()
mem = info.strip().split(':')[1].strip().split()[0]
cpus = os.sysconf('SC_NPROCESSORS_ONLN')
registration_request = Registrator()
registration_request.data['Kind'] = 'Minion'
registration_request.data['id'] = private_address
registration_request.data['name'] = private_address
registration_request.data['metadata']['name'] = private_address
registration_request.data['spec']['capacity']['mem'] = mem + ' K'
registration_request.data['spec']['capacity']['cpu'] = cpus
registration_request.data['spec']['externalID'] = private_address
registration_request.data['status']['hostIP'] = private_address
response, result = registration_request.register(parsed.hostname,
parsed.port,
'/api/v1beta3/nodes')
print(response)
try:
registration_request.command_succeeded(response, result)
except ValueError:
# This happens when we have already registered
# for now this is OK
pass
def setup_kubernetes_group():
output = subprocess.check_output(['groups', 'kubernetes'])
# TODO: check group exists
if 'docker' not in output:
subprocess.check_output(
['usermod', '-a', '-G', 'docker', 'kubernetes'])
if __name__ == '__main__':
hooks.execute(sys.argv)
|
the-stack_0_17749 | from pygeppetto.model import GeppettoModel
from pygeppetto.model.exceptions import GeppettoModelException
class QueryNotFoundException(GeppettoModelException): pass
def get_query(query_path, model: GeppettoModel):
data_source = None
for token in query_path.split('.'):
if data_source is None:
try:
return next(query for query in model.queries if query.id == token)
except StopIteration:
try:
data_source = next(ds for ds in model.dataSources if ds.id == token)
except StopIteration:
raise QueryNotFoundException("Query `{}` not found in model.".format(query_path))
else:
try:
return next(query for query in data_source.queries if query.id == token)
except StopIteration:
raise QueryNotFoundException("Query `{}` not found in model.".format(query_path))
else:
raise QueryNotFoundException("Query `{}` not found in model.".format(query_path))
|
the-stack_0_17750 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
import torch.autograd
import numpy as np
from .binding import einsum
from ..common import normalize_subscript
class EinsumFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, equation, input_0, input_1=None):
equation, isBinary = normalize_subscript(equation)
if isBinary and input_1 is None:
raise RuntimeError('The subscript indicates two inputs, but only one was passed')
if not isBinary and input_1 is not None:
raise RuntimeError('The subscript indicates one input, but two were passed')
if input_1 is None:
input_1 = input_0.new_empty((1,))
output = einsum(equation, input_0, input_1, False, False)
if isBinary:
ctx.save_for_backward(input_0, input_1)
ctx.equation = equation
ctx.isBinary = isBinary
return output
@staticmethod
def backward(ctx, grad_output):
equation = ctx.equation
lhs, modeC = equation.split('->')
if ctx.isBinary:
input_0, input_1 = ctx.saved_tensors
conjugate = False
if torch.is_complex(input_0) or torch.is_complex(input_1):
conjugate = True
modeA, modeB = lhs.split(',')
d_input_0 = einsum(modeC + ',' + modeB + '->' + modeA, grad_output,
input_1, False, conjugate)
d_input_1 = einsum(modeA + ',' + modeC + '->' + modeB, input_0,
grad_output, conjugate, False)
return None, d_input_0, d_input_1
else:
dummy = grad_output.new_empty((1,))
d_input = einsum(modeC + '->' + lhs, grad_output, dummy, False, False)
return None, d_input
class Einsum(torch.nn.Module):
def __init__(self, equation):
super(Einsum, self).__init__()
self.equation = equation
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, input_0, input_1):
return EinsumFunction.apply(self.equation, input_0, input_1)
def _compute_target_tensor(in0, in1, target):
result = in0[:-1] + in1[:-1] + in1[-1] + in0[-1]
# remove duplicates
duplicates = set(in0) & set(in1)
for elem in duplicates:
result = result.replace(elem, '')
# reorder target modes like target
result = list(result)
for i in range(len(result)):
if result[i] not in target: continue
for j in range(i):
if result[j] not in target: continue
if target.index(result[j]) > target.index(result[i]):
result[i], result[j] = result[j], result[i]
return ''.join(result)
def EinsumGeneral(equation, *tensors, **kwargs):
tensors = list(tensors)
equation, isBinary = normalize_subscript(equation)
path = np.einsum_path(equation,
*[np.broadcast_to(np.nan, t.shape) for t in tensors],
**kwargs)
path = path[0][1:]
equation = equation.split('->')
eqs = equation[0].split(',')
target = equation[1]
for step in path:
if len(step) == 1:
result = EinsumFunction.apply(eqs[0] + '->' + target, tensors[0])
continue
assert step[0] < step[1]
in0 = tensors[step[0]]
in1 = tensors[step[1]]
tensors.pop(step[1])
tensors.pop(step[0])
tgt = _compute_target_tensor(eqs[step[0]], eqs[step[1]], target)
assert tgt != ""
eq = eqs[step[0]] + ',' + eqs[step[1]] + '->' + tgt
eqs.pop(step[1])
eqs.pop(step[0])
eqs.append(tgt)
result = EinsumFunction.apply(eq, in0, in1)
tensors.append(result)
return result
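# Hedged usage sketch (added; not part of the original module). It assumes a
# CUDA-capable torch build with cuQuantum installed and only illustrates how the
# wrappers above are typically invoked:
#
#   a = torch.randn(8, 16, device='cuda', requires_grad=True)
#   b = torch.randn(16, 4, device='cuda', requires_grad=True)
#   c = EinsumGeneral('ij,jk->ik', a, b)   # contraction path chosen via np.einsum_path
#   c.sum().backward()                     # gradients flow through EinsumFunction.backward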
|
the-stack_0_17754 | """
=======================================
FDR correction on T-test on sensor data
=======================================
One tests if the evoked response significantly deviates from 0.
Multiple comparison problem is addressed with
False Discovery Rate (FDR) correction.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD-3-Clause
# %%
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.stats import bonferroni_correction, fdr_correction
print(__doc__)
# %%
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)[:30]
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
# %%
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
X = epochs.get_data() # as 3D matrix
X = X[:, 0, :] # take only one channel to get a 2D array
# %%
# Compute statistic
T, pval = stats.ttest_1samp(X, 0)
alpha = 0.05
n_samples, n_tests = X.shape
threshold_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha=alpha)
threshold_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
threshold_fdr = np.min(np.abs(T)[reject_fdr])
# %%
# Plot
times = 1e3 * epochs.times
plt.close('all')
plt.plot(times, T, 'k', label='T-stat')
xmin, xmax = plt.xlim()
plt.hlines(threshold_uncorrected, xmin, xmax, linestyle='--', colors='k',
label='p=0.05 (uncorrected)', linewidth=2)
plt.hlines(threshold_bonferroni, xmin, xmax, linestyle='--', colors='r',
label='p=0.05 (Bonferroni)', linewidth=2)
plt.hlines(threshold_fdr, xmin, xmax, linestyle='--', colors='b',
label='p=0.05 (FDR)', linewidth=2)
plt.legend()
plt.xlabel("Time (ms)")
plt.ylabel("T-stat")
plt.show()
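# %%
# Hedged addition (not part of the original example): the rejection masks
# computed above can be summarised as counts of significant time points, e.g.:
#
#   print('uncorrected:', int((np.abs(T) > threshold_uncorrected).sum()))
#   print('Bonferroni :', int(reject_bonferroni.sum()))
#   print('FDR        :', int(reject_fdr.sum()))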
|
the-stack_0_17755 | """Place and route utilities.
"""
from __future__ import absolute_import
import pickle
from six import iteritems
from collections import defaultdict
from rig.netlist import Net
from rig.place_and_route.constraints import (LocationConstraint,
RouteEndpointConstraint,
SameChipConstraint)
from nengo_spinnaker.builder import Model
from nengo_spinnaker.node_io import Ethernet
def create_network_netlist(network, n_steps, fp, dt=0.001):
"""Create a netlist of a network running for a number of steps, dump that
netlist to file.
"""
# Build the network, assuming EthernetIO
model = Model(dt)
node_io = Ethernet()
model.build(network, **node_io.builder_kwargs)
# Build the netlist
netlist = model.make_netlist(n_steps).as_rig_arguments()
pickle_netlist(netlist, fp)
def pickle_netlist(netlist_dict, fp, **kwargs):
"""Dump a pickle of a netlist to a file.
This function replaces all vertices with `object` instances so that
nengo-specific or project-specific dependencies are not included.
"""
# {old_vertex: new_vertex, ...}
new_vertices = defaultdict(object)
netlist_dict["vertices_resources"] = {
new_vertices[vertex]: resources
for (vertex, resources)
in iteritems(netlist_dict["vertices_resources"])
}
netlist_dict["nets"] = [
Net(new_vertices[net.source],
[new_vertices[sink] for sink in net.sinks],
net.weight)
for net in netlist_dict["nets"]
]
old_constraints = netlist_dict["constraints"]
netlist_dict["constraints"] = []
for constraint in old_constraints:
if isinstance(constraint, LocationConstraint):
netlist_dict["constraints"].append(
LocationConstraint(new_vertices[constraint.vertex],
constraint.location))
elif isinstance(constraint, RouteEndpointConstraint):
netlist_dict["constraints"].append(
RouteEndpointConstraint(new_vertices[constraint.vertex],
constraint.route))
elif isinstance(constraint, SameChipConstraint):
# Get the new vertices
vs = [new_vertices[v] for v in constraint.vertices]
netlist_dict["constraints"].append(SameChipConstraint(vs))
else:
netlist_dict["constraints"].append(constraint)
pickle.dump(netlist_dict, fp, **kwargs)
|
the-stack_0_17756 | # -*- coding: utf-8 -*-
"""
chemspipy.api
~~~~~~~~~~~~~
Core API for interacting with ChemSpider web services.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from base64 import b64decode
import logging
import sys
import warnings
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
import requests
import six
from . import __version__
from .errors import ChemSpiPyError, ChemSpiPyParseError, ChemSpiPyAuthError, ChemSpiPyServerError
from .errors import ChemSpiPyNotFoundError
from .objects import Compound, Spectrum
from .search import Results
log = logging.getLogger(__name__)
#: 2D coordinate dimensions
MOL2D = '2d'
#: 3D coordinate dimensions
MOL3D = '3d'
#: Both coordinate dimensions
BOTH = 'both'
#: Ascending sort direction
ASCENDING = 'ascending'
#: Descending sort direction
DESCENDING = 'descending'
#: CSID sort order
CSID = 'csid'
#: Mass defect sort order
MASS_DEFECT = 'mass_defect'
#: Molecular weight sort order
MOLECULAR_WEIGHT = 'molecular_weight'
#: Reference count sort order
REFERENCE_COUNT = 'reference_count'
#: Datasource count sort order
DATASOURCE_COUNT = 'datasource_count'
#: Pubmed count sort order
PUBMED_COUNT = 'pubmed_count'
#: RSC count sort order
RSC_COUNT = 'rsc_count'
#: Coordinate dimensions
DIMENSIONS = {
MOL2D: 'e2D',
MOL3D: 'e3D',
BOTH: 'eBoth'
}
#: Sort directions
DIRECTIONS = {
ASCENDING: 'eAscending',
DESCENDING: 'eDescending'
}
#: Sort orders
ORDERS = {
CSID: 'eCSID',
MASS_DEFECT: 'eMassDefect',
MOLECULAR_WEIGHT: 'eMolecularWeight',
REFERENCE_COUNT: 'eReferenceCount',
DATASOURCE_COUNT: 'eDataSourceCount',
PUBMED_COUNT: 'ePubMedCount',
RSC_COUNT: 'eRscCount'
}
#: API to python field mappings
FIELDS = {
'CSID': ('csid', int),
'csid': ('csid', int),
'MF': ('molecular_formula', six.text_type),
'SMILES': ('smiles', six.text_type),
'InChI': ('inchi', six.text_type),
'InChIKey': ('inchikey', six.text_type),
'AverageMass': ('average_mass', float),
'MolecularWeight': ('molecular_weight', float),
'MonoisotopicMass': ('monoisotopic_mass', float),
'NominalMass': ('nominal_mass', float),
'ALogP': ('alogp', float),
'XLogP': ('xlogp', float),
'CommonName': ('common_name', six.text_type),
'MOL2d': ('mol_2d', six.text_type),
'MOL3d': ('mol_3d', six.text_type),
'ReferenceCount': ('reference_count', int),
'DataSourceCount': ('datasource_count', int),
'PubMedCount': ('pubmed_count', int),
'RSCCount': ('rsc_count', int),
'ExternalReferences': ('external_references', list),
'ds_name': ('datasource_name', six.text_type),
'ds_url': ('datasource_url', six.text_type),
'ext_id': ('external_id', six.text_type),
'ext_url': ('external_url', six.text_type),
'Status': ('status', six.text_type),
'Count': ('count', int),
'Message': ('message', six.text_type),
'Elapsed': ('elapsed', six.text_type),
'spc_id': ('spectrum_id', int),
'spc_type': ('spectrum_type', six.text_type),
'file_name': ('file_name', six.text_type),
'comments': ('comments', six.text_type),
'original_url': ('original_url', six.text_type),
'submitted_date': ('submitted_date', six.text_type),
}
class BaseChemSpider(object):
def __init__(self, security_token=None, user_agent=None, api_url=None):
"""
:param string security_token: (Optional) Your ChemSpider security token.
:param string user_agent: (Optional) Identify your application to ChemSpider servers.
:param string api_url: (Optional) Alternative API server.
"""
log.debug('Initializing ChemSpider')
self.api_url = api_url if api_url else 'https://www.chemspider.com'
self.http = requests.session()
self.http.headers['User-Agent'] = user_agent if user_agent else 'ChemSpiPy/%s Python/%s ' % (__version__, sys.version.split()[0])
self.security_token = security_token
def request(self, api, endpoint, **params):
"""Construct API request and return the XML response.
:param string api: The specific ChemSpider API to call (MassSpec, Search, Spectra, InChI).
:param string endpoint: ChemSpider API endpoint.
:param params: (Optional) Parameters for the ChemSpider endpoint as keyword arguments.
:rtype: xml tree
"""
url = '%s/%s.asmx/%s' % (self.api_url, api, endpoint)
log.debug('Request: %s %s', url, params)
params['token'] = self.security_token
try:
response = self.http.post(url, data=params)
except requests.RequestException as e:
raise ChemSpiPyError(six.text_type(e))
if response.status_code == 500:
if 'Missing parameter: token.' in response.text:
raise ChemSpiPyAuthError('Endpoint requires a security token.')
elif 'Error converting data type nvarchar to uniqueidentifier' in response.text:
# Generally when supplying a security token with incorrect format
raise ChemSpiPyAuthError('Invalid security token. Did you copy the entire token?')
elif 'Unauthorized web service usage' in response.text:
# Fake/incorrect token (but in correct format)
raise ChemSpiPyAuthError(response.text)
elif 'Unable to get record details' in response.text:
# Generally when requesting a non-existent CSID
raise ChemSpiPyNotFoundError(response.text)
elif 'Unable to get records spectra' in response.text:
# No spectra for a CSID, shouldn't be an exception
return []
else:
raise ChemSpiPyServerError(response.text)
try:
tree = etree.fromstring(response.content)
except etree.ParseError as e:
raise ChemSpiPyParseError('Unable to parse XML response: %s' % e)
return tree
def construct_api_url(self, api, endpoint, **params):
"""Construct a Chemspider API url, encoded, with parameters as a GET querystring.
:param string api: The specific ChemSpider API to call (MassSpecAPI, Search, Spectra, InChI).
:param string endpoint: ChemSpider API endpoint.
:param params: (Optional) Parameters for the ChemSpider endpoint as keyword arguments.
:rtype: string
"""
querystring = []
for k, v in params.items():
querystring.append('%s=%s' % (k, six.moves.urllib.parse.quote_plus(six.text_type(v))))
if self.security_token:
querystring.append('token=%s' % self.security_token)
return '%s/%s.asmx/%s?%s' % (self.api_url, api, endpoint, '&'.join(querystring))
def xml_to_dict(t):
"""Convert a ChemSpider XML response to a python dict."""
d = {}
for child in t:
tag = child.tag.split('}')[1]
tag, rtype = FIELDS.get(tag, (tag, six.text_type))
if rtype == list:
d[tag] = [xml_to_dict(grandchild) for grandchild in child]
elif rtype == dict:
d[tag] = xml_to_dict(child)
elif child.text is not None:
d[tag] = rtype(child.text.strip())
return d
class MassSpecApi(BaseChemSpider):
def get_databases(self):
"""Get the list of datasources in ChemSpider."""
response = self.request('MassSpecApi', 'GetDatabases')
return [el.text for el in response]
def get_extended_compound_info(self, csid):
"""Get extended record details for a CSID. Security token is required.
:param string|int csid: ChemSpider ID.
"""
response = self.request('MassSpecApi', 'GetExtendedCompoundInfo', csid=csid)
return xml_to_dict(response)
def get_extended_compound_info_list(self, csids):
"""Get extended record details for a list of CSIDs. Security token is required.
:param list[string|int] csids: ChemSpider IDs.
"""
response = self.request('MassSpecApi', 'GetExtendedCompoundInfoArray', csids=csids)
return [xml_to_dict(result) for result in response]
def get_extended_mol_compound_info_list(self, csids, mol_type=MOL2D, include_reference_counts=False,
include_external_references=False):
"""Get extended record details (including MOL) for a list of CSIDs.
A maximum of 250 CSIDs can be fetched per request. Security token is required.
:param list[string|int] csids: ChemSpider IDs.
:param string mol_type: :data:`~chemspipy.api.MOL2D`, :data:`~chemspipy.api.MOL3D` or
:data:`~chemspipy.api.BOTH`.
:param bool include_reference_counts: Whether to include reference counts.
:param bool include_external_references: Whether to include external references.
"""
response = self.request('MassSpecApi', 'GetExtendedMolCompoundInfoArray', csids=csids,
eMolType=DIMENSIONS.get(mol_type, mol_type),
includeReferenceCounts=include_reference_counts,
includeExternalReferences=include_external_references)
return [xml_to_dict(result) for result in response]
def get_record_mol(self, csid, calc3d=False):
"""Get ChemSpider record in MOL format. Security token is required.
:param string|int csid: ChemSpider ID.
:param bool calc3d: Whether 3D coordinates should be calculated before returning record data.
"""
response = self.request('MassSpecApi', 'GetRecordMol', csid=csid, calc3d=calc3d)
return response.text
def simple_search_by_formula(self, formula):
"""Search ChemSpider by molecular formula.
:param string formula: Molecular formula
:returns: A list of Compounds.
:rtype: list[:class:`~chemspipy.Compound`]
"""
warnings.warn("Use search_by_formula instead of simple_search_by_formula.", DeprecationWarning)
response = self.request('MassSpecApi', 'SearchByFormula2', formula=formula)
return [Compound(self, el.text) for el in response]
def simple_search_by_mass(self, mass, mass_range):
"""Search ChemSpider by mass +/- range.
:param float mass: The mass to search for.
:param float mass_range: The +/- mass range to allow.
:returns: A list of Compounds.
:rtype: list[:class:`~chemspipy.Compound`]
"""
warnings.warn("Use search_by_mass instead of simple_search_by_mass.", DeprecationWarning)
response = self.request('MassSpecApi', 'SearchByMass2', mass=mass, range=mass_range)
return [Compound(self, el.text) for el in response]
# def get_compressed_records_sdf(self, rid):
# """Get an SDF containing all the results from a search operation.
#
# A maximum of 10000 records can be fetched per request. Subscriber role security token is required.
#
# Warning: This doesn't work reliably.
#
# :param string rid: A transaction ID, returned by an asynchronous search method.
# :returns: SDF containing the requested records.
# :rtype: string
# """
# response = self.request('MassSpecApi', 'GetCompressedRecordsSdf', rid=rid, eComp='eGzip')
# if response.text:
# return zlib.decompress(b64decode(response.text.encode('utf-8')), 16+zlib.MAX_WBITS)
#
# def get_records_sdf(self, rid):
# """Get an SDF containing all the results from a search operation.
#
# A maximum of 10000 records can be fetched per request. Subscriber role security token is required.
#
# Warning: This doesn't work reliably.
#
# :param string rid: A transaction ID, returned by an asynchronous search method.
# :returns: SDF containing the requested records.
# :rtype: string
# """
# response = self.request('MassSpecApi', 'GetRecordsSdf', rid=rid)
# if response.text:
# return response.text.encode('utf-8')
class SearchApi(BaseChemSpider):
def async_simple_search(self, query):
"""Search ChemSpider with arbitrary query, returning results in order of the best match found.
This method returns a transaction ID which can be used with other methods to get search status and results.
Security token is required.
:param string query: Search query - a name, SMILES, InChI, InChIKey, CSID, etc.
:returns: Transaction ID.
:rtype: string
"""
response = self.request('Search', 'AsyncSimpleSearch', query=query)
return response.text
def async_simple_search_ordered(self, query, order=CSID, direction=ASCENDING):
"""Search ChemSpider with arbitrary query, returning results with a custom order.
This method returns a transaction ID which can be used with other methods to get search status and results.
Security token is required.
:param string query: Search query - a name, SMILES, InChI, InChIKey, CSID, etc.
:param string order: :data:`~chemspipy.api.CSID`, :data:`~chemspipy.api.MASS_DEFECT`,
:data:`~chemspipy.api.MOLECULAR_WEIGHT`, :data:`~chemspipy.api.REFERENCE_COUNT`,
:data:`~chemspipy.api.DATASOURCE_COUNT`, :data:`~chemspipy.api.PUBMED_COUNT` or
:data:`~chemspipy.api.RSC_COUNT`.
:param string direction: :data:`~chemspipy.api.ASCENDING` or :data:`~chemspipy.api.DESCENDING`.
:returns: Transaction ID.
:rtype: string
"""
response = self.request('Search', 'AsyncSimpleSearchOrdered', query=query, orderBy=ORDERS[order],
orderDirection=DIRECTIONS[direction])
return response.text
def get_async_search_status(self, rid):
"""Check the status of an asynchronous search operation.
Security token is required.
:param string rid: A transaction ID, returned by an asynchronous search method.
:returns: Unknown, Created, Scheduled, Processing, Suspended, PartialResultReady, ResultReady, Failed,
TooManyRecords
:rtype: string
"""
response = self.request('Search', 'GetAsyncSearchStatus', rid=rid)
return response.text
def get_async_search_status_and_count(self, rid):
"""Check the status of an asynchronous search operation. If ready, a count and message are also returned.
Security token is required.
:param string rid: A transaction ID, returned by an asynchronous search method.
:rtype: dict
"""
response = self.request('Search', 'GetAsyncSearchStatusAndCount', rid=rid)
return xml_to_dict(response)
def get_async_search_result(self, rid):
"""Get the results from a asynchronous search operation. Security token is required.
:param string rid: A transaction ID, returned by an asynchronous search method.
:returns: A list of Compounds.
:rtype: list[:class:`~chemspipy.Compound`]
"""
response = self.request('Search', 'GetAsyncSearchResult', rid=rid)
return [Compound(self, el.text) for el in response]
def get_async_search_result_part(self, rid, start=0, count=-1):
"""Get a slice of the results from a asynchronous search operation. Security token is required.
:param string rid: A transaction ID, returned by an asynchronous search method.
:param int start: The number of results to skip.
:param int count: The number of results to return. -1 returns all through to end.
:returns: A list of Compounds.
:rtype: list[:class:`~chemspipy.Compound`]
"""
response = self.request('Search', 'GetAsyncSearchResultPart', rid=rid, start=start, count=count)
return [Compound(self, el.text) for el in response]
def get_compound_info(self, csid):
"""Get SMILES, StdInChI and StdInChIKey for a given CSID. Security token is required.
:param string|int csid: ChemSpider ID.
:rtype: dict
"""
response = self.request('Search', 'GetCompoundInfo', csid=csid)
return xml_to_dict(response)
def get_compound_thumbnail(self, csid):
"""Get PNG image as binary data.
:param string|int csid: ChemSpider ID.
:rtype: bytes
"""
response = self.request('Search', 'GetCompoundThumbnail', id=csid)
return b64decode(response.text.encode('utf-8'))
def simple_search(self, query):
"""Search ChemSpider with arbitrary query.
A maximum of 100 results are returned. Security token is required.
:param string query: Search query - a name, SMILES, InChI, InChIKey, CSID, etc.
:returns: List of :class:`Compounds <chemspipy.Compound>`.
:rtype: list[:class:`~chemspipy.Compound`]
"""
response = self.request('Search', 'SimpleSearch', query=query)
return [Compound(self, el.text) for el in response]
class SpectraApi(BaseChemSpider):
def get_all_spectra_info(self):
"""Get full list of all spectra in ChemSpider. Subscriber role security token is required.
        :rtype: list[dict]
"""
response = self.request('Spectra', 'GetAllSpectraInfo')
return [xml_to_dict(result) for result in response]
def get_spectrum_info(self, spectrum_id):
"""Get information for a specific spectrum ID. Subscriber role security token is required.
:param string|int spectrum_id: spectrum ID.
:returns: Spectrum info.
:rtype: dict
"""
response = self.request('Spectra', 'GetSpectrumInfo', spc_id=spectrum_id)
return xml_to_dict(response)
def get_compound_spectra_info(self, csid):
"""Get information about all the spectra for a ChemSpider ID. Subscriber role security token is required.
:param string|int csid: ChemSpider ID.
:returns: List of spectrum info.
:rtype: list[dict]
"""
response = self.request('Spectra', 'GetCompoundSpectraInfo', csid=csid)
return [xml_to_dict(result) for result in response]
def get_spectra_info_list(self, csids):
"""Get information about all the spectra for a list of ChemSpider IDs.
:param list[string|int] csids: ChemSpider IDs.
:returns: List of spectrum info.
:rtype: list[dict]
"""
response = self.request('Spectra', 'GetSpectraInfoArray', csids=csids)
return [xml_to_dict(result) for result in response]
class InchiApi(BaseChemSpider):
def get_original_mol(self, csid):
"""Get original submitted MOL file. Security token is required.
:param string|int csid: ChemSpider ID.
"""
response = self.request('InChI', 'CSIDToMol', csid=csid)
return response.text
# TODO
# InChIKeyToCSID - inchi_key - csid
# InChIKeyToInChI - inchi_key - InChI
# InChIKeyToMol - inchi_key - Mol
# InChIToCSID - inchi - csid
# InChIToInChIKey - inchi - inchikey
# InChIToMol - inchi - mol
# InChIToSMILES - inchi - smiles
# IsValidInChIKey - inchi_key - bool
# MolToInChI - mol - inchi
# MolToInChIKey - mol - inchi
# ResolveInChIKey - inchi_key, out_format (MOL/SDF/SMILES/InChI) - list of strings
# SMILESToInChI - smiles - inchi
class CustomApi(BaseChemSpider):
def get_compound(self, csid):
"""Return a Compound object for a given ChemSpider ID. Security token is required.
:param string|int csid: ChemSpider ID.
:returns: The Compound with the specified ChemSpider ID.
:rtype: :class:`~chemspipy.Compound`
"""
return Compound(self, csid)
def get_compounds(self, csids):
"""Return a list of Compound objects, given a list ChemSpider IDs. Security token is required.
:param list[string|int] csids: List of ChemSpider IDs.
:returns: List of Compounds with the specified ChemSpider IDs.
:rtype: list[:class:`~chemspipy.Compound`]
"""
return [Compound(self, csid) for csid in csids]
def get_spectrum(self, spectrum_id):
"""Return a :class:`~chemspipy.Spectrum` object for a given spectrum ID. Subscriber role security token is required.
:param string|int spectrum_id: Spectrum ID.
:returns: The Spectrum with the specified spectrum ID.
:rtype: :class:`~chemspipy.Spectrum`
"""
return Spectrum(self, spectrum_id)
def get_spectra(self, spectrum_ids):
"""Return a :class:`~chemspipy.Spectrum` object for a given spectrum ID. Subscriber role security token is required.
:param list[string|int] spectrum_ids: List of spectrum IDs.
:returns: List of spectra with the specified spectrum IDs.
:rtype: list[:class:`~chemspipy.Spectrum`]
"""
return [Spectrum(self, spectrum_id) for spectrum_id in spectrum_ids]
def get_compound_spectra(self, csid):
"""Return :class:`~chemspipy.Spectrum` objects for all the spectra associated with a ChemSpider ID.
        :param string|int csid: ChemSpider ID.
:returns: List of spectra for the specified ChemSpider ID.
:rtype: list[:class:`~chemspipy.Spectrum`]
"""
return [Spectrum.from_info_dict(self, info) for info in self.get_spectra_info_list([csid])]
def get_all_spectra(self):
"""Return a full list of :class:`~chemspipy.Spectrum` objects for all spectra in ChemSpider.
Subscriber role security token is required.
:returns: Full list of spectra in ChemSpider.
:rtype: list[:class:`~chemspipy.Spectrum`]
"""
return [Spectrum.from_info_dict(self, info) for info in self.get_all_spectra_info()]
def search(self, query, order=None, direction=ASCENDING, raise_errors=False):
"""Search ChemSpider for the specified query and return the results. Security token is required.
:param string|int query: Search query.
:param string order: (Optional) :data:`~chemspipy.api.CSID`, :data:`~chemspipy.api.MASS_DEFECT`,
:data:`~chemspipy.api.MOLECULAR_WEIGHT`, :data:`~chemspipy.api.REFERENCE_COUNT`,
:data:`~chemspipy.api.DATASOURCE_COUNT`, :data:`~chemspipy.api.PUBMED_COUNT` or
:data:`~chemspipy.api.RSC_COUNT`.
:param string direction: (Optional) :data:`~chemspipy.api.ASCENDING` or :data:`~chemspipy.api.DESCENDING`.
:param bool raise_errors: If True, raise exceptions. If False, store on Results ``exception`` property.
:returns: Search Results list.
:rtype: Results
"""
if order and direction:
return Results(self, self.async_simple_search_ordered, (query, order, direction), raise_errors=raise_errors)
else:
return Results(self, self.async_simple_search, (query,), raise_errors=raise_errors)
# TODO: Wrappers for subscriber role asynchronous searches
class ChemSpider(CustomApi, MassSpecApi, SearchApi, SpectraApi, InchiApi):
"""Provides access to the ChemSpider API.
Usage::
>>> from chemspipy import ChemSpider
>>> cs = ChemSpider('<YOUR-SECURITY-TOKEN>')
"""
def __repr__(self):
return 'ChemSpider()'
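# Hedged usage sketch (added; not part of the original module). The token is a
# placeholder and the query/CSID values are arbitrary examples:
#
#   cs = ChemSpider('<YOUR-SECURITY-TOKEN>')
#   compound = cs.get_compound(2157)            # Compound wrapper for a CSID
#   for result in cs.search('glucose'):         # asynchronous simple search
#       print(result.csid, result.common_name)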
|
the-stack_0_17760 | """Unit tests for contextlib.py, and other context managers."""
import io
import sys
import tempfile
import threading
import unittest
from contextlib import * # Tests __all__
from test import support
class TestAbstractContextManager(unittest.TestCase):
def test_enter(self):
class DefaultEnter(AbstractContextManager):
def __exit__(self, *args):
super().__exit__(*args)
manager = DefaultEnter()
self.assertIs(manager.__enter__(), manager)
def test_exit_is_abstract(self):
class MissingExit(AbstractContextManager):
pass
with self.assertRaises(TypeError):
MissingExit()
def test_structural_subclassing(self):
class ManagerFromScratch:
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return None
self.assertTrue(issubclass(ManagerFromScratch, AbstractContextManager))
class DefaultEnter(AbstractContextManager):
def __exit__(self, *args):
super().__exit__(*args)
self.assertTrue(issubclass(DefaultEnter, AbstractContextManager))
class NoEnter(ManagerFromScratch):
__enter__ = None
self.assertFalse(issubclass(NoEnter, AbstractContextManager))
class NoExit(ManagerFromScratch):
__exit__ = None
self.assertFalse(issubclass(NoExit, AbstractContextManager))
class ContextManagerTestCase(unittest.TestCase):
def test_contextmanager_plain(self):
state = []
@contextmanager
def woohoo():
state.append(1)
yield 42
state.append(999)
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_finally(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
finally:
state.append(999)
with self.assertRaises(ZeroDivisionError):
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError()
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_no_reraise(self):
@contextmanager
def whee():
yield
ctx = whee()
ctx.__enter__()
# Calling __exit__ should not result in an exception
self.assertFalse(ctx.__exit__(TypeError, TypeError("foo"), None))
def test_contextmanager_trap_yield_after_throw(self):
@contextmanager
def whoo():
try:
yield
except:
yield
ctx = whoo()
ctx.__enter__()
self.assertRaises(
RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
)
def test_contextmanager_except(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
except ZeroDivisionError as e:
state.append(e.args[0])
self.assertEqual(state, [1, 42, 999])
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError(999)
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_except_stopiter(self):
stop_exc = StopIteration('spam')
@contextmanager
def woohoo():
yield
try:
with self.assertWarnsRegex(DeprecationWarning,
"StopIteration"):
with woohoo():
raise stop_exc
except Exception as ex:
self.assertIs(ex, stop_exc)
else:
self.fail('StopIteration was suppressed')
def test_contextmanager_except_pep479(self):
code = """\
from __future__ import generator_stop
from contextlib import contextmanager
@contextmanager
def woohoo():
yield
"""
locals = {}
exec(code, locals, locals)
woohoo = locals['woohoo']
stop_exc = StopIteration('spam')
try:
with woohoo():
raise stop_exc
except Exception as ex:
self.assertIs(ex, stop_exc)
else:
self.fail('StopIteration was suppressed')
def test_contextmanager_do_not_unchain_non_stopiteration_exceptions(self):
@contextmanager
def test_issue29692():
try:
yield
except Exception as exc:
raise RuntimeError('issue29692:Chained') from exc
try:
with test_issue29692():
raise ZeroDivisionError
except Exception as ex:
self.assertIs(type(ex), RuntimeError)
self.assertEqual(ex.args[0], 'issue29692:Chained')
self.assertIsInstance(ex.__cause__, ZeroDivisionError)
try:
with test_issue29692():
raise StopIteration('issue29692:Unchained')
except Exception as ex:
self.assertIs(type(ex), StopIteration)
self.assertEqual(ex.args[0], 'issue29692:Unchained')
self.assertIsNone(ex.__cause__)
def _create_contextmanager_attribs(self):
def attribs(**kw):
def decorate(func):
for k,v in kw.items():
setattr(func,k,v)
return func
return decorate
@contextmanager
@attribs(foo='bar')
def baz(spam):
"""Whee!"""
return baz
def test_contextmanager_attribs(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__name__,'baz')
self.assertEqual(baz.foo, 'bar')
@support.requires_docstrings
def test_contextmanager_doc_attrib(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__doc__, "Whee!")
@support.requires_docstrings
def test_instance_docstring_given_cm_docstring(self):
baz = self._create_contextmanager_attribs()(None)
self.assertEqual(baz.__doc__, "Whee!")
def test_keywords(self):
# Ensure no keyword arguments are inhibited
@contextmanager
def woohoo(self, func, args, kwds):
yield (self, func, args, kwds)
with woohoo(self=11, func=22, args=33, kwds=44) as target:
self.assertEqual(target, (11, 22, 33, 44))
class ClosingTestCase(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = closing.__doc__
obj = closing(None)
self.assertEqual(obj.__doc__, cm_docstring)
def test_closing(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with closing(x) as y:
self.assertEqual(x, y)
self.assertEqual(state, [1])
def test_closing_error(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
with closing(x) as y:
self.assertEqual(x, y)
1 / 0
self.assertEqual(state, [1])
class NullcontextTestCase(unittest.TestCase):
def test_nullcontext(self):
class C:
pass
c = C()
with nullcontext(c) as c_in:
self.assertIs(c_in, c)
class FileContextTestCase(unittest.TestCase):
def testWithOpen(self):
tfn = tempfile.mktemp()
try:
f = None
with open(tfn, "w") as f:
self.assertFalse(f.closed)
f.write("Booh\n")
self.assertTrue(f.closed)
f = None
with self.assertRaises(ZeroDivisionError):
with open(tfn, "r") as f:
self.assertFalse(f.closed)
self.assertEqual(f.read(), "Booh\n")
1 / 0
self.assertTrue(f.closed)
finally:
support.unlink(tfn)
class LockContextTestCase(unittest.TestCase):
def boilerPlate(self, lock, locked):
self.assertFalse(locked())
with lock:
self.assertTrue(locked())
self.assertFalse(locked())
with self.assertRaises(ZeroDivisionError):
with lock:
self.assertTrue(locked())
1 / 0
self.assertFalse(locked())
def testWithLock(self):
lock = threading.Lock()
self.boilerPlate(lock, lock.locked)
def testWithRLock(self):
lock = threading.RLock()
self.boilerPlate(lock, lock._is_owned)
def testWithCondition(self):
lock = threading.Condition()
def locked():
return lock._is_owned()
self.boilerPlate(lock, locked)
def testWithSemaphore(self):
lock = threading.Semaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
def testWithBoundedSemaphore(self):
lock = threading.BoundedSemaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
class mycontext(ContextDecorator):
"""Example decoration-compatible context manager for testing"""
started = False
exc = None
catch = False
def __enter__(self):
self.started = True
return self
def __exit__(self, *exc):
self.exc = exc
return self.catch
class TestContextDecorator(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = mycontext.__doc__
obj = mycontext()
self.assertEqual(obj.__doc__, cm_docstring)
def test_contextdecorator(self):
context = mycontext()
with context as result:
self.assertIs(result, context)
self.assertTrue(context.started)
self.assertEqual(context.exc, (None, None, None))
def test_contextdecorator_with_exception(self):
context = mycontext()
with self.assertRaisesRegex(NameError, 'foo'):
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
context = mycontext()
context.catch = True
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorator(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
test()
self.assertEqual(context.exc, (None, None, None))
def test_decorator_with_exception(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
raise NameError('foo')
with self.assertRaisesRegex(NameError, 'foo'):
test()
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorating_method(self):
context = mycontext()
class Test(object):
@context
def method(self, a, b, c=None):
self.a = a
self.b = b
self.c = c
# these tests are for argument passing when used as a decorator
test = Test()
test.method(1, 2)
self.assertEqual(test.a, 1)
self.assertEqual(test.b, 2)
self.assertEqual(test.c, None)
test = Test()
test.method('a', 'b', 'c')
self.assertEqual(test.a, 'a')
self.assertEqual(test.b, 'b')
self.assertEqual(test.c, 'c')
test = Test()
test.method(a=1, b=2)
self.assertEqual(test.a, 1)
self.assertEqual(test.b, 2)
def test_typo_enter(self):
class mycontext(ContextDecorator):
def __unter__(self):
pass
def __exit__(self, *exc):
pass
with self.assertRaises(AttributeError):
with mycontext():
pass
def test_typo_exit(self):
class mycontext(ContextDecorator):
def __enter__(self):
pass
def __uxit__(self, *exc):
pass
with self.assertRaises(AttributeError):
with mycontext():
pass
def test_contextdecorator_as_mixin(self):
class somecontext(object):
started = False
exc = None
def __enter__(self):
self.started = True
return self
def __exit__(self, *exc):
self.exc = exc
class mycontext(somecontext, ContextDecorator):
pass
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
test()
self.assertEqual(context.exc, (None, None, None))
def test_contextmanager_as_decorator(self):
@contextmanager
def woohoo(y):
state.append(y)
yield
state.append(999)
state = []
@woohoo(1)
def test(x):
self.assertEqual(state, [1])
state.append(x)
test('something')
self.assertEqual(state, [1, 'something', 999])
# Issue #11647: Ensure the decorated function is 'reusable'
state = []
test('something else')
self.assertEqual(state, [1, 'something else', 999])
class TestExitStack(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = ExitStack.__doc__
obj = ExitStack()
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_resources(self):
with ExitStack():
pass
def test_callback(self):
expected = [
((), {}),
((1,), {}),
((1,2), {}),
((), dict(example=1)),
((1,), dict(example=1)),
((1,2), dict(example=1)),
]
result = []
def _exit(*args, **kwds):
"""Test metadata propagation"""
result.append((args, kwds))
with ExitStack() as stack:
for args, kwds in reversed(expected):
if args and kwds:
f = stack.callback(_exit, *args, **kwds)
elif args:
f = stack.callback(_exit, *args)
elif kwds:
f = stack.callback(_exit, **kwds)
else:
f = stack.callback(_exit)
self.assertIs(f, _exit)
for wrapper in stack._exit_callbacks:
self.assertIs(wrapper.__wrapped__, _exit)
self.assertNotEqual(wrapper.__name__, _exit.__name__)
self.assertIsNone(wrapper.__doc__, _exit.__doc__)
self.assertEqual(result, expected)
def test_push(self):
exc_raised = ZeroDivisionError
def _expect_exc(exc_type, exc, exc_tb):
self.assertIs(exc_type, exc_raised)
def _suppress_exc(*exc_details):
return True
def _expect_ok(exc_type, exc, exc_tb):
self.assertIsNone(exc_type)
self.assertIsNone(exc)
self.assertIsNone(exc_tb)
class ExitCM(object):
def __init__(self, check_exc):
self.check_exc = check_exc
def __enter__(self):
self.fail("Should not be called!")
def __exit__(self, *exc_details):
self.check_exc(*exc_details)
with ExitStack() as stack:
stack.push(_expect_ok)
self.assertIs(stack._exit_callbacks[-1], _expect_ok)
cm = ExitCM(_expect_ok)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
stack.push(_suppress_exc)
self.assertIs(stack._exit_callbacks[-1], _suppress_exc)
cm = ExitCM(_expect_exc)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
stack.push(_expect_exc)
self.assertIs(stack._exit_callbacks[-1], _expect_exc)
stack.push(_expect_exc)
self.assertIs(stack._exit_callbacks[-1], _expect_exc)
1/0
def test_enter_context(self):
class TestCM(object):
def __enter__(self):
result.append(1)
def __exit__(self, *exc_details):
result.append(3)
result = []
cm = TestCM()
with ExitStack() as stack:
@stack.callback # Registered first => cleaned up last
def _exit():
result.append(4)
self.assertIsNotNone(_exit)
stack.enter_context(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
result.append(2)
self.assertEqual(result, [1, 2, 3, 4])
def test_close(self):
result = []
with ExitStack() as stack:
@stack.callback
def _exit():
result.append(1)
self.assertIsNotNone(_exit)
stack.close()
result.append(2)
self.assertEqual(result, [1, 2])
def test_pop_all(self):
result = []
with ExitStack() as stack:
@stack.callback
def _exit():
result.append(3)
self.assertIsNotNone(_exit)
new_stack = stack.pop_all()
result.append(1)
result.append(2)
new_stack.close()
self.assertEqual(result, [1, 2, 3])
def test_exit_raise(self):
with self.assertRaises(ZeroDivisionError):
with ExitStack() as stack:
stack.push(lambda *exc: False)
1/0
def test_exit_suppress(self):
with ExitStack() as stack:
stack.push(lambda *exc: True)
1/0
def test_exit_exception_chaining_reference(self):
# Sanity check to make sure that ExitStack chaining matches
# actual nested with statements
class RaiseExc:
def __init__(self, exc):
self.exc = exc
def __enter__(self):
return self
def __exit__(self, *exc_details):
raise self.exc
class RaiseExcWithContext:
def __init__(self, outer, inner):
self.outer = outer
self.inner = inner
def __enter__(self):
return self
def __exit__(self, *exc_details):
try:
raise self.inner
except:
raise self.outer
class SuppressExc:
def __enter__(self):
return self
def __exit__(self, *exc_details):
type(self).saved_details = exc_details
return True
try:
with RaiseExc(IndexError):
with RaiseExcWithContext(KeyError, AttributeError):
with SuppressExc():
with RaiseExc(ValueError):
1 / 0
except IndexError as exc:
self.assertIsInstance(exc.__context__, KeyError)
self.assertIsInstance(exc.__context__.__context__, AttributeError)
# Inner exceptions were suppressed
self.assertIsNone(exc.__context__.__context__.__context__)
else:
self.fail("Expected IndexError, but no exception was raised")
# Check the inner exceptions
inner_exc = SuppressExc.saved_details[1]
self.assertIsInstance(inner_exc, ValueError)
self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)
def test_exit_exception_chaining(self):
# Ensure exception chaining matches the reference behaviour
def raise_exc(exc):
raise exc
saved_details = None
def suppress_exc(*exc_details):
nonlocal saved_details
saved_details = exc_details
return True
try:
with ExitStack() as stack:
stack.callback(raise_exc, IndexError)
stack.callback(raise_exc, KeyError)
stack.callback(raise_exc, AttributeError)
stack.push(suppress_exc)
stack.callback(raise_exc, ValueError)
1 / 0
except IndexError as exc:
self.assertIsInstance(exc.__context__, KeyError)
self.assertIsInstance(exc.__context__.__context__, AttributeError)
# Inner exceptions were suppressed
self.assertIsNone(exc.__context__.__context__.__context__)
else:
self.fail("Expected IndexError, but no exception was raised")
# Check the inner exceptions
inner_exc = saved_details[1]
self.assertIsInstance(inner_exc, ValueError)
self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)
def test_exit_exception_non_suppressing(self):
# http://bugs.python.org/issue19092
def raise_exc(exc):
raise exc
def suppress_exc(*exc_details):
return True
try:
with ExitStack() as stack:
stack.callback(lambda: None)
stack.callback(raise_exc, IndexError)
except Exception as exc:
self.assertIsInstance(exc, IndexError)
else:
self.fail("Expected IndexError, but no exception was raised")
try:
with ExitStack() as stack:
stack.callback(raise_exc, KeyError)
stack.push(suppress_exc)
stack.callback(raise_exc, IndexError)
except Exception as exc:
self.assertIsInstance(exc, KeyError)
else:
self.fail("Expected KeyError, but no exception was raised")
def test_exit_exception_with_correct_context(self):
# http://bugs.python.org/issue20317
@contextmanager
def gets_the_context_right(exc):
try:
yield
finally:
raise exc
exc1 = Exception(1)
exc2 = Exception(2)
exc3 = Exception(3)
exc4 = Exception(4)
# The contextmanager already fixes the context, so prior to the
# fix, ExitStack would try to fix it *again* and get into an
# infinite self-referential loop
try:
with ExitStack() as stack:
stack.enter_context(gets_the_context_right(exc4))
stack.enter_context(gets_the_context_right(exc3))
stack.enter_context(gets_the_context_right(exc2))
raise exc1
except Exception as exc:
self.assertIs(exc, exc4)
self.assertIs(exc.__context__, exc3)
self.assertIs(exc.__context__.__context__, exc2)
self.assertIs(exc.__context__.__context__.__context__, exc1)
self.assertIsNone(
exc.__context__.__context__.__context__.__context__)
def test_exit_exception_with_existing_context(self):
# Addresses a lack of test coverage discovered after checking in a
# fix for issue 20317 that still contained debugging code.
def raise_nested(inner_exc, outer_exc):
try:
raise inner_exc
finally:
raise outer_exc
exc1 = Exception(1)
exc2 = Exception(2)
exc3 = Exception(3)
exc4 = Exception(4)
exc5 = Exception(5)
try:
with ExitStack() as stack:
stack.callback(raise_nested, exc4, exc5)
stack.callback(raise_nested, exc2, exc3)
raise exc1
except Exception as exc:
self.assertIs(exc, exc5)
self.assertIs(exc.__context__, exc4)
self.assertIs(exc.__context__.__context__, exc3)
self.assertIs(exc.__context__.__context__.__context__, exc2)
self.assertIs(
exc.__context__.__context__.__context__.__context__, exc1)
self.assertIsNone(
exc.__context__.__context__.__context__.__context__.__context__)
def test_body_exception_suppress(self):
def suppress_exc(*exc_details):
return True
try:
with ExitStack() as stack:
stack.push(suppress_exc)
1/0
except IndexError as exc:
self.fail("Expected no exception, got IndexError")
def test_exit_exception_chaining_suppress(self):
with ExitStack() as stack:
stack.push(lambda *exc: True)
stack.push(lambda *exc: 1/0)
stack.push(lambda *exc: {}[1])
def test_excessive_nesting(self):
# The original implementation would die with RecursionError here
with ExitStack() as stack:
for i in range(10000):
stack.callback(int)
def test_instance_bypass(self):
class Example(object): pass
cm = Example()
cm.__exit__ = object()
stack = ExitStack()
self.assertRaises(AttributeError, stack.enter_context, cm)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1], cm)
def test_dont_reraise_RuntimeError(self):
# https://bugs.python.org/issue27122
class UniqueException(Exception): pass
class UniqueRuntimeError(RuntimeError): pass
@contextmanager
def second():
try:
yield 1
except Exception as exc:
raise UniqueException("new exception") from exc
@contextmanager
def first():
try:
yield 1
except Exception as exc:
raise exc
# The UniqueRuntimeError should be caught by second()'s exception
# handler which chain raised a new UniqueException.
with self.assertRaises(UniqueException) as err_ctx:
with ExitStack() as es_ctx:
es_ctx.enter_context(second())
es_ctx.enter_context(first())
raise UniqueRuntimeError("please no infinite loop.")
exc = err_ctx.exception
self.assertIsInstance(exc, UniqueException)
self.assertIsInstance(exc.__context__, UniqueRuntimeError)
self.assertIsNone(exc.__context__.__context__)
self.assertIsNone(exc.__context__.__cause__)
self.assertIs(exc.__cause__, exc.__context__)
class TestRedirectStream:
redirect_stream = None
orig_stream = None
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = self.redirect_stream.__doc__
obj = self.redirect_stream(None)
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_redirect_in_init(self):
orig_stdout = getattr(sys, self.orig_stream)
self.redirect_stream(None)
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
def test_redirect_to_string_io(self):
f = io.StringIO()
msg = "Consider an API like help(), which prints directly to stdout"
orig_stdout = getattr(sys, self.orig_stream)
with self.redirect_stream(f):
print(msg, file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue().strip()
self.assertEqual(s, msg)
def test_enter_result_is_target(self):
f = io.StringIO()
with self.redirect_stream(f) as enter_result:
self.assertIs(enter_result, f)
def test_cm_is_reusable(self):
f = io.StringIO()
write_to_f = self.redirect_stream(f)
orig_stdout = getattr(sys, self.orig_stream)
with write_to_f:
print("Hello", end=" ", file=getattr(sys, self.orig_stream))
with write_to_f:
print("World!", file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue()
self.assertEqual(s, "Hello World!\n")
def test_cm_is_reentrant(self):
f = io.StringIO()
write_to_f = self.redirect_stream(f)
orig_stdout = getattr(sys, self.orig_stream)
with write_to_f:
print("Hello", end=" ", file=getattr(sys, self.orig_stream))
with write_to_f:
print("World!", file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue()
self.assertEqual(s, "Hello World!\n")
class TestRedirectStdout(TestRedirectStream, unittest.TestCase):
redirect_stream = redirect_stdout
orig_stream = "stdout"
class TestRedirectStderr(TestRedirectStream, unittest.TestCase):
redirect_stream = redirect_stderr
orig_stream = "stderr"
class TestSuppress(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = suppress.__doc__
obj = suppress()
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_result_from_enter(self):
with suppress(ValueError) as enter_result:
self.assertIsNone(enter_result)
def test_no_exception(self):
with suppress(ValueError):
self.assertEqual(pow(2, 5), 32)
def test_exact_exception(self):
with suppress(TypeError):
len(5)
def test_exception_hierarchy(self):
with suppress(LookupError):
'Hello'[50]
def test_other_exception(self):
with self.assertRaises(ZeroDivisionError):
with suppress(TypeError):
1/0
def test_no_args(self):
with self.assertRaises(ZeroDivisionError):
with suppress():
1/0
def test_multiple_exception_args(self):
with suppress(ZeroDivisionError, TypeError):
1/0
with suppress(ZeroDivisionError, TypeError):
len(5)
def test_cm_is_reentrant(self):
ignore_exceptions = suppress(Exception)
with ignore_exceptions:
pass
with ignore_exceptions:
len(5)
with ignore_exceptions:
with ignore_exceptions: # Check nested usage
len(5)
outer_continued = True
1/0
self.assertTrue(outer_continued)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_17763 | # -*- coding: utf-8 -*-
# Copyright (C) 2019 Machine Learning Group of the University of Oldenburg.
# Licensed under the Academic Free License version 3.0
import torch as to
import tvo
import numpy as np
from typing import Dict
from tvo.variational._set_redundant_lpj_to_low_CPU import set_redundant_lpj_to_low_CPU
def _unique_ind(x: to.Tensor) -> to.Tensor:
"""Find indices of unique rows in tensor.
:param x: torch tensor
:returns: indices of unique rows in tensor.
"""
n = x.shape[0]
unique_rows, inverse_ind = to.unique(x, sorted=False, return_inverse=True, dim=0)
n_unique = unique_rows.shape[0]
perm = to.arange(n, device=inverse_ind.device)
    # make sure the inverse_ind entries that refer to old_states come last...
inverse_ind, perm = inverse_ind.flip([0]), perm.flip([0])
# ...so the indices that are written last in each position are the ones for old_states
uniq_ind = inverse_ind.new_empty(n_unique).scatter_(0, inverse_ind, perm)
return uniq_ind
def _set_redundant_lpj_to_low_GPU(new_states: to.Tensor, new_lpj: to.Tensor, old_states: to.Tensor):
"""Find redundant states in new_states w.r.t. old_states and set
corresponding lpg to low.
:param new_states: set of new variational states (batch_size, newS, H)
:param new_lpj: corresponding log-pseudo-joints (batch_size, newS)
:param old_states: (batch_size, S, H)
"""
N, S, H = old_states.shape
newS = new_states.shape[1]
# old_states must come first for np.unique to discard redundant new_states
old_and_new = to.cat((old_states, new_states), dim=1)
for n in range(N):
uniq_idx = _unique_ind(old_and_new[n])
# indexes of states in new_states[n] that are not in old_states[n]
new_uniq_idx = uniq_idx[uniq_idx >= S] - S
# BoolTensor in pytorch>=1.2, ByteTensor otherwise
bool_or_byte = (to.empty(0) < 0).dtype
mask = to.ones(newS, dtype=bool_or_byte, device=new_lpj.device)
# indexes of all non-unique states in new_states (complementary of new_uniq_idx)
mask[new_uniq_idx.to(device=new_lpj.device)] = 0
# set lpj of redundant states to an arbitrary low value
new_lpj[n][mask] = -1e20
# set_redundant_lpj_to_low is a performance hotspot. when running on CPU, we use a cython
# function that runs on numpy arrays, when running on GPU, we stick to torch tensors
def set_redundant_lpj_to_low(new_states: to.Tensor, new_lpj: to.Tensor, old_states: to.Tensor):
if tvo.get_device().type == "cpu":
set_redundant_lpj_to_low_CPU(new_states.numpy(), new_lpj.numpy(), old_states.numpy())
else:
_set_redundant_lpj_to_low_GPU(new_states, new_lpj, old_states)
def generate_unique_states(
n_states: int, H: int, crowdedness: float = 1.0, device: to.device = None
) -> to.Tensor:
"""Generate a torch tensor containing random and unique binary vectors.
:param n_states: number of unique vectors to be generated
:param H: size of binary vector
:param crowdedness: average crowdedness per state
:param device: torch.device of output Tensor. Defaults to tvo.get_device()
Requires that n_states <= 2**H. Return has shape (n_states, H).
"""
if device is None:
device = tvo.get_device()
assert n_states <= 2**H, "n_states must be smaller than 2**H"
n_samples = max(n_states // 2, 1)
s_set = {tuple(s) for s in np.random.binomial(1, p=crowdedness / H, size=(n_samples, H))}
while len(s_set) < n_states:
s_set.update(
{tuple(s) for s in np.random.binomial(1, p=crowdedness / H, size=(n_samples, H))}
)
while len(s_set) > n_states:
s_set.pop()
return to.from_numpy(np.array(tuple(s for s in s_set), dtype=int)).to(
dtype=to.uint8, device=device
)
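# Illustrative usage sketch (not part of the original module): the unique binary states
# produced above can seed an initial (N, S, H) tensor of variational states, for example:
#   states = generate_unique_states(n_states=4, H=8)      # (4, 8) uint8, all rows distinct
#   all_states = states.unsqueeze(0).repeat(100, 1, 1)    # one independent copy per data point, N=100
# The variable names and the repeat() pattern are assumptions chosen for demonstration only.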
def update_states_for_batch(
new_states: to.Tensor,
new_lpj: to.Tensor,
idx: to.Tensor,
all_states: to.Tensor,
all_lpj: to.Tensor,
sort_by_lpj: Dict[str, to.Tensor] = {},
) -> int:
"""Perform substitution of old and new states (and lpj, ...)
according to TVO criterion.
:param new_states: set of new variational states (idx.size, newS, H)
:param new_lpj: corresponding log-pseudo-joints (idx.size, newS)
    :param idx: indices of the datapoints that compose the batch within the dataset
:param all_states: set of all variational states (N, S, H)
:param all_lpj: corresponding log-pseudo-joints (N, S)
:param sort_by_lpj: optional list of tensors with shape (n,s,...) that will be
sorted by all_lpj, the same way all_lpj and all_states are sorted.
S is the number of variational states memorized for each of the N
data-points. idx contains the ordered list of indexes for which the
new_states have been evaluated (i.e. the states in new_states[0] are to
be put into all_s[idx[0]]. all_s[n] is updated to contain the set of
variational states with best log-pseudo-joints.
"""
# TODO Find out why lpj precision decreases for states without substitutions
# (difference on the order of 1e-15).
S = all_states.shape[1]
batch_size, newS, H = new_states.shape
old_states = all_states[idx]
old_lpj = all_lpj[idx]
assert old_states.shape == (batch_size, S, H)
assert old_lpj.shape == (batch_size, S)
conc_states = to.cat((old_states, new_states), dim=1)
conc_lpj = to.cat((old_lpj, new_lpj), dim=1) # (batch_size, S+newS)
# is (batch_size, S)
sorted_idx = to.flip(to.topk(conc_lpj, k=S, dim=1, largest=True, sorted=True)[1], [1])
flattened_sorted_idx = sorted_idx.flatten()
idx_n = idx.repeat(S, 1).t().flatten()
idx_s = to.arange(S, device=all_states.device).repeat(batch_size)
idx_sc = to.arange(batch_size, device=all_states.device).repeat(S, 1).t().flatten()
all_states[idx_n, idx_s] = conc_states[idx_sc, flattened_sorted_idx]
all_lpj[idx_n, idx_s] = conc_lpj[idx_sc, flattened_sorted_idx]
for t in sort_by_lpj.values():
idx_n_ = to.arange(batch_size).repeat(S, 1).t().flatten()
t[idx_n_, idx_s] = t[idx_n_, flattened_sorted_idx]
return (sorted_idx >= old_states.shape[1]).sum().item() # nsubs
def lpj2pjc(lpj: to.Tensor):
"""Shift log-pseudo-joint and convert log- to actual probability
:param lpj: log-pseudo-joint tensor
:returns: probability tensor
"""
up_lpg_bound = 0.0
shft = up_lpg_bound - lpj.max(dim=1, keepdim=True)[0]
tmp = to.exp(lpj + shft)
return tmp.div_(tmp.sum(dim=1, keepdim=True))
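# A minimal sketch (illustrative, not part of the original tvo module): lpj2pjc amounts to a
# numerically stable softmax over the state dimension of the log-pseudo-joints. The helper
# below is never called by the library code, and its tensor values are made up.
def _lpj2pjc_example() -> to.Tensor:
    lpj = to.tensor([[0.0, -1.0, -2.0], [5.0, 5.0, 5.0]])  # (N=2, S=3) log-pseudo-joints
    pjc = lpj2pjc(lpj)
    # each row of pjc sums to 1; the second row is uniform because its lpj values are all equal
    return pjc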
def _mean_post_einsum(g: to.Tensor, lpj: to.Tensor) -> to.Tensor:
"""Compute expectation value of g(s) w.r.t truncated variational distribution q(s).
:param g: Values of g(s) with shape (N,S,...).
:param lpj: Log-pseudo-joint with shape (N,S).
:returns: tensor with shape (N,...).
"""
return to.einsum("ns...,ns->n...", (g, lpj2pjc(lpj)))
def _mean_post_mul(g: to.Tensor, lpj: to.Tensor) -> to.Tensor:
"""Compute expectation value of g(s) w.r.t truncated variational distribution q(s).
:param g: Values of g(s) with shape (N,S,...).
:param lpj: Log-pseudo-joint with shape (N,S).
:returns: tensor with shape (N,...).
"""
# reshape lpj from (N,S) to (N,S,1,...), to match dimensionality of g
lpj = lpj.view(*lpj.shape, *(1 for _ in range(g.ndimension() - 2)))
return lpj2pjc(lpj).mul(g).sum(dim=1)
def mean_posterior(g: to.Tensor, lpj: to.Tensor) -> to.Tensor:
"""Compute expectation value of g(s) w.r.t truncated variational distribution q(s).
:param g: Values of g(s) with shape (N,S,...).
:param lpj: Log-pseudo-joint with shape (N,S).
:returns: tensor with shape (N,...).
"""
if tvo.get_device().type == "cpu":
means = _mean_post_einsum(g, lpj)
else:
means = _mean_post_mul(g, lpj)
assert means.shape == (g.shape[0], *g.shape[2:])
assert not to.isnan(means).any() and not to.isinf(means).any()
return means
|
the-stack_0_17764 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Helveticum Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP 9 soft forks.
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 block and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
from io import BytesIO
import time
import itertools
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1']]
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
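    # Version-bits background for the tests below: under BIP9 an nVersion of 0x20000001 has
    # the required 001 top bits plus bit 0 set, so it signals readiness for the deployment
    # assigned to bit 0, while a plain nVersion of 4 carries no version-bits signal at all
    # ("signalling not").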
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
return info['bip9_softforks'][key]
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert_equal(tmpl['version'], 0x20000000)
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 1-A
# check stats after max number of "signalling not" blocks such that LOCKED_IN still possible this period
test_blocks = self.generate_blocks(36, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(10, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
# Test 1-B
# check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period
test_blocks = self.generate_blocks(1, 4, test_blocks) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False)
# Test 1-C
# finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN
test_blocks = self.generate_blocks(97, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(57, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
# check counting stats and "possible" flag before last block of this period achieves LOCKED_IN...
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 143)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# ...continue with Test 3
test_blocks = self.generate_blocks(1, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 4
        # 143 more non-signalling blocks (waiting period-1)
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
assert_equal(self.get_bip9_status(bipName)['since'], 720)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert(not (tmpl['version'] & (1 << bitno)))
# Test 6
# Check that the new sequence lock rules are enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
# Restart all
self.test.clear_all_connections()
self.stop_nodes()
shutil.rmtree(self.options.tmpdir + "/node0")
self.setup_chain()
self.setup_network()
self.test.add_all_connections(self.nodes)
NetworkThread().start()
self.test.test_nodes[0].wait_for_verack()
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
"""Modify the signature in vin 0 of the tx to fail CSV
Prepends -1 CSV DROP in the scriptSig itself.
"""
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
"""Modify the nSequence to make it fails once sequence lock rule is
activated (high timespan).
"""
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
"""Modify the nLockTime to make it fails once MTP rule is activated."""
# Disable Sequence lock, Activate nLockTime
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main()
|
the-stack_0_17765 | import subprocess
import pkg_resources
import json
class funnel:
"""
The funnel subdeployer to manage funnel deployment via Docker
"""
def __init__(self):
"""
Constructor for the funnel subdeployer
Determines paths to coordinate deployment
"""
self.pkgName = __name__
funnelPath = '/'.join(('.'))
self.funnelDir = pkg_resources.resource_filename(self.pkgName, funnelPath)
def route(self, args):
"""
The entry-point method for the subdeployer
Coordinates the deployment scheme based on the arguments
Configures and deploys the software container holding funnel
Parameters:
argparse.Namespace args - command-line arguments object
"""
# deploy funnel if selected
if args.funnel:
# configure the funnel setup based on args
self.config(args)
# run the Docker container
self.deployDocker(args.funnelImageName,
args.funnelContainerName,
args.funnelPort)
def deployDocker(self, funnelImageName, funnelContainerName, funnelPort):
"""
Deploy the funnel server via docker
Parameters:
string funnelImageName
string funnelContainerName
string funnelPort
Returns: None
"""
build = ["docker", "build", "-t", funnelImageName, self.funnelDir]
subprocess.call(build)
        # Funnel must be able to call Docker from inside its own Docker container,
        # so we bind-mount the host's Docker socket into that container.
run = ["docker", "run",
"-v", "/var/run/docker.sock:/var/run/docker.sock",
"-p", funnelPort + ":3002",
"--name", funnelContainerName, funnelImageName]
subprocess.Popen(run)
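        # For reference, the run command assembled above is roughly equivalent to:
        #   docker run -v /var/run/docker.sock:/var/run/docker.sock \
        #       -p <funnelPort>:3002 --name <funnelContainerName> <funnelImageName>
        # with the placeholders filled in from this method's arguments.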
def config(self, args):
"""
Writes the keycloak.json file for the funnel client
Parameters:
argparse.Namespace args - An object containing the command-line
arguments as attributes
Returns: None
"""
fileName = self.funnelDir + "/funnel-node/node-client/keycloak.json"
authUrl = "http://" + args.keycloakIP + ":" + args.keycloakPort + "/auth"
redirectList = [ "http://" + args.funnelIP + ":" + args.funnelPort + "/oidc_callback" ]
secretDict = { "secret" : args.funnelSecret }
keycloakData = { "realm" : args.realmName,
"auth-server-url": authUrl,
"resource" : args.funnelID,
"redirect_uris" : redirectList,
"credentials" : secretDict }
jsonData = json.dumps(keycloakData, indent=1)
fileHandle = open(fileName, "w")
fileHandle.write(jsonData)
fileHandle.close()
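        # The generated keycloak.json has this general shape (all values below are
        # illustrative placeholders, not taken from a real deployment):
        #   {
        #    "realm": "<realmName>",
        #    "auth-server-url": "http://<keycloakIP>:<keycloakPort>/auth",
        #    "resource": "<funnelID>",
        #    "redirect_uris": ["http://<funnelIP>:<funnelPort>/oidc_callback"],
        #    "credentials": {"secret": "<funnelSecret>"}
        #   }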
|
the-stack_0_17766 | # Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import random
import numpy as np
from config import global_config as cfg
from reader import CamRest676Reader, get_glove_matrix
from reader import KvretReader
from network import FSDM, cuda_
from torch.optim import Adam
from torch.autograd import Variable
from reader import pad_sequences
import argparse, time
from metric import CamRestEvaluator, KvretEvaluator
import logging
class Model:
def __init__(self, dataset):
reader_dict = {
'camrest': CamRest676Reader,
'kvret': KvretReader,
}
model_dict = {
'FSDM': FSDM
}
evaluator_dict = {
'camrest': CamRestEvaluator,
'kvret': KvretEvaluator,
}
self.reader = reader_dict[dataset]()
self.m = model_dict[cfg.m](embed_size=cfg.embedding_size,
hidden_size=cfg.hidden_size,
vocab_size=cfg.vocab_size,
layer_num=cfg.layer_num,
dropout_rate=cfg.dropout_rate,
z_length=cfg.z_length,
max_ts=cfg.max_ts,
beam_search=cfg.beam_search,
beam_size=cfg.beam_size,
eos_token_idx=self.reader.vocab.encode('EOS_M'),
vocab=self.reader.vocab,
teacher_force=cfg.teacher_force,
degree_size=cfg.degree_size,
num_head=cfg.num_head,
separate_enc=cfg.separate_enc)
self.EV = evaluator_dict[dataset] # evaluator class
if cfg.cuda:
self.m = self.m.cuda()
self.base_epoch = -1
def _to_onehot(self, encoded):
_np = np.zeros((cfg.vocab_size, 1))
for idx in encoded:
_np[idx] = 1.
return _np
def _convert_batch(self, py_batch, prev_z_py=None):
kw_ret = {}
requested_7_np = np.stack(py_batch['requested_7'], axis=0).transpose()
requested_7_np = requested_7_np[:, :, np.newaxis] # 7, batchsize, 1
response_7_np = np.stack(py_batch['response_7'], axis=0).transpose()
response_7_np = response_7_np[:, :, np.newaxis] # 7, batchsize, 1
requestable_key = py_batch['requestable_key'] # (batchsize, 7) keys
requestable_slot = py_batch['requestable_slot'] # (batchsize, 7) slots
requestable_key_np = pad_sequences(requestable_key, len(requestable_key[0]), padding='post',
truncating='post').transpose((1, 0))
requestable_slot_np = pad_sequences(requestable_slot, len(requestable_slot[0]), padding='post',
truncating='post').transpose((1, 0))
kw_ret['requestable_key_np'] = requestable_key_np
kw_ret['requestable_slot_np'] = requestable_slot_np
kw_ret['requestable_key'] = cuda_(Variable(torch.from_numpy(requestable_key_np).long()))
kw_ret['requestable_slot'] = cuda_(Variable(torch.from_numpy(requestable_slot_np).long()))
kw_ret['requested_7'] = cuda_(Variable(torch.from_numpy(requested_7_np).float()))
kw_ret['response_7'] = cuda_(Variable(torch.from_numpy(response_7_np).float()))
u_input_py = py_batch['user']
u_len_py = py_batch['u_len']
if cfg.prev_z_method == 'concat' and prev_z_py is not None:
for i in range(len(u_input_py)):
eob = self.reader.vocab.encode('EOS_Z2')
if eob in prev_z_py[i] and prev_z_py[i].index(eob) != len(prev_z_py[i]) - 1:
idx = prev_z_py[i].index(eob)
u_input_py[i] = prev_z_py[i][:idx + 1] + u_input_py[i]
else:
u_input_py[i] = prev_z_py[i] + u_input_py[i]
u_len_py[i] = len(u_input_py[i])
for j, word in enumerate(prev_z_py[i]):
if word >= cfg.vocab_size:
prev_z_py[i][j] = 2 # unk
elif cfg.prev_z_method == 'separate' and prev_z_py is not None:
for i in range(len(prev_z_py)):
eob = self.reader.vocab.encode('EOS_Z2')
if eob in prev_z_py[i] and prev_z_py[i].index(eob) != len(prev_z_py[i]) - 1:
idx = prev_z_py[i].index(eob)
prev_z_py[i] = prev_z_py[i][:idx + 1]
for j, word in enumerate(prev_z_py[i]):
if word >= cfg.vocab_size:
prev_z_py[i][j] = 2 # unk
prev_z_input_np = pad_sequences(prev_z_py, cfg.max_ts, padding='post', truncating='pre').transpose((1, 0))
prev_z_len = np.array([len(_) for _ in prev_z_py])
prev_z_input = cuda_(Variable(torch.from_numpy(prev_z_input_np).long()))
kw_ret['prev_z_len'] = prev_z_len
kw_ret['prev_z_input'] = prev_z_input
kw_ret['prev_z_input_np'] = prev_z_input_np
degree_input_np = np.array(py_batch['degree'])
u_input_np = pad_sequences(u_input_py, cfg.max_ts, padding='post', truncating='pre').transpose((1, 0))
m_input_np = pad_sequences(py_batch['response'], cfg.max_ts, padding='post', truncating='post').transpose(
(1, 0))
r_input_np = pad_sequences(py_batch['requested'], cfg.req_length, padding='post', truncating='post').transpose(
(1, 0)) # (seqlen, batchsize)
k_input_np = pad_sequences(py_batch['constraint_key'], len(py_batch['constraint_key'][0]), padding='post',
truncating='post').transpose(
(1, 0))
flat_constraint_value = []
num_k = k_input_np.shape[0]
for b in py_batch['constraint_value']:
for k in b:
flat_constraint_value.append(k)
flat_i_input_np = pad_sequences(flat_constraint_value, cfg.inf_length, padding='post', truncating='post')
i_input_np = []
i_k_input_np = []
for idx, k in enumerate(flat_i_input_np):
i_k_input_np.append(k)
if (idx + 1) % num_k == 0:
i_input_np.append(np.asarray(i_k_input_np))
i_k_input_np = []
i_input_np = np.asarray(i_input_np) # (batchsize, key_size, seqlen)
u_len = np.array(u_len_py)
m_len = np.array(py_batch['m_len'])
degree_input = cuda_(Variable(torch.from_numpy(degree_input_np).float()))
u_input = cuda_(Variable(torch.from_numpy(u_input_np).long()))
m_input = cuda_(Variable(torch.from_numpy(m_input_np).long()))
r_input = cuda_(Variable(torch.from_numpy(r_input_np).long()))
k_input = cuda_(Variable(torch.from_numpy(k_input_np).long()))
i_input = cuda_(Variable(torch.from_numpy(i_input_np).long()))
i_input = i_input.permute(1, 2, 0)
z_input = []
for k_i_input in i_input:
z_input.append(k_i_input)
z_input = torch.cat(z_input, dim=0)
z_input_np = z_input.cpu().data.numpy()
kw_ret['z_input_np'] = z_input_np
return u_input, u_input_np, z_input, m_input, m_input_np, u_len, m_len, \
degree_input, k_input, i_input, r_input, kw_ret, py_batch['constraint_eos']
def _test_convert_batch(self, py_batch, prev_z_py=None, prev_m_py=None): # ???not easy to write
kw_ret = {}
requested_7_np = np.stack(py_batch['requested_7'], axis=0).transpose()
requested_7_np = requested_7_np[:, :, np.newaxis] # 7, batchsize, 1
response_7_np = np.stack(py_batch['response_7'], axis=0).transpose()
response_7_np = response_7_np[:, :, np.newaxis] # 7, batchsize, 1
requestable_key = py_batch['requestable_key'] # (batchsize, 7) keys
requestable_slot = py_batch['requestable_slot'] # (batchsize, 7) slots
requestable_key_np = pad_sequences(requestable_key, len(requestable_key[0]), padding='post',
truncating='pre').transpose((1, 0))
requestable_slot_np = pad_sequences(requestable_slot, len(requestable_slot[0]), padding='post',
truncating='pre').transpose((1, 0))
kw_ret['requestable_key_np'] = requestable_key_np
kw_ret['requestable_slot_np'] = requestable_slot_np
kw_ret['requestable_key'] = cuda_(Variable(torch.from_numpy(requestable_key_np).long()))
kw_ret['requestable_slot'] = cuda_(Variable(torch.from_numpy(requestable_slot_np).long()))
kw_ret['requested_7'] = cuda_(Variable(torch.from_numpy(requested_7_np).float()))
kw_ret['response_7'] = cuda_(Variable(torch.from_numpy(response_7_np).float()))
u_input_py = py_batch['user']
u_len_py = py_batch['u_len']
eom = self.reader.vocab.encode('EOS_M')
if prev_m_py != None:
fix_u_input_py = []
for b, m in zip(u_input_py, prev_m_py):
if eom in b:
idx = b.index(eom)
b = b[idx + 1:]
if eom in m:
idx = m.index(eom)
m = m[:idx + 1]
m = [self.reader.vocab.encode('<unk>') if w >= cfg.vocab_size else w for w in m]
fix_u_input_py.append(m + b)
else:
fix_u_input_py.append(b)
u_input_py = fix_u_input_py
u_len_py = [len(b) for b in fix_u_input_py]
if cfg.prev_z_method == 'concat' and prev_z_py is not None:
for i in range(len(u_input_py)):
eob = self.reader.vocab.encode('EOS_Z2')
if eob in prev_z_py[i] and prev_z_py[i].index(eob) != len(prev_z_py[i]) - 1:
idx = prev_z_py[i].index(eob)
u_input_py[i] = prev_z_py[i][:idx + 1] + u_input_py[i]
else:
u_input_py[i] = prev_z_py[i] + u_input_py[i]
u_len_py[i] = len(u_input_py[i])
for j, word in enumerate(prev_z_py[i]):
if word >= cfg.vocab_size:
prev_z_py[i][j] = 2 # unk
elif cfg.prev_z_method == 'separate' and prev_z_py is not None:
for i in range(len(prev_z_py)):
eob = self.reader.vocab.encode('EOS_Z2')
if eob in prev_z_py[i] and prev_z_py[i].index(eob) != len(prev_z_py[i]) - 1:
idx = prev_z_py[i].index(eob)
prev_z_py[i] = prev_z_py[i][:idx + 1]
for j, word in enumerate(prev_z_py[i]):
if word >= cfg.vocab_size:
prev_z_py[i][j] = 2 # unk
prev_z_input_np = pad_sequences(prev_z_py, cfg.max_ts, padding='post', truncating='pre').transpose((1, 0))
prev_z_len = np.array([len(_) for _ in prev_z_py])
prev_z_input = cuda_(Variable(torch.from_numpy(prev_z_input_np).long()))
kw_ret['prev_z_len'] = prev_z_len
kw_ret['prev_z_input'] = prev_z_input
kw_ret['prev_z_input_np'] = prev_z_input_np
degree_input_np = np.array(py_batch['degree'])
u_input_np = pad_sequences(u_input_py, cfg.max_ts, padding='post', truncating='pre').transpose((1, 0))
m_input_np = pad_sequences(py_batch['response'], cfg.max_ts, padding='post', truncating='post').transpose(
(1, 0))
r_input_np = pad_sequences(py_batch['requested'], cfg.req_length, padding='post', truncating='post').transpose(
(1, 0)) # (seqlen, batchsize)
k_input_np = pad_sequences(py_batch['constraint_key'], len(py_batch['constraint_key'][0]), padding='post',
truncating='post').transpose(
(1, 0))
flat_constraint_value = []
num_k = k_input_np.shape[0]
for b in py_batch['constraint_value']:
for k in b:
flat_constraint_value.append(k)
inf_length = max([len(l) for l in flat_constraint_value])
print(inf_length)
flat_i_input_np = pad_sequences(flat_constraint_value, cfg.inf_length, padding='post', truncating='post')
i_input_np = []
i_k_input_np = []
for idx, k in enumerate(flat_i_input_np):
i_k_input_np.append(k)
if (idx + 1) % num_k == 0:
i_input_np.append(np.asarray(i_k_input_np))
i_k_input_np = []
i_input_np = np.asarray(i_input_np) # (batchsize, key_size, seqlen)
u_len = np.array(u_len_py)
m_len = np.array(py_batch['m_len'])
degree_input = cuda_(Variable(torch.from_numpy(degree_input_np).float()))
u_input = cuda_(Variable(torch.from_numpy(u_input_np).long()))
m_input = cuda_(Variable(torch.from_numpy(m_input_np).long()))
r_input = cuda_(Variable(torch.from_numpy(r_input_np).long()))
k_input = cuda_(Variable(torch.from_numpy(k_input_np).long()))
i_input = cuda_(Variable(torch.from_numpy(i_input_np).long()))
i_input = i_input.permute(1, 2, 0)
z_input = []
for k_i_input in i_input:
z_input.append(k_i_input)
z_input = torch.cat(z_input, dim=0)
z_input_np = z_input.cpu().data.numpy()
kw_ret['z_input_np'] = z_input_np
if 'database' in py_batch.keys():
database = py_batch['database']
else:
database = None
return u_input, u_input_np, z_input, m_input, m_input_np, u_len, m_len, \
degree_input, k_input, i_input, r_input, kw_ret, database, py_batch['constraint_eos']
def train(self):
lr = cfg.lr
prev_min_loss = 0.
early_stop_count = cfg.early_stop_count
train_time = 0
for epoch in range(cfg.epoch_num):
loss_weights = [1., 1., 1., 1.]
sw = time.time()
if epoch <= self.base_epoch:
continue
self.training_adjust(epoch)
self.m.self_adjust(epoch)
sup_loss = 0
sup_cnt = 0
data_iterator = self.reader.mini_batch_iterator('train')
optim = Adam(lr=lr, params=filter(lambda x: x.requires_grad, self.m.parameters()), weight_decay=1e-5)
for iter_num, dial_batch in enumerate(data_iterator):
turn_states = {}
prev_z = None
for turn_num, turn_batch in enumerate(dial_batch):
if cfg.truncated:
logging.debug('iter %d turn %d' % (iter_num, turn_num))
optim.zero_grad()
u_input, u_input_np, z_input, m_input, m_input_np, u_len, \
m_len, degree_input, k_input, i_input, r_input, kw_ret, constraint_eos \
= self._convert_batch(turn_batch, prev_z)
loss, pr_loss, m_loss, turn_states, req_loss, res_loss = self.m(u_input=u_input, z_input=z_input,
m_input=m_input,
degree_input=degree_input,
u_input_np=u_input_np,
m_input_np=m_input_np,
turn_states=turn_states,
u_len=u_len, m_len=m_len,
k_input=k_input,
i_input=i_input,
r_input=r_input,
loss_weights=loss_weights,
mode='train', **kw_ret)
loss.backward(retain_graph=turn_num != len(dial_batch) - 1)
grad = torch.nn.utils.clip_grad_norm_(self.m.parameters(), 10.0)
optim.step()
sup_loss += loss.cpu().item()
sup_cnt += 1
logging.debug(
'loss:{} pr_loss:{} req_loss:{} res_loss:{} m_loss:{} grad:{}'.format(loss.cpu().item(),
pr_loss.cpu().item(),
req_loss.cpu().item(),
res_loss.cpu().item(),
m_loss.cpu().item(),
grad))
prev_z = turn_batch['bspan']
epoch_sup_loss = sup_loss / (sup_cnt + 1e-8)
train_time += time.time() - sw
            logging.info('Training time: {}'.format(train_time))
logging.info('avg training loss in epoch %d sup:%f' % (epoch, epoch_sup_loss))
valid_sup_loss, valid_unsup_loss = self.validate()
logging.info('validation loss in epoch %d sup:%f unsup:%f' % (epoch, valid_sup_loss, valid_unsup_loss))
logging.info('time for epoch %d: %f' % (epoch, time.time() - sw))
valid_loss = valid_sup_loss + valid_unsup_loss
metrics = self.eval(data='dev')
valid_metrics = metrics[-1] + metrics[-2] + metrics[-3]
logging.info('valid metric %f ' % (valid_metrics))
if valid_metrics >= prev_min_loss:
self.save_model(epoch)
prev_min_loss = valid_metrics
early_stop_count = cfg.early_stop_count
else:
early_stop_count -= 1
lr *= cfg.lr_decay
if not early_stop_count:
break
logging.info('early stop countdown %d, learning rate %f' % (early_stop_count, lr))
def eval(self, data='test'):
self.m.eval()
self.reader.result_file = None
data_iterator = self.reader.mini_batch_iterator(data)
mode = 'test' if not cfg.pretrain else 'pretrain_test'
for batch_num, dial_batch in enumerate(data_iterator):
turn_states = {}
prev_z = None
for turn_num, turn_batch in enumerate(dial_batch):
u_input, u_input_np, z_input, m_input, m_input_np, u_len, \
m_len, degree_input, k_input, i_input, r_input, kw_ret, constraint_eos \
= self._convert_batch(turn_batch, prev_z)
m_idx, z_idx, turn_states = self.m(u_input=u_input, z_input=z_input,
m_input=m_input,
degree_input=degree_input,
u_input_np=u_input_np,
m_input_np=m_input_np,
turn_states=turn_states,
u_len=u_len, m_len=m_len,
k_input=k_input,
i_input=i_input,
r_input=r_input,
mode='test', **kw_ret)
self.reader.wrap_result(turn_batch, m_idx, z_idx, prev_z=prev_z)
prev_z = z_idx
if self.reader.result_file != None:
self.reader.result_file.close()
ev = self.EV(result_path=cfg.result_path, data=data)
res = ev.run_metrics()
self.m.train()
return res
def validate(self, loss_weights=[1., 1., 1., 1.], data='dev'):
self.m.eval()
data_iterator = self.reader.mini_batch_iterator(data)
sup_loss, unsup_loss = 0, 0
sup_cnt, unsup_cnt = 0, 0
for dial_batch in data_iterator:
turn_states = {}
for turn_num, turn_batch in enumerate(dial_batch):
u_input, u_input_np, z_input, m_input, m_input_np, u_len, \
m_len, degree_input, k_input, i_input, r_input, kw_ret, constraint_eos \
= self._convert_batch(turn_batch)
loss, pr_loss, m_loss, turn_states, req_loss, res_loss = self.m(u_input=u_input, z_input=z_input,
m_input=m_input,
degree_input=degree_input,
u_input_np=u_input_np,
m_input_np=m_input_np,
turn_states=turn_states,
u_len=u_len, m_len=m_len,
k_input=k_input,
i_input=i_input,
r_input=r_input,
loss_weights=loss_weights,
mode='train', **kw_ret)
sup_loss += loss.cpu().item()
sup_cnt += 1
logging.debug(
'loss:{} pr_loss:{} req_loss:{} res_loss:{} m_loss:{}'.format(loss.cpu().item(), pr_loss.cpu().item(),
req_loss.cpu().item(),
res_loss.cpu().item(),
m_loss.cpu().item()))
sup_loss /= (sup_cnt + 1e-8)
unsup_loss /= (unsup_cnt + 1e-8)
self.m.train()
return sup_loss, unsup_loss
def save_model(self, epoch, path=None):
if not path:
path = cfg.model_path
all_state = {'lstd': self.m.state_dict(),
'config': cfg.__dict__,
'epoch': epoch}
torch.save(all_state, path)
def load_model(self, path=None):
if not path:
path = cfg.model_path
all_state = torch.load(path)
self.m.load_state_dict(all_state['lstd'])
self.base_epoch = all_state.get('epoch', 0)
def training_adjust(self, epoch):
return
def freeze_module(self, module):
for param in module.parameters():
param.requires_grad = False
def unfreeze_module(self, module):
for param in module.parameters():
param.requires_grad = True
def load_glove_embedding(self):
initial_arr = self.m.u_encoder.embedding.weight.data.cpu().numpy()
embedding_arr = torch.from_numpy(get_glove_matrix(self.reader.vocab, initial_arr))
self.m.u_encoder.embedding.weight.data.copy_(embedding_arr)
self.m.u_encoder.embedding.weight.requires_grad = cfg.emb_trainable
if cfg.separate_enc:
self.m.z_encoder.embedding.weight.data.copy_(embedding_arr)
self.m.z_encoder.embedding.weight.requires_grad = cfg.emb_trainable
for i in range(cfg.num_head):
self.m.z_decoders[i].emb.weight.data.copy_(embedding_arr)
self.m.z_decoders[i].emb.weight.requires_grad = cfg.emb_trainable
self.m.req_classifiers.emb.weight.data.copy_(embedding_arr)
self.m.req_classifiers.emb.weight.requires_grad = cfg.emb_trainable
self.m.res_classifiers.emb.weight.data.copy_(embedding_arr)
self.m.res_classifiers.emb.weight.requires_grad = cfg.emb_trainable
self.m.m_decoder.emb.weight.data.copy_(embedding_arr)
self.m.m_decoder.emb.weight.requires_grad = cfg.emb_trainable
def count_params(self):
module_parameters = filter(lambda p: p.requires_grad, self.m.parameters())
param_cnt = sum([np.prod(p.size()) for p in module_parameters if p.requires_grad == True])
print('total trainable params: %d' % param_cnt)
print(self.m)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-mode')
parser.add_argument('-data')
parser.add_argument('-cfg', nargs='*')
args = parser.parse_args()
cfg.init_handler(args.data)
if args.cfg:
for pair in args.cfg:
k, v = tuple(pair.split('='))
dtype = type(getattr(cfg, k))
if dtype == type(None):
raise ValueError()
if dtype is bool:
v = False if v == 'False' else True
else:
v = dtype(v)
setattr(cfg, k, v)
logging.debug(str(cfg))
if cfg.cuda:
torch.cuda.set_device(cfg.cuda_device)
logging.debug('Device: {}'.format(torch.cuda.current_device()))
cfg.mode = args.mode
torch.manual_seed(cfg.seed)
torch.cuda.manual_seed(cfg.seed)
random.seed(cfg.seed)
np.random.seed(cfg.seed)
m = Model(args.data.split('-')[-1])
m.count_params()
if args.mode == 'train':
m.load_glove_embedding()
m.m.beam_search = False
m.train()
elif args.mode == 'adjust':
m.load_model()
m.train()
m.load_model()
m.eval()
elif args.mode == 'test':
m.load_model()
m.eval(data='test')
if __name__ == '__main__':
main()
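# Example invocations (illustrative; the dataset keys understood by cfg.init_handler and the
# exact set of overridable cfg fields are assumptions based on the argparse setup above):
#   python model.py -mode train -data camrest
#   python model.py -mode test -data kvret -cfg beam_search=True beam_size=10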
|
the-stack_0_17768 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 9 05:43:47 2021
@author: wq
"""
import os
from detectron2 import model_zoo
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import ColorMode
import numpy as np
import cv2
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data.datasets import register_coco_instances
from numpy import mat
# The section below is the part you need to edit yourself: calibrate the coordinates of the
# four corners of the environment according to your own robot's configuration.
rob_cor_1 = (0.337180175851907, -0.7709528989764918)
rob_cor_2 = (-0.3383507457068013, -0.7918474781347146)
rob_cor_3 = (0.3435026039288244, -0.3769407945516401)
rob_cor_4 = (-0.3350733477311105, -0.3822064940321181)
################################################################################
def get_metadata():
path_to_train_image = './trained_cnn/UR5_sim_coco/train'
path_to_train_json = './trained_cnn/UR5_sim_coco/annotations/train.json'
register_coco_instances(
'train', {}, path_to_train_json, path_to_train_image)
coco_val_metadata = MetadataCatalog.get('train')
return coco_val_metadata
def get_predictor():
cfg = get_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.merge_from_file(model_zoo.get_config_file(
"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    # Load the locally trained weights (the model-zoo config above only provides the architecture)
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "./trained_cnn/model_final.pth")
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 4  # number of object classes in the custom dataset
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold
predictor = DefaultPredictor(cfg)
return predictor
def brg2rgb(image_rgb, resolution):
image_rgb_r = [image_rgb[i] for i in range(0, len(image_rgb), 3)]
image_rgb_r = np.array(image_rgb_r)
image_rgb_r = image_rgb_r.reshape(resolution[1], resolution[0])
image_rgb_r = image_rgb_r.astype(np.uint8)
image_rgb_g = [image_rgb[i] for i in range(1, len(image_rgb), 3)]
image_rgb_g = np.array(image_rgb_g)
image_rgb_g = image_rgb_g.reshape(resolution[1], resolution[0])
image_rgb_g = image_rgb_g.astype(np.uint8)
image_rgb_b = [image_rgb[i] for i in range(2, len(image_rgb), 3)]
image_rgb_b = np.array(image_rgb_b)
image_rgb_b = image_rgb_b.reshape(resolution[1], resolution[0])
image_rgb_b = image_rgb_b.astype(np.uint8)
result_rgb = cv2.merge([image_rgb_b, image_rgb_g, image_rgb_r])
result_rgb = cv2.flip(result_rgb, 0)
return result_rgb
def visulization(result_rgb, metadata, outputs):
v = Visualizer(result_rgb[:, :, ::-1],metadata=metadata, scale=0.5, instance_mode=ColorMode.IMAGE_BW)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2.namedWindow("prediction",0);
cv2.resizeWindow("prediction", 1024, 512)
cv2.moveWindow("prediction",0,0)
cv2.imshow("prediction",out.get_image()[:, :, ::-1])
cv2.waitKey(0)
cv2.destroyAllWindows()
from copy import copy
def loc_label(outputs):
boxes = outputs["instances"].pred_boxes
center_pos = boxes.get_centers()
result_pos = center_pos.numpy().tolist()
sorted_list = []
result_pocy = copy(result_pos)
for i in range(len(result_pos)):
resmin = result_pocy[0]
for j in range(len(result_pocy)):
if resmin[0] > result_pocy[j][0]:
resmin = result_pocy[j]
sorted_list.append(resmin)
result_pocy.remove(resmin)
label1_3 = [sorted_list[0],sorted_list[1]]
label2_4 = [sorted_list[-1],sorted_list[-2]]
if label1_3[0][1] < label1_3[1][1]:
label1 = label1_3[0]
label3 = label1_3[1]
else:
label1 = label1_3[1]
label3 = label1_3[0]
if label2_4[0][1] < label2_4[1][1]:
label2 = label2_4[0]
label4 = label2_4[1]
else:
label2 = label2_4[1]
label4 = label2_4[0]
return [label1, label2, label3, label4]
def cal_obj_pos(obj_rgb_coor,label_coordinate):
rgb_cor_1 = label_coordinate[0]
rgb_cor_2 = label_coordinate[1]
rgb_cor_3 = label_coordinate[2]
rgb_cor_4 = label_coordinate[3]
dy_rob_1 = rob_cor_3[1] - rob_cor_1[1]
dy_rob_2 = rob_cor_4[1] - rob_cor_2[1]
dx_rob_1 = rob_cor_2[0] - rob_cor_1[0]
dx_rob_2 = rob_cor_4[0] - rob_cor_3[0]
dy_rgb_1 = rgb_cor_3[1] - rgb_cor_1[1]
dy_rgb_2 = rgb_cor_4[1] - rgb_cor_2[1]
dx_rgb_1 = rgb_cor_2[0] - rgb_cor_1[0]
dx_rgb_2 = rgb_cor_4[0] - rgb_cor_3[0]
obj_x_1 = (((obj_rgb_coor[0] - rgb_cor_1[0]) / dx_rgb_1) * dx_rob_1) + rob_cor_1[0]
obj_x_2 = (((obj_rgb_coor[0] - rgb_cor_2[0]) / dx_rgb_2) * dx_rob_2) + rob_cor_2[0]
obj_x = (obj_x_1 + obj_x_2) / 2
# print('x coordinate in the robot coordinate system is: ', obj_x)
obj_y_1 = (((obj_rgb_coor[1] - rgb_cor_1[1]) / dy_rgb_1) * dy_rob_1) + rob_cor_1[1]
obj_y_2 = (((obj_rgb_coor[1] - rgb_cor_2[1]) / dy_rgb_2) * dy_rob_2) + rob_cor_2[1]
obj_y = (obj_y_1 + obj_y_2) / 2
# print('y coordinate in the robot coordinate system is: ', obj_y)
return (obj_x, obj_y)
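# A worked example (illustrative only; the pixel coordinates below are made-up values, while
# rob_cor_1..4 are the calibration constants defined at the top of this file):
def _cal_obj_pos_example():
    # pretend the four corner labels were detected at these pixel centres, ordered as
    # loc_label() returns them: left-top, right-top, left-bottom, right-bottom
    label_coordinate = [(100.0, 100.0), (400.0, 100.0), (100.0, 300.0), (400.0, 300.0)]
    # an object detected midway between the labels maps to (approximately) the centre of the
    # rectangle spanned by rob_cor_1..4 in robot coordinates
    return cal_obj_pos((250.0, 200.0), label_coordinate)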
def get_all_objects_coordinate(cubiod_coor,sphere_coor,label_coordinate):
cub_coor = []
sph_coor = []
for cub in cubiod_coor:
cub_coor.append(cal_obj_pos(cub,label_coordinate))
for sph in sphere_coor:
sph_coor.append(cal_obj_pos(sph,label_coordinate))
return cub_coor, sph_coor
def list2mat(list):
m1 = [list[0],list[1],list[2],list[3]]
m2 = [list[4],list[5],list[6],list[7]]
m3 = [list[8],list[9],list[10],list[11]]
m4 = [list[12],list[13],list[14],list[15]]
matrix = mat([m1,m2,m3,m4])
return matrix
def mat2list(matrix):
lis = [matrix[0,0],matrix[0,1],matrix[0,2],matrix[0,3],\
matrix[1,0],matrix[1,1],matrix[1,2],matrix[1,3],\
matrix[2,0],matrix[2,1],matrix[2,2],matrix[2,3],\
matrix[3,0],matrix[3,1],matrix[3,2],matrix[3,3]]
return lis
|
the-stack_0_17770 | import numpy as np
import pyanitools as pyt
from pyNeuroChem import cachegenerator as cg
import sys
import os
import hdnntools as hdn
import matplotlib.pyplot as plt
import matplotlib as mpl
def interval(v,S):
ps = 0.0
ds = 1.0 / float(S)
for s in range(S):
if v > ps and v <= ps+ds:
return s
ps = ps + ds
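# interval() maps a uniform draw v in (0, 1] onto one of S equal folds; e.g. with S=5,
# v=0.33 falls into fold 1 because 0.2 < 0.33 <= 0.4. A draw of exactly 0.0 matches no fold
# and would make the function fall through and return None.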
#wkdir = '/scratch/Research/force_train_testing/'
#saef = wkdir + "sae_6-31gd.dat"
wkdir = '/nh/nest/u/jsmith/Research/gutzwiller_research/train_all/gutz_model-5/'
saef = wkdir + "sae.dat"
#wkdir = '/scratch/Research/datasets/iso17/train_test/'
#saef = wkdir + "sae_6-31gd.dat"
#data_root = '/scratch/Research/GDB-11-AL-wB97x631gd/'
data_root = '/auto/nest/nest/u/jsmith/scratch/Research/gutzwiller_research/h5files/'
#data_root = '/scratch/Research/datasets/iso17/'
h5files = [#'/home/jujuman/Research/Cluster_AL/waterclusters1.h5',
#data_root + 'gutzwiller1-U2-rs1.5.h5',
#data_root + 'gutzwiller1-U4-rs1.5.h5',
#data_root + 'gutzwiller1-U6-rs1.5.h5',
#data_root + 'gutzwiller1-U8-rs1.5.h5',
#data_root + 'gutzwiller1-U10-rs1.5.h5',
data_root + 'gutzwiller1-U12-rs1.5.h5',
]
store_dir = wkdir + "cache-data-"
N = 5
for i in range(N):
if not os.path.exists(store_dir + str(i)):
os.mkdir(store_dir + str(i))
if os.path.exists(wkdir + 'testset.h5'):
os.remove(wkdir + 'testset.h5')
cachet = [cg('_train', saef, store_dir + str(r) + '/',False) for r in range(N)]
cachev = [cg('_valid', saef, store_dir + str(r) + '/',False) for r in range(N)]
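# cachet[i]/cachev[i] hold the training and validation caches for split i: each conformation
# is assigned to one of the N folds at random below, and split i then uses fold i for
# validation and the remaining N-1 folds for training, giving an N-fold-style partition for
# training an ensemble of models.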
testh5 = pyt.datapacker(wkdir + 'testset.h5')
Nd = np.zeros(N,dtype=np.int32)
Nbf = 0
for f,fn in enumerate(h5files):
    print('Processing file('+ str(f+1) +' of '+ str(len(h5files)) +'):', fn)
adl = pyt.anidataloader(fn)
To = adl.size()
Ndc = 0
Fmt = []
Emt = []
for c, data in enumerate(adl):
#if c == 2 or c == 2 or c == 2:
# Get test store name
#Pn = fn.split('/')[-1].split('.')[0] + data['path']
Pn = data['path']+'_'+str(f).zfill(6)+'_'+str(c).zfill(6)
#print(Pn)
# Progress indicator
sys.stdout.write("\r%d%% %s" % (int(100*c/float(To)), Pn))
sys.stdout.flush()
#print(data.keys())
# Extract the data
X = data['coordinates']
E = data['energies']
F = -data['forces']
S = data['species']
Fmt.append(np.max(np.linalg.norm(F,axis=2),axis=1))
Emt.append(E)
Mv = np.max(np.linalg.norm(F,axis=2),axis=1)
#print(Mv.shape,X.shape)
index = np.where(Mv > 10000000.5)[0]
indexk = np.where(Mv <= 10000000.5)[0]
#if index.size > 0:
#print(Mv[index])
#hdn.writexyzfile(bddir+'mols_'+str(c).zfill(3)+'_'+str(f).zfill(3)+'.xyz',X[index],S)
Nbf += index.size
#if data['path'] == '/dimer7/grp_0':
# print(data['path'])
# print(E)
# print(F)
# CLear forces
X = X[indexk]
F = F[indexk]
E = E[indexk]
#exit(0)
#print(" MAX FORCE:", F.max(), S)
'''
print('meanforce:',F.flatten().mean())
print("FORCE:",F)
print(np.max(F.reshape(E.size,F.shape[1]*F.shape[2]),axis=1))
print("MAX FORCE:", F.max(),S)
if F.max() > 0.0:
print(np.mean(F.reshape(E.size,F.shape[1]*F.shape[2]),axis=1).shape, E.size)
plt.hist(np.max(np.abs(F).reshape(E.size,F.shape[1]*F.shape[2]),axis=1),bins=100)
plt.show()
plt.scatter(np.max(np.abs(F).reshape(E.size,F.shape[1]*F.shape[2]),axis=1), E)
plt.show()
'''
#Ru = np.random.uniform(0.0, 1.0, E.shape[0])
#nidx = np.where(Ru < fn[0])
#X = X[nidx]
#F = F[nidx]
#E = E[nidx]
Ndc += E.size
#for i in range(E.size):
# X[i] = X[0]
# F[i] = F[0]
# E[i] = E[0]
if (set(S).issubset(['C', 'N', 'O', 'H', 'F', 'S', 'Cl'])):
Si = int(E.shape[0]*0.9)
X_te = X[Si:]
E_te = E[Si:]
F_te = F[Si:]
testh5.store_data(Pn, coordinates=X_te, forces=F_te, energies=E_te, species=list(S))
X = X[0:Si]
E = E[0:Si]
F = F[0:Si]
# Random mask
R = np.random.uniform(0.0, 1.0, E.shape[0])
idx = np.array([interval(r,N) for r in R])
# Build random split lists
split = []
for j in range(N):
split.append([i for i, s in enumerate(idx) if s == j])
nd = len([i for i, s in enumerate(idx) if s == j])
Nd[j] = Nd[j] + nd
# Store data
for i,t,v in zip(range(N), cachet, cachev):
## Store training data
X_t = np.array(np.concatenate([X[s] for j, s in enumerate(split) if j != i]), order='C', dtype=np.float32)
F_t = np.array(np.concatenate([F[s] for j, s in enumerate(split) if j != i]), order='C', dtype=np.float32)
E_t = np.array(np.concatenate([E[s] for j, s in enumerate(split) if j != i]), order='C', dtype=np.float64)
if E_t.shape[0] != 0:
t.insertdata(X_t, F_t, E_t, list(S))
## Store Validation
if len(split[i]) > 0:
X_v = np.array(X[split[i]], order='C', dtype=np.float32)
F_v = np.array(F[split[i]], order='C', dtype=np.float32)
E_v = np.array(E[split[i]], order='C', dtype=np.float64)
if E_v.shape[0] != 0:
v.insertdata(X_v, F_v, E_v, list(S))
sys.stdout.write("\r%d%%" % int(100))
print(" Data Kept: ", Ndc, 'High Force: ', Nbf)
sys.stdout.flush()
print("")
# Print some stats
print('Data count:',Nd)
print('Data split:',100.0*Nd/np.sum(Nd),'%')
# Save train and valid meta file and cleanup testh5
for t,v in zip(cachet, cachev):
t.makemetadata()
v.makemetadata()
testh5.cleanup()
|
the-stack_0_17771 | import pytest
from ckan.plugins import toolkit
from ckanext.ytp_recommendation.logic.action import create, get
from ckanext.ytp_recommendation.model import Recommendation
from ckanext.ytp_recommendation.tests import factories as ytp_factories
@pytest.mark.usefixtures('clean_db', 'clean_recommendation_table')
class TestGetActions(object):
def test_get_user_can_make_recommendation_w_userobj(self, app):
package = ytp_factories.get_or_create_package_object()
user = ytp_factories.get_or_create_user_object()
with app.flask_app.test_request_context('/'):
with app.flask_app.app_context():
toolkit.request.environ['REMOTE_ADDR'] = ytp_factories.get_ip_address()
toolkit.c.userobj = user
result = get.get_user_can_make_recommendation({}, {'package_id': package.id})
assert result
ytp_factories.create_and_get_recommendation(
user_id=user.id,
package_id=package.id,
ip=ytp_factories.get_ip_address())
result = get.get_user_can_make_recommendation({}, {'package_id': package.id})
assert not result
def test_get_user_count_for_package(self):
package = ytp_factories.get_or_create_package_object()
user = ytp_factories.get_or_create_user_object()
ip = ytp_factories.get_ip_address()
data_dict = {'package_id': package.id}
assert get.get_recommendation_count_for_package({}, data_dict) == 0
ytp_factories.create_and_get_recommendation(package_id=package.id, ip=ip, user_id=user.id)
assert get.get_recommendation_count_for_package({}, data_dict) == 1
@pytest.mark.usefixtures('clean_db', 'clean_recommendation_table')
class TestCreateActions(object):
def test_create_recommendation_w_userbj(self, app):
package = ytp_factories.get_or_create_package_object()
user = ytp_factories.get_or_create_user_object()
data_dict = {'package_id': package.id}
with app.flask_app.test_request_context('/'):
with app.flask_app.app_context():
toolkit.request.environ['REMOTE_ADDR'] = ytp_factories.get_ip_address()
toolkit.c.userobj = user
recommendation_count = len(Recommendation.get_package_recommendations(package.id))
assert recommendation_count == 0
recommendation_count = create.create_recommendation({}, data_dict)
assert recommendation_count == 1
|
the-stack_0_17773 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram ShippingOption."""
from telegram import TelegramObject
class ShippingOption(TelegramObject):
"""This object represents one shipping option.
Attributes:
id (:obj:`str`): Shipping option identifier.
title (:obj:`str`): Option title.
prices (List[:class:`telegram.LabeledPrice`]): List of price portions.
Args:
id (:obj:`str`): Shipping option identifier.
title (:obj:`str`): Option title.
prices (List[:class:`telegram.LabeledPrice`]): List of price portions.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self, id, title, prices, **kwargs):
self.id = id
self.title = title
self.prices = prices
self._id_attrs = (self.id,)
def to_dict(self):
data = super(ShippingOption, self).to_dict()
data['prices'] = [p.to_dict() for p in self.prices]
return data
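    # Usage sketch (illustrative, not from the library docs): a shipping option with two price
    # portions, assuming LabeledPrice amounts are given in the currency's smallest units:
    #   option = ShippingOption(id='post', title='Regular mail',
    #                           prices=[LabeledPrice('Shipping', 500),
    #                                   LabeledPrice('Handling', 100)])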
|
the-stack_0_17776 | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import models
from bitcoinrpc.authproxy import JSONRPCException
import misc
import re
from misc import printdbg
import time
# mixin for GovObj composed classes like proposal and superblock, etc.
class GovernanceClass(object):
only_masternode_can_submit = False
# lazy
@property
def go(self):
return self.governance_object
# pass thru to GovernanceObject#vote
def vote(self, kauricoind, signal, outcome):
return self.go.vote(kauricoind, signal, outcome)
# pass thru to GovernanceObject#voted_on
def voted_on(self, **kwargs):
return self.go.voted_on(**kwargs)
def vote_validity(self, kauricoind):
if self.is_valid():
printdbg("Voting valid! %s: %d" % (self.__class__.__name__, self.id))
self.vote(kauricoind, models.VoteSignals.valid, models.VoteOutcomes.yes)
else:
printdbg("Voting INVALID! %s: %d" % (self.__class__.__name__, self.id))
self.vote(kauricoind, models.VoteSignals.valid, models.VoteOutcomes.no)
def list(self):
dikt = {
"DataHex": self.serialise(),
"Hash": self.object_hash,
"CollateralHash": self.go.object_fee_tx,
"AbsoluteYesCount": self.go.absolute_yes_count,
"YesCount": self.go.yes_count,
"NoCount": self.go.no_count,
"AbstainCount": self.go.abstain_count,
}
# return a dict similar to kauricoind "gobject list" output
return {self.object_hash: dikt}
def get_submit_command(self):
import kauricoinlib
obj_data = kauricoinlib.SHIM_serialise_for_kauricoind(self.serialise())
# new objects won't have parent_hash, revision, etc...
cmd = ['gobject', 'submit', '0', '1', str(int(time.time())), obj_data]
# some objects don't have a collateral tx to submit
if not self.only_masternode_can_submit:
            cmd.append(self.go.object_fee_tx)
return cmd
def submit(self, kauricoind):
# don't attempt to submit a superblock unless a masternode
# note: will probably re-factor this, this has code smell
if (self.only_masternode_can_submit and not kauricoind.is_masternode()):
print("Not a masternode. Only masternodes may submit these objects")
return
try:
object_hash = kauricoind.rpc_command(*self.get_submit_command())
printdbg("Submitted: [%s]" % object_hash)
except JSONRPCException as e:
print("Unable to submit: %s" % e.message)
def serialise(self):
import inflection
import binascii
import simplejson
# 'proposal', 'superblock', etc.
name = self._meta.name
obj_type = inflection.singularize(name)
return binascii.hexlify(simplejson.dumps((obj_type, self.get_dict()), sort_keys=True).encode('utf-8')).decode('utf-8')
def kauricoind_serialise(self):
import kauricoinlib
return kauricoinlib.SHIM_serialise_for_kauricoind(self.serialise())
@classmethod
def serialisable_fields(self):
# Python is so not very elegant...
pk_column = self._meta.primary_key.db_column
fk_columns = [fk.db_column for fk in self._meta.rel.values()]
do_not_use = [pk_column]
do_not_use.extend(fk_columns)
do_not_use.append('object_hash')
fields_to_serialise = list(self._meta.columns.keys())
for field in do_not_use:
if field in fields_to_serialise:
fields_to_serialise.remove(field)
return fields_to_serialise
def get_dict(self):
dikt = {}
for field_name in self.serialisable_fields():
dikt[field_name] = getattr(self, field_name)
return dikt
|
the-stack_0_17777 | """
Expression class
Basic handling for microarray and rna-seq and realtime PCR like data
"""
import sys, os, csv, string, math, collections
from operator import itemgetter
import numpy
from numpy import array, arange, meshgrid, zeros, linspace, mean, object_, std # This use of array here is not good.
from . import config
from .flags import *
from .draw import draw
from .genelist import genelist
from .progress import progressbar
from .errors import AssertionError, ArgumentError, ExpressionNonUniqueConditionNameError
from .utils import qdeepcopy
class base_expression(genelist):
def __init__(self, filename=None, loadable_list=None, format=None, expn=None, silent:bool=False, **kargs):
"""
See the documentation in the expression class.
This is the underlying base expression object and is not designed for direct usage.
"""
'''
if not loadable_list:
# these are only required if not loading a list
assert expn, "'expn' argument cannot be empty"
assert filename, "no filename to load"
assert format, "required argument 'format' is missing"
assert os.path.exists(os.path.realpath(filename)), "'%s' not found" % filename
else:
# probably should put some more sanity checking in here.
assert loadable_list[0], "the list to load does not appear to be a proper list"
'''
if "cv_err" in kargs or "err_up" in kargs or "err_dn" in kargs:
raise NotImplementedError("Whoops! I haven't finished expression class - cv_err, err_up and err_dn are not implemented")
valig_args = ["cond_names", "name", "force_tsv", "nan_value"]
for k in kargs:
if k not in valig_args:
raise ArgumentError(self.__init__, k)
genelist.__init__(self)
self.filename = filename
self._conditions = [] # Provide a dummy conditions temporarily
self.name = "None"
if "name" in kargs and kargs["name"]:
self.name = kargs["name"]
elif filename:
self.name = "".join(self.filename.split(".")[:-1])
if not loadable_list and not expn:
config.log.info("expression: made an empty expression object")
return()
if loadable_list:
self.load_list(loadable_list, expn, **kargs)
else:
# This is a placeholder at the moment,
# I reload the expn and err values back into the format
# When you redo this, remember to also redo load_list()
newf = format
newf["conditions"] = {"code": expn}
if "err" in kargs and kargs["err"]:
newf["err"] = {"code": kargs["err"]}
elif "cv_err" in kargs and kargs["cv_err"]:
newf["cv_err"] = kargs["cv_err"]
if "force_tsv" in kargs and kargs["force_tsv"]:
newf["force_tsv"] = True
format = newf
self.loadCSV(filename=filename, format=format) # no need for error checking here - it's in genelist now.
if "cond_names" in kargs and kargs["cond_names"]:
self._conditions = kargs["cond_names"]
else:
# re-open the file and try to guess the conditions
# reopen the file to get the condition headers.
oh = open(filename, "rU")
if "force_tsv" in format and format["force_tsv"]:
reader = csv.reader(oh, dialect=csv.excel_tab)
elif "dialect" in format:
reader = csv.reader(oh, dialect=format["dialect"])
else:
reader = csv.reader(oh)
do = False
self._conditions = []
for index, column in enumerate(reader):
if "skiptill" in kargs:
if kargs["skiptill"] in column:
do = True
elif "skiplines" in kargs:
if index == kargs["skiplines"]:
do = True
else:
do = True # do anyway
if do:
names = eval("{0}".format(format["conditions"]["code"])) # yay, more nice happy arbitrary code execution.
if names:
self._conditions = [str(k) for k in names]
break
oh.close()
if not silent:
config.log.info("expression: I found the following conditions:")
config.log.info("\n".join(["%s\t%s" % (n, i) for n, i in enumerate(self._conditions)]))
# coerce the conditions errs etc to floats
nans = set(('nan', 'Nan', 'NaN'))
for idx, i in enumerate(self):
try:
# Nan policy:
if True in [t in nans for t in i["conditions"]]:
config.log.warning("line {0}, contains Nan, filling with 0".format(idx))
newc = []
for c in i['conditions']:
if c in nans:
newc.append(0.0) # nan policy
else:
newc.append(c)
i['conditions'] = newc
i["conditions"] = [float(str(t).replace(",", "")) for t in i["conditions"]] # because somebody once sent me a file with ',' for thousands!
except ValueError:
config.log.warning("line %s, contains missing data (%s), filling with 0" % (idx, i["conditions"]))
i["conditions"] = [0 for t in self._conditions] # Use conditions as the example I had here was also missing all of the other values.
# These will bomb on missing data...
if "err" in i:
i["err"] = [float(t) for t in i["err"]]
if "cv_err" in i:
i["cv_err"] = [float(t) for t in i["cv_err"]]
self.__check_condition_names_are_unique()
self._optimiseData()
if not silent:
config.log.info("expression: loaded %s items, %s conditions" % (len(self), len(self.getConditionNames())))
def __check_condition_names_are_unique(self):
"""
Bit of gotcha this one, but expression objects must have unique condition names
or lots of things break. Here, check the condition names are unique.
"""
if len(self._conditions) > len(set(self._conditions)):
raise ExpressionNonUniqueConditionNameError(self._conditions)
return(False)
def __repr__(self):
return("glbase.expression")
def _load_numpy_back_into_linearData(self):
"""
For routines that make a change in self.numpy_array_all_data
this must be called after to propogate the changes back into linearData
"""
for i, row in enumerate(self.numpy_array_all_data):
self.linearData[i]["conditions"] = list(row)
self._optimiseData()
def _optimiseData(self):
"""
(Override)
(Internal)
Add expression optimisations
"""
genelist._optimiseData(self) # do the parent optimise.
# generate a serialised version of the array conditions.
self.numpy_array_all_data = numpy.array([i["conditions"] for i in self.linearData])
# could be done with dict comp:
data = {}
for index, name in enumerate(self._conditions):
if not name in data:
data[name] = self.numpy_array_all_data[:,index]
self.serialisedArrayDataDict = data
# list;
self.serialisedArrayDataList = [self.serialisedArrayDataDict[key] for key in self._conditions]
#self.serialisedArrayDataList = all_array_data # This consumes massive amounts of memory.
# presumably something downstream is doing something nasty.
return(True)
def saveCSV(self, filename=None, interleave_errors=True, no_header=False, no_col1_header=False, **kargs):
"""
A CSV version of saveTSV(), see saveTSV() for syntax
"""
self.saveTSV(filename=filename, tsv=False, interleave_errors=True, no_header=False, no_col1_header=False, **kargs)
config.log.info("saveCSV(): Saved '%s'" % filename)
def saveTSV(self, filename=None, tsv=True, interleave_errors=True, no_header=False, no_col1_header=False, **kargs):
"""
(Override)
**Purpose**
Save the microarray data as a tsv file
This is a little different from the normal genelist.saveTSV()
as I want to make certain that the condition data is written in a sensible manner at
the end of the TSV.
I also need to deal with grid like structures etc.
As a general warning, use expression.save() in preference to this.
This save is not guaranteed to survive reloading into glbase, and is particularly
troublesome in the case of expression objects. Indeed, the default guesser when loading
a genelist object will incorrectly load an expression object with error values
and will probably bodge any other arrangement too.
**Arguments**
filename
The filename (with a valid path) to save the file to.
interleave_errors (Optional, default=True)
By default the errors are interleaved so that the sample data will be arranged:
Sample1 Err1 Sample2 Err2
if interleave_errors=False then:
Sample1 Sample2 Err1 Err2
no_col1_header (Optional, default=False)
In case you want a table like this:
A B C D
W 1 2 3 4
X 2 2 2 2
Y 2 2 2 2
Z 2 2 2 2
i.e. the top left column label is empty.
**Returns**
returns None
"""
self._save_TSV_CSV(filename=filename, tsv=tsv, interleave_errors=True, no_header=False, no_col1_header=False, **kargs)
config.log.info("saveTSV(): Saved '%s'" % filename)
def _save_TSV_CSV(self, filename=None, tsv=True, interleave_errors=True, no_header=False, no_col1_header=False, **kargs):
"""
Internal unified saveCSV/TSV for expression objects
"""
valig_args = ["filename", "tsv", "key_order", "no_header"]
for k in kargs:
if k not in valig_args:
raise ArgumentError(self.saveCSV, k)
assert filename, "you must specify a filename"
oh = open(os.path.realpath(filename), "w")
if tsv:
writer = csv.writer(oh, dialect=csv.excel_tab)
else:
writer = csv.writer(oh)
array_data_keys = ("conditions", "err", "cv_err")
write_keys = []
if "key_order" in kargs:
write_keys = kargs["key_order"]
# now add in any missing keys to the right side of the list:
for item in list(self.keys()):
if item not in write_keys and item not in array_data_keys: # But omit the array_data_keys
write_keys.append(item)
else:
# just select them all:
write_keys = [k for k in list(self.keys()) if not k in array_data_keys]
if "err" in list(self.keys()):
if interleave_errors:
conds = ["mean_%s" % c for c in self.getConditionNames()]
errs = ["err_%s" % c for c in self.getConditionNames()]
paired = [val for pair in zip(conds, errs) for val in pair]
if not no_header:
title_row = [k for k in write_keys if k in list(self.keys())]
writer.writerow(title_row + paired)
for data in self.linearData:
line = [data[k] for k in write_keys if k in data]
interleaved_data = [val for pair in zip(data["conditions"], data["err"]) for val in pair] # I never understand how these work, but what the hell.
writer.writerow(line + interleaved_data)# conditions go last.
oh.close()
else:
if not no_header:
                    title_row = [k for k in write_keys if k in list(self.keys())]
writer.writerow(write_keys + self.getConditionNames() + ["err_%s" % i for i in self.getConditionNames()])
for data in self.linearData:
line = [data[k] for k in write_keys if k in data]
writer.writerow(line + data["conditions"] + data["err"])# conditions go last.
oh.close()
else: # no error, very easy:
if not no_header:
title_row = [k for k in write_keys if k in list(self.keys())]
if no_col1_header:
title_row[0] = ""
writer.writerow(title_row + self.getConditionNames())
for data in self.linearData:
line = [data[k] for k in write_keys if k in data]
writer.writerow(line + data["conditions"])# conditions go last.
oh.close()
return(None)
def sort(self, key, reverse=False):
"""
This is slightly different from the vanilla genelist's sort - you can pass it the name of
a condition. Take care to make sure the condition name is not also a valid list key.
The algorithm searches the genelist before searching the array for your particular condition.
Also take care with this one: It is one of the few in-place list
modifiers.
**Arguments**
key
must be a valid key in the genelist or the name of an array condition.
reverse (Optional, default=False)
By default the list is sorted smallest to largest.
reverse = True sorts largest to smallest.
**Result**
returns True if succesful.
returns False if not valid.
"""
assert (key in self.linearData[0]) or key in self._conditions, "'%s' search key not found in list or array data" % key
if key in self.linearData[0]:
return(genelist.sort(self, key, reverse=reverse)) # use the parents sort.
else:
if key in self._conditions:
name_index = self._conditions.index(key)
self.linearData = sorted(self.linearData, key=lambda x: x["conditions"][name_index]) # the original sort() was overridden.
if reverse:
self.linearData.reverse()
self._optimiseData()
return(True)
return(False)
def load_list(self, list_to_load, expn=None, name=False, cond_names=None, nan_value=0):
"""
**Purpose**
You've generated your own [{ ... }, { ...}] like list
(A list of dicts) and you want to either reload it into
a genelist-like object or load it into an empty genelist.
This is the method to do that officially.
This method should be used with care. Some sanity
checking is done. But not very much.
This load_list is modified for expression-like genelists.
(eg. expression()). Here you can load keys into conditions based on
their key names.
**Arguments**
list_to_load
must be a list of dicts.
expn (optional)
A list of key names to construct the expression data from
If not specified then it assumes your list already has a correctly formatted
"conditions" key.
**Returns**
None. This is one of the few IN PLACE methods. and returns
None.
"""
assert list_to_load[0], "list_to_load does not appear to be a valid list"
__nan_warnings = False
nans = frozenset(["Inf", "-Inf", "NA", "Nan", "NaN"])
if expn:
assert isinstance(expn, list), "'expn' must be a list of keys"
# Bodge in a new "conditions" key:
newl = []
for i in list_to_load:
new = i.copy()
nl = [i[k] for k in expn]
# test for Inf, -Inf, NA, NaN, etc.
if True in [ti in nans for ti in nl]: # woah! Nan here.
t = []
for item in nl:
if item in nans:
t.append(nan_value)
else:
t.append(item)
nl = t
if not __nan_warnings:
__nan_warnings = True
config.log.warning("Expression list contains 'not a number' values, setting them to <nan_value=%s>" % nan_value)
new["conditions"] = nl
for k in expn:
del new[k]
newl.append(new)
self._conditions = expn
else:
newl = list_to_load
if cond_names: # user sent the conditions names. Hope they are in the same order
assert len(cond_names) == len(newl[0]["conditions"]), "cond_names is not the same length as the number of conditions"
self._conditions = cond_names
else:
# conditions can get lost in a loadable list. fill in a dummy one
if len(self._conditions) != len(newl[0]["conditions"]):
self._conditions = ["cond_%s" % i for i in range(len(newl[0]["conditions"]))]
# Now call parent with new list
genelist.load_list(self, newl, name)
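    # Illustrative example (added): loading a plain list of dicts where the
    # expression values live under the keys "rpkm_a" and "rpkm_b":
    #   e = expression()  # an empty expression object (see __init__ above)
    #   e.load_list([{"name": "Sox2", "rpkm_a": 10.0, "rpkm_b": 5.0},
    #                {"name": "Nanog", "rpkm_a": 2.0, "rpkm_b": 8.0}],
    #               expn=["rpkm_a", "rpkm_b"])
    # After loading, each item carries a "conditions" list and the condition
    # names are taken from expn (or from cond_names when it is supplied).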
def from_pandas(self, pandas_data_frame, condition_names=None):
"""
**Purpose**
Convert a pandas dataFrame to a genelist
            NOTE: This is an IN-PLACE method that will REPLACE any existing data
            in the expression object.
**Arguments**
pandas_data_frame (Required)
The pandas data frame to convert
condition_names (Required)
A list of Column names from the Pandas frame to use as expression data
**Result**
None
The object is populated by the Pandas object
"""
assert condition_names, 'You must specify condition_names'
        assert isinstance(condition_names, list), 'condition_names must be a list of column names'
if len(self) >0:
config.log.warning('expression.from_pandas() will overwrite the existing data in the expression')
newl = []
key_names = pandas_data_frame.columns
for index, row in pandas_data_frame.iterrows():
newitem = {}
# load normal keys:
for k, item in zip(key_names, row):
if k not in condition_names:
newitem[k] = item
# load conditions, in-order:
dict_items = dict(zip(key_names, row))
newitem['conditions'] = [dict_items[z] for z in condition_names]
newl.append(newitem)
self._conditions = condition_names
self.linearData = newl
self._optimiseData()
config.log.info("expression.from_pandas() imported dataFrame")
def getConditionNames(self):
"""
returns a list of the condition headers
"""
return(list(self._conditions))
def setConditionNames(self, new_cond_names):
"""
rename the conditions names for the expression data
THIS IS AN IN-PLACE method and returns None
"""
assert len(new_cond_names) == len(self._conditions), "setConditionNames(): new and old condition names are different lengths (%s vs. %s)" % (len(new_cond_names), len(self._conditions))
        self._conditions = list(new_cond_names)
        self.__check_condition_names_are_unique()
self._optimiseData()
return(self._conditions)
|
the-stack_0_17778 | """ philoseismos: engineering seismologist's toolbox.
author: Ivan Dubrovin
e-mail: [email protected] """
import setuptools
with open('README.md') as f:
long_description = f.read()
setuptools.setup(
name='philoseismos',
version='0.0.32_alpha',
author="Ivan Dubrovin",
author_email="[email protected]",
description="Engineering seismologist's toolbox",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/iod-ine/philoseismos",
packages=setuptools.find_packages(),
install_requires=[
'numpy',
'pandas',
'scipy',
'matplotlib',
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Natural Language :: English",
],
python_requires='>=3.6',
)
|
the-stack_0_17780 | import typing
from collections import OrderedDict
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from rest_framework.viewsets import ModelViewSet
from pydis_site.apps.api.models.bot.infraction import Infraction
from pydis_site.apps.api.models.bot.metricity import Metricity, NotFoundError
from pydis_site.apps.api.models.bot.user import User
from pydis_site.apps.api.serializers import UserSerializer
class UserListPagination(PageNumberPagination):
"""Custom pagination class for the User Model."""
page_size = 2500
page_size_query_param = "page_size"
def get_next_page_number(self) -> typing.Optional[int]:
"""Get the next page number."""
if not self.page.has_next():
return None
page_number = self.page.next_page_number()
return page_number
def get_previous_page_number(self) -> typing.Optional[int]:
"""Get the previous page number."""
if not self.page.has_previous():
return None
page_number = self.page.previous_page_number()
return page_number
def get_paginated_response(self, data: list) -> Response:
"""Override method to send modified response."""
return Response(OrderedDict([
('count', self.page.paginator.count),
('next_page_no', self.get_next_page_number()),
('previous_page_no', self.get_previous_page_number()),
('results', data)
]))
class UserViewSet(ModelViewSet):
"""
View providing CRUD operations on Discord users through the bot.
## Routes
### GET /bot/users
Returns all users currently known with pagination.
#### Response format
>>> {
... 'count': 95000,
... 'next_page_no': "2",
... 'previous_page_no': None,
... 'results': [
... {
... 'id': 409107086526644234,
... 'name': "Python",
... 'discriminator': 4329,
... 'roles': [
... 352427296948486144,
... 270988689419665409,
... 277546923144249364,
... 458226699344019457
... ],
... 'in_guild': True
... },
... ]
... }
#### Optional Query Parameters
    - page_size: number of Users in one page, defaults to 2,500
- page: page number
#### Status codes
- 200: returned on success
### GET /bot/users/<snowflake:int>
Gets a single user by ID.
#### Response format
>>> {
... 'id': 409107086526644234,
... 'name': "Python",
... 'discriminator': 4329,
... 'roles': [
... 352427296948486144,
... 270988689419665409,
... 277546923144249364,
... 458226699344019457
... ],
... 'in_guild': True
... }
#### Status codes
- 200: returned on success
- 404: if a user with the given `snowflake` could not be found
### GET /bot/users/<snowflake:int>/metricity_data
Gets metricity data for a single user by ID.
#### Response format
>>> {
... "joined_at": "2020-10-06T21:54:23.540766",
... "total_messages": 2,
... "voice_banned": False,
... "activity_blocks": 1
...}
#### Status codes
- 200: returned on success
- 404: if a user with the given `snowflake` could not be found
### GET /bot/users/<snowflake:int>/metricity_review_data
Gets metricity data for a single user's review by ID.
#### Response format
>>> {
... 'joined_at': '2020-08-26T08:09:43.507000',
... 'top_channel_activity': [['off-topic', 15],
... ['talent-pool', 4],
... ['defcon', 2]],
... 'total_messages': 22
... }
#### Status codes
- 200: returned on success
- 404: if a user with the given `snowflake` could not be found
### POST /bot/users
Adds a single or multiple new users.
The roles attached to the user(s) must be roles known by the site.
Users that already exist in the database will be skipped.
#### Request body
>>> {
... 'id': int,
... 'name': str,
... 'discriminator': int,
... 'roles': List[int],
... 'in_guild': bool
... }
Alternatively, request users can be POSTed as a list of above objects,
in which case multiple users will be created at once. In this case,
the response is an empty list.
#### Status codes
- 201: returned on success
- 400: if one of the given roles does not exist, or one of the given fields is invalid
- 400: if multiple user objects with the same id are given
### PUT /bot/users/<snowflake:int>
Update the user with the given `snowflake`.
All fields in the request body are required.
#### Request body
>>> {
... 'id': int,
... 'name': str,
... 'discriminator': int,
... 'roles': List[int],
... 'in_guild': bool
... }
#### Status codes
- 200: returned on success
- 400: if the request body was invalid, see response body for details
- 404: if the user with the given `snowflake` could not be found
### PATCH /bot/users/<snowflake:int>
Update the user with the given `snowflake`.
All fields in the request body are optional.
#### Request body
>>> {
... 'id': int,
... 'name': str,
... 'discriminator': int,
... 'roles': List[int],
... 'in_guild': bool
... }
#### Status codes
- 200: returned on success
- 400: if the request body was invalid, see response body for details
- 404: if the user with the given `snowflake` could not be found
### BULK PATCH /bot/users/bulk_patch
Update users with the given `ids` and `details`.
`id` field and at least one other field is mandatory.
#### Request body
>>> [
... {
... 'id': int,
... 'name': str,
... 'discriminator': int,
... 'roles': List[int],
... 'in_guild': bool
... },
... {
... 'id': int,
... 'name': str,
... 'discriminator': int,
... 'roles': List[int],
... 'in_guild': bool
... },
... ]
#### Status codes
- 200: returned on success
- 400: if the request body was invalid, see response body for details
- 400: if multiple user objects with the same id are given
- 404: if the user with the given id does not exist
### DELETE /bot/users/<snowflake:int>
Deletes the user with the given `snowflake`.
#### Status codes
- 204: returned on success
- 404: if a user with the given `snowflake` does not exist
"""
serializer_class = UserSerializer
queryset = User.objects.all().order_by("id")
pagination_class = UserListPagination
def get_serializer(self, *args, **kwargs) -> ModelSerializer:
"""Set Serializer many attribute to True if request body contains a list."""
if isinstance(kwargs.get('data', {}), list):
kwargs['many'] = True
return super().get_serializer(*args, **kwargs)
@action(detail=False, methods=["PATCH"], name='user-bulk-patch')
def bulk_patch(self, request: Request) -> Response:
"""Update multiple User objects in a single request."""
serializer = self.get_serializer(
instance=self.get_queryset(),
data=request.data,
many=True,
partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=True)
def metricity_data(self, request: Request, pk: str = None) -> Response:
"""Request handler for metricity_data endpoint."""
user = self.get_object()
try:
Infraction.objects.get(user__id=user.id, active=True, type="voice_ban")
except ObjectDoesNotExist:
voice_banned = False
else:
voice_banned = True
with Metricity() as metricity:
try:
data = metricity.user(user.id)
data["total_messages"] = metricity.total_messages(user.id)
data["activity_blocks"] = metricity.total_message_blocks(user.id)
data["voice_banned"] = voice_banned
return Response(data, status=status.HTTP_200_OK)
except NotFoundError:
return Response(dict(detail="User not found in metricity"),
status=status.HTTP_404_NOT_FOUND)
@action(detail=True)
def metricity_review_data(self, request: Request, pk: str = None) -> Response:
"""Request handler for metricity_review_data endpoint."""
user = self.get_object()
with Metricity() as metricity:
try:
data = metricity.user(user.id)
data["total_messages"] = metricity.total_messages(user.id)
data["top_channel_activity"] = metricity.top_channel_activity(user.id)
return Response(data, status=status.HTTP_200_OK)
except NotFoundError:
return Response(dict(detail="User not found in metricity"),
status=status.HTTP_404_NOT_FOUND)
|
the-stack_0_17782 | #standart sapma islemini numpy ile atma
import numpy as np
a = [3, 4, 7, 3, 2, 4, 1]
a = np.array(a)
# the numpy operations on a are vectorized, so they are applied to every element at once
std = np.sqrt(np.sum((a - np.mean(a)) ** 2) / a.shape[0])
print(std)
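# Added sanity check: numpy's built-in population standard deviation (ddof=0)
# should match the manual formula above.
print(np.std(a))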
# creating an uninitialized ndarray (np.ndarray allocates memory without setting values; use np.random for random values)
x = np.ndarray((3, 2), dtype='float32')
print(x)
|
the-stack_0_17783 | # STDLIB
import logging
import pathlib
import subprocess
import sys
logger = logging.getLogger()
package_dir = "pct_python_default_test"
cli_filename = "pct_python_default_test_cli.py"
path_cli_command = pathlib.Path(__file__).resolve().parent.parent / package_dir / cli_filename
def call_cli_command(commandline_args: str = "") -> bool:
command = " ".join([sys.executable, str(path_cli_command), commandline_args])
try:
subprocess.run(command, shell=True, check=True)
except subprocess.CalledProcessError:
return False
return True
def test_cli_commands() -> None:
# due to a bug in python 3.8.1 with setup.py test on travis we need to cancel the click tests there !
if sys.version_info < (3, 8, 1) or sys.version_info >= (3, 8, 2):
assert not call_cli_command("--unknown_option")
assert call_cli_command("--version")
assert call_cli_command("-h")
assert call_cli_command("info")
assert call_cli_command("--traceback info")
|
the-stack_0_17784 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import datetime
import six
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow import configuration
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
from qds_sdk.qubole import Qubole
from qds_sdk.commands import Command, HiveCommand, PrestoCommand, HadoopCommand, \
PigCommand, ShellCommand, SparkCommand, DbTapQueryCommand, DbExportCommand, \
DbImportCommand
COMMAND_CLASSES = {
"hivecmd": HiveCommand,
"prestocmd": PrestoCommand,
"hadoopcmd": HadoopCommand,
"shellcmd": ShellCommand,
"pigcmd": PigCommand,
"sparkcmd": SparkCommand,
"dbtapquerycmd": DbTapQueryCommand,
"dbexportcmd": DbExportCommand,
"dbimportcmd": DbImportCommand
}
HYPHEN_ARGS = ['cluster_label', 'app_id', 'note_id']
POSITIONAL_ARGS = ['sub_command', 'parameters']
COMMAND_ARGS = {
"hivecmd": ['query', 'script_location', 'macros', 'tags', 'sample_size',
'cluster_label', 'name'],
'prestocmd': ['query', 'script_location', 'macros', 'tags', 'cluster_label', 'name'],
'hadoopcmd': ['sub_command', 'tags', 'cluster_label', 'name'],
'shellcmd': ['script', 'script_location', 'files', 'archives', 'parameters', 'tags',
'cluster_label', 'name'],
'pigcmd': ['script', 'script_location', 'parameters', 'tags', 'cluster_label',
'name'],
'dbtapquerycmd': ['db_tap_id', 'query', 'macros', 'tags', 'name'],
'sparkcmd': ['program', 'cmdline', 'sql', 'script_location', 'macros', 'tags',
'cluster_label', 'language', 'app_id', 'name', 'arguments', 'note_id',
'user_program_arguments'],
'dbexportcmd': ['mode', 'hive_table', 'partition_spec', 'dbtap_id', 'db_table',
'db_update_mode', 'db_update_keys', 'export_dir',
'fields_terminated_by', 'tags', 'name'],
'dbimportcmd': ['mode', 'hive_table', 'dbtap_id', 'db_table', 'where_clause',
'parallelism', 'extract_query', 'boundary_query', 'split_column',
'tags', 'name']
}
class QuboleHook(BaseHook, LoggingMixin):
def __init__(self, *args, **kwargs):
conn = self.get_connection(kwargs['qubole_conn_id'])
Qubole.configure(api_token=conn.password, api_url=conn.host)
self.task_id = kwargs['task_id']
self.dag_id = kwargs['dag'].dag_id
self.kwargs = kwargs
self.cls = COMMAND_CLASSES[self.kwargs['command_type']]
self.cmd = None
@staticmethod
def handle_failure_retry(context):
ti = context['ti']
cmd_id = ti.xcom_pull(key='qbol_cmd_id', task_ids=ti.task_id)
if cmd_id is not None:
cmd = Command.find(cmd_id)
if cmd is not None:
log = LoggingMixin().logger
if cmd.status == 'done':
log.info('Command ID: %s has been succeeded, hence marking this '
'TI as Success.', cmd_id)
ti.state = State.SUCCESS
elif cmd.status == 'running':
log.info('Cancelling the Qubole Command Id: %s', cmd_id)
cmd.cancel()
def execute(self, context):
args = self.cls.parse(self.create_cmd_args(context))
self.cmd = self.cls.create(**args)
context['task_instance'].xcom_push(key='qbol_cmd_id', value=self.cmd.id)
self.logger.info(
"Qubole command created with Id: %s and Status: %s",
self.cmd.id, self.cmd.status
)
while not Command.is_done(self.cmd.status):
time.sleep(Qubole.poll_interval)
self.cmd = self.cls.find(self.cmd.id)
self.logger.info("Command Id: %s and Status: %s", self.cmd.id, self.cmd.status)
if 'fetch_logs' in self.kwargs and self.kwargs['fetch_logs'] is True:
self.logger.info("Logs for Command Id: %s \n%s", self.cmd.id, self.cmd.get_log())
if self.cmd.status != 'done':
raise AirflowException('Command Id: {0} failed with Status: {1}'.format(
self.cmd.id, self.cmd.status))
def kill(self, ti):
"""
        Kill (cancel) a Qubole command
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: response from Qubole
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
self.cmd = self.cls.find(cmd_id)
if self.cls and self.cmd:
self.logger.info('Sending KILL signal to Qubole Command Id: %s', self.cmd.id)
self.cmd.cancel()
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
"""
Get results (or just s3 locations) of a command from Qubole and save into a file
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
"""
if fp is None:
iso = datetime.datetime.utcnow().isoformat()
logpath = os.path.expanduser(configuration.get('core', 'BASE_LOG_FOLDER'))
resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
configuration.mkdir_p(resultpath)
fp = open(resultpath + '/' + iso, 'wb')
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
self.cmd = self.cls.find(cmd_id)
self.cmd.get_results(fp, inline, delim, fetch)
fp.flush()
fp.close()
return fp.name
def get_log(self, ti):
"""
Get Logs of a command from Qubole
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: command log as text
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_log_id(self.cls, cmd_id)
def get_jobs_id(self, ti):
"""
        Get jobs associated with a Qubole command
        :param ti: Task Instance of the dag, used to determine the Qubole command id
        :return: Job information associated with the command
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_jobs_id(self.cls, cmd_id)
def create_cmd_args(self, context):
args = []
cmd_type = self.kwargs['command_type']
inplace_args = None
tags = set([self.dag_id, self.task_id, context['run_id']])
for k,v in self.kwargs.items():
if k in COMMAND_ARGS[cmd_type]:
if k in HYPHEN_ARGS:
args.append("--{0}={1}".format(k.replace('_', '-'),v))
elif k in POSITIONAL_ARGS:
inplace_args = v
elif k == 'tags':
if isinstance(v, six.string_types):
tags.add(v)
elif isinstance(v, (list, tuple)):
for val in v:
tags.add(val)
else:
args.append("--{0}={1}".format(k,v))
if k == 'notify' and v is True:
args.append("--notify")
args.append("--tags={0}".format(','.join(filter(None,tags))))
if inplace_args is not None:
if cmd_type == 'hadoopcmd':
args += inplace_args.split(' ', 1)
else:
args += inplace_args.split(' ')
return args
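    # Illustrative example (added): for a hivecmd task configured with
    #   {"query": "show tables", "cluster_label": "default", "tags": "nightly"}
    # create_cmd_args() produces something like
    #   ['--query=show tables', '--cluster-label=default',
    #    '--tags=nightly,<dag_id>,<task_id>,<run_id>']
    # (hyphenated flags come from HYPHEN_ARGS; the tag order is not guaranteed
    # because tags are collected in a set).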
|
the-stack_0_17785 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import asyncio
import contextvars
import json
import logging
import random
import sys
import time
import types
from collections import Counter, OrderedDict
from copy import deepcopy
from enum import Enum
from functools import total_ordering
from os.path import commonprefix
import ijson
from esrally import exceptions, track
# Mapping from operation type to specific runner
__RUNNERS = {}
def register_default_runners():
register_runner(track.OperationType.Bulk, BulkIndex(), async_runner=True)
register_runner(track.OperationType.ForceMerge, ForceMerge(), async_runner=True)
register_runner(track.OperationType.IndexStats, Retry(IndicesStats()), async_runner=True)
register_runner(track.OperationType.NodeStats, NodeStats(), async_runner=True)
register_runner(track.OperationType.Search, Query(), async_runner=True)
register_runner(track.OperationType.RawRequest, RawRequest(), async_runner=True)
register_runner(track.OperationType.Composite, Composite(), async_runner=True)
register_runner(track.OperationType.SubmitAsyncSearch, SubmitAsyncSearch(), async_runner=True)
register_runner(track.OperationType.GetAsyncSearch, Retry(GetAsyncSearch(), retry_until_success=True), async_runner=True)
register_runner(track.OperationType.DeleteAsyncSearch, DeleteAsyncSearch(), async_runner=True)
# This is an administrative operation but there is no need for a retry here as we don't issue a request
register_runner(track.OperationType.Sleep, Sleep(), async_runner=True)
# these requests should not be retried as they are not idempotent
register_runner(track.OperationType.CreateSnapshot, CreateSnapshot(), async_runner=True)
register_runner(track.OperationType.RestoreSnapshot, RestoreSnapshot(), async_runner=True)
# We treat the following as administrative commands and thus already start to wrap them in a retry.
register_runner(track.OperationType.ClusterHealth, Retry(ClusterHealth()), async_runner=True)
register_runner(track.OperationType.PutPipeline, Retry(PutPipeline()), async_runner=True)
register_runner(track.OperationType.Refresh, Retry(Refresh()), async_runner=True)
register_runner(track.OperationType.CreateIndex, Retry(CreateIndex()), async_runner=True)
register_runner(track.OperationType.DeleteIndex, Retry(DeleteIndex()), async_runner=True)
register_runner(track.OperationType.CreateComponentTemplate, Retry(CreateComponentTemplate()), async_runner=True)
register_runner(track.OperationType.DeleteComponentTemplate, Retry(DeleteComponentTemplate()), async_runner=True)
register_runner(track.OperationType.CreateComposableTemplate, Retry(CreateComposableTemplate()), async_runner=True)
register_runner(track.OperationType.DeleteComposableTemplate, Retry(DeleteComposableTemplate()), async_runner=True)
register_runner(track.OperationType.CreateDataStream, Retry(CreateDataStream()), async_runner=True)
register_runner(track.OperationType.DeleteDataStream, Retry(DeleteDataStream()), async_runner=True)
register_runner(track.OperationType.CreateIndexTemplate, Retry(CreateIndexTemplate()), async_runner=True)
register_runner(track.OperationType.DeleteIndexTemplate, Retry(DeleteIndexTemplate()), async_runner=True)
register_runner(track.OperationType.ShrinkIndex, Retry(ShrinkIndex()), async_runner=True)
register_runner(track.OperationType.CreateMlDatafeed, Retry(CreateMlDatafeed()), async_runner=True)
register_runner(track.OperationType.DeleteMlDatafeed, Retry(DeleteMlDatafeed()), async_runner=True)
register_runner(track.OperationType.StartMlDatafeed, Retry(StartMlDatafeed()), async_runner=True)
register_runner(track.OperationType.StopMlDatafeed, Retry(StopMlDatafeed()), async_runner=True)
register_runner(track.OperationType.CreateMlJob, Retry(CreateMlJob()), async_runner=True)
register_runner(track.OperationType.DeleteMlJob, Retry(DeleteMlJob()), async_runner=True)
register_runner(track.OperationType.OpenMlJob, Retry(OpenMlJob()), async_runner=True)
register_runner(track.OperationType.CloseMlJob, Retry(CloseMlJob()), async_runner=True)
register_runner(track.OperationType.DeleteSnapshotRepository, Retry(DeleteSnapshotRepository()), async_runner=True)
register_runner(track.OperationType.CreateSnapshotRepository, Retry(CreateSnapshotRepository()), async_runner=True)
register_runner(track.OperationType.WaitForSnapshotCreate, Retry(WaitForSnapshotCreate()), async_runner=True)
register_runner(track.OperationType.WaitForRecovery, Retry(IndicesRecovery()), async_runner=True)
register_runner(track.OperationType.PutSettings, Retry(PutSettings()), async_runner=True)
register_runner(track.OperationType.CreateTransform, Retry(CreateTransform()), async_runner=True)
register_runner(track.OperationType.StartTransform, Retry(StartTransform()), async_runner=True)
register_runner(track.OperationType.WaitForTransform, Retry(WaitForTransform()), async_runner=True)
register_runner(track.OperationType.DeleteTransform, Retry(DeleteTransform()), async_runner=True)
def runner_for(operation_type):
try:
return __RUNNERS[operation_type]
except KeyError:
raise exceptions.RallyError("No runner available for operation type [%s]" % operation_type)
def enable_assertions(enabled):
"""
Changes whether assertions are enabled. The status changes for all tasks that are executed after this call.
:param enabled: ``True`` to enable assertions, ``False`` to disable them.
"""
AssertingRunner.assertions_enabled = enabled
def register_runner(operation_type, runner, **kwargs):
logger = logging.getLogger(__name__)
async_runner = kwargs.get("async_runner", False)
if isinstance(operation_type, track.OperationType):
operation_type = operation_type.to_hyphenated_string()
if not async_runner:
raise exceptions.RallyAssertionError(
"Runner [{}] must be implemented as async runner and registered with async_runner=True.".format(str(runner)))
if getattr(runner, "multi_cluster", False):
if "__aenter__" in dir(runner) and "__aexit__" in dir(runner):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering runner object [%s] for [%s].", str(runner), str(operation_type))
cluster_aware_runner = _multi_cluster_runner(runner, str(runner), context_manager_enabled=True)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering context-manager capable runner object [%s] for [%s].", str(runner), str(operation_type))
cluster_aware_runner = _multi_cluster_runner(runner, str(runner))
# we'd rather use callable() but this will erroneously also classify a class as callable...
elif isinstance(runner, types.FunctionType):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering runner function [%s] for [%s].", str(runner), str(operation_type))
cluster_aware_runner = _single_cluster_runner(runner, runner.__name__)
elif "__aenter__" in dir(runner) and "__aexit__" in dir(runner):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering context-manager capable runner object [%s] for [%s].", str(runner), str(operation_type))
cluster_aware_runner = _single_cluster_runner(runner, str(runner), context_manager_enabled=True)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering runner object [%s] for [%s].", str(runner), str(operation_type))
cluster_aware_runner = _single_cluster_runner(runner, str(runner))
__RUNNERS[operation_type] = _with_completion(_with_assertions(cluster_aware_runner))
# Only intended for unit-testing!
def remove_runner(operation_type):
del __RUNNERS[operation_type]
class Runner:
"""
Base class for all operations against Elasticsearch.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = logging.getLogger(__name__)
async def __aenter__(self):
return self
async def __call__(self, es, params):
"""
Runs the actual method that should be benchmarked.
:param args: All arguments that are needed to call this method.
:return: A pair of (int, String). The first component indicates the "weight" of this call. it is typically 1 but for bulk operations
it should be the actual bulk size. The second component is the "unit" of weight which should be "ops" (short for
"operations") by default. If applicable, the unit should always be in plural form. It is used in metrics records
for throughput and reports. A value will then be shown as e.g. "111 ops/s".
"""
raise NotImplementedError("abstract operation")
async def __aexit__(self, exc_type, exc_val, exc_tb):
return False
def _default_kw_params(self, params):
# map of API kwargs to Rally config parameters
kw_dict = {
"body": "body",
"headers": "headers",
"index": "index",
"opaque_id": "opaque-id",
"params": "request-params",
"request_timeout": "request-timeout",
}
full_result = {k: params.get(v) for (k, v) in kw_dict.items()}
# filter Nones
return dict(filter(lambda kv: kv[1] is not None, full_result.items()))
def _transport_request_params(self, params):
request_params = params.get("request-params", {})
request_timeout = params.get("request-timeout")
if request_timeout is not None:
request_params["request_timeout"] = request_timeout
headers = params.get("headers") or {}
opaque_id = params.get("opaque-id")
if opaque_id is not None:
headers.update({"x-opaque-id": opaque_id})
return request_params, headers
class Delegator:
"""
Mixin to unify delegate handling
"""
def __init__(self, delegate, *args, **kwargs):
super().__init__(*args, **kwargs)
self.delegate = delegate
def unwrap(runner):
"""
Unwraps all delegators until the actual runner.
:param runner: An arbitrarily nested chain of delegators around a runner.
:return: The innermost runner.
"""
delegate = getattr(runner, "delegate", None)
if delegate:
return unwrap(delegate)
else:
return runner
def _single_cluster_runner(runnable, name, context_manager_enabled=False):
# only pass the default ES client
return MultiClientRunner(runnable, name, lambda es: es["default"], context_manager_enabled)
def _multi_cluster_runner(runnable, name, context_manager_enabled=False):
# pass all ES clients
return MultiClientRunner(runnable, name, lambda es: es, context_manager_enabled)
def _with_assertions(delegate):
return AssertingRunner(delegate)
def _with_completion(delegate):
unwrapped_runner = unwrap(delegate)
if hasattr(unwrapped_runner, "completed") and hasattr(unwrapped_runner, "percent_completed"):
return WithCompletion(delegate, unwrapped_runner)
else:
return NoCompletion(delegate)
class NoCompletion(Runner, Delegator):
def __init__(self, delegate):
super().__init__(delegate=delegate)
@property
def completed(self):
return None
@property
def percent_completed(self):
return None
async def __call__(self, *args):
return await self.delegate(*args)
def __repr__(self, *args, **kwargs):
return repr(self.delegate)
async def __aenter__(self):
await self.delegate.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.delegate.__aexit__(exc_type, exc_val, exc_tb)
class WithCompletion(Runner, Delegator):
def __init__(self, delegate, progressable):
super().__init__(delegate=delegate)
self.progressable = progressable
@property
def completed(self):
return self.progressable.completed
@property
def percent_completed(self):
return self.progressable.percent_completed
async def __call__(self, *args):
return await self.delegate(*args)
def __repr__(self, *args, **kwargs):
return repr(self.delegate)
async def __aenter__(self):
await self.delegate.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.delegate.__aexit__(exc_type, exc_val, exc_tb)
class MultiClientRunner(Runner, Delegator):
def __init__(self, runnable, name, client_extractor, context_manager_enabled=False):
super().__init__(delegate=runnable)
self.name = name
self.client_extractor = client_extractor
self.context_manager_enabled = context_manager_enabled
async def __call__(self, *args):
return await self.delegate(self.client_extractor(args[0]), *args[1:])
def __repr__(self, *args, **kwargs):
if self.context_manager_enabled:
return "user-defined context-manager enabled runner for [%s]" % self.name
else:
return "user-defined runner for [%s]" % self.name
async def __aenter__(self):
if self.context_manager_enabled:
await self.delegate.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self.context_manager_enabled:
return await self.delegate.__aexit__(exc_type, exc_val, exc_tb)
else:
return False
class AssertingRunner(Runner, Delegator):
assertions_enabled = False
def __init__(self, delegate):
super().__init__(delegate=delegate)
self.predicates = {
">": self.greater_than,
">=": self.greater_than_or_equal,
"<": self.smaller_than,
"<=": self.smaller_than_or_equal,
"==": self.equal,
}
def greater_than(self, expected, actual):
return actual > expected
def greater_than_or_equal(self, expected, actual):
return actual >= expected
def smaller_than(self, expected, actual):
return actual < expected
def smaller_than_or_equal(self, expected, actual):
return actual <= expected
def equal(self, expected, actual):
return actual == expected
def check_assertion(self, op_name, assertion, properties):
path = assertion["property"]
predicate_name = assertion["condition"]
expected_value = assertion["value"]
actual_value = properties
for k in path.split("."):
actual_value = actual_value[k]
predicate = self.predicates[predicate_name]
success = predicate(expected_value, actual_value)
if not success:
if op_name:
msg = f"Expected [{path}] in [{op_name}] to be {predicate_name} [{expected_value}] but was [{actual_value}]."
else:
msg = f"Expected [{path}] to be {predicate_name} [{expected_value}] but was [{actual_value}]."
raise exceptions.RallyTaskAssertionError(msg)
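    # Example assertion as it would appear in task params (illustrative):
    #   {"property": "hits.total.value", "condition": ">=", "value": 10000}
    # check_assertion() resolves the dotted path in the returned dict and
    # compares the actual value with the expected one using the predicate map.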
async def __call__(self, *args):
params = args[1]
return_value = await self.delegate(*args)
if AssertingRunner.assertions_enabled and "assertions" in params:
op_name = params.get("name")
if isinstance(return_value, dict):
for assertion in params["assertions"]:
self.check_assertion(op_name, assertion, return_value)
else:
self.logger.debug("Skipping assertion check in [%s] as [%s] does not return a dict.",
op_name, repr(self.delegate))
return return_value
def __repr__(self, *args, **kwargs):
return repr(self.delegate)
async def __aenter__(self):
await self.delegate.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.delegate.__aexit__(exc_type, exc_val, exc_tb)
def mandatory(params, key, op):
try:
return params[key]
except KeyError:
raise exceptions.DataError(
f"Parameter source for operation '{str(op)}' did not provide the mandatory parameter '{key}'. "
f"Add it to your parameter source and try again.")
# TODO: remove and use https://docs.python.org/3/library/stdtypes.html#str.removeprefix
# once Python 3.9 becomes the minimum version
def remove_prefix(string, prefix):
if string.startswith(prefix):
return string[len(prefix):]
return string
def escape(v):
"""
Escapes values so they can be used as query parameters
:param v: The raw value. May be None.
:return: The escaped value.
"""
if v is None:
return None
elif isinstance(v, bool):
return str(v).lower()
else:
return str(v)
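# Worked examples for escape() (added for clarity):
#   escape(True) -> "true"   (booleans are lower-cased for query strings)
#   escape(1.5)  -> "1.5"    (any other non-None value is stringified)
#   escape(None) -> None     (missing values stay absent)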
class BulkIndex(Runner):
"""
Bulk indexes the given documents.
"""
async def __call__(self, es, params):
"""
Runs one bulk indexing operation.
:param es: The Elasticsearch client.
:param params: A hash with all parameters. See below for details.
:return: A hash with meta data for this bulk operation. See below for details.
It expects a parameter dict with the following mandatory keys:
* ``body``: containing all documents for the current bulk request.
* ``bulk-size``: An indication of the bulk size denoted in ``unit``.
* ``unit``: The name of the unit in which the bulk size is provided.
        * ``action-metadata-present``: if ``True``, assume that an action and metadata line is present (meaning only half of the lines
          contain actual documents to index)
        * ``index``: The name of the affected index in case ``action-metadata-present`` is ``False``.
        * ``type``: The name of the affected type in case ``action-metadata-present`` is ``False``.
The following keys are optional:
        * ``pipeline``: If present, runs the specified ingest pipeline for this bulk.
* ``detailed-results``: If ``True``, the runner will analyze the response and add detailed meta-data. Defaults to ``False``. Note
that this has a very significant impact on performance and will very likely cause a bottleneck in the benchmark driver so please
be very cautious enabling this feature. Our own measurements have shown a median overhead of several thousand times (execution time
is in the single digit microsecond range when this feature is disabled and in the single digit millisecond range when this feature
is enabled; numbers based on a bulk size of 500 elements and no errors). For details please refer to the respective benchmarks
in ``benchmarks/driver``.
* ``request-timeout``: a non-negative float indicating the client-side timeout for the operation. If not present, defaults to
``None`` and potentially falls back to the global timeout setting.
"""
detailed_results = params.get("detailed-results", False)
api_kwargs = self._default_kw_params(params)
bulk_params = {}
if "pipeline" in params:
bulk_params["pipeline"] = params["pipeline"]
with_action_metadata = mandatory(params, "action-metadata-present", self)
bulk_size = mandatory(params, "bulk-size", self)
unit = mandatory(params, "unit", self)
# parse responses lazily in the standard case - responses might be large thus parsing skews results and if no
# errors have occurred we only need a small amount of information from the potentially large response.
if not detailed_results:
es.return_raw_response()
if with_action_metadata:
api_kwargs.pop("index", None)
# only half of the lines are documents
response = await es.bulk(params=bulk_params, **api_kwargs)
else:
response = await es.bulk(doc_type=params.get("type"), params=bulk_params, **api_kwargs)
stats = self.detailed_stats(params, response) if detailed_results else self.simple_stats(bulk_size, unit, response)
meta_data = {
"index": params.get("index"),
"weight": bulk_size,
"unit": unit,
}
meta_data.update(stats)
if not stats["success"]:
meta_data["error-type"] = "bulk"
return meta_data
def detailed_stats(self, params, response):
ops = {}
shards_histogram = OrderedDict()
bulk_error_count = 0
bulk_success_count = 0
error_details = set()
bulk_request_size_bytes = 0
total_document_size_bytes = 0
with_action_metadata = mandatory(params, "action-metadata-present", self)
if isinstance(params["body"], str):
bulk_lines = params["body"].split("\n")
elif isinstance(params["body"], list):
bulk_lines = params["body"]
else:
raise exceptions.DataError("bulk body is neither string nor list")
for line_number, data in enumerate(bulk_lines):
line_size = len(data.encode('utf-8'))
if with_action_metadata:
if line_number % 2 == 1:
total_document_size_bytes += line_size
else:
total_document_size_bytes += line_size
bulk_request_size_bytes += line_size
for item in response["items"]:
# there is only one (top-level) item
op, data = next(iter(item.items()))
if op not in ops:
ops[op] = Counter()
ops[op]["item-count"] += 1
if "result" in data:
ops[op][data["result"]] += 1
if "_shards" in data:
s = data["_shards"]
sk = "%d-%d-%d" % (s["total"], s["successful"], s["failed"])
if sk not in shards_histogram:
shards_histogram[sk] = {
"item-count": 0,
"shards": s
}
shards_histogram[sk]["item-count"] += 1
if data["status"] > 299 or ("_shards" in data and data["_shards"]["failed"] > 0):
bulk_error_count += 1
self.extract_error_details(error_details, data)
else:
bulk_success_count += 1
stats = {
"took": response.get("took"),
"success": bulk_error_count == 0,
"success-count": bulk_success_count,
"error-count": bulk_error_count,
"ops": ops,
"shards_histogram": list(shards_histogram.values()),
"bulk-request-size-bytes": bulk_request_size_bytes,
"total-document-size-bytes": total_document_size_bytes
}
if bulk_error_count > 0:
stats["error-type"] = "bulk"
stats["error-description"] = self.error_description(error_details)
if "ingest_took" in response:
stats["ingest_took"] = response["ingest_took"]
return stats
def simple_stats(self, bulk_size, unit, response):
bulk_success_count = bulk_size if unit == "docs" else None
bulk_error_count = 0
error_details = set()
# parse lazily on the fast path
props = parse(response, ["errors", "took"])
if props.get("errors", False):
# determine success count regardless of unit because we need to iterate through all items anyway
bulk_success_count = 0
# Reparse fully in case of errors - this will be slower
parsed_response = json.loads(response.getvalue())
for item in parsed_response["items"]:
data = next(iter(item.values()))
if data["status"] > 299 or ('_shards' in data and data["_shards"]["failed"] > 0):
bulk_error_count += 1
self.extract_error_details(error_details, data)
else:
bulk_success_count += 1
stats = {
"took": props.get("took"),
"success": bulk_error_count == 0,
"success-count": bulk_success_count,
"error-count": bulk_error_count
}
if bulk_error_count > 0:
stats["error-type"] = "bulk"
stats["error-description"] = self.error_description(error_details)
return stats
def extract_error_details(self, error_details, data):
error_data = data.get("error", {})
error_reason = error_data.get("reason") if isinstance(error_data, dict) else str(error_data)
if error_data:
error_details.add((data["status"], error_reason))
else:
error_details.add((data["status"], None))
def error_description(self, error_details):
error_description = ""
for status, reason in error_details:
if reason:
error_description += "HTTP status: %s, message: %s" % (str(status), reason)
else:
error_description += "HTTP status: %s" % str(status)
return error_description
def __repr__(self, *args, **kwargs):
return "bulk-index"
class ForceMerge(Runner):
"""
Runs a force merge operation against Elasticsearch.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
max_num_segments = params.get("max-num-segments")
mode = params.get("mode")
merge_params = self._default_kw_params(params)
if max_num_segments:
merge_params["max_num_segments"] = max_num_segments
if mode == "polling":
complete = False
try:
await es.indices.forcemerge(**merge_params)
complete = True
except elasticsearch.ConnectionTimeout:
pass
while not complete:
await asyncio.sleep(params.get("poll-period"))
tasks = await es.tasks.list(params={"actions": "indices:admin/forcemerge"})
if len(tasks["nodes"]) == 0:
# empty nodes response indicates no tasks
complete = True
else:
await es.indices.forcemerge(**merge_params)
def __repr__(self, *args, **kwargs):
return "force-merge"
class IndicesStats(Runner):
"""
Gather index stats for all indices.
"""
def _get(self, v, path):
if v is None:
return None
elif len(path) == 1:
return v.get(path[0])
else:
return self._get(v.get(path[0]), path[1:])
def _safe_string(self, v):
return str(v) if v is not None else None
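    # Example params for this runner (hypothetical values, shown only for illustration):
    #
    #     {
    #         "index": "logs-*",
    #         "condition": {
    #             "path": "_all.total.docs.count",
    #             "expected-value": 0
    #         }
    #     }
    #
    # With a condition, "success" is True only if the value found under ``path`` in the
    # indices stats response equals ``expected-value``; the stringified copies are only
    # recorded as meta-data for the metrics store.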
async def __call__(self, es, params):
api_kwargs = self._default_kw_params(params)
index = api_kwargs.pop("index", "_all")
condition = params.get("condition")
response = await es.indices.stats(index=index, metric="_all", **api_kwargs)
if condition:
path = mandatory(condition, "path", repr(self))
expected_value = mandatory(condition, "expected-value", repr(self))
actual_value = self._get(response, path.split("."))
return {
"weight": 1,
"unit": "ops",
"condition": {
"path": path,
# avoid mapping issues in the ES metrics store by always rendering values as strings
"actual-value": self._safe_string(actual_value),
"expected-value": self._safe_string(expected_value)
},
# currently we only support "==" as a predicate but that might change in the future
"success": actual_value == expected_value
}
else:
return {
"weight": 1,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "indices-stats"
class NodeStats(Runner):
"""
Gather node stats for all nodes.
"""
async def __call__(self, es, params):
request_timeout = params.get("request-timeout")
await es.nodes.stats(metric="_all", request_timeout=request_timeout)
def __repr__(self, *args, **kwargs):
return "node-stats"
def parse(text, props, lists=None):
"""
    Selectively parses the provided text as JSON, extracting only the properties provided in ``props``. If ``lists`` is
    specified, this function determines whether the provided lists are empty (the respective value will be ``True``) or
    contain elements (the respective value will be ``False``).
:param text: A text to parse.
:param props: A mandatory list of property paths (separated by a dot character) for which to extract values.
:param lists: An optional list of property paths to JSON lists in the provided text.
:return: A dict containing all properties and lists that have been found in the provided text.
"""
text.seek(0)
parser = ijson.parse(text)
parsed = {}
parsed_lists = {}
current_list = None
expect_end_array = False
try:
for prefix, event, value in parser:
if expect_end_array:
# True if the list is empty, False otherwise
parsed_lists[current_list] = event == "end_array"
expect_end_array = False
if prefix in props:
parsed[prefix] = value
elif lists is not None and prefix in lists and event == "start_array":
current_list = prefix
expect_end_array = True
# found all necessary properties
if len(parsed) == len(props) and (lists is None or len(parsed_lists) == len(lists)):
break
except ijson.IncompleteJSONError:
# did not find all properties
pass
parsed.update(parsed_lists)
return parsed
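# Illustrative usage of parse() (a sketch, not part of the original source; the JSON
# payload below is made up). Only the requested properties are extracted and list
# properties are merely checked for emptiness:
#
#     import io
#     buf = io.BytesIO(b'{"took": 3, "errors": false, "items": [{"index": {"status": 201}}]}')
#     parse(buf, ["took", "errors"], lists=["items"])
#     # -> roughly {"took": 3, "errors": False, "items": False}
#     # ("items" is False because the list contains elements; True would mean it is empty)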
class Query(Runner):
"""
Runs a request body search against Elasticsearch.
It expects at least the following keys in the `params` hash:
* `index`: The index or indices against which to issue the query.
* `type`: See `index`
* `cache`: True iff the request cache should be used.
* `body`: Query body
The following parameters are optional:
* `detailed-results` (default: ``False``): Records more detailed meta-data about queries. As it analyzes the
corresponding response in more detail, this might incur additional
overhead which can skew measurement results. This flag is ineffective
for scroll queries (detailed meta-data are always returned).
* ``request-timeout``: a non-negative float indicating the client-side timeout for the operation. If not present,
defaults to ``None`` and potentially falls back to the global timeout setting.
If the following parameters are present in addition, a scroll query will be issued:
    * `pages`: Number of pages to retrieve at most for this scroll. If a scroll query yields fewer results than the specified number of
      pages, we will terminate earlier.
* `results-per-page`: Number of results to retrieve per page.
"""
async def __call__(self, es, params):
if "pages" in params and "results-per-page" in params:
return await self.scroll_query(es, params)
else:
return await self.request_body_query(es, params)
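    # Example parameter structures for this runner (illustrative sketches only; index
    # names and query bodies are made up):
    #
    #     # request body search
    #     {
    #         "index": "logs-*",
    #         "cache": False,
    #         "detailed-results": True,
    #         "body": {"query": {"match_all": {}}}
    #     }
    #
    #     # scroll search (adds the paging parameters)
    #     {
    #         "index": "logs-*",
    #         "pages": "all",
    #         "results-per-page": 1000,
    #         "body": {"query": {"match_all": {}}}
    #     }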
async def request_body_query(self, es, params):
request_params, headers = self._transport_request_params(params)
# Mandatory to ensure it is always provided. This is especially important when this runner is used in a
# composite context where there is no actual parameter source and the entire request structure must be provided
# by the composite's parameter source.
index = mandatory(params, "index", self)
body = mandatory(params, "body", self)
doc_type = params.get("type")
detailed_results = params.get("detailed-results", False)
encoding_header = self._query_headers(params)
if encoding_header is not None:
headers.update(encoding_header)
cache = params.get("cache")
if cache is not None:
request_params["request_cache"] = str(cache).lower()
if not bool(headers):
# counter-intuitive but preserves prior behavior
headers = None
# disable eager response parsing - responses might be huge thus skewing results
es.return_raw_response()
r = await self._raw_search(es, doc_type, index, body, request_params, headers=headers)
if detailed_results:
props = parse(r, ["hits.total", "hits.total.value", "hits.total.relation", "timed_out", "took"])
hits_total = props.get("hits.total.value", props.get("hits.total", 0))
hits_relation = props.get("hits.total.relation", "eq")
timed_out = props.get("timed_out", False)
took = props.get("took", 0)
return {
"weight": 1,
"unit": "ops",
"success": True,
"hits": hits_total,
"hits_relation": hits_relation,
"timed_out": timed_out,
"took": took
}
else:
return {
"weight": 1,
"unit": "ops",
"success": True
}
async def scroll_query(self, es, params):
request_params, headers = self._transport_request_params(params)
hits = 0
hits_relation = None
retrieved_pages = 0
timed_out = False
took = 0
        # explicitly convert to int so that a non-numeric value provokes an error early
total_pages = sys.maxsize if params["pages"] == "all" else int(params["pages"])
size = params.get("results-per-page")
encoding_header = self._query_headers(params)
if encoding_header is not None:
headers.update(encoding_header)
scroll_id = None
cache = params.get("cache")
if cache is not None:
request_params["request_cache"] = str(cache).lower()
if not bool(headers):
# counter-intuitive but preserves prior behavior
headers = None
# disable eager response parsing - responses might be huge thus skewing results
es.return_raw_response()
try:
for page in range(total_pages):
if page == 0:
# Mandatory to ensure it is always provided. This is especially important when this runner is used
# in a composite context where there is no actual parameter source and the entire request structure
# must be provided by the composite's parameter source.
index = mandatory(params, "index", self)
body = mandatory(params, "body", self)
sort = "_doc"
scroll = "10s"
doc_type = params.get("type")
params = request_params.copy()
params["sort"] = sort
params["scroll"] = scroll
params["size"] = size
r = await self._raw_search(es, doc_type, index, body, params, headers=headers)
props = parse(r,
["_scroll_id", "hits.total", "hits.total.value", "hits.total.relation", "timed_out", "took"],
["hits.hits"])
scroll_id = props.get("_scroll_id")
hits = props.get("hits.total.value", props.get("hits.total", 0))
hits_relation = props.get("hits.total.relation", "eq")
timed_out = props.get("timed_out", False)
took = props.get("took", 0)
all_results_collected = (size is not None and hits < size) or hits == 0
else:
r = await es.transport.perform_request("GET", "/_search/scroll",
body={"scroll_id": scroll_id, "scroll": "10s"},
params=request_params,
headers=headers)
props = parse(r, ["hits.total", "hits.total.value", "hits.total.relation", "timed_out", "took"], ["hits.hits"])
timed_out = timed_out or props.get("timed_out", False)
took += props.get("took", 0)
# is the list of hits empty?
all_results_collected = props.get("hits.hits", False)
retrieved_pages += 1
if all_results_collected:
break
finally:
if scroll_id:
# noinspection PyBroadException
try:
await es.clear_scroll(body={"scroll_id": [scroll_id]})
except BaseException:
self.logger.exception("Could not clear scroll [%s]. This will lead to excessive resource usage in "
"Elasticsearch and will skew your benchmark results.", scroll_id)
return {
"weight": retrieved_pages,
"pages": retrieved_pages,
"hits": hits,
"hits_relation": hits_relation,
"unit": "pages",
"timed_out": timed_out,
"took": took
}
async def _raw_search(self, es, doc_type, index, body, params, headers=None):
components = []
if index:
components.append(index)
if doc_type:
components.append(doc_type)
components.append("_search")
path = "/".join(components)
return await es.transport.perform_request("GET", "/" + path, params=params, body=body, headers=headers)
def _query_headers(self, params):
# reduces overhead due to decompression of very large responses
if params.get("response-compression-enabled", True):
return None
else:
return {"Accept-Encoding": "identity"}
def __repr__(self, *args, **kwargs):
return "query"
class ClusterHealth(Runner):
"""
Get cluster health
"""
async def __call__(self, es, params):
@total_ordering
class ClusterHealthStatus(Enum):
UNKNOWN = 0
RED = 1
YELLOW = 2
GREEN = 3
def __lt__(self, other):
if self.__class__ is other.__class__:
# pylint: disable=comparison-with-callable
return self.value < other.value
return NotImplemented
def status(v):
try:
return ClusterHealthStatus[v.upper()]
except (KeyError, AttributeError):
return ClusterHealthStatus.UNKNOWN
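        # Example params (hypothetical): wait until the cluster reaches at least yellow
        # status and no shards are relocating for the given index.
        #
        #     {
        #         "index": "logs-*",
        #         "request-params": {
        #             "wait_for_status": "yellow",
        #             "wait_for_no_relocating_shards": "true"
        #         }
        #     }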
request_params = params.get("request-params", {})
api_kw_params = self._default_kw_params(params)
# by default, Elasticsearch will not wait and thus we treat this as success
expected_cluster_status = request_params.get("wait_for_status", str(ClusterHealthStatus.UNKNOWN))
# newer ES versions >= 5.0
if "wait_for_no_relocating_shards" in request_params:
expected_relocating_shards = 0
else:
# older ES versions
# either the user has defined something or we're good with any count of relocating shards.
expected_relocating_shards = int(request_params.get("wait_for_relocating_shards", sys.maxsize))
result = await es.cluster.health(**api_kw_params)
cluster_status = result["status"]
relocating_shards = result["relocating_shards"]
return {
"weight": 1,
"unit": "ops",
"success": status(cluster_status) >= status(expected_cluster_status) and relocating_shards <= expected_relocating_shards,
"cluster-status": cluster_status,
"relocating-shards": relocating_shards
}
def __repr__(self, *args, **kwargs):
return "cluster-health"
class PutPipeline(Runner):
"""
Execute the `put pipeline API <https://www.elastic.co/guide/en/elasticsearch/reference/current/put-pipeline-api.html>`_. Note that this
API is only available from Elasticsearch 5.0 onwards.
"""
async def __call__(self, es, params):
await es.ingest.put_pipeline(id=mandatory(params, "id", self),
body=mandatory(params, "body", self),
master_timeout=params.get("master-timeout"),
timeout=params.get("timeout"),
)
def __repr__(self, *args, **kwargs):
return "put-pipeline"
class Refresh(Runner):
"""
Execute the `refresh API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html>`_.
"""
async def __call__(self, es, params):
await es.indices.refresh(index=params.get("index", "_all"))
def __repr__(self, *args, **kwargs):
return "refresh"
class CreateIndex(Runner):
"""
Execute the `create index API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html>`_.
"""
async def __call__(self, es, params):
indices = mandatory(params, "indices", self)
api_params = self._default_kw_params(params)
        # drop "index" and "body" from the generic kwargs; they are passed explicitly per index below
for term in ["index", "body"]:
api_params.pop(term, None)
for index, body in indices:
await es.indices.create(index=index, body=body, **api_params)
return {
"weight": len(indices),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "create-index"
class CreateDataStream(Runner):
"""
Execute the `create data stream API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-data-stream.html>`_.
"""
async def __call__(self, es, params):
data_streams = mandatory(params, "data-streams", self)
request_params = mandatory(params, "request-params", self)
for data_stream in data_streams:
await es.indices.create_data_stream(data_stream, params=request_params)
return {
"weight": len(data_streams),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "create-data-stream"
class DeleteIndex(Runner):
"""
Execute the `delete index API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html>`_.
"""
async def __call__(self, es, params):
ops = 0
indices = mandatory(params, "indices", self)
only_if_exists = params.get("only-if-exists", False)
request_params = params.get("request-params", {})
for index_name in indices:
if not only_if_exists:
await es.indices.delete(index=index_name, params=request_params)
ops += 1
elif only_if_exists and await es.indices.exists(index=index_name):
self.logger.info("Index [%s] already exists. Deleting it.", index_name)
await es.indices.delete(index=index_name, params=request_params)
ops += 1
return {
"weight": ops,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "delete-index"
class DeleteDataStream(Runner):
"""
Execute the `delete data stream API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-data-stream.html>`_.
"""
async def __call__(self, es, params):
ops = 0
data_streams = mandatory(params, "data-streams", self)
only_if_exists = mandatory(params, "only-if-exists", self)
request_params = mandatory(params, "request-params", self)
for data_stream in data_streams:
if not only_if_exists:
await es.indices.delete_data_stream(data_stream, ignore=[404], params=request_params)
ops += 1
elif only_if_exists and await es.indices.exists(index=data_stream):
self.logger.info("Data stream [%s] already exists. Deleting it.", data_stream)
await es.indices.delete_data_stream(data_stream, params=request_params)
ops += 1
return {
"weight": ops,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "delete-data-stream"
class CreateComponentTemplate(Runner):
"""
Execute the `PUT component template API
<https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html>`_.
"""
async def __call__(self, es, params):
templates = mandatory(params, "templates", self)
request_params = mandatory(params, "request-params", self)
for template, body in templates:
await es.cluster.put_component_template(name=template, body=body,
params=request_params)
return {
"weight": len(templates),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "create-component-template"
class DeleteComponentTemplate(Runner):
"""
Execute the `DELETE component template API
<https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-component-template.html>`_.
"""
async def __call__(self, es, params):
template_names = mandatory(params, "templates", self)
only_if_exists = mandatory(params, "only-if-exists", self)
request_params = mandatory(params, "request-params", self)
async def _exists(name):
# pylint: disable=import-outside-toplevel
from elasticsearch.client import _make_path
# currently not supported by client and hence custom request
return await es.transport.perform_request(
"HEAD", _make_path("_component_template", name)
)
ops_count = 0
for template_name in template_names:
if not only_if_exists:
await es.cluster.delete_component_template(name=template_name, params=request_params, ignore=[404])
ops_count += 1
elif only_if_exists and await _exists(template_name):
self.logger.info("Component Index template [%s] already exists. Deleting it.", template_name)
await es.cluster.delete_component_template(name=template_name, params=request_params)
ops_count += 1
return {
"weight": ops_count,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "delete-component-template"
class CreateComposableTemplate(Runner):
"""
Execute the `PUT index template API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-template.html>`_.
"""
async def __call__(self, es, params):
templates = mandatory(params, "templates", self)
request_params = mandatory(params, "request-params", self)
for template, body in templates:
await es.cluster.put_index_template(name=template, body=body, params=request_params)
return {
"weight": len(templates),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "create-composable-template"
class DeleteComposableTemplate(Runner):
"""
    Execute the `delete index template API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-template.html>`_.
"""
async def __call__(self, es, params):
templates = mandatory(params, "templates", self)
only_if_exists = mandatory(params, "only-if-exists", self)
request_params = mandatory(params, "request-params", self)
ops_count = 0
for template_name, delete_matching_indices, index_pattern in templates:
if not only_if_exists:
await es.indices.delete_index_template(name=template_name, params=request_params, ignore=[404])
ops_count += 1
elif only_if_exists and await es.indices.exists_template(template_name):
self.logger.info("Composable Index template [%s] already exists. Deleting it.", template_name)
await es.indices.delete_index_template(name=template_name, params=request_params)
ops_count += 1
# ensure that we do not provide an empty index pattern by accident
if delete_matching_indices and index_pattern:
await es.indices.delete(index=index_pattern)
ops_count += 1
return {
"weight": ops_count,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "delete-composable-template"
class CreateIndexTemplate(Runner):
"""
Execute the `PUT index template API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_.
"""
async def __call__(self, es, params):
templates = mandatory(params, "templates", self)
request_params = params.get("request-params", {})
for template, body in templates:
await es.indices.put_template(name=template,
body=body,
params=request_params)
return {
"weight": len(templates),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "create-index-template"
class DeleteIndexTemplate(Runner):
"""
Execute the `delete index template API
<https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#delete>`_.
"""
async def __call__(self, es, params):
template_names = mandatory(params, "templates", self)
only_if_exists = params.get("only-if-exists", False)
request_params = params.get("request-params", {})
ops_count = 0
for template_name, delete_matching_indices, index_pattern in template_names:
if not only_if_exists:
await es.indices.delete_template(name=template_name, params=request_params)
ops_count += 1
elif only_if_exists and await es.indices.exists_template(template_name):
self.logger.info("Index template [%s] already exists. Deleting it.", template_name)
await es.indices.delete_template(name=template_name, params=request_params)
ops_count += 1
# ensure that we do not provide an empty index pattern by accident
if delete_matching_indices and index_pattern:
await es.indices.delete(index=index_pattern)
ops_count += 1
return {
"weight": ops_count,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "delete-index-template"
class ShrinkIndex(Runner):
"""
Execute the `shrink index API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html>`_.
This is a high-level runner that actually executes multiple low-level operations under the hood.
"""
def __init__(self):
super().__init__()
self.cluster_health = Retry(ClusterHealth())
async def _wait_for(self, es, idx, description):
# wait a little bit before the first check
await asyncio.sleep(3)
result = await self.cluster_health(es, params={
"index": idx,
"retries": sys.maxsize,
"request-params": {
"wait_for_no_relocating_shards": "true"
}
})
if not result["success"]:
raise exceptions.RallyAssertionError("Failed to wait for [{}].".format(description))
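    # Example params (hypothetical names and values): the runner relocates shards to the
    # shrink node, blocks writes, shrinks each matching source index and waits for the
    # shrunken target to become healthy.
    #
    #     {
    #         "source-index": "logs-*",
    #         "target-index": "logs-shrunk",
    #         "target-body": {
    #             "settings": {"index.number_of_replicas": 0, "index.number_of_shards": 1}
    #         },
    #         "shrink-node": "rally-node-0"   # optional; a random data node is chosen otherwise
    #     }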
async def __call__(self, es, params):
source_index = mandatory(params, "source-index", self)
source_indices_get = await es.indices.get(source_index)
source_indices = list(source_indices_get.keys())
source_indices_stem = commonprefix(source_indices)
target_index = mandatory(params, "target-index", self)
# we need to inject additional settings so we better copy the body
target_body = deepcopy(mandatory(params, "target-body", self))
shrink_node = params.get("shrink-node")
# Choose a random data node if none is specified
if shrink_node:
node_names = [shrink_node]
else:
node_names = []
# choose a random data node
node_info = await es.nodes.info()
for node in node_info["nodes"].values():
if "data" in node["roles"]:
node_names.append(node["name"])
if not node_names:
raise exceptions.RallyAssertionError("Could not choose a suitable shrink-node automatically. Specify it explicitly.")
for source_index in source_indices:
shrink_node = random.choice(node_names)
self.logger.info("Using [%s] as shrink node.", shrink_node)
self.logger.info("Preparing [%s] for shrinking.", source_index)
# prepare index for shrinking
await es.indices.put_settings(index=source_index,
body={
"settings": {
"index.routing.allocation.require._name": shrink_node,
"index.blocks.write": "true"
}
},
preserve_existing=True)
self.logger.info("Waiting for relocation to finish for index [%s] ...", source_index)
await self._wait_for(es, source_index, f"shard relocation for index [{source_index}]")
self.logger.info("Shrinking [%s] to [%s].", source_index, target_index)
if "settings" not in target_body:
target_body["settings"] = {}
target_body["settings"]["index.routing.allocation.require._name"] = None
target_body["settings"]["index.blocks.write"] = None
# kick off the shrink operation
index_suffix = remove_prefix(source_index, source_indices_stem)
final_target_index = target_index if len(index_suffix) == 0 else target_index+index_suffix
await es.indices.shrink(index=source_index, target=final_target_index, body=target_body)
self.logger.info("Waiting for shrink to finish for index [%s] ...", source_index)
await self._wait_for(es, final_target_index, f"shrink for index [{final_target_index}]")
self.logger.info("Shrinking [%s] to [%s] has finished.", source_index, final_target_index)
# ops_count is not really important for this operation...
return {
"weight": len(source_indices),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "shrink-index"
class CreateMlDatafeed(Runner):
"""
Execute the `create datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
datafeed_id = mandatory(params, "datafeed-id", self)
body = mandatory(params, "body", self)
try:
await es.xpack.ml.put_datafeed(datafeed_id=datafeed_id, body=body)
except elasticsearch.TransportError as e:
# fallback to old path
if e.status_code == 400:
await es.transport.perform_request(
"PUT",
f"/_xpack/ml/datafeeds/{datafeed_id}",
body=body,
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "create-ml-datafeed"
class DeleteMlDatafeed(Runner):
"""
Execute the `delete datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
datafeed_id = mandatory(params, "datafeed-id", self)
force = params.get("force", False)
try:
# we don't want to fail if a datafeed does not exist, thus we ignore 404s.
await es.xpack.ml.delete_datafeed(datafeed_id=datafeed_id, force=force, ignore=[404])
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
await es.transport.perform_request(
"DELETE",
f"/_xpack/ml/datafeeds/{datafeed_id}",
params={
"force": escape(force),
"ignore": 404
},
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "delete-ml-datafeed"
class StartMlDatafeed(Runner):
"""
Execute the `start datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
datafeed_id = mandatory(params, "datafeed-id", self)
body = params.get("body")
start = params.get("start")
end = params.get("end")
timeout = params.get("timeout")
try:
await es.xpack.ml.start_datafeed(datafeed_id=datafeed_id, body=body, start=start, end=end, timeout=timeout)
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
await es.transport.perform_request(
"POST",
f"/_xpack/ml/datafeeds/{datafeed_id}/_start",
body=body,
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "start-ml-datafeed"
class StopMlDatafeed(Runner):
"""
Execute the `stop datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
datafeed_id = mandatory(params, "datafeed-id", self)
force = params.get("force", False)
timeout = params.get("timeout")
try:
await es.xpack.ml.stop_datafeed(datafeed_id=datafeed_id, force=force, timeout=timeout)
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
request_params = {
"force": escape(force),
}
if timeout:
request_params["timeout"] = escape(timeout)
await es.transport.perform_request(
"POST",
f"/_xpack/ml/datafeeds/{datafeed_id}/_stop",
params=request_params
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "stop-ml-datafeed"
class CreateMlJob(Runner):
"""
Execute the `create job API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
job_id = mandatory(params, "job-id", self)
body = mandatory(params, "body", self)
try:
await es.xpack.ml.put_job(job_id=job_id, body=body)
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
await es.transport.perform_request(
"PUT",
f"/_xpack/ml/anomaly_detectors/{job_id}",
body=body,
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "create-ml-job"
class DeleteMlJob(Runner):
"""
Execute the `delete job API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
job_id = mandatory(params, "job-id", self)
force = params.get("force", False)
# we don't want to fail if a job does not exist, thus we ignore 404s.
try:
await es.xpack.ml.delete_job(job_id=job_id, force=force, ignore=[404])
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
await es.transport.perform_request(
"DELETE",
f"/_xpack/ml/anomaly_detectors/{job_id}",
params={
"force": escape(force),
"ignore": 404
},
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "delete-ml-job"
class OpenMlJob(Runner):
"""
Execute the `open job API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
job_id = mandatory(params, "job-id", self)
try:
await es.xpack.ml.open_job(job_id=job_id)
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
await es.transport.perform_request(
"POST",
f"/_xpack/ml/anomaly_detectors/{job_id}/_open",
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "open-ml-job"
class CloseMlJob(Runner):
"""
Execute the `close job API <http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
job_id = mandatory(params, "job-id", self)
force = params.get("force", False)
timeout = params.get("timeout")
try:
await es.xpack.ml.close_job(job_id=job_id, force=force, timeout=timeout)
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
request_params = {
"force": escape(force),
}
if timeout:
request_params["timeout"] = escape(timeout)
await es.transport.perform_request(
"POST",
f"/_xpack/ml/anomaly_detectors/{job_id}/_close",
params=request_params,
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "close-ml-job"
class RawRequest(Runner):
async def __call__(self, es, params):
request_params, headers = self._transport_request_params(params)
if "ignore" in params:
request_params["ignore"] = params["ignore"]
path = mandatory(params, "path", self)
if not path.startswith("/"):
self.logger.error("RawRequest failed. Path parameter: [%s] must begin with a '/'.", path)
raise exceptions.RallyAssertionError(f"RawRequest [{path}] failed. Path parameter must begin with a '/'.")
if not bool(headers):
#counter-intuitive, but preserves prior behavior
headers = None
await es.transport.perform_request(method=params.get("method", "GET"),
url=path,
headers=headers,
body=params.get("body"),
params=request_params)
def __repr__(self, *args, **kwargs):
return "raw-request"
class Sleep(Runner):
"""
Sleeps for the specified duration not issuing any request.
"""
async def __call__(self, es, params):
es.on_request_start()
try:
await asyncio.sleep(mandatory(params, "duration", "sleep"))
finally:
es.on_request_end()
def __repr__(self, *args, **kwargs):
return "sleep"
class DeleteSnapshotRepository(Runner):
"""
Deletes a snapshot repository
"""
async def __call__(self, es, params):
await es.snapshot.delete_repository(repository=mandatory(params, "repository", repr(self)))
def __repr__(self, *args, **kwargs):
return "delete-snapshot-repository"
class CreateSnapshotRepository(Runner):
"""
Creates a new snapshot repository
"""
async def __call__(self, es, params):
request_params = params.get("request-params", {})
await es.snapshot.create_repository(repository=mandatory(params, "repository", repr(self)),
body=mandatory(params, "body", repr(self)),
params=request_params)
def __repr__(self, *args, **kwargs):
return "create-snapshot-repository"
class CreateSnapshot(Runner):
"""
    Creates a new snapshot in an already registered repository
"""
async def __call__(self, es, params):
wait_for_completion = params.get("wait-for-completion", False)
repository = mandatory(params, "repository", repr(self))
snapshot = mandatory(params, "snapshot", repr(self))
# just assert, gets set in _default_kw_params
mandatory(params, "body", repr(self))
api_kwargs = self._default_kw_params(params)
await es.snapshot.create(repository=repository,
snapshot=snapshot,
wait_for_completion=wait_for_completion,
**api_kwargs)
def __repr__(self, *args, **kwargs):
return "create-snapshot"
class WaitForSnapshotCreate(Runner):
async def __call__(self, es, params):
repository = mandatory(params, "repository", repr(self))
snapshot = mandatory(params, "snapshot", repr(self))
wait_period = params.get("completion-recheck-wait-period", 1)
snapshot_done = False
stats = {}
while not snapshot_done:
response = await es.snapshot.status(repository=repository,
snapshot=snapshot,
ignore_unavailable=True)
if "snapshots" in response:
response_state = response["snapshots"][0]["state"]
# Possible states:
# https://www.elastic.co/guide/en/elasticsearch/reference/current/get-snapshot-status-api.html#get-snapshot-status-api-response-body
if response_state == "FAILED":
self.logger.error("Snapshot [%s] failed. Response:\n%s", snapshot, json.dumps(response, indent=2))
raise exceptions.RallyAssertionError(f"Snapshot [{snapshot}] failed. Please check logs.")
snapshot_done = response_state == "SUCCESS"
stats = response["snapshots"][0]["stats"]
if not snapshot_done:
await asyncio.sleep(wait_period)
size = stats["total"]["size_in_bytes"]
file_count = stats["total"]["file_count"]
start_time_in_millis = stats["start_time_in_millis"]
duration_in_millis = stats["time_in_millis"]
duration_in_seconds = duration_in_millis / 1000
return {
"weight": size,
"unit": "byte",
"success": True,
"throughput": size / duration_in_seconds,
"start_time_millis": start_time_in_millis,
"stop_time_millis": start_time_in_millis + duration_in_millis,
"duration": duration_in_millis,
"file_count": file_count
}
def __repr__(self, *args, **kwargs):
return "wait-for-snapshot-create"
class RestoreSnapshot(Runner):
"""
Restores a snapshot from an already registered repository
"""
async def __call__(self, es, params):
api_kwargs = self._default_kw_params(params)
await es.snapshot.restore(repository=mandatory(params, "repository", repr(self)),
snapshot=mandatory(params, "snapshot", repr(self)),
wait_for_completion=params.get("wait-for-completion", False),
**api_kwargs)
def __repr__(self, *args, **kwargs):
return "restore-snapshot"
class IndicesRecovery(Runner):
async def __call__(self, es, params):
index = mandatory(params, "index", repr(self))
wait_period = params.get("completion-recheck-wait-period", 1)
all_shards_done = False
total_recovered = 0
total_start_millis = sys.maxsize
total_end_millis = 0
# wait until recovery is done
# The nesting level is ok here given the structure of the API response
# pylint: disable=too-many-nested-blocks
while not all_shards_done:
response = await es.indices.recovery(index=index)
# This might happen if we happen to call the API before the next recovery is scheduled.
if not response:
self.logger.debug("Empty index recovery response for [%s].", index)
else:
# check whether all shards are done
all_shards_done = True
total_recovered = 0
total_start_millis = sys.maxsize
total_end_millis = 0
for _, idx_data in response.items():
for _, shard_data in idx_data.items():
for shard in shard_data:
current_shard_done = shard["stage"] == "DONE"
all_shards_done = all_shards_done and current_shard_done
if current_shard_done:
total_start_millis = min(total_start_millis, shard["start_time_in_millis"])
total_end_millis = max(total_end_millis, shard["stop_time_in_millis"])
idx_size = shard["index"]["size"]
total_recovered += idx_size["recovered_in_bytes"]
self.logger.debug("All shards done for [%s]: [%s].", index, all_shards_done)
if not all_shards_done:
await asyncio.sleep(wait_period)
response_time_in_seconds = (total_end_millis - total_start_millis) / 1000
return {
"weight": total_recovered,
"unit": "byte",
"success": True,
"throughput": total_recovered / response_time_in_seconds,
"start_time_millis": total_start_millis,
"stop_time_millis": total_end_millis
}
def __repr__(self, *args, **kwargs):
return "wait-for-recovery"
class PutSettings(Runner):
"""
Updates cluster settings with the
    `cluster settings API <http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_.
"""
async def __call__(self, es, params):
await es.cluster.put_settings(body=mandatory(params, "body", repr(self)))
def __repr__(self, *args, **kwargs):
return "put-settings"
class CreateTransform(Runner):
"""
    Execute the `create transform API <https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html>`_.
"""
async def __call__(self, es, params):
transform_id = mandatory(params, "transform-id", self)
body = mandatory(params, "body", self)
defer_validation = params.get("defer-validation", False)
await es.transform.put_transform(transform_id=transform_id, body=body, defer_validation=defer_validation)
def __repr__(self, *args, **kwargs):
return "create-transform"
class StartTransform(Runner):
"""
    Execute the `start transform API
    <https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html>`_.
"""
async def __call__(self, es, params):
transform_id = mandatory(params, "transform-id", self)
timeout = params.get("timeout")
await es.transform.start_transform(transform_id=transform_id, timeout=timeout)
def __repr__(self, *args, **kwargs):
return "start-transform"
class WaitForTransform(Runner):
"""
Wait for the transform until it reaches a certain checkpoint.
"""
def __init__(self):
super().__init__()
self._completed = False
self._percent_completed = 0.0
self._start_time = None
self._last_documents_processed = 0
self._last_processing_time = 0
@property
def completed(self):
return self._completed
@property
def percent_completed(self):
return self._percent_completed
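    # Example params (hypothetical values, see the documented keys in __call__ below):
    # stop the transform gracefully and wait up to one hour for it to reach its next
    # checkpoint, polling stats every two seconds.
    #
    #     {
    #         "transform-id": "my-transform",
    #         "wait-for-checkpoint": True,
    #         "transform-timeout": 3600,
    #         "poll-interval": 2.0
    #     }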
async def __call__(self, es, params):
"""
        Stop the transform and wait until it has finished, then return stats.
:param es: The Elasticsearch client.
:param params: A hash with all parameters. See below for details.
:return: A hash with stats from the run.
        Unlike the `stop transform API
        <https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html>`_, this command will wait
        until the transform is stopped and a checkpoint has been reached.
It expects a parameter dict with the following mandatory keys:
* ``transform-id``: the transform id to start, the transform must have been created upfront.
The following keys are optional:
* ``force``: forcefully stop a transform, default false
* ``wait-for-checkpoint``: whether to wait until all data has been processed till the next checkpoint, default true
* ``wait-for-completion``: whether to block until the transform has stopped, default true
* ``transform-timeout``: overall runtime timeout of the transform in seconds, default 3600 (1h)
* ``poll-interval``: how often transform stats are polled, used to set progress and check the state, default 0.5.
"""
transform_id = mandatory(params, "transform-id", self)
force = params.get("force", False)
timeout = params.get("timeout")
wait_for_completion = params.get("wait-for-completion", True)
wait_for_checkpoint = params.get("wait-for-checkpoint", True)
transform_timeout = params.get("transform-timeout", 60.0 * 60.0)
poll_interval = params.get("poll-interval", 0.5)
if not self._start_time:
self._start_time = time.monotonic()
await es.transform.stop_transform(transform_id=transform_id,
force=force,
timeout=timeout,
wait_for_completion=False,
wait_for_checkpoint=wait_for_checkpoint)
while True:
stats_response = await es.transform.get_transform_stats(transform_id=transform_id)
state = stats_response["transforms"][0].get("state")
transform_stats = stats_response["transforms"][0].get("stats", {})
if (time.monotonic() - self._start_time) > transform_timeout:
raise exceptions.RallyAssertionError(
f"Transform [{transform_id}] timed out after [{transform_timeout}] seconds. "
"Please consider increasing the timeout in the track.")
if state == "failed":
failure_reason = stats_response["transforms"][0].get("reason", "unknown")
raise exceptions.RallyAssertionError(
f"Transform [{transform_id}] failed with [{failure_reason}].")
elif state == "stopped" or wait_for_completion is False:
self._completed = True
self._percent_completed = 1.0
else:
self._percent_completed = stats_response["transforms"][0].get("checkpointing", {}).get("next", {}).get(
"checkpoint_progress", {}).get("percent_complete", 0.0) / 100.0
documents_processed = transform_stats.get("documents_processed", 0)
processing_time = transform_stats.get("search_time_in_ms", 0)
processing_time += transform_stats.get("processing_time_in_ms", 0)
processing_time += transform_stats.get("index_time_in_ms", 0)
documents_processed_delta = documents_processed - self._last_documents_processed
processing_time_delta = processing_time - self._last_processing_time
# only report if we have enough data or transform has completed
if self._completed or (documents_processed_delta > 5000 and processing_time_delta > 500):
stats = {
"transform-id": transform_id,
"weight": transform_stats.get("documents_processed", 0),
"unit": "docs",
"success": True
}
throughput = 0
if self._completed:
# take the overall throughput
if processing_time > 0:
throughput = documents_processed / processing_time * 1000
elif processing_time_delta > 0:
throughput = documents_processed_delta / processing_time_delta * 1000
stats["throughput"] = throughput
self._last_documents_processed = documents_processed
self._last_processing_time = processing_time
return stats
else:
                # sleep for a while so that stats are not polled too often
await asyncio.sleep(poll_interval)
def __repr__(self, *args, **kwargs):
return "wait-for-transform"
class DeleteTransform(Runner):
"""
    Execute the `delete transform API
    <https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html>`_.
"""
async def __call__(self, es, params):
transform_id = mandatory(params, "transform-id", self)
force = params.get("force", False)
# we don't want to fail if a job does not exist, thus we ignore 404s.
await es.transform.delete_transform(transform_id=transform_id, force=force, ignore=[404])
def __repr__(self, *args, **kwargs):
return "delete-transform"
class SubmitAsyncSearch(Runner):
async def __call__(self, es, params):
request_params = params.get("request-params", {})
response = await es.async_search.submit(body=mandatory(params, "body", self),
index=params.get("index"),
params=request_params)
op_name = mandatory(params, "name", self)
# id may be None if the operation has already returned
search_id = response.get("id")
CompositeContext.put(op_name, search_id)
def __repr__(self, *args, **kwargs):
return "submit-async-search"
def async_search_ids(op_names):
subjects = [op_names] if isinstance(op_names, str) else op_names
for subject in subjects:
subject_id = CompositeContext.get(subject)
# skip empty ids, searches have already completed
if subject_id:
yield subject_id, subject
class GetAsyncSearch(Runner):
async def __call__(self, es, params):
success = True
searches = mandatory(params, "retrieve-results-for", self)
request_params = params.get("request-params", {})
stats = {}
for search_id, search in async_search_ids(searches):
response = await es.async_search.get(id=search_id,
params=request_params)
is_running = response["is_running"]
success = success and not is_running
if not is_running:
stats[search] = {
"hits": response["response"]["hits"]["total"]["value"],
"hits_relation": response["response"]["hits"]["total"]["relation"],
"timed_out": response["response"]["timed_out"],
"took": response["response"]["took"]
}
return {
# only count completed searches - there is one key per search id in `stats`
"weight": len(stats),
"unit": "ops",
"success": success,
"stats": stats
}
def __repr__(self, *args, **kwargs):
return "get-async-search"
class DeleteAsyncSearch(Runner):
async def __call__(self, es, params):
searches = mandatory(params, "delete-results-for", self)
for search_id, search in async_search_ids(searches):
await es.async_search.delete(id=search_id)
CompositeContext.remove(search)
def __repr__(self, *args, **kwargs):
return "delete-async-search"
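# The three async search runners above are designed to be chained inside a "composite"
# operation: "submit-async-search" stores the returned search id in the CompositeContext
# under the operation's ``name`` and the get/delete runners resolve that name back to the
# id. A sketch of such a request stream (index and body are illustrative only):
#
#     {"operation-type": "submit-async-search", "name": "search-1",
#      "index": "logs-*", "body": {"query": {"match_all": {}}}}
#     {"operation-type": "get-async-search", "retrieve-results-for": ["search-1"]}
#     {"operation-type": "delete-async-search", "delete-results-for": ["search-1"]}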
class CompositeContext:
ctx = contextvars.ContextVar("composite_context")
def __init__(self):
self.token = None
async def __aenter__(self):
self.token = CompositeContext.ctx.set({})
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
CompositeContext.ctx.reset(self.token)
return False
@staticmethod
def put(key, value):
CompositeContext._ctx()[key] = value
@staticmethod
def get(key):
try:
return CompositeContext._ctx()[key]
except KeyError:
raise KeyError(f"Unknown property [{key}]. Currently recognized "
f"properties are [{', '.join(CompositeContext._ctx().keys())}].") from None
@staticmethod
def remove(key):
try:
CompositeContext._ctx().pop(key)
except KeyError:
raise KeyError(f"Unknown property [{key}]. Currently recognized "
f"properties are [{', '.join(CompositeContext._ctx().keys())}].") from None
@staticmethod
def _ctx():
try:
return CompositeContext.ctx.get()
except LookupError:
raise exceptions.RallyAssertionError("This operation is only allowed inside a composite operation.") from None
class Composite(Runner):
"""
Executes a complex request structure which is measured by Rally as one composite operation.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.supported_op_types = [
"raw-request",
"sleep",
"search",
"submit-async-search",
"get-async-search",
"delete-async-search"
]
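    # Example "requests" structure (illustrative only): items inside a "stream" run
    # concurrently with other streams, while the items of a single stream run in order.
    # Only operation types listed in ``supported_op_types`` may appear.
    #
    #     "requests": [
    #         {"stream": [
    #             {"operation-type": "raw-request", "method": "GET", "path": "/"},
    #             {"operation-type": "sleep", "duration": 1}
    #         ]},
    #         {"stream": [
    #             {"operation-type": "raw-request", "method": "GET", "path": "/_cat/indices"}
    #         ]}
    #     ]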
async def run_stream(self, es, stream, connection_limit):
streams = []
try:
for item in stream:
if "stream" in item:
streams.append(asyncio.create_task(self.run_stream(es, item["stream"], connection_limit)))
elif "operation-type" in item:
# consume all prior streams first
if streams:
await asyncio.gather(*streams)
streams = []
op_type = item["operation-type"]
if op_type not in self.supported_op_types:
raise exceptions.RallyAssertionError(
f"Unsupported operation-type [{op_type}]. Use one of [{', '.join(self.supported_op_types)}].")
runner = runner_for(op_type)
async with connection_limit:
async with runner:
await runner({"default": es}, item)
else:
raise exceptions.RallyAssertionError("Requests structure must contain [stream] or [operation-type].")
except BaseException:
# stop all already created tasks in case of exceptions
for s in streams:
if not s.done():
s.cancel()
raise
# complete any outstanding streams
if streams:
await asyncio.gather(*streams)
async def __call__(self, es, params):
requests = mandatory(params, "requests", self)
max_connections = params.get("max-connections", sys.maxsize)
async with CompositeContext():
await self.run_stream(es, requests, asyncio.BoundedSemaphore(max_connections))
def __repr__(self, *args, **kwargs):
return "composite"
# TODO: Allow to use this from (selected) regular runners and add user documentation.
# TODO: It would maybe be interesting to add meta-data on how many retries there were.
class Retry(Runner, Delegator):
"""
This runner can be used as a wrapper around regular runners to retry operations.
It defines the following parameters:
* ``retries`` (optional, default 0): The number of times the operation is retried.
* ``retry-until-success`` (optional, default False): Retries until the delegate returns a success. This will also
forcibly set ``retry-on-error`` to ``True``.
* ``retry-wait-period`` (optional, default 0.5): The time in seconds to wait after an error.
* ``retry-on-timeout`` (optional, default True): Whether to retry on connection timeout.
* ``retry-on-error`` (optional, default False): Whether to retry on failure (i.e. the delegate
returns ``success == False``)
"""
def __init__(self, delegate, retry_until_success=False):
super().__init__(delegate=delegate)
self.retry_until_success = retry_until_success
async def __aenter__(self):
await self.delegate.__aenter__()
return self
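    # Example wiring, mirroring how ShrinkIndex uses this wrapper further above (values
    # are illustrative): retry a cluster health check until it reports success.
    #
    #     health = Retry(ClusterHealth())
    #     await health(es, params={
    #         "retry-until-success": True,
    #         "retry-wait-period": 1,
    #         "request-params": {"wait_for_status": "yellow"}
    #     })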
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
import socket
retry_until_success = params.get("retry-until-success", self.retry_until_success)
if retry_until_success:
max_attempts = sys.maxsize
retry_on_error = True
else:
max_attempts = params.get("retries", 0) + 1
retry_on_error = params.get("retry-on-error", False)
sleep_time = params.get("retry-wait-period", 0.5)
retry_on_timeout = params.get("retry-on-timeout", True)
for attempt in range(max_attempts):
last_attempt = attempt + 1 == max_attempts
try:
return_value = await self.delegate(es, params)
if last_attempt or not retry_on_error:
return return_value
# we can determine success if and only if the runner returns a dict. Otherwise, we have to assume it was fine.
elif isinstance(return_value, dict):
if return_value.get("success", True):
self.logger.debug("%s has returned successfully", repr(self.delegate))
return return_value
else:
self.logger.debug("%s has returned with an error: %s.", repr(self.delegate), return_value)
await asyncio.sleep(sleep_time)
else:
return return_value
except (socket.timeout, elasticsearch.exceptions.ConnectionError):
if last_attempt or not retry_on_timeout:
raise
else:
await asyncio.sleep(sleep_time)
except elasticsearch.exceptions.TransportError as e:
if last_attempt or not retry_on_timeout:
raise e
elif e.status_code == 408:
self.logger.debug("%s has timed out.", repr(self.delegate))
await asyncio.sleep(sleep_time)
else:
raise e
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.delegate.__aexit__(exc_type, exc_val, exc_tb)
def __repr__(self, *args, **kwargs):
return "retryable %s" % repr(self.delegate)
the-stack_0_17786 | """
Support for Insteon fans via local hub control.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/fan.insteon_local/
"""
import logging
from datetime import timedelta
from homeassistant.components.fan import (
ATTR_SPEED, SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH,
SUPPORT_SET_SPEED, FanEntity)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant import util
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['insteon_local']
DOMAIN = 'fan'
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
SUPPORT_INSTEON_LOCAL = SUPPORT_SET_SPEED
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Insteon local fan platform."""
insteonhub = hass.data['insteon_local']
if discovery_info is None:
return
linked = discovery_info['linked']
device_list = []
for device_id in linked:
if (linked[device_id]['cat_type'] == 'dimmer' and
linked[device_id]['sku'] == '2475F'):
device = insteonhub.fan(device_id)
device_list.append(
InsteonLocalFanDevice(device)
)
add_devices(device_list)
class InsteonLocalFanDevice(FanEntity):
"""An abstract Class for an Insteon node."""
def __init__(self, node):
"""Initialize the device."""
self.node = node
self._speed = SPEED_OFF
@property
def name(self):
"""Return the name of the node."""
return self.node.device_id
@property
def unique_id(self):
"""Return the ID of this Insteon node."""
return self.node.device_id
@property
def speed(self) -> str:
"""Return the current speed."""
return self._speed
@property
def speed_list(self: ToggleEntity) -> list:
"""Get the list of available speeds."""
return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update(self):
"""Update state of the fan."""
resp = self.node.status()
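        # The hub reports the fan level in 'cmd2' as a hex string:
        # '00' = off, '55' = low, 'AA' = medium, 'FF' = high.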
if 'cmd2' in resp:
if resp['cmd2'] == '00':
self._speed = SPEED_OFF
elif resp['cmd2'] == '55':
self._speed = SPEED_LOW
elif resp['cmd2'] == 'AA':
self._speed = SPEED_MEDIUM
elif resp['cmd2'] == 'FF':
self._speed = SPEED_HIGH
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_INSTEON_LOCAL
def turn_on(self: ToggleEntity, speed: str = None, **kwargs) -> None:
"""Turn device on."""
if speed is None:
speed = kwargs.get(ATTR_SPEED, SPEED_MEDIUM)
self.set_speed(speed)
def turn_off(self: ToggleEntity, **kwargs) -> None:
"""Turn device off."""
self.node.off()
def set_speed(self: ToggleEntity, speed: str) -> None:
"""Set the speed of the fan."""
if self.node.on(speed):
self._speed = speed
the-stack_0_17787 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, wildcard-import
"""A global module storing everything needed to interpret or compile a Relay program."""
from .base import register_relay_node, RelayNode
from .._ffi import base as _base
from . import _make
from . import _module
from . import expr as _expr
from . import ty as _ty
@register_relay_node
class Module(RelayNode):
"""The global Relay module containing collection of functions.
    Each global function is identified by a unique tvm.relay.GlobalVar.
    tvm.relay.GlobalVar and Module are necessary in order to enable
    recursion in functions while avoiding cyclic references between them.
Parameters
----------
functions: Optional[dict].
Map of global var to Function
"""
def __init__(self, functions=None, type_definitions=None):
if functions is None:
functions = {}
elif isinstance(functions, dict):
mapped_funcs = {}
for k, v in functions.items():
if isinstance(k, _base.string_types):
k = _expr.GlobalVar(k)
if not isinstance(k, _expr.GlobalVar):
raise TypeError("Expect functions to be Dict[GlobalVar, Function]")
mapped_funcs[k] = v
functions = mapped_funcs
if type_definitions is None:
type_definitions = {}
elif isinstance(type_definitions, dict):
mapped_type_defs = {}
for k, v in type_definitions.items():
if isinstance(k, _base.string_types):
k = _ty.GlobalTypeVar(k)
if not isinstance(k, _ty.GlobalTypeVar):
raise TypeError("Expect type_definitions to be Dict[GlobalTypeVar, Type]")
mapped_type_defs[k] = v
type_definitions = mapped_type_defs
self.__init_handle_by_constructor__(_make.Module, functions, type_definitions)
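    # Illustrative usage (a sketch, not part of the original source; ``some_relay_expr``
    # is a placeholder for any Relay expression):
    #
    #     main = _expr.GlobalVar("main")
    #     func = _expr.Function([], some_relay_expr)
    #     mod = Module({main: func})
    #     mod["main"]   # look up the function by name
    #     mod[main]     # or by its GlobalVar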
def __setitem__(self, var, val):
"""Add a mapping to the module.
Parameters
        ----------
var: GlobalVar
The global variable.
val: Union[Function, Type]
The value.
"""
return self._add(var, val)
def _add(self, var, val, update=False):
if isinstance(val, _expr.Expr):
if isinstance(var, _base.string_types):
var = _expr.GlobalVar(var)
# TODO(@jroesch): Port this logic to C++.
if not isinstance(val, _expr.Function):
if isinstance(val, _expr.GlobalVar):
                    # import locally to avoid pulling in the pass machinery at module load time
                    from . import ir_pass
                    val = ir_pass.eta_expand(val, self)
else:
val = _expr.Function([], val)
_make.Module_Add(self, var, val, update)
else:
assert isinstance(val, _ty.Type)
if isinstance(var, _base.string_types):
var = _ty.GlobalTypeVar(var)
_module.Module_AddDef(self, var, val)
def __getitem__(self, var):
"""Lookup a global definition by name or by variable.
Parameters
----------
var: Union[String, GlobalVar, GlobalTypeVar]
The name or global variable.
Returns
-------
val: Union[Function, Type]
The definition referenced by :code:`var` (either a function or type).
"""
if isinstance(var, _base.string_types):
return _module.Module_Lookup_str(self, var)
elif isinstance(var, _expr.GlobalVar):
return _module.Module_Lookup(self, var)
else:
return _module.Module_LookupDef(self, var)
def update(self, other):
"""Insert functions in another Module to current one.
Parameters
----------
other: Module
The module to merge into the current Module.
"""
if isinstance(other, dict):
other = Module(other)
return _module.Module_Update(self, other)
def get_global_var(self, name):
"""Get a global variable in the function by name.
Parameters
----------
name: str
The name of the global variable.
Returns
-------
global_var: GlobalVar
The global variable mapped to :code:`name`.
Raises
------
tvm.TVMError if we cannot find corresponding global var.
"""
return _module.Module_GetGlobalVar(self, name)
def get_global_type_var(self, name):
"""Get a global type variable in the function by name.
Parameters
----------
name: str
The name of the global type variable.
Returns
-------
global_type_var: GlobalTypeVar
The global variable mapped to :code:`name`.
Raises
------
tvm.TVMError if we cannot find corresponding global type var.
"""
return _module.Module_GetGlobalTypeVar(self, name)
@staticmethod
def from_expr(expr):
return _module.Module_FromExpr(expr)
the-stack_0_17788 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import densepose_ops
from object_detection.core import keypoint_ops
from object_detection.core import matcher as mat
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import standard_fields as fields
from object_detection.matchers import argmax_matcher
from object_detection.matchers import hungarian_matcher
from object_detection.utils import shape_utils
from object_detection.utils import target_assigner_utils as ta_utils
from object_detection.utils import tf_version
if tf_version.is_tf1():
from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top
ResizeMethod = tf2.image.ResizeMethod
_DEFAULT_KEYPOINT_OFFSET_STD_DEV = 1.0
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self,
similarity_calc,
matcher,
box_coder_instance,
negative_class_weight=1.0):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: an object_detection.core.Matcher used to match groundtruth to
anchors.
box_coder_instance: an object_detection.core.BoxCoder used to encode
matching groundtruth boxes with respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator):
raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
if not isinstance(matcher, mat.Matcher):
raise ValueError('matcher must be a Matcher')
if not isinstance(box_coder_instance, box_coder.BoxCoder):
raise ValueError('box_coder must be a BoxCoder')
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder_instance
self._negative_class_weight = negative_class_weight
@property
def box_coder(self):
return self._box_coder
# TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields.
def assign(self,
anchors,
groundtruth_boxes,
groundtruth_labels=None,
unmatched_class_label=None,
groundtruth_weights=None):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1. Generally no
groundtruth boxes with zero weight match to any anchors as matchers are
aware of groundtruth weights. Additionally, `cls_weights` and
`reg_weights` are calculated using groundtruth weights as an added
safety.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: an int32 tensor of shape [num_anchors] containing result of anchor
groundtruth matching. Each position in the tensor indicates an anchor
and holds the following meaning:
(1) if match[i] >= 0, anchor i is matched with groundtruth match[i].
(2) if match[i]=-1, anchor i is marked to be background.
(3) if match[i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
raise ValueError('anchors must be an BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
raise ValueError('groundtruth_boxes must be an BoxList')
if unmatched_class_label is None:
unmatched_class_label = tf.constant([0], tf.float32)
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(unmatched_class_label))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(
groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
# set scores on the gt boxes
scores = 1 - groundtruth_labels[:, 0]
groundtruth_boxes.add_field(fields.BoxListFields.scores, scores)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
anchors)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(groundtruth_weights, 0))
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
unmatched_class_label,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
# convert cls_weights from per-anchor to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_shape = tf.shape(cls_weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), class_label_shape],
axis=0)
for _ in range(len(cls_targets.get_shape()[1:])):
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return (cls_targets, cls_weights, reg_targets, reg_weights,
match.match_results)
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(fields.BoxListFields.keypoints):
groundtruth_keypoints = groundtruth_boxes.get_field(
fields.BoxListFields.keypoints)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(fields.BoxListFields.keypoints,
matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size*[0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels,
unmatched_class_label, match):
"""Create classification targets for each anchor.
Assign a classification target for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
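# Example sketch (editorial addition, not part of the library): assigning
# targets for a single image with two anchors and one groundtruth box. The
# coordinates and the one-hot label are made-up values.
#
#   similarity = sim_calc.IouSimilarity()
#   matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5)
#   coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
#   assigner = TargetAssigner(similarity, matcher, coder)
#   anchors = box_list.BoxList(tf.constant([[0., 0., .5, .5], [.5, .5, 1., 1.]]))
#   gt = box_list.BoxList(tf.constant([[0., 0., .4, .4]]))
#   labels = tf.constant([[0., 1.]])      # one-hot over two classes
#   cls_t, cls_w, reg_t, reg_w, match = assigner.assign(anchors, gt, labels)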
# TODO(rathodv): This method pulls in all the implementation dependencies into
# core. Therefore its best to have this factory method outside of core.
def create_target_assigner(reference, stage=None,
negative_class_weight=1.0, use_matmul_gather=False):
"""Factory function for creating standard target assigners.
Args:
reference: string referencing the type of TargetAssigner.
stage: string denoting stage: {proposal, detection}.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0)
use_matmul_gather: whether to use matrix multiplication based gather which
are better suited for TPUs.
Returns:
TargetAssigner: desired target assigner.
Raises:
ValueError: if combination reference+stage is invalid.
"""
if reference == 'Multibox' and stage == 'proposal':
if tf_version.is_tf2():
raise ValueError('GreedyBipartiteMatcher is not supported in TF 2.X.')
similarity_calc = sim_calc.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder_instance = mean_stddev_box_coder.MeanStddevBoxCoder()
elif reference == 'FasterRCNN' and stage == 'proposal':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7,
unmatched_threshold=0.3,
force_match_for_each_row=True,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FasterRCNN' and stage == 'detection':
similarity_calc = sim_calc.IouSimilarity()
# Uses all proposals with IOU < 0.5 as candidate negatives.
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
negatives_lower_than_unmatched=True,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FastRCNN':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.1,
force_match_for_each_row=False,
negatives_lower_than_unmatched=False,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder()
else:
raise ValueError('No valid combination of reference and stage.')
return TargetAssigner(similarity_calc, matcher, box_coder_instance,
negative_class_weight=negative_class_weight)
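# Example sketch (editorial addition): the factory above is normally driven by
# the pipeline config, but a hypothetical direct call for the two Faster R-CNN
# stages would look like this.
#
#   proposal_assigner = create_target_assigner('FasterRCNN', stage='proposal')
#   detection_assigner = create_target_assigner('FasterRCNN', stage='detection',
#                                               negative_class_weight=1.0)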
def batch_assign(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_targets_batch,
unmatched_class_label=None,
gt_weights_batch=None):
"""Batched assignment of classification and regression targets.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match: an int32 tensor of shape [batch_size, num_anchors] containing result
of anchor groundtruth matching. Each position in the tensor indicates an
anchor and holds the following meaning:
(1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].
(2) if match[x, i]=-1, anchor i is marked to be background.
(3) if match[x, i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_targets_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_targets_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
for anchors, gt_boxes, gt_class_targets, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch):
(cls_targets, cls_weights,
reg_targets, reg_weights, match) = target_assigner.assign(
anchors, gt_boxes, gt_class_targets, unmatched_class_label,
gt_weights)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
batch_match = tf.stack(match_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match)
# Assign an alias to avoid large refactor of existing users.
batch_assign_targets = batch_assign
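# Example sketch (editorial addition): batching the per-image assign() call for
# two images that share a single anchor grid. The groundtruth tensors are
# placeholders.
#
#   (cls_t, cls_w, reg_t, reg_w, match) = batch_assign(
#       assigner,                        # a TargetAssigner as constructed above
#       anchors,                         # one BoxList shared across the batch
#       [gt_boxes_img0, gt_boxes_img1],  # one BoxList per image
#       [gt_classes_img0, gt_classes_img1],
#       unmatched_class_label=tf.constant([1., 0.]))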
def batch_get_targets(batch_match, groundtruth_tensor_list,
groundtruth_weights_list, unmatched_value,
unmatched_weight):
"""Returns targets based on anchor-groundtruth box matching results.
Args:
batch_match: An int32 tensor of shape [batch, num_anchors] containing the
result of target assignment returned by TargetAssigner.assign(..).
groundtruth_tensor_list: A list of groundtruth tensors of shape
[num_groundtruth, d_1, d_2, ..., d_k]. The tensors can be of any type.
groundtruth_weights_list: A list of weights, one per groundtruth tensor, of
shape [num_groundtruth].
unmatched_value: A tensor of shape [d_1, d_2, ..., d_k] of the same type as
groundtruth tensor containing target value for anchors that remain
unmatched.
unmatched_weight: Scalar weight to assign to anchors that remain unmatched.
Returns:
targets: A tensor of shape [batch, num_anchors, d_1, d_2, ..., d_k]
containing targets for anchors.
weights: A float tensor of shape [batch, num_anchors] containing the weights
to assign to each target.
"""
match_list = tf.unstack(batch_match)
targets_list = []
weights_list = []
for match_tensor, groundtruth_tensor, groundtruth_weight in zip(
match_list, groundtruth_tensor_list, groundtruth_weights_list):
match_object = mat.Match(match_tensor)
targets = match_object.gather_based_on_match(
groundtruth_tensor,
unmatched_value=unmatched_value,
ignored_value=unmatched_value)
targets_list.append(targets)
weights = match_object.gather_based_on_match(
groundtruth_weight,
unmatched_value=unmatched_weight,
ignored_value=tf.zeros_like(unmatched_weight))
weights_list.append(weights)
return tf.stack(targets_list), tf.stack(weights_list)
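# Example sketch (editorial addition): reusing a previously computed match to
# gather per-anchor targets out of per-groundtruth tensors. K below is a
# hypothetical number of keypoints; all tensors are placeholders.
#
#   targets, weights = batch_get_targets(
#       batch_match=match,                      # [batch, num_anchors]
#       groundtruth_tensor_list=gt_keypoints,   # list of [num_gt, K, 2] tensors
#       groundtruth_weights_list=gt_weights,    # list of [num_gt] tensors
#       unmatched_value=tf.zeros([K, 2]),
#       unmatched_weight=tf.constant(0.0))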
def batch_assign_confidences(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_confidences_batch,
gt_weights_batch=None,
unmatched_class_label=None,
include_background_class=True,
implicit_class_weight=1.0):
"""Batched assignment of classification and regression targets.
The differences between batch_assign_confidences and batch_assign_targets are:
- 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and
tensor (high-dimensional) targets. 'batch_assign_confidences' only supports
scalar (agnostic) and vector (multiclass) targets.
- 'batch_assign_targets' assumes the input class tensor uses the binary
one/K-hot encoding. 'batch_assign_confidences' takes the class confidence
scores as the input, where 1 means positive classes, 0 means implicit
negative classes, and -1 means explicit negative classes.
- 'batch_assign_confidences' assigns the targets in a similar way as
'batch_assign_targets' except that it gives different weights for implicit
and explicit classes. This allows the user to control how negative gradients
are pushed differently for implicit and explicit examples during training.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_confidences_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch. Note that in this tensor, 1 means explicit positive class,
-1 means explicit negative class, and 0 means implicit negative class.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_gt_boxes_i] containing weights for groundtruth boxes.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
include_background_class: whether or not gt_class_confidences_batch includes
the background class.
implicit_class_weight: the weight assigned to implicit examples.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match: an int32 tensor of shape [batch_size, num_anchors] containing result
of anchor groundtruth matching. Each position in the tensor indicates an
anchor and holds the following meaning:
(1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].
(2) if match[x, i]=-1, anchor i is marked to be background.
(3) if match[x, i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList, or if any element in gt_class_confidences_batch has rank > 2.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_confidences_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_confidences_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_confidences_batch)
for anchors, gt_boxes, gt_class_confidences, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_confidences_batch,
gt_weights_batch):
if (gt_class_confidences is not None and
len(gt_class_confidences.get_shape().as_list()) > 2):
raise ValueError('The shape of the class target is not supported. ',
gt_class_confidences.get_shape())
cls_targets, _, reg_targets, _, match = target_assigner.assign(
anchors, gt_boxes, gt_class_confidences, unmatched_class_label,
groundtruth_weights=gt_weights)
if include_background_class:
cls_targets_without_background = tf.slice(
cls_targets, [0, 1], [-1, -1])
else:
cls_targets_without_background = cls_targets
positive_mask = tf.greater(cls_targets_without_background, 0.0)
negative_mask = tf.less(cls_targets_without_background, 0.0)
explicit_example_mask = tf.logical_or(positive_mask, negative_mask)
positive_anchors = tf.reduce_any(positive_mask, axis=-1)
regression_weights = tf.cast(positive_anchors, dtype=tf.float32)
regression_targets = (
reg_targets * tf.expand_dims(regression_weights, axis=-1))
regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1)
cls_targets_without_background = (
cls_targets_without_background *
(1 - tf.cast(negative_mask, dtype=tf.float32)))
cls_weights_without_background = ((1 - implicit_class_weight) * tf.cast(
explicit_example_mask, dtype=tf.float32) + implicit_class_weight)
if include_background_class:
cls_weights_background = (
(1 - implicit_class_weight) * regression_weights_expanded
+ implicit_class_weight)
classification_weights = tf.concat(
[cls_weights_background, cls_weights_without_background], axis=-1)
cls_targets_background = 1 - regression_weights_expanded
classification_targets = tf.concat(
[cls_targets_background, cls_targets_without_background], axis=-1)
else:
classification_targets = cls_targets_without_background
classification_weights = cls_weights_without_background
cls_targets_list.append(classification_targets)
cls_weights_list.append(classification_weights)
reg_targets_list.append(regression_targets)
reg_weights_list.append(regression_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
batch_match = tf.stack(match_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match)
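# Example sketch (editorial addition): calling the confidence-based variant with
# the 1 / 0 / -1 encoding described in the docstring. All tensors are
# placeholders.
#
#   # One groundtruth row over [background, class_1, class_2]: class_1 is an
#   # explicit positive, class_2 an implicit negative.
#   gt_conf = [tf.constant([[0., 1., 0.]])]
#   outputs = batch_assign_confidences(
#       assigner, anchors, [gt_boxes_img0], gt_conf,
#       unmatched_class_label=tf.constant([1., 0., 0.]),
#       implicit_class_weight=0.5)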
def _smallest_positive_root(a, b, c):
"""Returns the smallest positive root of a quadratic equation."""
discriminant = tf.sqrt(b ** 2 - 4 * a * c)
# TODO(vighneshb) We are currently using the slightly incorrect
# CenterNet implementation. The commented lines implement the fixed version
# in https://github.com/princeton-vl/CornerNet. Change the implementation
# after verifying it has no negative impact.
# root1 = (-b - discriminant) / (2 * a)
# root2 = (-b + discriminant) / (2 * a)
# return tf.where(tf.less(root1, 0), root2, root1)
return (-b + discriminant) / (2.0)
def max_distance_for_overlap(height, width, min_iou):
"""Computes how far apart bbox corners can lie while maintaining the iou.
Given a bounding box size, this function returns a lower bound on how far
apart the corners of another box can lie while still maintaining the given
IoU. The implementation is based on the `gaussian_radius` function in the
Objects as Points github repo: https://github.com/xingyizhou/CenterNet
Args:
height: A 1-D float Tensor representing height of the ground truth boxes.
width: A 1-D float Tensor representing width of the ground truth boxes.
min_iou: A float representing the minimum IoU desired.
Returns:
distance: A 1-D Tensor of distances, of the same length as the input
height and width tensors.
"""
# Given that the detected box is displaced at a distance `d`, the exact
# IoU value will depend on the angle at which each corner is displaced.
# We simplify our computation by assuming that each corner is displaced by
# a distance `d` in both x and y direction. This gives us a lower IoU than
# what is actually realizable and ensures that any box with corners less
# than `d` distance apart will always have an IoU greater than or equal
# to `min_iou`
# The following 3 cases can be worked on geometrically and come down to
# solving a quadratic inequality. In each case, to ensure `min_iou` we use
# the smallest positive root of the equation.
# Case where detected box is offset from ground truth and no box completely
# contains the other.
distance_detection_offset = _smallest_positive_root(
a=1, b=-(height + width),
c=width * height * ((1 - min_iou) / (1 + min_iou))
)
# Case where detection is smaller than ground truth and completely contained
# in it.
distance_detection_in_gt = _smallest_positive_root(
a=4, b=-2 * (height + width),
c=(1 - min_iou) * width * height
)
# Case where ground truth is smaller than detection and completely contained
# in it.
distance_gt_in_detection = _smallest_positive_root(
a=4 * min_iou, b=(2 * min_iou) * (width + height),
c=(min_iou - 1) * width * height
)
return tf.reduce_min([distance_detection_offset,
distance_gt_in_detection,
distance_detection_in_gt], axis=0)
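# Example sketch (editorial addition): computing the allowed center displacement
# for two groundtruth boxes. The box sizes are illustrative; the result is a 1-D
# tensor with one radius (in output pixels) per box.
#
#   radius = max_distance_for_overlap(
#       height=tf.constant([10., 32.]),
#       width=tf.constant([10., 16.]),
#       min_iou=0.7)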
def get_batch_predictions_from_indices(batch_predictions, indices):
"""Gets the values of predictions in a batch at the given indices.
The indices are expected to come from the offset targets generation functions
in this library. The returned value is intended to be used inside a loss
function.
Args:
batch_predictions: A tensor of shape [batch_size, height, width, channels]
or [batch_size, height, width, class, channels] for class-specific
features (e.g. keypoint joint offsets).
indices: A tensor of shape [num_instances, 3] for single class features or
[num_instances, 4] for multiple classes features.
Returns:
values: A tensor of shape [num_instances, channels] holding the predicted
values at the given indices.
"""
# Note, gather_nd (and its gradient scatter_nd) runs significantly slower (on
# TPU) than gather with flattened inputs, so reshape the tensor, flatten the
# indices, and run gather.
shape = shape_utils.combined_static_and_dynamic_shape(batch_predictions)
# [B, H, W, C] -> [H*W, W, 1] or [B, H, W, N, C] -> [H*W*N, W*N, N, 1]
rev_cum_interior_indices = tf.reverse(tf.math.cumprod(shape[-2:0:-1]), [0])
rev_cum_interior_indices = tf.concat([rev_cum_interior_indices, [1]], axis=0)
# Compute flattened indices and gather.
flattened_inds = tf.linalg.matmul(
indices, rev_cum_interior_indices[:, tf.newaxis])[:, 0]
batch_predictions_2d = tf.reshape(batch_predictions, [-1, shape[-1]])
return tf.gather(batch_predictions_2d, flattened_inds, axis=0)
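# Example sketch (editorial addition): gathering center predictions at the
# (batch, y, x) indices produced by the target assigners below. Shapes are
# illustrative.
#
#   preds = tf.random.normal([2, 128, 128, 2])            # [B, H, W, channels]
#   inds = tf.constant([[0, 10, 17], [1, 63, 4]])         # [num_instances, 3]
#   values = get_batch_predictions_from_indices(preds, inds)   # [2, channels]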
def _compute_std_dev_from_box_size(boxes_height, boxes_width, min_overlap):
"""Computes the standard deviation of the Gaussian kernel from box size.
Args:
boxes_height: A 1D tensor with shape [num_instances] representing the height
of each box.
boxes_width: A 1D tensor with shape [num_instances] representing the width
of each box.
min_overlap: The minimum IOU overlap that boxes need to have to not be
penalized.
Returns:
A 1D tensor with shape [num_instances] representing the computed Gaussian
sigma for each of the box.
"""
# We are dividing by 3 so that points closer than the computed
# distance have a >99% CDF.
sigma = max_distance_for_overlap(boxes_height, boxes_width, min_overlap)
sigma = (2 * tf.math.maximum(tf.math.floor(sigma), 0.0) + 1) / 6.0
return sigma
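# Example sketch (editorial addition): turning box sizes into Gaussian sigmas
# for the center heatmap; the radius from max_distance_for_overlap is mapped to
# sigma via (2 * floor(radius) + 1) / 6 so that roughly 3 sigma spans the
# radius. The sizes below are placeholders.
#
#   sigma = _compute_std_dev_from_box_size(
#       boxes_height=tf.constant([24., 96.]),
#       boxes_width=tf.constant([24., 48.]),
#       min_overlap=0.7)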
def _preprocess_keypoints_and_weights(out_height, out_width, keypoints,
class_onehot, class_weights,
keypoint_weights, class_id,
keypoint_indices):
"""Preprocesses the keypoints and the corresponding keypoint weights.
This function performs several common steps to preprocess the keypoints and
keypoint weights features, including:
1) Select the subset of keypoints based on the keypoint indices, fill the
keypoint NaN values with zeros and convert to absolute coordinates.
2) Generate the weights of the keypoint using the following information:
a. The class of the instance.
b. The NaN value of the keypoint coordinates.
c. The provided keypoint weights.
Args:
out_height: An integer or an integer tensor indicating the output height
of the model.
out_width: An integer or an integer tensor indicating the output width of
the model.
keypoints: A float tensor of shape [num_instances, num_total_keypoints, 2]
representing the original keypoint grountruth coordinates.
class_onehot: A float tensor of shape [num_instances, num_classes]
containing the class targets with the 0th index assumed to map to the
first non-background class.
class_weights: A float tensor of shape [num_instances] containing weights
for groundtruth instances.
keypoint_weights: A float tensor of shape
[num_instances, num_total_keypoints] representing the weights of each
keypoints.
class_id: int, the ID of the class (0-indexed) that contains the target
keypoints to consider in this task.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints that should be considered in this task.
Returns:
A tuple of two tensors:
keypoints_absolute: A float tensor of shape
[num_instances, num_keypoints, 2] which is the selected and updated
keypoint coordinates.
keypoint_weights: A float tensor of shape [num_instances, num_keypoints]
representing the updated weight of each keypoint.
"""
# Select the targets keypoints by their type ids and generate the mask
# of valid elements.
valid_mask, keypoints = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoints,
class_id=class_id,
class_onehot=class_onehot,
class_weights=class_weights,
keypoint_indices=keypoint_indices)
# Keypoint coordinates in absolute coordinate system.
# The shape of the tensors: [num_instances, num_keypoints, 2].
keypoints_absolute = keypoint_ops.to_absolute_coordinates(
keypoints, out_height, out_width)
# Assign default weights for the keypoints.
if keypoint_weights is None:
keypoint_weights = tf.ones_like(keypoints[:, :, 0])
else:
keypoint_weights = tf.gather(
keypoint_weights, indices=keypoint_indices, axis=1)
keypoint_weights = keypoint_weights * valid_mask
return keypoints_absolute, keypoint_weights
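# Example sketch (editorial addition): the helper above is shared by the
# keypoint target assigners below. A hypothetical call for a 17-keypoint
# "person" class (class id 0), restricted to the first two keypoints:
#
#   kp_abs, kp_w = _preprocess_keypoints_and_weights(
#       out_height=128, out_width=128,
#       keypoints=gt_keypoints,          # [num_instances, 17, 2], normalized
#       class_onehot=gt_classes,         # [num_instances, num_classes]
#       class_weights=gt_weights,        # [num_instances]
#       keypoint_weights=None,
#       class_id=0,
#       keypoint_indices=[0, 1])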
class CenterNetCenterHeatmapTargetAssigner(object):
"""Wrapper to compute the object center heatmap."""
def __init__(self,
stride,
min_overlap=0.7,
compute_heatmap_sparse=False,
keypoint_class_id=None,
keypoint_indices=None,
keypoint_weights_for_center=None,
box_heatmap_type='adaptive_gaussian',
heatmap_exponent=1.0):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
min_overlap: The minimum IOU overlap that boxes need to have to not be
penalized.
compute_heatmap_sparse: bool, indicating whether or not to use the sparse
version of the Op that computes the heatmap. The sparse version scales
better with the number of classes, but in some cases is known to cause an
OOM error. See (b/170989061).
keypoint_class_id: int, the ID of the class (0-indexed) that contains the
target keypoints to consider in this task.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
keypoint_weights_for_center: The keypoint weights used for calculating the
location of object center. The number of weights need to be the same as
the number of keypoints. The object center is calculated by the weighted
mean of the keypoint locations. If not provided, the object center is
determined by the center of the bounding box (default behavior).
box_heatmap_type: str, the algorithm used to compute the box heatmap,
used when calling the assign_center_targets_from_boxes method.
Options are:
'adaptive_gaussian': A box-size adaptive Gaussian from the original
paper[1].
'iou': IOU based heatmap target where each point is assigned an IOU
based on its location, assuming that it produced a box centered at
that point with the correct size.
heatmap_exponent: float, The generated heatmap is exponentiated with
this number. A number > 1 will result in the heatmap being more peaky
and a number < 1 will cause the heatmap to be more spreadout.
"""
self._stride = stride
self._min_overlap = min_overlap
self._compute_heatmap_sparse = compute_heatmap_sparse
self._keypoint_class_id = keypoint_class_id
self._keypoint_indices = keypoint_indices
self._keypoint_weights_for_center = keypoint_weights_for_center
self._box_heatmap_type = box_heatmap_type
self._heatmap_exponent = heatmap_exponent
def assign_center_targets_from_boxes(self,
height,
width,
gt_boxes_list,
gt_classes_list,
gt_weights_list=None,
maximum_normalized_coordinate=1.1):
"""Computes the object center heatmap target.
Args:
height: int, height of input to the model. This is used to
determine the height of the output.
width: int, width of the input to the model. This is used to
determine the width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The box coordinates are expected in normalized coordinates.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
gt_weights_list: A list of float tensors with shape [num_boxes]
representing the weight of each groundtruth detection box.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1. This is used to check bounds during
converting normalized coordinates to absolute coordinates.
Returns:
heatmap: A Tensor of size [batch_size, output_height, output_width,
num_classes] representing the per class center heatmap. output_height
and output_width are computed by dividing the input height and width by
the stride specified during initialization.
"""
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
(y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width)
heatmaps = []
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
# TODO(vighneshb) Replace the for loop with a batch version.
for boxes, class_targets, weights in zip(gt_boxes_list, gt_classes_list,
gt_weights_list):
boxes = box_list.BoxList(boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1),
maximum_normalized_coordinate=maximum_normalized_coordinate)
# Get the box center coordinates. Each returned tensors have the shape of
# [num_instances]
(y_center, x_center, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
# Compute the sigma from box size. The tensor shape: [num_instances].
sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width,
self._min_overlap)
# Apply the Gaussian kernel to the center coordinates. Returned heatmap
# has shape of [out_height, out_width, num_classes]
if self._box_heatmap_type == 'adaptive_gaussian':
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=y_center,
x_coordinates=x_center,
sigma=sigma,
channel_onehot=class_targets,
channel_weights=weights,
sparse=self._compute_heatmap_sparse)
elif self._box_heatmap_type == 'iou':
heatmap = ta_utils.coordinates_to_iou(y_grid, x_grid, boxes,
class_targets, weights)
else:
raise ValueError(f'Unknown heatmap type - {self._box_heatmap_type}')
heatmaps.append(heatmap)
# Return the stacked heatmaps over the batch.
stacked_heatmaps = tf.stack(heatmaps, axis=0)
return (tf.pow(stacked_heatmaps, self._heatmap_exponent) if
self._heatmap_exponent != 1.0 else stacked_heatmaps)
def assign_center_targets_from_keypoints(self,
height,
width,
gt_classes_list,
gt_keypoints_list,
gt_weights_list=None,
gt_keypoints_weights_list=None):
"""Computes the object center heatmap target using keypoint locations.
Args:
height: int, height of input to the model. This is used to
determine the height of the output.
width: int, width of the input to the model. This is used to
determine the width of the output.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
gt_keypoints_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The box coordinates are expected in normalized coordinates.
gt_weights_list: A list of float tensors with shape [num_boxes]
representing the weight of each groundtruth detection box.
gt_keypoints_weights_list: [Optional] a list of 3D tf.float32 tensors of
shape [num_instances, num_total_keypoints] representing the weights of
each keypoints. If not provided, then all not NaN keypoints will be
equally weighted.
Returns:
heatmap: A Tensor of size [batch_size, output_height, output_width,
num_classes] representing the per class center heatmap. output_height
and output_width are computed by dividing the input height and width by
the stride specified during initialization.
"""
assert (self._keypoint_weights_for_center is not None and
self._keypoint_class_id is not None and
self._keypoint_indices is not None)
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
(y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width)
heatmaps = []
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
for keypoints, classes, kp_weights, weights in zip(
gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=out_height,
out_width=out_width,
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._keypoint_class_id,
keypoint_indices=self._keypoint_indices)
# _, num_keypoints, _ = (
# shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# Update the keypoint weights by the specified keypoints weights.
kp_loc_weights = tf.constant(
self._keypoint_weights_for_center, dtype=tf.float32)
updated_kp_weights = kp_weights * kp_loc_weights[tf.newaxis, :]
# Obtain the sum of the weights for each instance.
# instance_weight_sum has shape: [num_instance].
instance_weight_sum = tf.reduce_sum(updated_kp_weights, axis=1)
# Weight the keypoint coordinates by updated_kp_weights.
# weighted_keypoints has shape: [num_instance, num_keypoints, 2]
weighted_keypoints = keypoints_absolute * tf.expand_dims(
updated_kp_weights, axis=2)
# Compute the mean of the keypoint coordinates over the weighted
# keypoints.
# keypoint_mean has shape: [num_instance, 2]
keypoint_mean = tf.math.divide(
tf.reduce_sum(weighted_keypoints, axis=1),
tf.expand_dims(instance_weight_sum, axis=-1))
# Replace the NaN values (due to divided by zeros in the above operation)
# by 0.0 where the sum of instance weight is zero.
# keypoint_mean has shape: [num_instance, 2]
keypoint_mean = tf.where(
tf.stack([instance_weight_sum, instance_weight_sum], axis=1) > 0.0,
keypoint_mean, tf.zeros_like(keypoint_mean))
# Compute the distance from each keypoint to the mean location using
# broadcasting and weighted by updated_kp_weights.
# keypoint_dist has shape: [num_instance, num_keypoints]
keypoint_mean = tf.expand_dims(keypoint_mean, axis=1)
keypoint_dist = tf.math.sqrt(
tf.reduce_sum(
tf.math.square(keypoints_absolute - keypoint_mean), axis=2))
keypoint_dist = keypoint_dist * updated_kp_weights
# Compute the average of the distances from each keypoint to the mean
# location and update the average value by zero when the instance weight
# is zero.
# avg_radius has shape: [num_instance]
avg_radius = tf.math.divide(
tf.reduce_sum(keypoint_dist, axis=1), instance_weight_sum)
avg_radius = tf.where(
instance_weight_sum > 0.0, avg_radius, tf.zeros_like(avg_radius))
# Update the class instance weight. If the instance doesn't contain enough
# valid keypoint values (i.e. instance_weight_sum == 0.0), then set the
# instance weight to zero.
# updated_class_weights has shape: [num_instance]
updated_class_weights = tf.where(
instance_weight_sum > 0.0, weights, tf.zeros_like(weights))
# Compute the sigma from average distance. We use 2 * average distance
# to approximate the width/height of the bounding box.
# sigma has shape: [num_instances].
sigma = _compute_std_dev_from_box_size(2 * avg_radius, 2 * avg_radius,
self._min_overlap)
# Apply the Gaussian kernel to the center coordinates. Returned heatmap
# has shape of [out_height, out_width, num_classes]
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=keypoint_mean[:, 0, 0],
x_coordinates=keypoint_mean[:, 0, 1],
sigma=sigma,
channel_onehot=classes,
channel_weights=updated_class_weights,
sparse=self._compute_heatmap_sparse)
heatmaps.append(heatmap)
# Return the stacked heatmaps over the batch.
return tf.stack(heatmaps, axis=0)
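# Example sketch (editorial addition): building the per-class center heatmap
# target for a batch of one image. The input size, stride and groundtruth
# tensors are placeholders.
#
#   center_assigner = CenterNetCenterHeatmapTargetAssigner(stride=4)
#   heatmap = center_assigner.assign_center_targets_from_boxes(
#       height=512, width=512,
#       gt_boxes_list=[tf.constant([[0.1, 0.1, 0.5, 0.6]])],
#       gt_classes_list=[tf.constant([[1., 0.]])])
#   # heatmap has shape [1, 128, 128, 2] for this two-class example.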
class CenterNetBoxTargetAssigner(object):
"""Wrapper to compute target tensors for the object detection task.
This class has methods that take as input a batch of ground truth tensors
(in the form of a list) and return the targets required to train the object
detection task.
"""
def __init__(self, stride):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
"""
self._stride = stride
def assign_size_and_offset_targets(self,
height,
width,
gt_boxes_list,
gt_weights_list=None,
maximum_normalized_coordinate=1.1):
"""Returns the box height/width and center offset targets and their indices.
The returned values are expected to be used with predicted tensors
of size (batch_size, height//self._stride, width//self._stride, 2). The
predicted values at the relevant indices can be retrieved with the
get_batch_predictions_from_indices function.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_weights_list: A list of tensors with shape [num_boxes] corresponding to
the weight of each groundtruth detection box.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1. This is used to check bounds during
converting normalized coordinates to absolute coordinates.
Returns:
batch_indices: an integer tensor of shape [num_boxes, 3] holding the
indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively.
batch_box_height_width: a float tensor of shape [num_boxes, 2] holding
expected height and width of each box in the output space.
batch_offsets: a float tensor of shape [num_boxes, 2] holding the
expected y and x offset of each box in the output space.
batch_weights: a float tensor of shape [num_boxes] indicating the
weight of each prediction.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_box_height_width = []
batch_weights = []
batch_offsets = []
for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1),
maximum_normalized_coordinate=maximum_normalized_coordinate)
# Get the box center coordinates. Each returned tensors have the shape of
# [num_boxes]
(y_center, x_center, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_boxes, 2]
# indices: [num_boxes, 2]
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_box_height_width.append(
tf.stack([boxes_height, boxes_width], axis=1))
batch_weights.append(weights)
batch_offsets.append(offsets)
batch_indices = tf.concat(batch_indices, axis=0)
batch_box_height_width = tf.concat(batch_box_height_width, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_box_height_width, batch_offsets, batch_weights)
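# Example sketch (editorial addition): pairing the box assigner above with
# get_batch_predictions_from_indices inside a loss function. `size_prediction`
# and the groundtruth boxes are placeholders.
#
#   box_assigner = CenterNetBoxTargetAssigner(stride=4)
#   (inds, hw_targets, offset_targets, weights) = (
#       box_assigner.assign_size_and_offset_targets(
#           height=512, width=512,
#           gt_boxes_list=[tf.constant([[0.1, 0.1, 0.5, 0.6]])]))
#   hw_preds = get_batch_predictions_from_indices(size_prediction, inds)
#   # e.g. an L1 loss between hw_preds and hw_targets, weighted by `weights`.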
# TODO(yuhuic): Update this class to handle the instance/keypoint weights.
# Currently those weights are used as "mask" to indicate whether an
# instance/keypoint should be considered or not (expecting values of only 0 or
# 1). In reality, the weights can be any value and this class should handle
# those values properly.
class CenterNetKeypointTargetAssigner(object):
"""Wrapper to compute target tensors for the CenterNet keypoint estimation.
This class has methods that take as input a batch of groundtruth tensors
(in the form of a list) and returns the targets required to train the
CenterNet model for keypoint estimation. Specifically, the class methods
expect the groundtruth in the following formats (consistent with the
standard Object Detection API). Note that usually the groundtruth tensors are
packed with a list which represents the batch dimension:
gt_classes_list: [Required] a list of 2D tf.float32 one-hot
(or k-hot) tensors of shape [num_instances, num_classes] containing the
class targets with the 0th index assumed to map to the first non-background
class.
gt_keypoints_list: [Required] a list of 3D tf.float32 tensors of
shape [num_instances, num_total_keypoints, 2] containing keypoint
coordinates. Note that the "num_total_keypoints" should be the sum of the
num_keypoints over all possible keypoint types, e.g. human pose, face.
For example, if a dataset contains both 17 human pose keypoints and 5 face
keypoints, then num_total_keypoints = 17 + 5 = 22.
If an instance contains only a subset of keypoints (e.g. human pose keypoints
but not face keypoints), the face keypoints will be filled with zeros.
Also note that keypoints are assumed to be provided in normalized
coordinates and missing keypoints should be encoded as NaN.
gt_keypoints_weights_list: [Optional] a list 3D tf.float32 tensors of shape
[num_instances, num_total_keypoints] representing the weights of each
keypoints. If not provided, then all not NaN keypoints will be equally
weighted.
gt_boxes_list: [Optional] a list of 2D tf.float32 tensors of shape
[num_instances, 4] containing coordinates of the groundtruth boxes.
Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and
assumed to be normalized and clipped relative to the image window with
y_min <= y_max and x_min <= x_max.
Note that the boxes are only used to compute the center targets but are not
considered as required output of the keypoint task. If the boxes were not
provided, the center targets will be inferred from the keypoints
[not implemented yet].
gt_weights_list: [Optional] A list of 1D tf.float32 tensors of shape
[num_instances] containing weights for groundtruth boxes. Only useful when
gt_boxes_list is also provided.
"""
def __init__(self,
stride,
class_id,
keypoint_indices,
keypoint_std_dev=None,
per_keypoint_offset=False,
peak_radius=0,
compute_heatmap_sparse=False,
per_keypoint_depth=False):
"""Initializes a CenterNet keypoints target assigner.
Args:
stride: int, the stride of the network in output pixels.
class_id: int, the ID of the class (0-indexed) that contains the target
keypoints to consider in this task. For example, if the task is human
pose estimation, the class id should correspond to the "human" class.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
keypoint_std_dev: A list of floats represent the standard deviation of the
Gaussian kernel used to generate the keypoint heatmap (in the unit of
output pixels). It is to provide the flexibility of using different
sizes of Gaussian kernel for each keypoint type. If not provided, then
all standard deviations will be the same as the default value (10.0 in
the output pixel space). If provided, the length of keypoint_std_dev
needs to be the same as the length of keypoint_indices, indicating the
standard deviation of each keypoint type.
per_keypoint_offset: boolean, indicating whether to assign offset for
each keypoint channel. If set False, the output offset target will have
the shape [batch_size, out_height, out_width, 2]. If set True, the
output offset target will have the shape [batch_size, out_height,
out_width, 2 * num_keypoints].
peak_radius: int, the radius (in the unit of output pixel) around heatmap
peak to assign the offset targets.
compute_heatmap_sparse: bool, indicating whether or not to use the sparse
version of the Op that computes the heatmap. The sparse version scales
better with the number of keypoint types, but in some cases is known to
cause an OOM error. See (b/170989061).
per_keypoint_depth: A bool indicates whether the model predicts the depth
of each keypoints in independent channels. Similar to
per_keypoint_offset but for the keypoint depth.
"""
self._stride = stride
self._class_id = class_id
self._keypoint_indices = keypoint_indices
self._per_keypoint_offset = per_keypoint_offset
self._per_keypoint_depth = per_keypoint_depth
self._peak_radius = peak_radius
self._compute_heatmap_sparse = compute_heatmap_sparse
if keypoint_std_dev is None:
self._keypoint_std_dev = ([_DEFAULT_KEYPOINT_OFFSET_STD_DEV] *
len(keypoint_indices))
else:
assert len(keypoint_indices) == len(keypoint_std_dev)
self._keypoint_std_dev = keypoint_std_dev
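# Illustrative sketch (not from the original file; values are assumptions):
# constructing this assigner for a hypothetical 17-keypoint human-pose task.
#
#   kp_assigner = CenterNetKeypointTargetAssigner(
#       stride=4,                          # network output stride
#       class_id=0,                        # e.g. index of the "person" class
#       keypoint_indices=list(range(17)),  # keep all 17 pose keypoints
#       peak_radius=0)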
def assign_keypoint_heatmap_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoints_weights_list=None,
gt_weights_list=None,
gt_boxes_list=None):
"""Returns the keypoint heatmap targets for the CenterNet model.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
gt_keypoints_list: A list of float tensors with shape [num_instances,
num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of float tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See
class-level description for more detail. If provided, the keypoint
standard deviations will be scaled based on the box sizes.
Returns:
heatmap: A float tensor of shape [batch_size, output_height, output_width,
num_keypoints] representing the per keypoint type center heatmap.
output_height and output_width are computed by dividing the input height
and width by the stride specified during initialization. Note that the
"num_keypoints" is defined by the length of keypoint_indices, which is
not necessarily equal to "num_total_keypoints".
num_instances_batch: A 2D int tensor of shape
[batch_size, num_keypoints] representing the number of instances for each
keypoint type.
valid_mask: A float tensor with shape [batch_size, output_height,
output_width, num_keypoints] where all values within the regions of the
blackout boxes are 0.0 and 1.0 elsewhere. Note that the blackout boxes
are per keypoint type and are blacked out if the keypoint
visibility/weight (of the corresponding keypoint type) is zero.
"""
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
y_grid, x_grid = ta_utils.image_shape_to_grids(out_height, out_width)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_boxes_list is None:
gt_boxes_list = [None] * len(gt_keypoints_list)
heatmaps = []
num_instances_list = []
valid_mask_list = []
for keypoints, classes, kp_weights, weights, boxes in zip(
gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list, gt_boxes_list):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=out_height,
out_width=out_width,
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# A tensor of shape [num_instances, num_keypoints] with
# each element representing the type dimension for each corresponding
# keypoint:
# [[0, 1, ..., k-1],
# [0, 1, ..., k-1],
# :
# [0, 1, ..., k-1]]
keypoint_types = tf.tile(
input=tf.expand_dims(tf.range(num_keypoints), axis=0),
multiples=[num_instances, 1])
# A tensor of shape [num_instances, num_keypoints] with
# each element representing the sigma of the Gaussian kernel for each
# keypoint.
keypoint_std_dev = tf.tile(
input=tf.expand_dims(tf.constant(self._keypoint_std_dev), axis=0),
multiples=[num_instances, 1])
# If boxes is not None, then scale the standard deviation based on the
# size of the object bounding boxes similar to object center heatmap.
if boxes is not None:
boxes = box_list.BoxList(boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
# Get the box height and width. Each returned tensor has the shape
# of [num_instances]
(_, _, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
# Compute the sigma from box size. The tensor shape: [num_instances].
sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, 0.7)
keypoint_std_dev = keypoint_std_dev * tf.stack(
[sigma] * num_keypoints, axis=1)
# Generate the per-keypoint type valid region mask to ignore regions
# with keypoint weights equal to zero (e.g. visibility is 0).
# shape of valid_mask: [out_height, out_width, num_keypoints]
kp_weight_list = tf.unstack(kp_weights, axis=1)
valid_mask_channel_list = []
for kp_weight in kp_weight_list:
blackout = kp_weight < 1e-3
valid_mask_channel_list.append(
ta_utils.blackout_pixel_weights_by_box_regions(
out_height, out_width, boxes.get(), blackout))
valid_mask = tf.stack(valid_mask_channel_list, axis=2)
valid_mask_list.append(valid_mask)
# Apply the Gaussian kernel to the keypoint coordinates. Returned heatmap
# has shape of [out_height, out_width, num_keypoints].
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]),
x_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]),
sigma=tf.keras.backend.flatten(keypoint_std_dev),
channel_onehot=tf.one_hot(
tf.keras.backend.flatten(keypoint_types), depth=num_keypoints),
channel_weights=tf.keras.backend.flatten(kp_weights))
num_instances_list.append(
tf.cast(tf.reduce_sum(kp_weights, axis=0), dtype=tf.int32))
heatmaps.append(heatmap)
return (tf.stack(heatmaps, axis=0), tf.stack(num_instances_list, axis=0),
tf.stack(valid_mask_list, axis=0))
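# Illustrative sketch (assumed shapes/values): with the assigner above, a
# stride of 4 and a 512x512 input, the heatmap target is produced at 128x128
# with one channel per selected keypoint type.
#
#   heatmap, num_instances, valid_mask = (
#       kp_assigner.assign_keypoint_heatmap_targets(
#           height=512, width=512,
#           gt_keypoints_list=gt_keypoints_list,  # each [num_instances, K, 2]
#           gt_classes_list=gt_classes_list,      # each [num_instances, num_classes]
#           gt_boxes_list=gt_boxes_list))         # each [num_instances, 4]
#   # heatmap: [batch_size, 128, 128, len(keypoint_indices)]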
def _get_keypoint_types(self, num_instances, num_keypoints, num_neighbors):
"""Gets keypoint type index tensor.
The function prepares the tensor of keypoint indices with shape
[num_instances, num_keypoints, num_neighbors]. Each element represents the
keypoint type index for each corresponding keypoint and tiled along the 3rd
axis:
[[0, 1, ..., num_keypoints - 1],
[0, 1, ..., num_keypoints - 1],
:
[0, 1, ..., num_keypoints - 1]]
Args:
num_instances: int, the number of instances, used to define the 1st
dimension.
num_keypoints: int, the number of keypoint types, used to define the 2nd
dimension.
num_neighbors: int, the number of neighborhood pixels to consider for each
keypoint, used to define the 3rd dimension.
Returns:
An integer tensor of shape [num_instances, num_keypoints, num_neighbors].
"""
keypoint_types = tf.range(num_keypoints)[tf.newaxis, :, tf.newaxis]
tiled_keypoint_types = tf.tile(keypoint_types,
multiples=[num_instances, 1, num_neighbors])
return tiled_keypoint_types
def assign_keypoints_offset_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the offsets and indices of the keypoints for location refinement.
The returned values are used to refine the location of each keypoints in the
heatmap. The predicted values at the relevant indices can be retrieved with
the get_batch_predictions_from_indices function.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
gt_keypoints_list: A list of tensors with shape [num_instances,
num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
batch_indices: an integer tensor of shape [num_total_instances, 3] (or
[num_total_instances, 4] if 'per_keypoint_offset' is set True) holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively. The fourth column corresponds to the channel
dimension (if 'per_keypoint_offset' is set True).
batch_offsets: a float tensor of shape [num_total_instances, 2] holding
the expected y and x offset of each box in the output space.
batch_weights: a float tensor of shape [num_total_instances] indicating
the weight of each prediction.
Note that num_total_instances = batch_size * num_instances *
num_keypoints * num_neighbors
"""
batch_indices = []
batch_offsets = []
batch_weights = []
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, kp_weights, weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# [num_instances * num_keypoints]
y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0])
x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1])
# All keypoint coordinates and their neighbors:
# [num_instances * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
y_source, x_source,
self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
# Update the valid keypoint weights.
# [num_instances * num_keypoints, num_neighbors]
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_instances * num_keypoints, num_neighbors, 2]
# indices: [num_instances * num_keypoints, num_neighbors, 2]
offsets, indices = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=y_source,
x_target=x_source)
# Reshape to:
# offsets: [num_instances * num_keypoints * num_neighbors, 2]
# indices: [num_instances * num_keypoints * num_neighbors, 2]
offsets = tf.reshape(offsets, [-1, 2])
indices = tf.reshape(indices, [-1, 2])
# Prepare the batch indices to be prepended.
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
if self._per_keypoint_offset:
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
else:
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_offsets.append(offsets)
batch_weights.append(tf.keras.backend.flatten(valid_keypoints))
# Concatenate the tensors in the batch in the first dimension:
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or
# [batch_size * num_instances * num_keypoints * num_neighbors, 4] if
# 'per_keypoint_offset' is set to True.
batch_indices = tf.concat(batch_indices, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors]
batch_weights = tf.concat(batch_weights, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 2]
batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_offsets, batch_weights)
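# Illustrative sketch (assumption): the returned indices/offsets are intended
# to be paired with the model's offset prediction through
# get_batch_predictions_from_indices (referenced in the docstring above), e.g.
#
#   indices, offsets, weights = kp_assigner.assign_keypoints_offset_targets(
#       height=512, width=512,
#       gt_keypoints_list=gt_keypoints_list,
#       gt_classes_list=gt_classes_list)
#   preds = get_batch_predictions_from_indices(offset_prediction, indices)
#   loss = tf.reduce_sum(weights[:, tf.newaxis] * tf.abs(preds - offsets))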
def assign_keypoints_depth_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoint_depths_list,
gt_keypoint_depth_weights_list,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the target depths of the keypoints.
The returned values are the relative depth information of each keypoints.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
gt_keypoints_list: A list of tensors with shape [num_instances,
num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoint_depths_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the relative depth of the
keypoints.
gt_keypoint_depth_weights_list: A list of tensors with shape
[num_instances, num_total_keypoints] corresponding to the weights of
the relative depth.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
batch_indices: an integer tensor of shape [num_total_instances, 3] (or
[num_total_instances, 4] if 'per_keypoint_depth' is set True) holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively. The fourth column corresponds to the channel
dimension (if 'per_keypoint_depth' is set True).
batch_depths: a float tensor of shape [num_total_instances, 1] (or
[num_total_instances, num_keypoints] if per_keypoint_depth is set True)
indicating the target depth of each keypoint.
batch_weights: a float tensor of shape [num_total_instances] indicating
the weight of each prediction.
Note that num_total_instances = batch_size * num_instances *
num_keypoints * num_neighbors
"""
batch_indices = []
batch_weights = []
batch_depths = []
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_keypoint_depths_list is None:
gt_keypoint_depths_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, kp_weights, weights,
keypoint_depths, keypoint_depth_weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list,
gt_keypoints_weights_list, gt_weights_list,
gt_keypoint_depths_list, gt_keypoint_depth_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# [num_instances * num_keypoints]
y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0])
x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1])
# All keypoint coordinates and their neighbors:
# [num_instances * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
y_source, x_source,
self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
# Update the valid keypoint weights.
# [num_instances * num_keypoints, num_neighbors]
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
# Compute the offsets and indices of the box centers. Shape:
# indices: [num_instances * num_keypoints, num_neighbors, 2]
_, indices = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=y_source,
x_target=x_source)
# Reshape to:
# indices: [num_instances * num_keypoints * num_neighbors, 2]
indices = tf.reshape(indices, [-1, 2])
# Gather the keypoint depth from corresponding keypoint indices:
# [num_instances, num_keypoints]
keypoint_depths = tf.gather(
keypoint_depths, self._keypoint_indices, axis=1)
# Tile the depth target to surrounding pixels.
# [num_instances, num_keypoints, num_neighbors]
tiled_keypoint_depths = tf.tile(
tf.expand_dims(keypoint_depths, axis=-1),
multiples=[1, 1, num_neighbors])
# [num_instances, num_keypoints]
keypoint_depth_weights = tf.gather(
keypoint_depth_weights, self._keypoint_indices, axis=1)
# [num_instances, num_keypoints, num_neighbors]
keypoint_depth_weights = tf.tile(
tf.expand_dims(keypoint_depth_weights, axis=-1),
multiples=[1, 1, num_neighbors])
# Update the weights of keypoint depth by the weights of the keypoints.
# A keypoint depth target is valid only if its corresponding keypoint
# target is also valid.
# [num_instances, num_keypoints, num_neighbors]
tiled_depth_weights = (
tf.reshape(valid_keypoints,
[num_instances, num_keypoints, num_neighbors]) *
keypoint_depth_weights)
invalid_depths = tf.logical_or(
tf.math.is_nan(tiled_depth_weights),
tf.math.is_nan(tiled_keypoint_depths))
# Assign zero values and weights to NaN values.
final_keypoint_depths = tf.where(invalid_depths,
tf.zeros_like(tiled_keypoint_depths),
tiled_keypoint_depths)
final_keypoint_depth_weights = tf.where(
invalid_depths,
tf.zeros_like(tiled_depth_weights),
tiled_depth_weights)
# [num_instances * num_keypoints * num_neighbors, 1]
batch_depths.append(tf.reshape(final_keypoint_depths, [-1, 1]))
# Prepare the batch indices to be prepended.
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
if self._per_keypoint_depth:
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
else:
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(
tf.keras.backend.flatten(final_keypoint_depth_weights))
# Concatenate the tensors in the batch in the first dimension:
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or
# [batch_size * num_instances * num_keypoints * num_neighbors, 4] if
# 'per_keypoint_depth' is set to True.
batch_indices = tf.concat(batch_indices, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors]
batch_weights = tf.concat(batch_weights, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 1]
batch_depths = tf.concat(batch_depths, axis=0)
return (batch_indices, batch_depths, batch_weights)
def assign_joint_regression_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_boxes_list=None,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the joint regression from center grid to keypoints.
The joint regression is used as the grouping cue from the estimated
keypoints to instance center. The offsets are the vectors from the floored
object center coordinates to the keypoint coordinates.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
gt_keypoints_list: A list of float tensors with shape [num_instances,
num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of float tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See
class-level description for more detail. If provided, then the center
targets will be computed based on the center of the boxes.
gt_keypoints_weights_list: A list of float tensors with shape
[num_instances, num_total_keypoints] representing the weight of each
keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
batch_indices: an integer tensor of shape [num_total_instances, 4] holding the
indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively, the last dimension refers to the keypoint type
dimension.
batch_offsets: a float tensor of shape [num_total_instances, 2] holding the
expected y and x offset of each box in the output space.
batch_weights: a float tensor of shape [num_total_instances] indicating the
weight of each prediction.
Note that num_total_instances = batch_size * num_instances * num_keypoints
Raises:
NotImplementedError: currently the object center coordinates need to be
computed from groundtruth bounding boxes. The functionality of
generating the object center coordinates from keypoints is not
implemented yet.
"""
batch_indices = []
batch_offsets = []
batch_weights = []
batch_size = len(gt_keypoints_list)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * batch_size
if gt_boxes_list is None:
gt_boxes_list = [None] * batch_size
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, boxes, kp_weights, weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list,
gt_boxes_list, gt_keypoints_weights_list, gt_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# If boxes are provided, compute the joint center from it.
if boxes is not None:
# Compute joint center from boxes.
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
y_center, x_center, _, _ = boxes.get_center_coordinates_and_sizes()
else:
# TODO(yuhuic): Add the logic to generate object centers from keypoints.
raise NotImplementedError((
'The functionality of generating object centers from keypoints is'
' not implemented yet. Please provide groundtruth bounding boxes.'
))
# Tile the yx center coordinates to be the same shape as keypoints.
y_center_tiled = tf.tile(
tf.reshape(y_center, shape=[num_instances, 1]),
multiples=[1, num_keypoints])
x_center_tiled = tf.tile(
tf.reshape(x_center, shape=[num_instances, 1]),
multiples=[1, num_keypoints])
# [num_instances * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
tf.keras.backend.flatten(y_center_tiled),
tf.keras.backend.flatten(x_center_tiled), self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_instances * num_keypoints, num_neighbors, 2]
# indices: [num_instances * num_keypoints, num_neighbors, 2]
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]),
x_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]))
# Reshape to:
# offsets: [num_instances * num_keypoints * num_neighbors, 2]
# indices: [num_instances * num_keypoints * num_neighbors, 2]
offsets = tf.reshape(offsets, [-1, 2])
indices = tf.reshape(indices, [-1, 2])
# keypoint type tensor: [num_instances, num_keypoints, num_neighbors].
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
batch_offsets.append(offsets)
batch_weights.append(tf.keras.backend.flatten(valid_keypoints))
# Concatenate the tensors in the batch in the first dimension:
# shape: [batch_size * num_instances * num_keypoints, 4]
batch_indices = tf.concat(batch_indices, axis=0)
# shape: [batch_size * num_instances * num_keypoints]
batch_weights = tf.concat(batch_weights, axis=0)
# shape: [batch_size * num_instances * num_keypoints, 2]
batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_offsets, batch_weights)
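# Worked example (illustrative numbers only): with peak_radius=0, an object
# whose box center maps to (10.3, 12.7) in output space is floored to the
# pixel (10, 12); a keypoint of that object at (14.0, 9.0) then receives the
# regression target (14.0 - 10, 9.0 - 12) = (4.0, -3.0), stored at the index
# (batch_index, 10, 12, keypoint_type).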
def _resize_masks(masks, height, width, method):
# Resize segmentation masks to conform to output dimensions. Use TF2
# image resize because TF1's version is buggy:
# https://yaqs.corp.google.com/eng/q/4970450458378240
masks = tf2.image.resize(
masks[:, :, :, tf.newaxis],
size=(height, width),
method=method)
return masks[:, :, :, 0]
class CenterNetMaskTargetAssigner(object):
"""Wrapper to compute targets for segmentation masks."""
def __init__(self, stride, boxes_scale=1.0):
"""Constructor.
Args:
stride: The stride of the network. Targets are assigned at the output
stride.
boxes_scale: Scale to apply to boxes before producing mask weights. This
is meant to ensure the full object region is properly weighted prior to
applying loss. A value of ~1.05 is typically applied when object regions
should be blacked out (perhaps because valid groundtruth masks are not
present).
"""
self._stride = stride
self._boxes_scale = boxes_scale
def assign_segmentation_targets(
self, gt_masks_list, gt_classes_list, gt_boxes_list=None,
gt_mask_weights_list=None, mask_resize_method=ResizeMethod.BILINEAR):
"""Computes the segmentation targets.
This utility produces a semantic segmentation mask for each class, starting
with whole image instance segmentation masks. Effectively, each per-class
segmentation target is the union of all masks from that class.
Args:
gt_masks_list: A list of float tensors with shape [num_boxes,
input_height, input_width] with values in {0, 1} representing instance
masks for each object.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
gt_boxes_list: An optional list of float tensors with shape [num_boxes, 4]
with normalized boxes corresponding to each mask. The boxes are used to
spatially allocate mask weights.
gt_mask_weights_list: An optional list of float tensors with shape
[num_boxes] with weights for each mask. If a mask has a zero weight, it
indicates that the box region associated with the mask should not
contribute to the loss. If not provided, will use a per-pixel weight of
1.
mask_resize_method: A `tf.compat.v2.image.ResizeMethod`. The method to use
when resizing masks from input resolution to output resolution.
Returns:
segmentation_targets: An int32 tensor of size [batch_size, output_height,
output_width, num_classes] representing the class of each location in
the output space.
segmentation_weight: A float32 tensor of size [batch_size, output_height,
output_width] indicating the loss weight to apply at each location.
"""
_, num_classes = shape_utils.combined_static_and_dynamic_shape(
gt_classes_list[0])
_, input_height, input_width = (
shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0]))
output_height = tf.maximum(input_height // self._stride, 1)
output_width = tf.maximum(input_width // self._stride, 1)
if gt_boxes_list is None:
gt_boxes_list = [None] * len(gt_masks_list)
if gt_mask_weights_list is None:
gt_mask_weights_list = [None] * len(gt_masks_list)
segmentation_targets_list = []
segmentation_weights_list = []
for gt_boxes, gt_masks, gt_mask_weights, gt_classes in zip(
gt_boxes_list, gt_masks_list, gt_mask_weights_list, gt_classes_list):
if gt_boxes is not None and gt_mask_weights is not None:
boxes = box_list.BoxList(gt_boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes_absolute = box_list_ops.to_absolute_coordinates(
boxes, output_height, output_width)
# Generate a segmentation weight that applies mask weights in object
# regions.
blackout = gt_mask_weights <= 0
segmentation_weight_for_image = (
ta_utils.blackout_pixel_weights_by_box_regions(
output_height, output_width, boxes_absolute.get(), blackout,
weights=gt_mask_weights, boxes_scale=self._boxes_scale))
segmentation_weights_list.append(segmentation_weight_for_image)
else:
segmentation_weights_list.append(tf.ones((output_height, output_width),
dtype=tf.float32))
gt_masks = _resize_masks(gt_masks, output_height, output_width,
mask_resize_method)
gt_masks = gt_masks[:, :, :, tf.newaxis]
gt_classes_reshaped = tf.reshape(gt_classes, [-1, 1, 1, num_classes])
# Shape: [h, w, num_classes].
segmentations_for_image = tf.reduce_max(
gt_masks * gt_classes_reshaped, axis=0)
# Avoid the case where max of an empty array is -inf.
segmentations_for_image = tf.maximum(segmentations_for_image, 0.0)
segmentation_targets_list.append(segmentations_for_image)
segmentation_target = tf.stack(segmentation_targets_list, axis=0)
segmentation_weight = tf.stack(segmentation_weights_list, axis=0)
return segmentation_target, segmentation_weight
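# Illustrative sketch (assumed stride and shapes): typical usage with
# instance masks at input resolution and one-hot class labels.
#
#   seg_assigner = CenterNetMaskTargetAssigner(stride=4)
#   seg_targets, seg_weights = seg_assigner.assign_segmentation_targets(
#       gt_masks_list=gt_masks_list,      # each [num_boxes, H, W] in {0, 1}
#       gt_classes_list=gt_classes_list)  # each [num_boxes, num_classes]
#   # seg_targets: [batch_size, H // 4, W // 4, num_classes]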
class CenterNetDensePoseTargetAssigner(object):
"""Wrapper to compute targets for DensePose task."""
def __init__(self, stride, num_parts=24):
self._stride = stride
self._num_parts = num_parts
def assign_part_and_coordinate_targets(self,
height,
width,
gt_dp_num_points_list,
gt_dp_part_ids_list,
gt_dp_surface_coords_list,
gt_weights_list=None):
"""Returns the DensePose part_id and coordinate targets and their indices.
The returned values are expected to be used with predicted tensors
of size (batch_size, height//self._stride, width//self._stride, 2). The
predicted values at the relevant indices can be retrieved with the
get_batch_predictions_from_indices function.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_dp_num_points_list: a list of 1-D tf.int32 tensors of shape [num_boxes]
containing the number of DensePose sampled points per box.
gt_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape
[num_boxes, max_sampled_points] containing the DensePose part ids
(0-indexed) for each sampled point. Note that there may be padding, as
boxes may contain a different number of sampled points.
gt_dp_surface_coords_list: a list of 3-D tf.float32 tensors of shape
[num_boxes, max_sampled_points, 4] containing the DensePose surface
coordinates (normalized) for each sampled point. Note that there may be
padding.
gt_weights_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [num_total_points, 4] holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively. The fourth column is the part index.
batch_part_ids: an int tensor of shape [num_total_points, num_parts]
holding 1-hot encodings of parts for each sampled point.
batch_surface_coords: a float tensor of shape [num_total_points, 2]
holding the expected (v, u) coordinates for each sampled point.
batch_weights: a float tensor of shape [num_total_points] indicating the
weight of each prediction.
Note that num_total_points = batch_size * num_boxes * max_sampled_points.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_dp_num_points_list)
batch_indices = []
batch_part_ids = []
batch_surface_coords = []
batch_weights = []
for i, (num_points, part_ids, surface_coords, weights) in enumerate(
zip(gt_dp_num_points_list, gt_dp_part_ids_list,
gt_dp_surface_coords_list, gt_weights_list)):
num_boxes, max_sampled_points = (
shape_utils.combined_static_and_dynamic_shape(part_ids))
part_ids_flattened = tf.reshape(part_ids, [-1])
part_ids_one_hot = tf.one_hot(part_ids_flattened, depth=self._num_parts)
# Get DensePose coordinates in the output space.
surface_coords_abs = densepose_ops.to_absolute_coordinates(
surface_coords,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
surface_coords_abs = tf.reshape(surface_coords_abs, [-1, 4])
# Each tensor has shape [num_boxes * max_sampled_points].
yabs, xabs, v, u = tf.unstack(surface_coords_abs, axis=-1)
# Get the indices (in output space) for the DensePose coordinates. Note
# that if self._stride is larger than 1, this will have the effect of
# reducing spatial resolution of the groundtruth points.
indices_y = tf.cast(yabs, tf.int32)
indices_x = tf.cast(xabs, tf.int32)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Create per-point weights.
weights_per_point = tf.reshape(
tf.tile(weights[:, tf.newaxis], multiples=[1, max_sampled_points]),
shape=[-1])
# Mask out invalid (i.e. padded) DensePose points.
num_points_tiled = tf.tile(num_points[:, tf.newaxis],
multiples=[1, max_sampled_points])
range_tiled = tf.tile(tf.range(max_sampled_points)[tf.newaxis, :],
multiples=[num_boxes, 1])
valid_points = tf.math.less(range_tiled, num_points_tiled)
valid_points = tf.cast(tf.reshape(valid_points, [-1]), dtype=tf.float32)
weights_per_point = weights_per_point * valid_points
# Shape of [num_boxes * max_sampled_points] integer tensor filled with
# current batch index.
batch_index = i * tf.ones_like(indices_y, dtype=tf.int32)
batch_indices.append(
tf.stack([batch_index, indices_y, indices_x, part_ids_flattened],
axis=1))
batch_part_ids.append(part_ids_one_hot)
batch_surface_coords.append(tf.stack([v, u], axis=1))
batch_weights.append(weights_per_point)
batch_indices = tf.concat(batch_indices, axis=0)
batch_part_ids = tf.concat(batch_part_ids, axis=0)
batch_surface_coords = tf.concat(batch_surface_coords, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
return batch_indices, batch_part_ids, batch_surface_coords, batch_weights
class CenterNetTrackTargetAssigner(object):
"""Wrapper to compute targets for tracking task.
Reference paper: A Simple Baseline for Multi-Object Tracking [1]
[1]: https://arxiv.org/abs/2004.01888
"""
def __init__(self, stride, num_track_ids):
self._stride = stride
self._num_track_ids = num_track_ids
def assign_track_targets(self,
height,
width,
gt_track_ids_list,
gt_boxes_list,
gt_weights_list=None):
"""Computes the track ID targets.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_track_ids_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the track ID of each groundtruth detection box.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_weights_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [batch_size, num_boxes, 3]
holding the indices inside the predicted tensor which should be
penalized. The first column indicates the index along the batch
dimension and the second and third columns indicate the index
along the y and x dimensions respectively.
batch_weights: a float tensor of shape [batch_size, num_boxes] indicating
the weight of each prediction.
track_id_targets: An int32 tensor of size [batch_size, num_boxes,
num_track_ids] containing the one-hot track ID vector of each
groundtruth detection box.
"""
track_id_targets = tf.one_hot(
gt_track_ids_list, depth=self._num_track_ids, axis=-1)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_weights = []
for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
# Get the box center coordinates. Each returned tensors have the shape of
# [num_boxes]
(y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the indices of the box centers. Shape:
# indices: [num_boxes, 2]
(_, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(weights)
batch_indices = tf.stack(batch_indices, axis=0)
batch_weights = tf.stack(batch_weights, axis=0)
return batch_indices, batch_weights, track_id_targets
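# Illustrative sketch (assumed values): num_track_ids is fixed ahead of time,
# e.g. the number of distinct identities in the training set.
#
#   track_assigner = CenterNetTrackTargetAssigner(stride=4, num_track_ids=1000)
#   indices, weights, track_id_targets = track_assigner.assign_track_targets(
#       height=512, width=512,
#       gt_track_ids_list=gt_track_ids_list,  # each [num_boxes]
#       gt_boxes_list=gt_boxes_list)          # each [num_boxes, 4], normalized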
def filter_mask_overlap_min_area(masks):
"""If a pixel belongs to 2 instances, remove it from the larger instance."""
num_instances = tf.shape(masks)[0]
def _filter_min_area():
"""Helper function to filter non empty masks."""
areas = tf.reduce_sum(masks, axis=[1, 2], keepdims=True)
per_pixel_area = masks * areas
# Make sure background is ignored in argmin.
per_pixel_area = (masks * per_pixel_area +
(1 - masks) * per_pixel_area.dtype.max)
min_index = tf.cast(tf.argmin(per_pixel_area, axis=0), tf.int32)
filtered_masks = (
tf.range(num_instances)[:, tf.newaxis, tf.newaxis]
==
min_index[tf.newaxis, :, :]
)
return tf.cast(filtered_masks, tf.float32) * masks
return tf.cond(num_instances > 0, _filter_min_area,
lambda: masks)
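# Worked example (illustrative): if mask A covers 100 pixels and mask B covers
# 20 pixels, every pixel where A and B overlap is kept only in B (the smaller
# instance) and zeroed out in A; pixels covered by a single mask are left
# unchanged, and background pixels remain 0 in every mask.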
def filter_mask_overlap(masks, method='min_area'):
if method == 'min_area':
return filter_mask_overlap_min_area(masks)
else:
raise ValueError('Unknown mask overlap filter type - {}'.format(method))
class CenterNetCornerOffsetTargetAssigner(object):
"""Wrapper to compute corner offsets for boxes using masks."""
def __init__(self, stride, overlap_resolution='min_area'):
"""Initializes the corner offset target assigner.
Args:
stride: int, the stride of the network in output pixels.
overlap_resolution: string, specifies how we handle overlapping
instance masks. Currently only 'min_area' is supported which assigns
overlapping pixels to the instance with the minimum area.
"""
self._stride = stride
self._overlap_resolution = overlap_resolution
def assign_corner_offset_targets(
self, gt_boxes_list, gt_masks_list):
"""Computes the corner offset targets and foreground map.
For each pixel that is part of any object's foreground, this function
computes the relative offsets to the top-left and bottom-right corners of
that instance's bounding box. It also returns a foreground map to indicate
which pixels contain valid corner offsets.
Args:
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_masks_list: A list of float tensors with shape [num_boxes,
input_height, input_width] with values in {0, 1} representing instance
masks for each object.
Returns:
corner_offsets: A float tensor of shape [batch_size, height, width, 4]
containing, in order, the (y, x) offsets to the top left corner and
the (y, x) offsets to the bottom right corner for each foreground pixel.
foreground: A float tensor of shape [batch_size, height, width] in which
each pixel is set to 1 if it is a part of any instance's foreground
(and thus contains valid corner offsets) and 0 otherwise.
"""
_, input_height, input_width = (
shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0]))
output_height = tf.maximum(input_height // self._stride, 1)
output_width = tf.maximum(input_width // self._stride, 1)
y_grid, x_grid = tf.meshgrid(
tf.range(output_height), tf.range(output_width),
indexing='ij')
y_grid, x_grid = tf.cast(y_grid, tf.float32), tf.cast(x_grid, tf.float32)
corner_targets = []
foreground_targets = []
for gt_masks, gt_boxes in zip(gt_masks_list, gt_boxes_list):
gt_masks = _resize_masks(gt_masks, output_height, output_width,
method=ResizeMethod.NEAREST_NEIGHBOR)
gt_masks = filter_mask_overlap(gt_masks, self._overlap_resolution)
output_height = tf.cast(output_height, tf.float32)
output_width = tf.cast(output_width, tf.float32)
ymin, xmin, ymax, xmax = tf.unstack(gt_boxes, axis=1)
ymin, ymax = ymin * output_height, ymax * output_height
xmin, xmax = xmin * output_width, xmax * output_width
top_y = ymin[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis]
left_x = xmin[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis]
bottom_y = ymax[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis]
right_x = xmax[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis]
foreground_target = tf.cast(tf.reduce_sum(gt_masks, axis=0) > 0.5,
tf.float32)
foreground_targets.append(foreground_target)
corner_target = tf.stack([
tf.reduce_sum(top_y * gt_masks, axis=0),
tf.reduce_sum(left_x * gt_masks, axis=0),
tf.reduce_sum(bottom_y * gt_masks, axis=0),
tf.reduce_sum(right_x * gt_masks, axis=0),
], axis=2)
corner_targets.append(corner_target)
return (tf.stack(corner_targets, axis=0),
tf.stack(foreground_targets, axis=0))
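# Worked example (illustrative numbers): for a box with output-space corners
# (ymin, xmin, ymax, xmax) = (8, 4, 20, 16), a foreground pixel of that
# instance at (y, x) = (10, 10) receives the corner offsets
# (8 - 10, 4 - 10, 20 - 10, 16 - 10) = (-2, -6, 10, 6), and the foreground
# map is 1 at that pixel.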
class CenterNetTemporalOffsetTargetAssigner(object):
"""Wrapper to compute target tensors for the temporal offset task.
This class has methods that take as input a batch of ground truth tensors
(in the form of a list) and returns the targets required to train the
temporal offset task.
"""
def __init__(self, stride):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
"""
self._stride = stride
def assign_temporal_offset_targets(self,
height,
width,
gt_boxes_list,
gt_offsets_list,
gt_match_list,
gt_weights_list=None):
"""Returns the temporal offset targets and their indices.
For each ground truth box, this function assigns it the corresponding
temporal offset to train the model.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_offsets_list: A list of 2-D tf.float32 tensors of shape [num_boxes, 2]
containing the spatial offsets of objects' centers compared with the
previous frame.
gt_match_list: A list of 1-D tf.float32 tensors of shape [num_boxes]
containing flags that indicate if an object has existed in the
previous frame.
gt_weights_list: A list of tensors with shape [num_boxes] corresponding to
the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [num_boxes, 3] holding the
indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively.
batch_temporal_offsets: a float tensor of shape [num_boxes, 2] of the
expected y and x temporal offset of each object center in the
output space.
batch_weights: a float tensor of shape [num_boxes] indicating the
weight of each prediction.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_weights = []
batch_temporal_offsets = []
for i, (boxes, offsets, match_flags, weights) in enumerate(zip(
gt_boxes_list, gt_offsets_list, gt_match_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
# Get the box center coordinates. Each returned tensors have the shape of
# [num_boxes]
(y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_boxes, 2]
# indices: [num_boxes, 2]
(_, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
# Assign ones if weights are not provided.
# if an object is not matched, its weight becomes zero.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
weights *= match_flags
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(weights)
batch_temporal_offsets.append(offsets)
batch_indices = tf.concat(batch_indices, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
batch_temporal_offsets = tf.concat(batch_temporal_offsets, axis=0)
return (batch_indices, batch_temporal_offsets, batch_weights)
class DETRTargetAssigner(object):
"""Target assigner for DETR (https://arxiv.org/abs/2005.12872).
Detection Transformer (DETR) matches predicted boxes to groundtruth directly
to determine targets instead of matching anchors to groundtruth. Hence, the
new target assigner.
"""
def __init__(self):
"""Construct Object Detection Target Assigner."""
self._similarity_calc = sim_calc.DETRSimilarity()
self._matcher = hungarian_matcher.HungarianBipartiteMatcher()
def batch_assign(self,
pred_box_batch,
gt_box_batch,
pred_class_batch,
gt_class_targets_batch,
gt_weights_batch=None,
unmatched_class_label_batch=None):
"""Batched assignment of classification and regression targets.
Args:
pred_box_batch: a tensor of shape [batch_size, num_queries, 4]
representing predicted bounding boxes.
gt_box_batch: a tensor of shape [batch_size, num_queries, 4]
representing groundtruth bounding boxes.
pred_class_batch: A list of tensors with length batch_size, where each
tensor has shape [num_queries, num_classes] to be used
by certain similarity calculators.
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, num_classes] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
unmatched_class_label_batch: a float32 tensor with shape
[d_1, d_2, ..., d_k] which is consistent with the classification target
for each anchor (and can be empty for scalar targets). This shape must
thus be compatible with the `gt_class_targets_batch`.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_pred_boxes,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_pred_boxes,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_pred_boxes,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_pred_boxes].
"""
pred_box_batch = [
box_list.BoxList(pred_box)
for pred_box in tf.unstack(pred_box_batch)]
gt_box_batch = [
box_list.BoxList(gt_box)
for gt_box in tf.unstack(gt_box_batch)]
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
if unmatched_class_label_batch is None:
unmatched_class_label_batch = [None] * len(gt_class_targets_batch)
pred_class_batch = tf.unstack(pred_class_batch)
for (pred_boxes, gt_boxes, pred_classes, gt_class_targets, gt_weights,
unmatched_class_label) in zip(pred_box_batch, gt_box_batch,
pred_class_batch, gt_class_targets_batch,
gt_weights_batch,
unmatched_class_label_batch):
(cls_targets, cls_weights, reg_targets,
reg_weights) = self.assign(pred_boxes, gt_boxes, pred_classes,
gt_class_targets, gt_weights,
unmatched_class_label)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights)
def assign(self,
pred_boxes,
gt_boxes,
pred_classes,
gt_labels,
gt_weights=None,
unmatched_class_label=None):
"""Assign classification and regression targets to each box_pred.
For a given set of pred_boxes and groundtruth detections, match pred_boxes
to gt_boxes and assign classification and regression targets to
each box_pred as well as weights based on the resulting match (specifying,
e.g., which pred_boxes should not contribute to training loss).
pred_boxes that are not matched to anything are given a classification
target of `unmatched_cls_target`.
Args:
pred_boxes: a BoxList representing N pred_boxes
gt_boxes: a BoxList representing M groundtruth boxes
pred_classes: A tensor with shape [max_num_boxes, num_classes]
to be used by certain similarity calculators.
gt_labels: a tensor of shape [M, num_classes]
with labels for each of the ground_truth boxes. The subshape
[num_classes] can be empty (corresponding to scalar inputs). When set
to None, gt_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
gt_weights: a float tensor of shape [M] indicating the weight to
assign to all pred_boxes match to a particular groundtruth box. The
weights must be in [0., 1.]. If None, all weights are set to 1.
Generally no groundtruth boxes with zero weight match to any pred_boxes
as matchers are aware of groundtruth weights. Additionally,
`cls_weights` and `reg_weights` are calculated using groundtruth
weights as an added safety.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
Returns:
cls_targets: a float32 tensor with shape [num_pred_boxes, num_classes],
where the subshape [num_classes] is compatible with gt_labels
which has shape [num_gt_boxes, num_classes].
cls_weights: a float32 tensor with shape [num_pred_boxes, num_classes],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_pred_boxes,
box_code_dimension]
reg_weights: a float32 tensor with shape [num_pred_boxes]
"""
if unmatched_class_label is None:
unmatched_class_label = tf.constant(
[1] + [0] * (gt_labels.shape[1] - 1), tf.float32)
if gt_weights is None:
num_gt_boxes = gt_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = gt_boxes.num_boxes()
gt_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
gt_boxes.add_field(fields.BoxListFields.classes, gt_labels)
pred_boxes.add_field(fields.BoxListFields.classes, pred_classes)
match_quality_matrix = self._similarity_calc.compare(
gt_boxes,
pred_boxes)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(gt_weights, 0))
matched_gt_boxes = match.gather_based_on_match(
gt_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
ty, tx, th, tw = matched_gt_boxlist.get_center_coordinates_and_sizes()
reg_targets = tf.transpose(tf.stack([ty, tx, th, tw]))
cls_targets = match.gather_based_on_match(
gt_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
reg_weights = match.gather_based_on_match(
gt_weights,
ignored_value=0.,
unmatched_value=0.)
cls_weights = match.gather_based_on_match(
gt_weights,
ignored_value=0.,
unmatched_value=1)
# convert cls_weights from per-box_pred to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_multiple = tf.concat(
[tf.constant([1]), class_label_shape],
axis=0)
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
return (cls_targets, cls_weights, reg_targets, reg_weights)
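# Illustrative sketch (assumed tensors): per-image usage of the DETR assigner
# with BoxLists of predicted and groundtruth boxes plus class scores/labels.
#
#   detr_assigner = DETRTargetAssigner()
#   cls_t, cls_w, reg_t, reg_w = detr_assigner.assign(
#       pred_boxes=box_list.BoxList(pred_boxes_tensor),  # [num_queries, 4]
#       gt_boxes=box_list.BoxList(gt_boxes_tensor),      # [num_gt, 4]
#       pred_classes=pred_class_logits,                  # [num_queries, num_classes]
#       gt_labels=gt_one_hot_labels)                     # [num_gt, num_classes]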
|
the-stack_0_17789 | """ Tools to get keyword tags (e.g., for XMP metadata) from iNaturalist observations """
from datetime import timedelta
from logging import getLogger
from os import makedirs
from os.path import dirname, getsize
from typing import Dict, List, Optional, Tuple
import requests_cache
import xmltodict
from pyinaturalist.constants import RANKS
from pyinaturalist.v0 import get_observations
from pyinaturalist.v1 import get_observation, get_observation_species_counts, get_taxa, get_taxa_by_id
from naturtag.constants import (
API_CACHE_EXPIRY_HOURS,
CACHE_BACKEND,
CACHE_PATH,
COMMON_NAME_IGNORE_TERMS,
DWC_NAMESPACES,
DWC_TAXON_TERMS,
OBSERVATION_KEYS,
TAXON_KEYS,
IntTuple,
StrTuple,
)
from naturtag.validation import format_file_size
# Patch requests to use CachedSession for pyinaturalist API calls
makedirs(dirname(CACHE_PATH), exist_ok=True)
requests_cache.install_cache(
CACHE_PATH,
backend=CACHE_BACKEND,
expire_after=timedelta(hours=API_CACHE_EXPIRY_HOURS),
)
logger = getLogger().getChild(__name__)
def get_http_cache_size() -> str:
"""Get the current size of the HTTP request cache, in human-readable format"""
return format_file_size(getsize(f'{CACHE_PATH}.{CACHE_BACKEND}'))
def get_observation_taxon(observation_id: int) -> int:
"""Get the current taxon ID for the given observation ID"""
logger.info(f'API: Fetching observation {observation_id}')
obs = get_observation(observation_id)
if obs.get('community_tax_id') and obs['community_tax_id'] != obs['taxon']['id']:
logger.warning('API: Community ID does not match selected taxon')
return obs['taxon']['id']
def get_observation_dwc_terms(observation_id: int) -> Dict[str, str]:
"""Get all DWC terms for an iNaturalist observation"""
logger.info(f'API: Getting Darwin Core terms for observation {observation_id}')
obs_dwc = get_observations(id=observation_id, response_format='dwc')
return convert_dwc_to_xmp(obs_dwc)
def get_taxon_dwc_terms(taxon_id: int) -> Dict[str, str]:
"""Get all DWC terms for an iNaturalist taxon.
Since there is no DWC format for ``GET /taxa``, we'll just search for a random observation
with this taxon ID, strip off the observation metadata, and keep only the taxon metadata.
"""
logger.info(f'API: Getting Darwin Core terms for taxon {taxon_id}')
obs_dwc = get_observations(taxon_id=taxon_id, per_page=1, response_format='dwc')
dwc_xmp = convert_dwc_to_xmp(obs_dwc)
return {k: v for k, v in dwc_xmp.items() if k in DWC_TAXON_TERMS}
# TODO: separate species, binomial, trinomial
def get_keywords(
observation_id: int = None,
taxon_id: int = None,
common: bool = False,
hierarchical: bool = False,
) -> List[str]:
"""Get all taxonomic keywords for a given observation or taxon"""
min_tax_id = taxon_id or get_observation_taxon(observation_id)
taxa = get_taxon_with_ancestors(min_tax_id)
keywords = get_taxonomy_keywords(taxa)
if hierarchical:
keywords.extend(get_hierarchical_keywords(keywords))
if common:
keywords.extend(get_common_keywords(taxa))
keywords.append(f'inaturalist:taxon_id={min_tax_id}')
keywords.append(f'dwc:taxonID={min_tax_id}')
if observation_id:
keywords.append(f'inaturalist:observation_id={observation_id}')
keywords.append(f'dwc:catalogNumber={observation_id}')
logger.info(f'API: {len(keywords)} total keywords generated')
return keywords
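# Illustrative sketch (hypothetical IDs and output): tagging an observation
# with common names and hierarchical keywords enabled.
#
# keywords = get_keywords(observation_id=12345, common=True, hierarchical=True)
# # e.g. ['taxonomy:kingdom=Animalia', ..., 'taxonomy:species=...',
# #       'inaturalist:taxon_id=67890', 'dwc:taxonID=67890',
# #       'inaturalist:observation_id=12345', 'dwc:catalogNumber=12345']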
def get_taxon_children(taxon_id: int) -> List[Dict]:
"""Get a taxon's children"""
logger.info(f'API: Fetching children of taxon {taxon_id}')
r = get_taxa(parent_id=taxon_id)
logger.info(f'API: {len(r["results"])} child taxa found')
return r['results']
def get_taxon_ancestors(taxon_id: int) -> List[Dict]:
"""Get a taxon's parents"""
return get_taxon_with_ancestors(taxon_id)[:-1]
def get_taxon_with_ancestors(taxon_id: int) -> List[Dict]:
"""Get a taxon with all its parents"""
logger.info(f'API: Fetching parents of taxon {taxon_id}')
results = get_taxa_by_id(taxon_id).get('results', [])
if not results:
logger.info(f'API: taxon {taxon_id} not found')
return []
taxon = results[0]
logger.info(f'API: {len(taxon["ancestors"])} parent taxa found')
return taxon['ancestors'] + [taxon]
# TODO: This should be reorganized somehow, I don't quite like the look of it;
# image_metadata module depends on this module and vice versa (kinda)
def get_taxon_and_obs_from_metadata(metadata) -> Tuple[Dict, Dict]:
logger.info(f'API: Searching for matching taxon and/or observation for {metadata.image_path}')
taxon, observation = get_observation_from_metadata(metadata)
if not taxon and metadata.has_taxon:
taxon = get_taxon_from_metadata(metadata)
if not taxon:
logger.info('API: No taxon found')
return taxon, observation
def get_observation_from_metadata(metadata) -> Tuple[Dict, Dict]:
if not metadata.observation_id:
logger.info('API: No observation ID specified')
return None, None
observation = get_observation(metadata.observation_id)
taxon = None
taxon_id = observation.get('taxon', {}).get('id')
# Handle observation with no taxon ID (e.g., not yet identified)
if taxon_id:
taxon = get_taxa_by_id(taxon_id).get('results', [None])[0]
logger.info(f'API: Found observation {metadata.observation_id} and taxon {taxon_id}')
else:
logger.warning(f'API: Observation {metadata.observation_id} is unidentified')
return taxon, observation
def get_taxon_from_metadata(metadata) -> Optional[Dict]:
"""Fetch taxon record from MetaMetadata object: either by ID or rank + name"""
rank, name = metadata.min_rank
params = {'id': metadata.taxon_id} if metadata.taxon_id else {'rank': rank, 'q': name}
logger.info(f'API: Querying taxon by: {params}')
results = get_taxa(**params)['results']
if results:
logger.info('API: Taxon found')
return results[0]
else:
return None
def get_taxonomy_keywords(taxa: List[Dict]) -> List[str]:
"""Format a list of taxa into rank keywords"""
return [quote(f'taxonomy:{t["rank"]}={t["name"]}') for t in taxa]
def get_common_keywords(taxa: List[Dict]) -> List[str]:
"""Format a list of taxa into common name keywords.
Filters out terms that aren't useful to keep as tags
"""
keywords = [t.get('preferred_common_name', '') for t in taxa]
def is_ignored(kw):
return any([ignore_term in kw.lower() for ignore_term in COMMON_NAME_IGNORE_TERMS])
common_keywords = [quote(kw) for kw in keywords if kw and not is_ignored(kw)]
logger.info(
f'API: {len(keywords) - len(common_keywords)} out of {len(keywords)} common names ignored'
)
return common_keywords
def get_observed_taxa(username: str, include_casual: bool = False) -> Dict[int, int]:
"""Get counts of taxa observed by the user, ordered by number of observations descending"""
if not username:
return {}
logger.info(f'API: Searching for user-observed taxa (casual: {include_casual})')
response = get_observation_species_counts(
user_login=username,
verifiable=None if include_casual else True, # False will return *only* casual observations
)
logger.info(f'API: {len(response["results"])} user-observed taxa found')
observed_taxa = {r['taxon']['id']: r['count'] for r in response['results']}
return dict(sorted(observed_taxa.items(), key=lambda x: x[1], reverse=True))
# TODO: Also include common names in hierarchy?
def get_hierarchical_keywords(keywords: List) -> List[str]:
hier_keywords = [keywords[0]]
for rank_name in keywords[1:]:
hier_keywords.append(f'{hier_keywords[-1]}|{rank_name}')
return hier_keywords
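# Illustrative example (not from the source): given ['Animalia', 'Chordata', 'Aves'],
# get_hierarchical_keywords returns ['Animalia', 'Animalia|Chordata', 'Animalia|Chordata|Aves'].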
def sort_taxonomy_keywords(keywords: List[str]) -> List[str]:
"""Sort keywords by taxonomic rank, where applicable"""
def _get_rank_idx(tag):
return get_rank_idx(tag.split(':')[-1].split('=')[0])
return sorted(keywords, key=_get_rank_idx, reverse=True)
def get_rank_idx(rank: str) -> int:
return RANKS.index(rank) if rank in RANKS else 0
def get_inaturalist_ids(metadata):
"""Look for taxon and/or observation IDs from metadata if available"""
# Get first non-None value from specified keys, if any; otherwise return None
def _first_match(d, keys):
id = next(filter(None, map(d.get, keys)), None)
return int(id) if id else None
# Check all possible keys for valid taxon and observation IDs
taxon_id = _first_match(metadata, TAXON_KEYS)
observation_id = _first_match(metadata, OBSERVATION_KEYS)
logger.info(f'API: Taxon ID: {taxon_id} | Observation ID: {observation_id}')
return taxon_id, observation_id
def get_min_rank(metadata: Dict[str, str]) -> StrTuple:
"""Get the lowest (most specific) taxonomic rank from tags, if any"""
for rank in RANKS:
if rank in metadata:
logger.info(f'API: Found minimum rank: {rank} = {metadata[rank]}')
return rank, metadata[rank]
return None, None
def quote(s: str) -> str:
"""Surround keyword in quotes if it contains whitespace"""
return f'"{s}"' if ' ' in s else s
def convert_dwc_to_xmp(dwc: str) -> Dict[str, str]:
"""
Get all DWC terms from XML content containing a SimpleDarwinRecordSet, and format them as
XMP tags. For example: ``'dwc:species' -> 'Xmp.dwc.species'``
"""
# Get inner record as a dict, if it exists
xml_dict = xmltodict.parse(dwc)
dwr = xml_dict.get('dwr:SimpleDarwinRecordSet', {}).get('dwr:SimpleDarwinRecord')
if not dwr:
logger.warning('API: No SimpleDarwinRecord found')
return {}
# iNat sometimes includes duplicate occurrence IDs
if isinstance(dwr['dwc:occurrenceID'], list):
dwr['dwc:occurrenceID'] = dwr['dwc:occurrenceID'][0]
def _format_term(k):
ns, term = k.split(':')
return f'Xmp.{ns}.{term}'
def _include_term(k):
ns = k.split(':')[0]
return ns in DWC_NAMESPACES
# Format as XMP tags
return {_format_term(k): v for k, v in dwr.items() if _include_term(k)}
def get_ids_from_url(value: str) -> IntTuple:
"""If a URL is provided containing an ID, return the taxon and/or observation ID.
If it's an observation, fetch its taxon ID as well.
Returns:
taxon_id, observation_id
"""
taxon_id, observation_id = None, None
id = strip_url(value)
# TODO: Update after finishing Observation model
if 'observation' in value:
observation_id = id
json = get_observation(id)
taxon_id = json.get('taxon', {}).get('id')
elif 'taxa' in value:
taxon_id = id
return taxon_id, observation_id
def strip_url(value: str) -> Optional[int]:
"""If a URL is provided containing an ID, return just the ID"""
try:
return int(value.split('/')[-1].split('-')[0]) if value else None
except (TypeError, ValueError):
return None
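# Illustrative examples (hypothetical URLs/IDs, not from the source):
#   strip_url('https://www.inaturalist.org/observations/12345') -> 12345
#   strip_url('https://www.inaturalist.org/taxa/47219-Apis-mellifera') -> 47219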
|
the-stack_0_17790 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import mock
import unittest
import difflib
from six import StringIO
from collections import namedtuple
from azure.cli.core import AzCommandsLoader, MainCommandsLoader
from azure.cli.core.commands import AzCliCommand
from azure.cli.core.parser import AzCliCommandParser
from azure.cli.core.mock import DummyCli
from knack.arguments import enum_choice_list
class TestParser(unittest.TestCase):
def setUp(self):
self.io = StringIO()
def tearDown(self):
self.io.close()
def test_register_simple_commands(self):
def test_handler1():
pass
def test_handler2():
pass
cli = DummyCli()
cli.loader = mock.MagicMock()
cli.loader.cli_ctx = cli
command = AzCliCommand(cli.loader, 'command the-name', test_handler1)
command2 = AzCliCommand(cli.loader, 'sub-command the-second-name', test_handler2)
cmd_table = {'command the-name': command, 'sub-command the-second-name': command2}
cli.commands_loader.command_table = cmd_table
parser = AzCliCommandParser(cli)
parser.load_command_table(cli.commands_loader)
args = parser.parse_args('command the-name'.split())
self.assertIs(args.func, command)
args = parser.parse_args('sub-command the-second-name'.split())
self.assertIs(args.func, command2)
with mock.patch('azure.cli.core.parser.AzCliCommandParser.error', new=VerifyError(self)):
parser.parse_args('sub-command'.split())
self.assertTrue(AzCliCommandParser.error.called)
def test_required_parameter(self):
def test_handler(args): # pylint: disable=unused-argument
pass
cli = DummyCli()
cli.loader = mock.MagicMock()
cli.loader.cli_ctx = cli
command = AzCliCommand(cli.loader, 'test command', test_handler)
command.add_argument('req', '--req', required=True)
cmd_table = {'test command': command}
cli.commands_loader.command_table = cmd_table
parser = AzCliCommandParser(cli)
parser.load_command_table(cli.commands_loader)
args = parser.parse_args('test command --req yep'.split())
self.assertIs(args.func, command)
with mock.patch('azure.cli.core.parser.AzCliCommandParser.error', new=VerifyError(self)):
parser.parse_args('test command'.split())
self.assertTrue(AzCliCommandParser.error.called)
def test_nargs_parameter(self):
def test_handler():
pass
cli = DummyCli()
cli.loader = mock.MagicMock()
cli.loader.cli_ctx = cli
command = AzCliCommand(cli.loader, 'test command', test_handler)
command.add_argument('req', '--req', required=True, nargs=2)
cmd_table = {'test command': command}
cli.commands_loader.command_table = cmd_table
parser = AzCliCommandParser(cli)
parser.load_command_table(cli.commands_loader)
args = parser.parse_args('test command --req yep nope'.split())
self.assertIs(args.func, command)
with mock.patch('azure.cli.core.parser.AzCliCommandParser.error', new=VerifyError(self)):
parser.parse_args('test command -req yep'.split())
self.assertTrue(AzCliCommandParser.error.called)
def test_case_insensitive_enum_choices(self):
from enum import Enum
class TestEnum(Enum): # pylint: disable=too-few-public-methods
opt1 = "ALL_CAPS"
opt2 = "camelCase"
opt3 = "snake_case"
def test_handler():
pass
cli = DummyCli()
cli.loader = mock.MagicMock()
cli.loader.cli_ctx = cli
command = AzCliCommand(cli.loader, 'test command', test_handler)
command.add_argument('opt', '--opt', required=True, **enum_choice_list(TestEnum))
cmd_table = {'test command': command}
cli.commands_loader.command_table = cmd_table
parser = AzCliCommandParser(cli)
parser.load_command_table(cli.commands_loader)
args = parser.parse_args('test command --opt alL_cAps'.split())
self.assertEqual(args.opt, 'ALL_CAPS')
args = parser.parse_args('test command --opt CAMELCASE'.split())
self.assertEqual(args.opt, 'camelCase')
args = parser.parse_args('test command --opt sNake_CASE'.split())
self.assertEqual(args.opt, 'snake_case')
def _mock_import_lib(_):
mock_obj = mock.MagicMock()
mock_obj.__path__ = __name__
return mock_obj
def _mock_iter_modules(_):
return [(None, __name__, None)]
def _mock_extension_modname(ext_name, ext_dir):
return ext_name
def _mock_get_extensions():
MockExtension = namedtuple('Extension', ['name', 'preview', 'experimental', 'path', 'get_metadata'])
return [MockExtension(name=__name__ + '.ExtCommandsLoader', preview=False, experimental=False, path=None, get_metadata=lambda: {}),
MockExtension(name=__name__ + '.Ext2CommandsLoader', preview=False, experimental=False, path=None, get_metadata=lambda: {})]
def _mock_load_command_loader(loader, args, name, prefix):
from enum import Enum
class TestEnum(Enum): # pylint: disable=too-few-public-methods
enum_1 = 'enum_1'
enum_2 = 'enum_2'
def test_handler():
pass
class TestCommandsLoader(AzCommandsLoader):
def load_command_table(self, args):
super(TestCommandsLoader, self).load_command_table(args)
command = AzCliCommand(loader, 'test module', test_handler)
command.add_argument('opt', '--opt', required=True, **enum_choice_list(TestEnum))
self.command_table['test module'] = command
return self.command_table
# A command from an extension
class ExtCommandsLoader(AzCommandsLoader):
def load_command_table(self, args):
super(ExtCommandsLoader, self).load_command_table(args)
command = AzCliCommand(loader, 'test extension', test_handler)
command.add_argument('opt', '--opt', required=True, **enum_choice_list(TestEnum))
self.command_table['test extension'] = command
return self.command_table
if prefix == 'azure.cli.command_modules.':
command_loaders = {'TestCommandsLoader': TestCommandsLoader}
else:
command_loaders = {'ExtCommandsLoader': ExtCommandsLoader}
module_command_table = {}
for _, loader_cls in command_loaders.items():
command_loader = loader_cls(cli_ctx=loader.cli_ctx)
command_table = command_loader.load_command_table(args)
if command_table:
module_command_table.update(command_table)
loader.loaders.append(command_loader) # this will be used later by the load_arguments method
return module_command_table, command_loader.command_group_table
@mock.patch('importlib.import_module', _mock_import_lib)
@mock.patch('pkgutil.iter_modules', _mock_iter_modules)
@mock.patch('azure.cli.core.commands._load_command_loader', _mock_load_command_loader)
@mock.patch('azure.cli.core.extension.get_extension_modname', _mock_extension_modname)
@mock.patch('azure.cli.core.extension.get_extensions', _mock_get_extensions)
def test_parser_error_spellchecker(self):
cli = DummyCli()
main_loader = MainCommandsLoader(cli)
cli.loader = main_loader
cli.loader.load_command_table(None)
parser = cli.parser_cls(cli)
parser.load_command_table(cli.loader)
logger_msgs = []
choice_lists = []
original_get_close_matches = difflib.get_close_matches
def mock_log_error(_, msg):
logger_msgs.append(msg)
def mock_get_close_matches(*args, **kwargs):
choice_lists.append(original_get_close_matches(*args, **kwargs))
def mock_ext_cmd_tree_load(*args, **kwargs):
return {"test": {"new-ext": {"create": "new-ext-name", "reset": "another-ext-name"}}}
def mock_add_extension(*args, **kwargs):
pass
# run multiple faulty commands and save error logs, as well as close matches
with mock.patch('logging.Logger.error', mock_log_error), \
mock.patch('difflib.get_close_matches', mock_get_close_matches):
faulty_cmd_args = [
'test module1 --opt enum_1',
'test extension1 --opt enum_1',
'test foo_bar --opt enum_3',
'test module --opt enum_3',
'test extension --opt enum_3'
]
for text in faulty_cmd_args:
with self.assertRaises(SystemExit):
parser.parse_args(text.split())
parser.parse_args('test module --opt enum_1'.split())
# assert the right type of error msg is logged for command vs argument parsing
self.assertEqual(len(logger_msgs), 5)
for msg in logger_msgs[:3]:
self.assertIn("CommandNotFoundError", msg)
for msg in logger_msgs[3:]:
self.assertIn("not a valid value for '--opt'.", msg)
# assert the right choices are matched as "close".
# If these don't hold, matching algorithm should be deemed flawed.
for choices in choice_lists[:2]:
self.assertEqual(len(choices), 1)
self.assertEqual(len(choice_lists[2]), 0)
for choices in choice_lists[3:]:
self.assertEqual(len(choices), 2)
for choice in ['enum_1', 'enum_2']:
self.assertIn(choice, choices)
# test dynamic extension install
with mock.patch('logging.Logger.error', mock_log_error), \
mock.patch('azure.cli.core.extension.operations.add_extension', mock_add_extension), \
mock.patch('azure.cli.core.parser.AzCliCommandParser._get_extension_command_tree', mock_ext_cmd_tree_load), \
mock.patch('azure.cli.core.parser.AzCliCommandParser._get_extension_use_dynamic_install_config', return_value='yes_without_prompt'), \
mock.patch('azure.cli.core.parser.AzCliCommandParser._get_extension_run_after_dynamic_install_config', return_value=False):
with self.assertRaises(SystemExit):
parser.parse_args('test new-ext create --opt enum_2'.split())
self.assertIn("Extension new-ext-name installed. Please rerun your command.", logger_msgs[5])
with self.assertRaises(SystemExit):
parser.parse_args('test new-ext reset pos1 pos2'.split()) # test positional args
self.assertIn("Extension another-ext-name installed. Please rerun your command.", logger_msgs[6])
@mock.patch('importlib.import_module', _mock_import_lib)
@mock.patch('pkgutil.iter_modules', _mock_iter_modules)
@mock.patch('azure.cli.core.commands._load_command_loader', _mock_load_command_loader)
@mock.patch('azure.cli.core.extension.get_extension_modname', _mock_extension_modname)
@mock.patch('azure.cli.core.extension.get_extensions', _mock_get_extensions)
def test_parser_failure_recovery_recommendations(self):
cli = DummyCli()
main_loader = MainCommandsLoader(cli)
cli.loader = main_loader
cli.loader.load_command_table(None)
parser = cli.parser_cls(cli)
parser.load_command_table(cli.loader)
recommendation_provider_parameters = []
version = cli.get_cli_version()
expected_recommendation_provider_parameters = [
# version, command, parameters, extension
ExpectedParameters(version, 'test module1', ['--opt'], False),
ExpectedParameters(version, 'test extension1', ['--opt'], False),
ExpectedParameters(version, 'foo_bar', ['--opt'], False),
ExpectedParameters(version, 'test module', ['--opt'], False),
ExpectedParameters(version, 'test extension', ['--opt'], True)
]
def mock_recommendation_provider(*args):
recommendation_provider_parameters.append(tuple(args))
return []
AzCliCommandParser.recommendation_provider = mock_recommendation_provider
faulty_cmd_args = [
'test module1 --opt enum_1',
'test extension1 --opt enum_1',
'test foo_bar --opt enum_3',
'test module --opt enum_3',
'test extension --opt enum_3'
]
for text in faulty_cmd_args:
with self.assertRaises(SystemExit):
parser.parse_args(text.split())
for i, parameters in enumerate(recommendation_provider_parameters):
version, command, parameters, extension = parameters
expected = expected_recommendation_provider_parameters[i]
self.assertEqual(expected.version, version)
self.assertIn(expected.command, command)
self.assertEqual(expected.parameters, parameters)
if expected.has_extension:
self.assertIsNotNone(extension)
else:
self.assertIsNone(extension)
class VerifyError(object): # pylint: disable=too-few-public-methods
def __init__(self, test, substr=None):
self.test = test
self.substr = substr
self.called = False
def __call__(self, message):
if self.substr:
self.test.assertTrue(message.find(self.substr) >= 0)
self.called = True
ExpectedParameters = namedtuple('ExpectedParameters', ['version', 'command', 'parameters', 'has_extension'])
if __name__ == '__main__':
unittest.main()
|
the-stack_0_17791 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration code for a LibAFL-based fuzzer."""
import os
import shutil
import subprocess
from fuzzers import utils
def prepare_fuzz_environment(input_corpus):
"""Prepare to fuzz with a LibAFL-based fuzzer."""
os.environ['ASAN_OPTIONS'] = "abort_on_error=1:detect_leaks=0:"\
"malloc_context_size=0:symbolize=0:"\
"allocator_may_return_null=1:"\
"detect_odr_violation=0:handle_segv=0:"\
"handle_sigbus=0:handle_abort=0:"\
"handle_sigfpe=0:handle_sigill=0"
os.environ['UBSAN_OPTIONS'] = "abort_on_error=1:"\
"allocator_release_to_os_interval_ms=500:"\
"handle_abort=0:handle_segv=0:"\
"handle_sigbus=0:handle_sigfpe=0:"\
"handle_sigill=0:print_stacktrace=0:"\
"symbolize=0:symbolize_inline_frames=0"
# Create at least one non-empty seed to start.
utils.create_seed_file_for_empty_corpus(input_corpus)
def build(): # pylint: disable=too-many-branches,too-many-statements
"""Build benchmark."""
benchmark_name = os.environ['BENCHMARK'].lower()
if 'php' in benchmark_name:
copy_file = '/libafl_fuzzbench/grammars/php_nautilus.json'
elif 'ruby' in benchmark_name:
copy_file = '/libafl_fuzzbench/grammars/ruby_nautilus.json'
elif 'js' in benchmark_name or 'javascript' in benchmark_name:
copy_file = '/libafl_fuzzbench/grammars/js_nautilus.json'
else:
raise RuntimeError('Unsupported benchmark, unavailable grammar')
dest = os.path.join(os.environ['OUT'], 'grammar.json')
shutil.copy(copy_file, dest)
os.environ['CC'] = '/libafl_fuzzbench/target/release/token_level_cc'
os.environ['CXX'] = '/libafl_fuzzbench/target/release/token_level_cxx'
os.environ['ASAN_OPTIONS'] = 'abort_on_error=0:allocator_may_return_null=1'
os.environ['UBSAN_OPTIONS'] = 'abort_on_error=0'
cflags = ['--libafl']
utils.append_flags('CFLAGS', cflags)
utils.append_flags('CXXFLAGS', cflags)
os.environ['FUZZER_LIB'] = '/emptylib.a'
utils.build_benchmark()
def fuzz(input_corpus, output_corpus, target_binary):
"""Run fuzzer."""
prepare_fuzz_environment(input_corpus)
command = [target_binary]
grammar = os.path.join(os.environ['OUT'], 'grammar.json')
command += (['-o', output_corpus, '-g', grammar])
print(command)
subprocess.check_call(command, cwd=os.environ['OUT'])
|
the-stack_0_17793 | # example of extracting bounding boxes from an annotation file
from xml.etree import ElementTree
from os import listdir
from os.path import isfile, join
from sys import stdout
mypath = '../annotations/xmls/'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
from os import walk
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
f.extend(filenames)
break
# function to extract bounding boxes from an annotation file
def extract_boxes(filename):
# load and parse the file
tree = ElementTree.parse(filename)
# get the root of the document
root = tree.getroot()
# extract each bounding box
boxes = list()
for box in root.findall('.//bndbox'):
xmin = int(box.find('xmin').text)
ymin = int(box.find('ymin').text)
xmax = int(box.find('xmax').text)
ymax = int(box.find('ymax').text)
coors = [xmin, ymin, xmax, ymax]
boxes.append(coors)
# extract image dimensions
width = int(root.find('.//size/width').text)
height = int(root.find('.//size/height').text)
return boxes, width, height
# extract details form annotation file
# summarize extracted details
for name in f:
    if "xml" in name:
        boxes, w, h = extract_boxes('../annotations/xmls/' + name)
        stdout.write(name)
        stdout.flush()
print(boxes, w, h) |
the-stack_0_17794 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 00:47:20 2021
@author: roberta
"""
class shortestSeekTimeFirst:
def processData(self, fileName):
moves = 0
fp = open(fileName, 'r')
lines = fp.readlines()
# number_of_cylinders = int(lines[0])
if len(lines) > 2:
init_head = int(lines[1])
req = lines[2:]
req = [int(r) for r in req]
number_of_req = len(req)
while number_of_req > 0:
distances = [abs(r - init_head) for r in req]
min_distance = min(distances)
i = distances.index(min_distance)
# i, m = short_distance(req,init_head)
moves += min_distance
init_head = req[i]
                req.pop(i)  # remove only the serviced request (safe when duplicate cylinder numbers exist)
number_of_req -= 1
fp.close()
print('SSF ', moves)
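# Illustrative input/output (hypothetical file contents, not from the source):
# a file whose lines are "200", "50", "10", "60", "45" (cylinder count, initial head, then requests)
# services the requests in the order 45, 60, 10 and prints a total head movement of 70.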
|
the-stack_0_17796 | # -*- coding:utf-8 -*-
import argparse
import os
import sys
import zipfile
import konlpy
from tqdm import tqdm
sys.path.append("..")
from common.mecab import encode_mecab
def pos_corpus(args, name, tagger):
"""
    Build a corpus file for each morphological analyzer
    :param args: input arguments
    :param name: name of the morphological analyzer
    :param tagger: morphological analyzer (tagger) object
    :return: path of the resulting corpus file
"""
output = os.path.join(args.data_dir, f"kowiki_{name}.txt")
with zipfile.ZipFile(f"{args.data_dir}/{args.zip}") as z:
total = 0
with z.open(args.txt) as i_f:
for _, _ in enumerate(i_f):
total += 1
with z.open(args.txt) as i_f:
with open(output, "w") as o_f:
for i, line in enumerate(tqdm(i_f, total=total, desc=f"{name}")):
line = line.strip().decode("UTF-8", "ignore")
if line:
tokens, _ = encode_mecab(tagger, line)
o_f.write(" ".join(tokens))
o_f.write("\n")
return output
def main(args):
"""
main function
:param args: input arguments
"""
output = pos_corpus(args, "mecab", konlpy.tag.Mecab())
basename = os.path.basename(output)
# zip
with zipfile.ZipFile(os.path.join(args.data_dir, f"{basename}.zip"), "w") as z:
z.write(output, os.path.basename(output))
os.remove(output)
def parse_args():
"""
build arguments
:return args: input arguments
"""
parser = argparse.ArgumentParser(description="Make mecab corpus arguments.")
parser.add_argument("--data_dir", type=str, default="kowiki", required=False, help="kowiki data directory")
parser.add_argument("--zip", type=str, default="kowiki.txt.zip", required=False, help="kowiki source zip file")
parser.add_argument("--txt", type=str, default="kowiki.txt", required=False, help="kowiki source txt file")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
the-stack_0_17797 | from PIL import Image
import numpy as np
img = Image.open("img2.jpg")
arr = np.array(img)
a = len(arr)
a1 = len(arr[1])
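# Process the image in 10x10 pixel blocks (assumes both dimensions are multiples of 10):
# average the RGB values of each block, quantize the average to a multiple of 50,
# and write that single value back to all three channels for a pixelated grayscale effect.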
i = 0
while i < a:
j = 0
while j < a1:
s = 0
for n in range(i, i + 10):
for n1 in range(j, j + 10):
s += (arr[n][n1][0]/3 + arr[n][n1][1]/3 + arr[n][n1][2]/3)
s = int(s // 100)
for n in range(i, i + 10):
for n1 in range(j, j + 10):
arr[n][n1][0] = int(s // 50) * 50
arr[n][n1][1] = int(s // 50) * 50
arr[n][n1][2] = int(s // 50) * 50
j = j + 10
i = i + 10
res = Image.fromarray(arr)
res.save('res.jpg')
|
the-stack_0_17798 | # ----------------------------------------------------------------
# Copyright 2017 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""test_restconf_provider.py
NetconfServiceProvider test
"""
from __future__ import absolute_import
import sys
import unittest
from ydk.providers import NetconfServiceProvider
from test_utils import ParametrizedTestCase
from test_utils import get_device_info
class SanityTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ncc = NetconfServiceProvider(
cls.hostname,
cls.username,
cls.password,
cls.port,
cls.protocol,
cls.on_demand,
cls.common_cache,
cls.timeout)
def test_get_session(self):
session = self.ncc.get_session()
self.assertEqual(session is not None, True)
def test_get_encoding(self):
encoding = self.ncc.get_encoding()
self.assertEqual(encoding is not None, True)
def test_get_capabilities(self):
capabilities = self.ncc.get_capabilities()
self.assertEqual(capabilities is not None, True)
if __name__ == '__main__':
device, non_demand, common_cache, timeout = get_device_info()
suite = unittest.TestSuite()
suite.addTest(ParametrizedTestCase.parametrize(
SanityTest,
device=device,
non_demand=non_demand,
common_cache=common_cache,
timeout=timeout))
ret = not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
sys.exit(ret)
|
the-stack_0_17800 | import copy
import importlib
import itertools
from typing import Tuple, Dict, Callable, List, Optional
import numpy as np
from highway_env.types import Vector, Interval
def do_every(duration: float, timer: float) -> bool:
return duration < timer
def lmap(v: float, x: Interval, y: Interval) -> float:
"""Linear map of value v with range x to desired range y."""
return y[0] + (v - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
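# Illustrative example (not part of the original module): lmap(5.0, [0.0, 10.0], [0.0, 1.0]) == 0.5,
# i.e. the value is rescaled from the source interval x to the target interval y.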
def class_from_path(path: str) -> Callable:
module_name, class_name = path.rsplit(".", 1)
class_object = getattr(importlib.import_module(module_name), class_name)
return class_object
def constrain(x: float, a: float, b: float) -> np.ndarray:
return np.clip(x, a, b)
def not_zero(x: float, eps: float = 1e-2) -> float:
if abs(x) > eps:
return x
elif x > 0:
return eps
else:
return -eps
def wrap_to_pi(x: float) -> float:
return ((x + np.pi) % (2 * np.pi)) - np.pi
def point_in_rectangle(point: Vector, rect_min: Vector, rect_max: Vector) -> bool:
"""
Check if a point is inside a rectangle
:param point: a point (x, y)
:param rect_min: x_min, y_min
:param rect_max: x_max, y_max
"""
return rect_min[0] <= point[0] <= rect_max[0] and rect_min[1] <= point[1] <= rect_max[1]
def point_in_rotated_rectangle(point: np.ndarray, center: np.ndarray, length: float, width: float, angle: float) \
-> bool:
"""
Check if a point is inside a rotated rectangle
:param point: a point
:param center: rectangle center
:param length: rectangle length
:param width: rectangle width
:param angle: rectangle angle [rad]
:return: is the point inside the rectangle
"""
c, s = np.cos(angle), np.sin(angle)
r = np.array([[c, -s], [s, c]])
ru = r.dot(point - center)
return point_in_rectangle(ru, (-length/2, -width/2), (length/2, width/2))
def point_in_ellipse(point: Vector, center: Vector, angle: float, length: float, width: float) -> bool:
"""
Check if a point is inside an ellipse
:param point: a point
:param center: ellipse center
:param angle: ellipse main axis angle
:param length: ellipse big axis
:param width: ellipse small axis
:return: is the point inside the ellipse
"""
c, s = np.cos(angle), np.sin(angle)
    r = np.array([[c, -s], [s, c]])
ru = r.dot(point - center)
return np.sum(np.square(ru / np.array([length, width]))) < 1
def rotated_rectangles_intersect(rect1: Tuple[Vector, float, float, float],
rect2: Tuple[Vector, float, float, float]) -> bool:
"""
Do two rotated rectangles intersect?
:param rect1: (center, length, width, angle)
:param rect2: (center, length, width, angle)
:return: do they?
"""
return has_corner_inside(rect1, rect2) or has_corner_inside(rect2, rect1)
def has_corner_inside(rect1: Tuple[Vector, float, float, float],
rect2: Tuple[Vector, float, float, float]) -> bool:
"""
Check if rect1 has a corner inside rect2
:param rect1: (center, length, width, angle)
:param rect2: (center, length, width, angle)
"""
(c1, l1, w1, a1) = rect1
(c2, l2, w2, a2) = rect2
c1 = np.array(c1)
l1v = np.array([l1/2, 0])
w1v = np.array([0, w1/2])
r1_points = np.array([[0, 0],
- l1v, l1v, -w1v, w1v,
- l1v - w1v, - l1v + w1v, + l1v - w1v, + l1v + w1v])
c, s = np.cos(a1), np.sin(a1)
r = np.array([[c, -s], [s, c]])
rotated_r1_points = r.dot(r1_points.transpose()).transpose()
return any([point_in_rotated_rectangle(c1+np.squeeze(p), c2, l2, w2, a2) for p in rotated_r1_points])
def project_polygon(polygon: Vector, axis: Vector) -> Tuple[float, float]:
min_p, max_p = None, None
for p in polygon:
projected = p.dot(axis)
if min_p is None or projected < min_p:
min_p = projected
if max_p is None or projected > max_p:
max_p = projected
return min_p, max_p
def interval_distance(min_a: float, max_a: float, min_b: float, max_b: float):
"""
Calculate the distance between [minA, maxA] and [minB, maxB]
The distance will be negative if the intervals overlap
"""
return min_b - max_a if min_a < min_b else min_a - max_b
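# Illustrative examples (not part of the original module):
#   interval_distance(0, 2, 1, 3) == -1, negative because [0, 2] and [1, 3] overlap
#   interval_distance(0, 1, 3, 4) == 2, the gap between the two intervals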
def are_polygons_intersecting(a: Vector, b: Vector,
displacement_a: Vector, displacement_b: Vector) \
-> Tuple[bool, bool, Optional[np.ndarray]]:
"""
Checks if the two polygons are intersecting.
See https://www.codeproject.com/Articles/15573/2D-Polygon-Collision-Detection
:param a: polygon A, as a list of [x, y] points
:param b: polygon B, as a list of [x, y] points
:param displacement_a: velocity of the polygon A
:param displacement_b: velocity of the polygon B
:return: are intersecting, will intersect, translation vector
"""
intersecting = will_intersect = True
min_distance = np.inf
translation, translation_axis = None, None
for polygon in [a, b]:
for p1, p2 in zip(polygon, polygon[1:]):
normal = np.array([-p2[1] + p1[1], p2[0] - p1[0]])
normal /= np.linalg.norm(normal)
min_a, max_a = project_polygon(a, normal)
min_b, max_b = project_polygon(b, normal)
if interval_distance(min_a, max_a, min_b, max_b) > 0:
intersecting = False
velocity_projection = normal.dot(displacement_a - displacement_b)
if velocity_projection < 0:
min_a += velocity_projection
else:
max_a += velocity_projection
distance = interval_distance(min_a, max_a, min_b, max_b)
if distance > 0:
will_intersect = False
if not intersecting and not will_intersect:
break
if abs(distance) < min_distance:
min_distance = abs(distance)
d = a[:-1].mean(axis=0) - b[:-1].mean(axis=0) # center difference
translation_axis = normal if d.dot(normal) > 0 else -normal
if will_intersect:
translation = min_distance * translation_axis
return intersecting, will_intersect, translation
def confidence_ellipsoid(data: Dict[str, np.ndarray], lambda_: float = 1e-5, delta: float = 0.1, sigma: float = 0.1,
param_bound: float = 1.0) -> Tuple[np.ndarray, np.ndarray, float]:
"""
Compute a confidence ellipsoid over the parameter theta, where y = theta^T phi
:param data: a dictionary {"features": [phi_0,...,phi_N], "outputs": [y_0,...,y_N]}
:param lambda_: l2 regularization parameter
:param delta: confidence level
:param sigma: noise covariance
:param param_bound: an upper-bound on the parameter norm
:return: estimated theta, Gramian matrix G_N_lambda, radius beta_N
"""
phi = np.array(data["features"])
y = np.array(data["outputs"])
g_n_lambda = 1/sigma * np.transpose(phi) @ phi + lambda_ * np.identity(phi.shape[-1])
theta_n_lambda = np.linalg.inv(g_n_lambda) @ np.transpose(phi) @ y / sigma
d = theta_n_lambda.shape[0]
beta_n = np.sqrt(2*np.log(np.sqrt(np.linalg.det(g_n_lambda) / lambda_ ** d) / delta)) + \
np.sqrt(lambda_*d) * param_bound
return theta_n_lambda, g_n_lambda, beta_n
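# Reading of the code above (added for clarity): the estimate is the regularized least-squares solution
#   theta_N = (Phi^T Phi / sigma + lambda * I)^(-1) Phi^T y / sigma
# and beta_N is the radius of the confidence ellipsoid {theta : ||theta - theta_N||_{G_N_lambda} <= beta_N}.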
def confidence_polytope(data: dict, parameter_box: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray, float]:
"""
Compute a confidence polytope over the parameter theta, where y = theta^T phi
:param data: a dictionary {"features": [phi_0,...,phi_N], "outputs": [y_0,...,y_N]}
:param parameter_box: a box [theta_min, theta_max] containing the parameter theta
:return: estimated theta, polytope vertices, Gramian matrix G_N_lambda, radius beta_N
"""
param_bound = np.amax(np.abs(parameter_box))
theta_n_lambda, g_n_lambda, beta_n = confidence_ellipsoid(data, param_bound=param_bound)
values, pp = np.linalg.eig(g_n_lambda)
radius_matrix = np.sqrt(beta_n) * np.linalg.inv(pp) @ np.diag(np.sqrt(1 / values))
h = np.array(list(itertools.product([-1, 1], repeat=theta_n_lambda.shape[0])))
d_theta = np.array([radius_matrix @ h_k for h_k in h])
# Clip the parameter and confidence region within the prior parameter box.
theta_n_lambda = np.clip(theta_n_lambda, parameter_box[0], parameter_box[1])
for k, _ in enumerate(d_theta):
d_theta[k] = np.clip(d_theta[k], parameter_box[0] - theta_n_lambda, parameter_box[1] - theta_n_lambda)
return theta_n_lambda, d_theta, g_n_lambda, beta_n
def is_valid_observation(y: np.ndarray, phi: np.ndarray, theta: np.ndarray, gramian: np.ndarray,
beta: float, sigma: float = 0.1) -> bool:
"""
Check if a new observation (phi, y) is valid according to a confidence ellipsoid on theta.
:param y: observation
:param phi: feature
:param theta: estimated parameter
:param gramian: Gramian matrix
:param beta: ellipsoid radius
:param sigma: noise covariance
:return: validity of the observation
"""
y_hat = np.tensordot(theta, phi, axes=[0, 0])
error = np.linalg.norm(y - y_hat)
eig_phi, _ = np.linalg.eig(phi.transpose() @ phi)
eig_g, _ = np.linalg.eig(gramian)
error_bound = np.sqrt(np.amax(eig_phi) / np.amin(eig_g)) * beta + sigma
return error < error_bound
def is_consistent_dataset(data: dict, parameter_box: np.ndarray = None) -> bool:
"""
Check whether a dataset {phi_n, y_n} is consistent
The last observation should be in the confidence ellipsoid obtained by the N-1 first observations.
:param data: a dictionary {"features": [phi_0,...,phi_N], "outputs": [y_0,...,y_N]}
:param parameter_box: a box [theta_min, theta_max] containing the parameter theta
:return: consistency of the dataset
"""
train_set = copy.deepcopy(data)
y, phi = train_set["outputs"].pop(-1), train_set["features"].pop(-1)
y, phi = np.array(y)[..., np.newaxis], np.array(phi)[..., np.newaxis]
if train_set["outputs"] and train_set["features"]:
theta, _, gramian, beta = confidence_polytope(train_set, parameter_box=parameter_box)
return is_valid_observation(y, phi, theta, gramian, beta)
else:
return True
def near_split(x, num_bins=None, size_bins=None):
"""
Split a number into several bins with near-even distribution.
You can either set the number of bins, or their size.
The sum of bins always equals the total.
:param x: number to split
:param num_bins: number of bins
:param size_bins: size of bins
:return: list of bin sizes
"""
if num_bins:
quotient, remainder = divmod(x, num_bins)
return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)
elif size_bins:
return near_split(x, num_bins=int(np.ceil(x / size_bins)))
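# Illustrative examples (not part of the original module):
#   near_split(10, num_bins=3) -> [4, 3, 3]
#   near_split(10, size_bins=4) -> [4, 3, 3]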
def distance_to_circle(center, radius, direction):
scaling = radius * np.ones((2, 1))
a = np.linalg.norm(direction / scaling) ** 2
b = -2 * np.dot(np.transpose(center), direction / np.square(scaling))
c = np.linalg.norm(center / scaling) ** 2 - 1
root_inf, root_sup = solve_trinom(a, b, c)
if root_inf and root_inf > 0:
distance = root_inf
elif root_sup and root_sup > 0:
distance = 0
else:
distance = np.infty
return distance
def solve_trinom(a, b, c):
delta = b ** 2 - 4 * a * c
if delta >= 0:
return (-b - np.sqrt(delta)) / (2 * a), (-b + np.sqrt(delta)) / (2 * a)
else:
return None, None
|
the-stack_0_17801 | import numpy as np
import sklearn
from sklearn.datasets import fetch_openml
mnist = fetch_openml(name='mnist_784')
print(mnist)
len(mnist['data'])
X, y = mnist['data'], mnist['target']
print(X)
y = y.astype("float")
print(y)
#visualization
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
def viz(n):
plt.imshow(X[n].reshape(28,28))
plt.show()
return
#splitting train , test sets method 1
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True, random_state=42)
#splitting train , test sets method 2
num_split = 60000
X_train, X_test, y_train, y_test = X[:num_split], X[num_split:], y[:num_split], y[num_split:]
#shuffling dataset
shuffle_index = np.random.permutation(num_split)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
#binary classifier (convert dataset to zero and non zero)
y_train_0 = (y_train == 0)
y_test_0 = (y_test == 0)
#SGD Classifier training
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(random_state = 0)
clf.fit(X_train, y_train_0)
#Prediction
viz(1000)
print(clf.predict(X[1000].reshape(1, -1)))
viz(2000)
print(clf.predict(X[2000].reshape(1, -1)))
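# Possible next step (illustrative sketch, not part of the original script):
# evaluate the binary "is it a zero?" classifier on the held-out split.
# from sklearn.metrics import accuracy_score
# y_pred_0 = clf.predict(X_test)
# print(accuracy_score(y_test_0, y_pred_0))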
|
the-stack_0_17802 | import logging
import time
from typing import Optional
import newrelic
import redis
from fastapi import APIRouter, HTTPException, status
from redisearch import Result
from sitesearch import indexer
from sitesearch.config import get_config
from sitesearch.connections import get_async_redis_connection
from sitesearch.query_parser import parse
from sitesearch.transformer import transform_documents
redis_client = get_async_redis_connection()
log = logging.getLogger(__name__)
DEFAULT_NUM = 30
MAX_NUM = 100
# Until we can get MINPREFIX set to 1 on Redis Cluster, map
# single-character queries to two-character queries. Use a
# static map so results are similar across queries.
SINGLE_CHAR_MAP = {
'a': 'ac',
'b': 'be',
'c': 'co',
'd': 'de',
'e': 'en',
'f': 'fi',
'g': 'ge',
'h': 'hi',
'i': 'in',
'j': 'ja',
'k': 'ku',
'l': 'lo',
'm': 'ma',
'n': 'ne',
'o': 'of',
'p': 'pe',
'q': 'qu',
'r': 'ra',
's': 'se',
't': 'ta',
'u': 'us',
'v': 'vo',
'w': 'we',
'x': '.x',
'y': 'ya',
'z': 'zo'
}
router = APIRouter()
config = get_config()
@router.get("/search")
async def search(q: str,
from_url: Optional[str] = None,
start: Optional[int] = None,
num: Optional[int] = None,
site: Optional[str] = None):
"""
Make a full-text search against a site in the index.
GET params:
q: The search key. E.g. https://example.com/search?q=python
from_url: The client's current URL. Including this param will
boost pages in the current section of the site based
on top-level hierarchy. E.g. https://example.com/search?q=python&from_url=https://example.com/technology
This query will boost documents whose URLs start with https://example.com/technology.
start: For pagination. Controls the number of the document in the result
to start with. Defaults to 0. E.g. https://example.com/search?q=python&start=20
num: For pagination. Controls the number of documents to return, starting from
`start`. https://example.com/search?q=python&start=20&num=20
    site: The site to search. Used when sitesearch is indexing multiple sites.
          If this isn't specified, the query searches the default site specified in
          AppConfiguration. E.g. https://example.com/search?q=python&site=https://docs.redislabs.com
"""
from_url = from_url if from_url else ''
start = start if isinstance(start, int) else 0
num = num if isinstance(num, int) else DEFAULT_NUM
site_url = site if site else config.default_search_site.url
q_len = len(q)
if q_len == 2 and q[1] == '*':
char = q[0]
if char in SINGLE_CHAR_MAP:
q = f"{SINGLE_CHAR_MAP[q[0]]}*"
# Return an error if a site URL was given but it's invalid.
if site_url and site_url not in config.sites:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
detail="You must specify a valid search site.")
search_site = config.sites.get(site_url)
section = indexer.get_section(site_url, from_url)
num = min(num, MAX_NUM)
index_alias = config.keys.index_alias(search_site.url)
query = await parse(index_alias, q, section, start, num, search_site)
start = time.time()
try:
raw_result = await redis_client.execute_command("FT.SEARCH", *query)
except (redis.exceptions.ResponseError, UnicodeDecodeError) as e:
log.error("Search q failed: %s", e)
total = 0
docs = []
else:
result = Result(raw_result,
True,
duration=(time.time() - start) * 1000.0,
has_payload=False,
with_scores=False)
total = result.total
docs = result.docs
end = time.time()
newrelic.agent.record_custom_metric('search/q_ms', end - start)
docs = transform_documents(docs, search_site, q)
return {"total": total, "results": docs}
|
the-stack_0_17805 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-wildcard-import, wildcard-import
"""Generator for CUTLASS GEMM kernels."""
from .library import *
class GemmOperation:
"""Describes various attributes for instantiating GEMM kernels."""
def __init__(
self,
arch,
tile_description,
A,
B,
C,
element_epilogue,
epilogue_functor=EpilogueFunctor.LinearCombination,
swizzling_functor=SwizzlingFunctor.Identity8,
):
self.operation_kind = OperationKind.Gemm
self.arch = arch
self.tile_description = tile_description
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
def accumulator_type(self):
return self.tile_description.math_instruction.element_accumulator
def short_math_name(self):
return ShortDataTypeNames[self.accumulator_type()]
def core_name(self):
"""The basic operation kind is prefixed with a letter indicating the accumulation type."""
inst_shape = ""
intermediate_type = ""
if (
self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp
or self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp
):
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
if (
self.tile_description.math_instruction.element_a != self.A.element
and self.tile_description.math_instruction.element_a
!= self.tile_description.math_instruction.element_accumulator
):
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return "%s%s%s%s" % (
self.short_math_name(),
inst_shape,
intermediate_type,
"gemm",
)
def extended_name(self):
"""Append data types if they differ from compute type."""
if (
self.C.element != self.tile_description.math_instruction.element_accumulator
and self.A.element != self.tile_description.math_instruction.element_accumulator
):
extended_name = "${element_c}_${core_name}_${element_a}"
elif (
self.C.element == self.tile_description.math_instruction.element_accumulator
and self.A.element != self.tile_description.math_instruction.element_accumulator
):
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = substitute_template(
extended_name,
{
"element_a": DataTypeNames[self.A.element],
"element_c": DataTypeNames[self.C.element],
"core_name": self.core_name(),
},
)
return extended_name
def layout_name(self):
return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout])
def procedural_name(self):
"""The full procedural name indicates architecture, extended name, tile size,
and layout.
"""
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
return substitute_template(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_align${alignment}",
{
"opcode_class": opcode_class_name,
"extended_name": self.extended_name(),
"threadblock": threadblock,
"layout": self.layout_name(),
"alignment": "%d" % self.A.alignment,
},
)
def leading_dim(self):
"""lda, ldb, ldc, according to the leading dimension."""
if self.A.layout == LayoutType.RowMajor:
lda = "K"
elif self.A.layout == LayoutType.ColumnMajor:
lda = "M"
else:
ValueError("The layout of A is not implemented.")
if self.B.layout == LayoutType.RowMajor:
ldb = "N"
elif self.B.layout == LayoutType.ColumnMajor:
ldb = "K"
else:
ValueError("The layout of B is not implemented.")
if self.C.layout == LayoutType.RowMajor:
ldc = "N"
elif self.C.layout == LayoutType.ColumnMajor:
ldc = "M"
else:
ValueError("The layout of B is not implemented.")
return substitute_template(
"int lda = ${lda_val};\n\tint ldb = ${ldb_val};\n\tint ldc = ${ldc_val};\n",
{
"lda_val": lda,
"ldb_val": ldb,
"ldc_val": ldc,
},
)
class EmitGemmInstance:
"""Responsible for emitting a CUTLASS template definition."""
def __init__(self):
self.epilogue_default = """
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>"""
self.epilogue_no_beta_scaling = """
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>"""
self.gemm_template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = cutlass::gemm::device::${kernel_name}<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue},
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial}
${math_operation}
>;
"""
def emit(self, operation, no_beta_scaling=False, batched=False):
"""Instantiate a GEMM kernel from given `operation`."""
warp_shape = [
operation.tile_description.threadblock_shape[idx]
// operation.tile_description.warp_count[idx]
for idx in range(3)
]
epilogue_vector_length = (
min(operation.C.alignment * DataTypeSize[operation.C.element], 128)
// DataTypeSize[operation.C.element]
)
values = {
"operation_name": operation.procedural_name(),
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[operation.A.layout],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[operation.B.layout],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[operation.C.layout],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"opcode_class": OpcodeClassTag[
operation.tile_description.math_instruction.opcode_class
],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"warp_shape_m": str(warp_shape[0]),
"warp_shape_n": str(warp_shape[1]),
"warp_shape_k": str(warp_shape[2]),
"instruction_shape_m": str(
operation.tile_description.math_instruction.instruction_shape[0]
),
"instruction_shape_n": str(
operation.tile_description.math_instruction.instruction_shape[1]
),
"instruction_shape_k": str(
operation.tile_description.math_instruction.instruction_shape[2]
),
"epilogue_vector_length": str(epilogue_vector_length),
"element_epilogue": str(DataTypeTag[operation.element_epilogue]),
"epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor],
"swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor],
"stages": str(operation.tile_description.stages),
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"math_operation": MathOperationTag[
operation.tile_description.math_instruction.math_operation
],
}
values["kernel_name"] = "GemmBatched" if batched else "Gemm"
values["split_k_serial"] = "" if batched else "false,"
gemm_template = substitute_template(
self.gemm_template,
{
"epilogue": self.epilogue_no_beta_scaling
if no_beta_scaling
else self.epilogue_default
},
)
return substitute_template(gemm_template, values)
|
the-stack_0_17806 | #!/usr/bin/env python
import sys
def percentage(total, missing):
return (total - missing) * 100.0 / total
statements_total = int(sys.argv[1])
statements_missing = int(sys.argv[2])
statements_percentage = percentage(statements_total, statements_missing)
branches_total = int(sys.argv[3])
branches_missing = int(sys.argv[4])
branches_percentage = percentage(branches_total, branches_missing)
print("statement coverage: total: %4d, missing: %4d, percentage: %5.2f%%"
% (statements_total, statements_missing, statements_percentage))
print("branch coverage: total: %4d, missing: %4d, percentage: %5.2f%%"
% (branches_total, branches_missing, branches_percentage))
print("total coverage: %5.3f%%" % ((statements_percentage + branches_percentage) / 2.0))
|
the-stack_0_17807 | #!/usr/bin/env python
import argparse
import atexit
import logging
import os
import shutil
import sys
from pathlib import Path
from checkov.arm.runner import Runner as arm_runner
from checkov.cloudformation.runner import Runner as cfn_runner
from checkov.common.bridgecrew.platform_integration import bc_integration
from checkov.common.bridgecrew.image_scanning.image_scanner import image_scanner
from checkov.common.goget.github.get_git import GitGetter
from checkov.common.runners.runner_registry import RunnerRegistry, OUTPUT_CHOICES
from checkov.common.util.banner import banner as checkov_banner
from checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR
from checkov.common.util.docs_generator import print_checks
from checkov.common.util.runner_dependency_handler import RunnerDependencyHandler
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.terraform.runner import Runner as tf_graph_runner
from checkov.helm.runner import Runner as helm_runner
from checkov.kubernetes.runner import Runner as k8_runner
from checkov.logging_init import init as logging_init
from checkov.runner_filter import RunnerFilter
from checkov.serverless.runner import Runner as sls_runner
from checkov.terraform.plan_runner import Runner as tf_plan_runner
from checkov.dockerfile.runner import Runner as dockerfile_runner
from checkov.version import version
outer_registry = None
logging_init()
logger = logging.getLogger(__name__)
checkov_runner_module_names = ['cfn', 'tf', 'k8', 'sls', 'arm', 'tf_plan', 'helm']
checkov_runners = ['cloudformation', 'terraform', 'kubernetes', 'serverless', 'arm', 'terraform_plan', 'helm', 'dockerfile']
# Check runners for necessary system dependencies.
runnerDependencyHandler = RunnerDependencyHandler(checkov_runner_module_names, globals())
runnerDependencyHandler.validate_runner_deps()
def run(banner=checkov_banner, argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description='Infrastructure as code static analysis')
add_parser_args(parser)
args = parser.parse_args(argv)
# bridgecrew uses both the urllib3 and requests libraries, while checkov uses the requests library.
# Allow the user to specify a CA bundle to be used by both libraries.
bc_integration.setup_http_manager(args.ca_certificate)
# Disable runners with missing system dependencies
args.skip_framework = runnerDependencyHandler.disable_incompatible_runners(args.skip_framework)
runner_filter = RunnerFilter(framework=args.framework, skip_framework=args.skip_framework, checks=args.check, skip_checks=args.skip_check,
download_external_modules=convert_str_to_bool(args.download_external_modules),
external_modules_download_path=args.external_modules_download_path,
evaluate_variables=convert_str_to_bool(args.evaluate_variables), runners=checkov_runners)
if outer_registry:
runner_registry = outer_registry
runner_registry.runner_filter = runner_filter
else:
runner_registry = RunnerRegistry(banner, runner_filter, tf_graph_runner(), cfn_runner(), k8_runner(), sls_runner(),
arm_runner(), tf_plan_runner(), helm_runner(),dockerfile_runner())
if args.version:
print(version)
return
if args.bc_api_key == '':
parser.error('The --bc-api-key flag was specified but the value was blank. If this value was passed as a secret, you may need to double check the mapping.')
elif args.bc_api_key:
logger.debug(f'Using API key ending with {args.bc_api_key[-8:]}')
if args.repo_id is None:
parser.error("--repo-id argument is required when using --bc-api-key")
if len(args.repo_id.split('/')) != 2:
parser.error("--repo-id argument format should be 'organization/repository_name' E.g "
"bridgecrewio/checkov")
source = os.getenv('BC_SOURCE', 'cli')
source_version = os.getenv('BC_SOURCE_VERSION', version)
logger.debug(f'BC_SOURCE = {source}, version = {source_version}')
try:
bc_integration.setup_bridgecrew_credentials(bc_api_key=args.bc_api_key, repo_id=args.repo_id,
skip_fixes=args.skip_fixes,
skip_suppressions=args.skip_suppressions,
source=source, source_version=source_version, repo_branch=args.branch)
excluded_paths = bc_integration.get_excluded_paths()
runner_filter.excluded_paths = excluded_paths
except Exception as e:
logger.error('An error occurred setting up the Bridgecrew platform integration. Please check your API token and try again.', exc_info=True)
return
else:
logger.debug('No API key found. Scanning locally only.')
guidelines = {}
if not args.no_guide:
guidelines = bc_integration.get_guidelines()
if args.check and args.skip_check:
parser.error("--check and --skip-check can not be applied together. please use only one of them")
return
if args.list:
print_checks(framework=args.framework)
return
external_checks_dir = get_external_checks_dir(args)
url = None
if args.directory:
exit_codes = []
for root_folder in args.directory:
file = args.file
scan_reports = runner_registry.run(root_folder=root_folder, external_checks_dir=external_checks_dir,
files=file, guidelines=guidelines, bc_integration=bc_integration)
if bc_integration.is_integration_configured():
bc_integration.persist_repository(root_folder)
bc_integration.persist_scan_results(scan_reports)
url = bc_integration.commit_repository(args.branch)
exit_codes.append(runner_registry.print_reports(scan_reports, args, url))
exit_code = 1 if 1 in exit_codes else 0
return exit_code
elif args.file:
scan_reports = runner_registry.run(external_checks_dir=external_checks_dir, files=args.file,
guidelines=guidelines, bc_integration=bc_integration,
repo_root_for_plan_enrichment=args.repo_root_for_plan_enrichment)
if bc_integration.is_integration_configured():
files = [os.path.abspath(file) for file in args.file]
root_folder = os.path.split(os.path.commonprefix(files))[0]
bc_integration.persist_repository(root_folder)
bc_integration.persist_scan_results(scan_reports)
url = bc_integration.commit_repository(args.branch)
return runner_registry.print_reports(scan_reports, args, url)
elif args.docker_image:
if args.bc_api_key is None:
parser.error("--bc-api-key argument is required when using --docker-image")
return
if args.dockerfile_path is None:
parser.error("--dockerfile-path argument is required when using --docker-image")
return
if args.branch is None:
parser.error("--branch argument is required when using --docker-image")
return
image_scanner.scan(args.docker_image, args.dockerfile_path)
else:
print(f"{banner}")
bc_integration.onboarding()
def add_parser_args(parser):
parser.add_argument('-v', '--version',
help='version', action='store_true')
    parser.add_argument('-d', '--directory', action='append',
                        help='IaC root directory (cannot be used together with --file)')
    parser.add_argument('-f', '--file', action='append',
                        help='IaC file (cannot be used together with --directory)')
parser.add_argument('--external-checks-dir', action='append',
help='Directory for custom checks to be loaded. Can be repeated')
parser.add_argument('--external-checks-git', action='append',
help='Github url of external checks to be added. \n you can specify a subdirectory after a '
'double-slash //. \n cannot be used together with --external-checks-dir')
parser.add_argument('-l', '--list', help='List checks', action='store_true')
parser.add_argument('-o', '--output', nargs='?', choices=OUTPUT_CHOICES,
default='cli',
help='Report output format')
parser.add_argument('--no-guide', action='store_true',
default=False,
help='do not fetch bridgecrew guide in checkov output report')
parser.add_argument('--quiet', action='store_true',
default=False,
help='in case of CLI output, display only failed checks')
parser.add_argument('--compact', action='store_true',
default=False,
help='in case of CLI output, do not display code blocks')
parser.add_argument('--framework', help='filter scan to run only on a specific infrastructure code frameworks',
choices=checkov_runners + ["all"],
default='all')
parser.add_argument('--skip-framework', help='filter scan to skip specific infrastructure code frameworks. \n'
'will be included automatically for some frameworks if system dependencies are missing.',
choices=checkov_runners,
default=None)
parser.add_argument('-c', '--check',
help='filter scan to run only on a specific check identifier(allowlist), You can '
'specify multiple checks separated by comma delimiter', default=None)
parser.add_argument('--skip-check',
help='filter scan to run on all check but a specific check identifier(denylist), You can '
'specify multiple checks separated by comma delimiter', default=None)
parser.add_argument('-s', '--soft-fail',
help='Runs checks but suppresses error code', action='store_true')
parser.add_argument('--bc-api-key', help='Bridgecrew API key')
parser.add_argument('--docker-image', help='Scan docker images by name or ID. Only works with --bc-api-key flag')
parser.add_argument('--dockerfile-path', help='Path to the Dockerfile of the scanned docker image')
parser.add_argument('--repo-id',
help='Identity string of the repository, with form <repo_owner>/<repo_name>')
parser.add_argument('-b', '--branch',
help="Selected branch of the persisted repository. Only has effect when using the --bc-api-key flag",
default='master')
parser.add_argument('--skip-fixes',
help='Do not download fixed resource templates from Bridgecrew. Only has effect when using the --bc-api-key flag',
action='store_true')
parser.add_argument('--skip-suppressions',
help='Do not download preconfigured suppressions from the Bridgecrew platform. Code comment suppressions will still be honored. '
'Only has effect when using the --bc-api-key flag',
action='store_true')
parser.add_argument('--download-external-modules',
help="download external terraform modules from public git repositories and terraform registry",
default=os.environ.get('DOWNLOAD_EXTERNAL_MODULES', False))
parser.add_argument('--external-modules-download-path',
help="set the path for the download external terraform modules",
default=DEFAULT_EXTERNAL_MODULES_DIR)
parser.add_argument('--evaluate-variables',
help="evaluate the values of variables and locals",
default=True)
parser.add_argument('-ca', '--ca-certificate',
help='custom CA (bundle) file', default=None)
parser.add_argument('--repo-root-for-plan-enrichment',
help='Directory containing the hcl code used to generate a given plan file. Use with -f.', dest="repo_root_for_plan_enrichment")
def get_external_checks_dir(args):
external_checks_dir = args.external_checks_dir
if args.external_checks_git:
git_getter = GitGetter(args.external_checks_git[0])
external_checks_dir = [git_getter.get()]
atexit.register(shutil.rmtree, str(Path(external_checks_dir[0]).parent))
return external_checks_dir
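# Illustrative sketch (not part of checkov itself): the flags defined in
# add_parser_args() above can be exercised with a standalone argparse parser.
# The sample flag values below are assumptions chosen for demonstration only.
def _example_parse_cli_flags():
    import argparse
    parser = argparse.ArgumentParser(description='example checkov-style CLI')
    add_parser_args(parser)
    return parser.parse_args(['-d', '.', '--framework', 'all', '--compact'])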
if __name__ == '__main__':
exit(run())
|
the-stack_0_17808 | import tkinter as tk
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from fyne import blackscholes, heston
from utils import cols_to_args
class CustomRangeScale(tk.Frame):
def __init__(self, parent, label, floor, ceil, var):
super().__init__(parent)
self._ceil = tk.StringVar(value=str(ceil))
self._floor = tk.StringVar(value=str(floor))
self._ceil.trace_add('write', self._update)
self._floor.trace_add('write', self._update)
tk.Label(self, text=label).pack()
tk.Entry(self, textvariable=self._ceil, width=6).pack()
tk.Scale(self, variable=var, name='scale').pack()
tk.Entry(self, textvariable=self._floor, width=6).pack()
self._update()
def _update(self, *args):
try:
to = float(self._floor.get())
from_ = float(self._ceil.get())
except ValueError:
return
scale = self.children['scale']
scale.configure(to=to, from_=from_, resolution=(to - from_)/20)
class App(tk.Tk):
def __init__(self, fig, plot_update, calibrate, labels, defaults, floors,
ceils):
super().__init__()
self.params = [tk.DoubleVar() for _ in labels]
self._canvas = FigureCanvasTkAgg(fig, self)
controls = tk.Frame(self)
self._canvas.get_tk_widget().pack()
controls.pack()
def vars_plot_update(*args):
plot_update(*map(tk.DoubleVar.get, self.params))
self._canvas.draw()
for label, default, floor, ceil, param in zip(labels, defaults, floors,
ceils, self.params):
scale = CustomRangeScale(controls, label, floor, ceil, param)
scale.pack(side=tk.LEFT)
param.set(default)
param.trace_add('write', vars_plot_update)
def vars_calibrate():
new_values = calibrate(*map(tk.DoubleVar.get, self.params))
for param, new_value in zip(self.params, new_values):
param.set(new_value)
button = tk.Button(controls, text='Calibrate', command=vars_calibrate)
button.pack(side=tk.LEFT)
vars_plot_update()
def run(self):
self.mainloop()
return map(tk.DoubleVar.get, self.params)
def heston_smile(y, ax):
underlying_price = 100
expiry = 0.2
def closure(vol, kappa, theta, nu, rho):
ax.cla()
option_prices = heston.formula(underlying_price, y.index, expiry, vol,
kappa, theta, nu, rho)
y.loc[:] = blackscholes.implied_vol(underlying_price, y.index, expiry,
option_prices)
y.plot(ax=ax)
return closure
def heston_fit(vols_data, vols_model, underlying_price, ax):
def closure(vol, kappa, theta, nu, rho):
ax.cla()
strikes = vols_model.index.get_level_values('Strike')
expiries = vols_model.index.get_level_values('Expiry')
prices_model = heston.formula(underlying_price, strikes, expiries, vol,
kappa, theta, nu, rho)
vols_model.loc[:] = blackscholes.implied_vol(underlying_price, strikes,
expiries, prices_model,
assert_no_arbitrage=False)
for expiry, color in zip(np.unique(expiries),
plt.get_cmap('tab10').colors):
vols_data.xs(expiry).plot(ax=ax, c=color, marker='o', linewidth=0)
vols_model.xs(expiry).plot(ax=ax, color=color)
return closure
def heston_calibration(underlying_price, strikes, expiries, option_prices,
put, **kwargs):
def closure(*initial_guess):
return heston.calibration_crosssectional(
underlying_price, strikes, expiries, option_prices, initial_guess,
put, **kwargs)
return closure
def heston_app(underlying_price, strikes, expiries, option_prices, put,
**kwargs):
backend = mpl.get_backend()
mpl.use('agg')
fig, ax = plt.subplots()
index = pd.MultiIndex.from_arrays([expiries, strikes],
names=['Expiry', 'Strike'])
vols_data = blackscholes.implied_vol(underlying_price, strikes, expiries,
option_prices, put)
vols_data = pd.Series(vols_data, index, name='Data')
underlying_unique = pd.Series(underlying_price,
pd.Index(expiries, name='Expiry')
).groupby('Expiry').first()
strike_grid = np.linspace(np.min(strikes), np.max(strikes), 100)
index = pd.MultiIndex.from_product([np.unique(expiries), strike_grid],
names=['Expiry', 'Strike'])
vols_model = pd.Series(0, index, name='Model')
underlying_b = underlying_unique.reindex(index.get_level_values('Expiry'))
labels = ['vol', 'kappa', 'theta', 'nu', 'rho']
defaults = [0.1, 7.2, 0.05, 1.25, -0.54]
floors = [0.0, 1., 0.0, 0.0, -1.]
ceils = [1.0, 10., 1.0, 5.0, 1.]
plot_update = heston_fit(vols_data, vols_model, underlying_b, ax)
calibrate = heston_calibration(underlying_price, strikes, expiries,
option_prices, put, **kwargs)
app = App(fig, plot_update, calibrate, labels, defaults, floors, ceils)
params = app.run()
mpl.use(backend)
return params
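# Illustrative usage sketch (not part of the original module): the strikes,
# expiries and option prices below are made-up values, not market data, and
# calling this opens a Tk window, so a display is required.
def _example_heston_app():
    strikes = np.array([90.0, 100.0, 110.0])
    expiries = np.array([0.5, 0.5, 0.5])
    option_prices = np.array([14.0, 7.5, 3.2])
    put = np.array([False, False, False])
    return heston_app(100.0, strikes, expiries, option_prices, put)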
def open_app():
backend = mpl.get_backend()
mpl.use('agg')
fig, ax = plt.subplots()
x = pd.Index(np.linspace(80, 120, 100), name='Strike')
y = pd.Series(0, x)
labels = ['vol', 'kappa', 'theta', 'nu', 'rho']
defaults = [0.1, 7.2, 0.05, 1.25, -0.54]
floors = [0.0, 1., 0.0, 0.0, -1.]
ceils = [1.0, 10., 1.0, 5.0, 1.]
    # There is no market data to calibrate against in this demo, so the
    # calibrate callback simply echoes the current slider values.
    app = App(fig, heston_smile(y, ax), lambda *params: params, labels,
              defaults, floors, ceils)
    vol, kappa, theta, nu, rho = app.run()
    mpl.use(backend)
    return vol, kappa, theta, nu, rho
|
the-stack_0_17810 | import torch as th
import numpy as np
def build_td_lambda_targets(rewards, terminated, mask, target_qs, n_agents, gamma, td_lambda):
    # Assumes <target_qs> in B*T*A and <reward>, <terminated>, <mask> in (at least) B*T-1*1
    # Initialise last lambda-return for not terminated episodes
ret = target_qs.new_zeros(*target_qs.shape)
ret[:, -1] = target_qs[:, -1] * (1 - th.sum(terminated, dim=1))
# Backwards recursive update of the "forward view"
for t in range(ret.shape[1] - 2, -1, -1):
ret[:, t] = td_lambda * gamma * ret[:, t + 1] + mask[:, t] \
* (rewards[:, t] + (1 - td_lambda) * gamma * target_qs[:, t + 1] * (1 - terminated[:, t]))
# Returns lambda-return from t=0 to t=T-1, i.e. in B*T-1*A
return ret[:, 0:-1]
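# Illustrative usage (not part of the original module): dummy tensors with the
# shapes documented in the comments above (rewards/terminated/mask have one
# fewer timestep than target_qs). All sizes are assumptions for demonstration.
def _example_td_lambda_usage():
    B, T, A = 2, 5, 3
    rewards = th.zeros(B, T - 1, 1)
    terminated = th.zeros(B, T - 1, 1)
    mask = th.ones(B, T - 1, 1)
    target_qs = th.rand(B, T, A)
    return build_td_lambda_targets(rewards, terminated, mask, target_qs,
                                   n_agents=A, gamma=0.99, td_lambda=0.8)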
def build_gae_targets(rewards, masks, values, gamma, lambd):
B, T, _ = values.size()
T-=1
advantages = th.zeros(B, T, 1).to(device=values.device)
advantage_t = th.zeros(B, 1).to(device=values.device)
for t in reversed(range(T)):
delta = rewards[:, t] + values[:, t+1] * gamma * masks[:, t] - values[:, t]
advantage_t = delta + advantage_t * gamma * lambd * masks[:, t]
advantages[:, t] = advantage_t
returns = values[:, :T] + advantages
return advantages, returns
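# Illustrative usage (not part of the original module): values carry one more
# timestep than rewards/masks, matching the indexing above. Shapes are
# assumptions for demonstration.
def _example_gae_usage():
    B, T = 2, 4
    rewards = th.zeros(B, T, 1)
    masks = th.ones(B, T, 1)
    values = th.rand(B, T + 1, 1)
    return build_gae_targets(rewards, masks, values, gamma=0.99, lambd=0.95)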
def build_q_lambda_targets(rewards, terminated, mask, exp_qvals, qvals, gamma, td_lambda):
    # Assumes <target_qs> in B*T*A and <reward>, <terminated>, <mask> in (at least) B*T-1*1
    # Initialise last lambda-return for not terminated episodes
ret = exp_qvals.new_zeros(*exp_qvals.shape)
ret[:, -1] = exp_qvals[:, -1] * (1 - th.sum(terminated, dim=1))
# Backwards recursive update of the "forward view"
for t in range(ret.shape[1] - 2, -1, -1):
reward = rewards[:, t] + exp_qvals[:, t] - qvals[:, t] #off-policy correction
ret[:, t] = td_lambda * gamma * ret[:, t + 1] + mask[:, t] \
* (reward + (1 - td_lambda) * gamma * exp_qvals[:, t + 1] * (1 - terminated[:, t]))
# Returns lambda-return from t=0 to t=T-1, i.e. in B*T-1*A
return ret[:, 0:-1]
def build_target_q(td_q, target_q, mac, mask, gamma, td_lambda, n):
aug = th.zeros_like(td_q[:, :1])
#Tree diagram
mac = mac[:, :-1]
tree_q_vals = th.zeros_like(td_q)
coeff = 1.0
t1 = td_q[:]
for _ in range(n):
tree_q_vals += t1 * coeff
t1 = th.cat(((t1 * mac)[:, 1:], aug), dim=1)
coeff *= gamma * td_lambda
return target_q + tree_q_vals
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * (self.count)
m_b = batch_var * (batch_count)
M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = M2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count |
the-stack_0_17811 | import json
import os
import subprocess
import sys
import re
import cookiecutter
from cookiecutter.prompt import read_user_yes_no
from fqdn import FQDN
# Ensure cookiecutter is recent enough
cookiecutter_min_version = '1.6.0'
if cookiecutter.__version__ < cookiecutter_min_version:
print("--------------------------------------------------------------")
print("!! Your cookiecutter is too old, at least %s is required !!" % cookiecutter_min_version)
print("--------------------------------------------------------------")
sys.exit(1)
def is_git_repository(path):
return path.startswith('/') and os.path.exists(path) and os.path.exists(os.path.join(path, '.git'))
def check_remote_repository_updates():
template_dir = '{{ cookiecutter._template }}'
if not is_git_repository(template_dir):
print("Template dir is not absolute dir or not Git repo; skipping freshness check")
return
if os.environ.get("GITLAB_CI", "") != "":
print("No latest version check necessary in CI")
return
print('Template dir:', template_dir)
print('Checking for latest template version via git')
subprocess.call(["git", "fetch"], cwd=template_dir)
print('')
# Warn user if the version of the template that's being used is not the latest available
local_sha = subprocess.check_output(["git", "rev-parse", "@"], cwd=template_dir).decode().strip()
local_branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "@"], cwd=template_dir).decode().strip()
if local_branch == 'HEAD':
remote_branch = 'master' # default to master
else:
remote_branch = local_branch
try:
remote_sha = subprocess.check_output(["git", "rev-parse", "origin/{}".format(remote_branch)], cwd=template_dir).decode().strip()
except subprocess.CalledProcessError:
# The branch is probably not pushed
remote_sha = None
# Print out the template version info
print('local commit: {}; branch: {}'.format(local_sha, local_branch))
print('remote commit: {}; branch: {}'.format(remote_sha, remote_branch))
print()
if local_sha != remote_sha:
if not read_user_yes_no(
'The template version you are using is not the latest available, are you sure you want to continue?',
default_value='yes'):
print("Bye!")
sys.exit(1)
def validate_config():
# Ensure the selected repo name is usable
repo_name = '{{ cookiecutter.repo_name }}'
assert_msg = 'Repo name should be valid Python identifier!'
if hasattr(repo_name, 'isidentifier'):
assert repo_name.isidentifier(), assert_msg
else:
identifier_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
assert bool(identifier_re.match(repo_name)), assert_msg
valid_celery_key = ['yes', 'no']
if "{{ cookiecutter.include_celery }}" not in valid_celery_key:
print("Include Celery '{{ cookiecutter.include_celery }}' is not valid!")
print("Valid include Celery keys are: %s" % ', '.join(valid_celery_key))
sys.exit(1)
valid_storybook_replies = ['yes', 'no']
if "{{ cookiecutter.webapp_include_storybook }}" not in valid_storybook_replies:
print("Your answer to Include Storybook: '{{ cookiecutter.webapp_include_storybook }}' is invalid!")
print("Valid choices are: %s" % ', '.join(valid_storybook_replies))
sys.exit(1)
valid_frontend_styles = ['webapp', 'spa']
if "{{ cookiecutter.frontend_style }}" not in valid_frontend_styles:
print("Your answer to Frontend style: '{{ cookiecutter.webapp_include_storybook }}' is invalid!")
print("Valid choices are: %s" % ', '.join(valid_frontend_styles))
sys.exit(1)
valid_thorgate_key = ['yes', 'no']
if "{{ cookiecutter.thorgate }}" not in valid_thorgate_key:
print("Thorgate '{{ cookiecutter.thorgate }}' is not valid!")
print("Valid thorgate keys are: %s" % ', '.join(valid_thorgate_key))
sys.exit(1)
if not re.match(r'(alpine|debian)$', "{{ cookiecutter.docker_base_image }}"):
print("Only alpine and debian options for docker_base_image are supported.")
sys.exit(1)
    if not re.match(r'(3\.[6-9](\.\d+)?)', "{{ cookiecutter.python_version }}"):
        print("Only Python versions 3.6 through 3.9 are allowed.")
        sys.exit(1)
    if not re.match(r'((8|10|11|12|14)(\.\d+){0,2})', "{{ cookiecutter.node_version }}"):
        print("Only Node.js versions 8, 10, 11, 12 and 14 are allowed.")
        sys.exit(1)
valid_dme_keys = ['S3', 'GCS']
if "{{ cookiecutter.django_media_engine }}" not in valid_dme_keys:
print("Django media engine '{{ cookiecutter.django_media_engine }}' is not valid!")
print("Valid media engines are: %s" % ', '.join(valid_dme_keys))
sys.exit(1)
if not FQDN("{{ cookiecutter.test_host }}").is_valid:
print("Test host is not a valid domain name")
sys.exit(1)
if not FQDN("{{ cookiecutter.live_host }}").is_valid:
print("Live host is not a valid domain name")
sys.exit(1)
if not FQDN("{{ cookiecutter.repo_name|as_hostname }}.{{ cookiecutter.test_host }}").is_valid:
print("Test hostname is not a valid domain name")
sys.exit(1)
domain_name = "{{ cookiecutter.domain_name }}"
if 'todo' not in domain_name.lower():
if domain_name != domain_name.lower():
print("Domain name should be lowercase")
sys.exit(1)
if not FQDN(domain_name).is_valid:
print("Domain name is not valid")
sys.exit(1)
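# Small illustration (not part of the hook) of the kinds of checks performed
# in validate_config() above; the sample values are assumptions.
def _example_config_checks():
    assert "my_project".isidentifier()
    assert re.match(r'(alpine|debian)$', "alpine")
    assert FQDN("example.com").is_valid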
def copy_cookiecutter_config(local_filename='.cookiecutterrc'):
""" Copy cookiecutter replay for template to project dir, unless it already exists.
This creates the initial .cookiecutterrc file when the project is first generated.
"""
template_dir = os.path.abspath('{{ cookiecutter._template }}')
template_name = os.path.basename(template_dir) or "django-project-template"
replay_filename = os.path.expanduser(f'~/.cookiecutter_replay/{template_name}.json')
if not os.path.exists(replay_filename) or os.path.exists(local_filename):
# This happens when we're upgrading an existing project
return
with open(replay_filename, 'r') as f_in, open(local_filename, 'w') as f_out:
config = json.load(f_in)
# Don't dump the template dir (stored under '_template' key)
if '_template' in config['cookiecutter']:
del config['cookiecutter']['_template']
json.dump(config, f_out, indent=4, sort_keys=True)
check_remote_repository_updates()
validate_config()
copy_cookiecutter_config()
|
the-stack_0_17812 | import threading
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipUnlessDBFeature,
)
from django.utils.translation import gettext_lazy
from .models import Article, ArticleSelectOnSave, FeaturedArticle, SelfRef
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
with self.assertRaisesMessage(TypeError, "'foo' is an invalid keyword argument for this function"):
Article(
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertIsNotNone(a.id)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date, datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date, datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Parrot programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
with self.assertRaisesMessage(AttributeError, "Manager isn't accessible via Article instances"):
getattr(Article(), "objects",)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(
Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"]
)
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'), ["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
def test_microsecond_precision(self):
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date, datetime(2005, 7, 31, 12, 30, 45, 180))
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"]
)
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline, '\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]]
)
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_gettext_lazy(self):
"""
gettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = gettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
msg = "EmptyQuerySet can't be instantiated"
with self.assertRaisesMessage(TypeError, msg):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
self.assertNotIsInstance('', EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
msg = 'Model instances without primary key value are unhashable'
with self.assertRaisesMessage(TypeError, msg):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
def test_delete_and_access_field(self):
# Accessing a field after it's deleted from a model reloads its value.
pub_date = datetime.now()
article = Article.objects.create(headline='foo', pub_date=pub_date)
new_pub_date = article.pub_date + timedelta(days=10)
article.headline = 'bar'
article.pub_date = new_pub_date
del article.headline
with self.assertNumQueries(1):
self.assertEqual(article.headline, 'foo')
# Fields that weren't deleted aren't reloaded.
self.assertEqual(article.pub_date, new_pub_date)
class ModelLookupTest(TestCase):
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Swallow programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Parrot programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(), ['<Article: Parrot programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Swallow'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Swallow programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(id__exact=2000,)
# To avoid dict-ordering related errors check only one lookup
# in single assert.
with self.assertRaises(ObjectDoesNotExist):
Article.objects.get(pub_date__year=2005, pub_date__month=8)
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(pub_date__week_day=6,)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]), ["<Article: Swallow programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Swallow bites Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
msg = "get() returned more than one Article -- it returned 2!"
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(headline__startswith='Swallow',)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005,)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005, pub_date__month=7)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(SimpleTestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
'union',
'intersection',
'difference',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet)),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaisesMessage(DatabaseError, 'Forced update did not affect any rows.'):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
select_on_save works correctly if the database doesn't return correct
information about matched rows from UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to
# test this properly otherwise. Article's manager, because
# proxy models use their parent model's _base_manager.
orig_class = Article._base_manager._queryset_class
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super()._update(*args, **kwargs)
return 0
try:
Article._base_manager._queryset_class = FakeQuerySet
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaisesMessage(DatabaseError, 'Forced update did not affect any rows.'):
asos.save(force_update=True)
msg = (
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
)
with self.assertRaisesMessage(DatabaseError, msg):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager._queryset_class = orig_class
class ModelRefreshTests(TestCase):
def test_refresh(self):
a = Article.objects.create(pub_date=datetime.now())
Article.objects.create(pub_date=datetime.now())
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_unknown_kwarg(self):
s = SelfRef.objects.create()
msg = "refresh_from_db() got an unexpected keyword argument 'unknown_kwarg'"
with self.assertRaisesMessage(TypeError, msg):
s.refresh_from_db(unknown_kwarg=10)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = datetime.now()
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_fk_on_delete_set_null(self):
a = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
s1 = SelfRef.objects.create(article=a)
a.delete()
s1.refresh_from_db()
self.assertIsNone(s1.article_id)
self.assertIsNone(s1.article)
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
def test_refresh_clears_reverse_related(self):
"""refresh_from_db() clear cached reverse relations."""
article = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertFalse(hasattr(article, 'featured'))
FeaturedArticle.objects.create(article_id=article.pk)
article.refresh_from_db()
self.assertTrue(hasattr(article, 'featured'))
def test_refresh_clears_one_to_one_field(self):
article = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
featured = FeaturedArticle.objects.create(article_id=article.pk)
self.assertEqual(featured.article.headline, 'Parrot programs in Python')
article.headline = 'Parrot programs in Python 2.0'
article.save()
featured.refresh_from_db()
self.assertEqual(featured.article.headline, 'Parrot programs in Python 2.0')
|
the-stack_0_17814 | import socket
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import csv
import pandas as pd
import time
import os
from itertools import islice
from datetime import datetime
import pytz
'''
r_t =[] #_real,_time
v_a =[] #_Vind,_kias
v_g = [] #Vtrue,_ktgs
v_t = [] #Vtrue,_ktas
a_a = [] #hpath,__deg
w_v = [] #vpath,__deg
w_a = [] #_fuel,_1_lb
a_g = [] #_fuel,_7_lb
alpha = [] #
'''
'''
while True:
lineNum += 1
with open('Data.txt') as csvfile:
readCSV = csv.reader(csvfile, delimiter='|')
print(lineNum)
for row in islice(readCSV, lineNum, None):
r_t.append(float(row[0]))
v_a.append(float(row[7]))
v_g.append(float(row[10]))
a_a.append(float(row[22]))
w_v.append(float(row[23]))
print(r_t[0],v_a[0],v_g[0],a_a[0],w_v[0])
break
#w_a.append(float(row[0]))
#a_g.append(float(row[0]))
csvfile.close()
time.sleep(1.0)
'''
'''
#works
lineNum = 2
while True:
lineNum += 1
print(lineNum)
with open('Data.txt') as csvfile:
for skip in range(lineNum):
next(csvfile)
readCSV = csv.reader(csvfile, delimiter='|')
for row in readCSV:
r_t=(float(row[0]))
v_a=(float(row[7]))
v_g=(float(row[10]))
a_a=(float(row[22]))
w_v=(float(row[23]))
csvfile.close()
print(r_t,v_a,v_g,a_a,w_v)
break
time.sleep(1.0)
'''
#host = '127.0.0.1' # standard localhost
host = '129.161.48.132'
port = 12345 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
print (host , port)
s.listen(1)
conn, addr = s.accept()
print('Connected by', addr)
#first="r_t,v_a,v_g,a_a,w_v"
first="alpha,v_t"
conn.sendall(first.encode())  # sockets expect bytes on Python 3
lineNum = 2
while True:
lineNum += 1
print(lineNum)
with open('Data.txt') as csvfile:
for skip in range(lineNum):
next(csvfile)
readCSV = csv.reader(csvfile, delimiter='|')
for row in readCSV:
#r_t=((row[0]))
#v_a=((row[7]))
#v_g=((row[10]))
v_t=((row[9]))
a_a=((row[22]))
w_v=((row[23]))
alpha=((row[20]))
csvfile.close()
            print(v_t, a_a, w_v, alpha)
dt = datetime.now().isoformat()
date =dt[:10]
hour = dt[11:13] +"00"
mint = dt[14:16]
sec = dt[17:19]
msec = dt[20:23]
data = ":"+date+" "+ hour+mint+sec+msec+"-0500:"+','+alpha.strip()+v_t.strip()
#data = ":"+date+" "+ hour+mint+sec+msec+"-0500:"+','+r_t.strip()+','+v_a.strip()+','+v_g.strip()+','+a_a.strip()+','+w_v.strip()+alpha.strip()
            conn.sendall(data.encode())  # sockets expect bytes on Python 3
break
time.sleep(1.0)
|
the-stack_0_17819 | """ validator/validator.py
A data validation class that leverages external JSON configuration
files to validate data provided by API consumers.
"""
import json
import os
import re
from datetime import datetime
from data_resource_api.config import ConfigurationFactory
from data_resource_api.validator.util import ValidatorNotFoundError,\
SchemaFormatError, EMAIL_REGEX, URL_REGEX
class Validator(object):
""" Generic validator class.
This class implements all the infrastructure necessary for validating an
object.
Attributes:
schema_path (str): Full path and name of the schema file to validate
data against.
Args:
schema_path (str): Full path and name of the schema to validate data
against.
"""
def __init__(self, schema_path):
self.schema_path = schema_path
def field_exists(self, field: str, dataset: dict):
""" Determine if the field exists. """
return field in dataset.keys()
def is_valid_string(self, text, pattern=None):
""" Determine if an item is a valid string. """
is_valid = False
try:
if isinstance(text, str):
is_valid = True
if pattern is not None:
matcher = re.compile(pattern)
if matcher.match(text):
is_valid = True
else:
is_valid = False
except Exception:
is_valid = False
return is_valid
def is_valid_integer(self, text, min=None, max=None):
""" Determine if an item is a valid string. """
is_valid = False
try:
if isinstance(text, int):
is_valid = True
if is_valid and min is not None:
if int(text) < int(min):
is_valid = False
if is_valid and max is not None:
if int(text) > int(max):
is_valid = False
except Exception:
is_valid = False
return is_valid
def is_valid_float(self, text, min=None, max=None):
""" Determine if an item is a valid float. """
is_valid = False
try:
if isinstance(text, float):
is_valid = True
if is_valid and min is not None:
if float(text) < float(min):
is_valid = False
if is_valid and max is not None:
if float(text) > float(max):
is_valid = False
except Exception:
is_valid = False
return is_valid
def is_valid_url(self, text):
""" Determine if an item is a valid string. """
is_valid = False
try:
if URL_REGEX.match(text):
is_valid = True
except Exception:
pass
return is_valid
def is_valid_email(self, text):
""" Determine if an item is a valid string. """
is_valid = False
try:
if EMAIL_REGEX.match(text):
is_valid = True
except Exception:
pass
return is_valid
def is_valid_date(self, text, format=None):
""" Determine if an item is a valid date. """
is_valid = False
try:
if format is None:
format = "%Y%m%d"
date = datetime.strptime(str(text), format)
is_valid = True
except Exception:
is_valid = False
return is_valid
def validate(self, dataset: dict):
""" Validate a dataset to ensure that it conforms to a schema.
Parameters:
dataset (dict): Dataset (represented as a Python dictionary) to
apply validation rules to.
"""
errors = []
if not os.path.exists(self.schema_path) or not os.path.isfile(
self.schema_path):
raise ValidatorNotFoundError(
'The specified validator ({}) does not exist'.format(
self.schema_path))
else:
try:
with open(self.schema_path, 'r') as f:
schema = json.load(f)
except Exception:
raise(SchemaFormatError(
'Failed to load schema from {}'.format(self.schema_path)))
try:
for field in schema['schema']:
current_field = field['field']
if not self.field_exists(current_field, dataset):
if field['required'] == 'true':
errors.append(
"Field '{0}' is required and cannot be blank."
.format(current_field))
else:
field_type = field['type']
if field_type == 'string':
if 'pattern' in field.keys():
pattern = field['pattern']
else:
pattern = None
if not self.is_valid_string(dataset[current_field],
pattern):
errors.append(
'Field {} is not a valid string'.format(
current_field))
elif field_type == 'integer':
if 'min' in field.keys():
min = field['min']
else:
min = None
if 'max' in field.keys():
max = field['max']
else:
max = None
if not self.is_valid_integer(
dataset[current_field], min, max):
errors.append(
'Field {} is not a valid integer'.format(
current_field))
elif field_type == 'float':
if 'min' in field.keys():
min = field['min']
else:
min = None
if 'max' in field.keys():
max = field['max']
else:
max = None
if not self.is_valid_float(
dataset[current_field], min, max):
errors.append(
'Field {} is not a valid float'.format(
current_field))
elif field_type == 'url':
if not self.is_valid_url(dataset[current_field]):
errors.append(
'Field {} is not a valid URL'.format(
current_field))
elif field_type == 'email':
if not self.is_valid_email(dataset[current_field]):
errors.append(
'Field {} is not a valid email address'
.format(current_field))
elif field_type == 'date':
if 'format' in field.keys():
format = field['format']
else:
format = None
if not self.is_valid_date(dataset[current_field],
format):
errors.append(
'Field {} is not a valid date.'.format(
current_field))
return errors
except Exception as e:
raise(SchemaFormatError(
'Schema format error detected in schema {}'.format(
self.schema_path)))
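# Illustrative example (not part of the library): a minimal schema file and a
# call to Validator.validate(). The keys mirror what validate() reads above
# ('schema', 'field', 'type', 'required', 'min', 'max'); the field names and
# temporary file are assumptions for demonstration.
def _example_validator_usage():
    import tempfile
    schema = {"schema": [
        {"field": "email", "type": "email", "required": "true"},
        {"field": "age", "type": "integer", "required": "false", "min": 0, "max": 150},
    ]}
    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
        json.dump(schema, f)
        schema_path = f.name
    return Validator(schema_path).validate({"email": "someone@example.com", "age": 30})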
class CredentialValidator(Validator):
def __init__(self):
config = ConfigurationFactory.from_env()
validator_file = os.path.join(
config.VALIDATOR_HOME, 'credentials.json')
super().__init__(validator_file)
class ParticipantValidator(Validator):
def __init__(self):
config = ConfigurationFactory.from_env()
validator_file = os.path.join(
config.VALIDATOR_HOME, 'participants.json')
super().__init__(validator_file)
class ProgramValidator(Validator):
def __init__(self):
config = ConfigurationFactory.from_env()
validator_file = os.path.join(
config.VALIDATOR_HOME, 'programs.json')
super().__init__(validator_file)
class ProviderValidator(Validator):
def __init__(self):
config = ConfigurationFactory.from_env()
validator_file = os.path.join(
config.VALIDATOR_HOME, 'providers.json')
super().__init__(validator_file)
|
the-stack_0_17820 | import os
from importlib import import_module
import numpy as np
from monty.serialization import dumpfn
from fireworks import FiretaskBase, explicit_serialize
from fireworks.utilities.dict_mods import apply_mod
from pymatgen.core.structure import Structure
from pymatgen.io.vasp import Incar, Poscar, Potcar, PotcarSingle, Kpoints
from pymatgen.io.vasp.sets import MPAbsorptionSet
from pymatgen.io.vasp.outputs import Vasprun
from atomate.utils.utils import env_chk, load_class
@explicit_serialize
class WriteVaspAbsorptionFromPrev(FiretaskBase):
"""
Writes input files for an LOPTICS absorption run. Assumes that output files (WAVECAR) from an
scf job can be accessed.
Optional params:
"prev_calc_dir",
"mode", either "IPA" or "RPA"
"reciprocal_density",
"other_params",
"potcar_spec"
"""
optional_params = [
"prev_calc_dir",
"structure",
"mode",
"copy_wavecar",
"nbands",
"nbands_factor",
"reciprocal_density",
"nkred",
"ncores",
"nedos",
"potcar_spec",
"other_params"
]
def run_task(self, fw_spec):
vis = MPAbsorptionSet.from_prev_calc(
prev_calc_dir=self.get("prev_calc_dir", "."),
mode=self.get("mode", "IPA"),
copy_wavecar=self.get("copy_wavecar", True),
nbands=self.get("nbands", None),
nbands_factor=self.get("nbands_factor", 2),
reciprocal_density=self.get("reciprocal_density", 200),
nkred=self.get("nkred", None),
nedos=self.get("nedos", 2001),
**self.get("other_params", {})
)
potcar_spec = self.get("potcar_spec", False)
vis.write_input(".", potcar_spec=potcar_spec)
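# Illustrative usage sketch (not part of atomate): wrapping the Firetask above
# in a Firework. The kwargs shown are assumptions drawn from optional_params.
def _example_absorption_firework():
    from fireworks import Firework
    task = WriteVaspAbsorptionFromPrev(prev_calc_dir=".", mode="IPA",
                                       reciprocal_density=200)
    return Firework([task], name="write absorption inputs")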
|
the-stack_0_17822 | # star/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from candidate.models import CandidateCampaignManager
from django.db import models
from exception.models import handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception
from measure.models import ContestMeasureManager
from office.models import ContestOfficeManager
import wevote_functions.admin
from wevote_functions.functions import positive_value_exists
from voter.models import VoterManager
ITEM_STARRED = 'STARRED'
ITEM_NOT_STARRED = 'NOT_STARRED'
STAR_CHOICES = (
(ITEM_STARRED, 'Item Starred'),
(ITEM_NOT_STARRED, 'Item Not Starred'),
)
logger = wevote_functions.admin.get_logger(__name__)
class StarItem(models.Model):
# We are relying on built-in Python id field
# The voter following the organization
voter_id = models.BigIntegerField(null=True, blank=True)
# The candidate being starred
candidate_campaign_id = models.BigIntegerField(null=True, blank=True)
candidate_campaign_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False)
# The office being starred
contest_office_id = models.BigIntegerField(null=True, blank=True)
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False)
# The measure being starred
contest_measure_id = models.BigIntegerField(null=True, blank=True)
contest_measure_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False)
# Is this person following or ignoring this organization?
star_status = models.CharField(max_length=16, choices=STAR_CHOICES, default=ITEM_NOT_STARRED)
# The date the voter starred or unstarred this ballot_item
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
# This is used when we want to export the organizations that a voter is following
def voter_we_vote_id(self):
voter_manager = VoterManager()
return voter_manager.fetch_we_vote_id_from_local_id(self.voter_id)
def ballot_item_we_vote_id(self):
if self.candidate_campaign_we_vote_id:
return self.candidate_campaign_we_vote_id
elif self.contest_office_we_vote_id:
return self.contest_office_we_vote_id
elif self.contest_measure_we_vote_id:
return self.contest_measure_we_vote_id
elif self.candidate_campaign_id:
candidate_campaign_manager = CandidateCampaignManager()
return candidate_campaign_manager.fetch_candidate_campaign_we_vote_id_from_id(self.candidate_campaign_id)
elif self.contest_measure_id:
contest_measure_manager = ContestMeasureManager()
return contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(self.contest_measure_id)
elif self.contest_office_id:
contest_office_manager = ContestOfficeManager()
return contest_office_manager.fetch_contest_office_we_vote_id_from_id(self.contest_office_id)
else:
return 'not_found'
def is_starred(self):
if self.star_status == ITEM_STARRED:
return True
return False
def is_not_starred(self):
if self.star_status == ITEM_NOT_STARRED:
return True
return False
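# Illustrative helper (not part of the original module): how the manager below
# is typically used to star a candidate. The ids are caller assumptions and a
# configured Django database is required.
def _example_toggle_star_on_candidate(voter_id, candidate_campaign_id):
    star_item_manager = StarItemManager()
    results = star_item_manager.toggle_on_voter_starred_candidate(voter_id, candidate_campaign_id)
    return results['star_item_found'] and results['star_item'].is_starred()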
class StarItemManager(models.Model):
def __unicode__(self):
return "StarItemManager"
# STAR ON
def toggle_on_voter_starred_candidate(self, voter_id, candidate_campaign_id):
star_status = ITEM_STARRED
contest_office_id = 0
contest_measure_id = 0
star_item_manager = StarItemManager()
return star_item_manager.toggle_voter_starred_item(
voter_id, star_status, candidate_campaign_id, contest_office_id, contest_measure_id)
def toggle_on_voter_starred_office(self, voter_id, contest_office_id):
star_status = ITEM_STARRED
candidate_campaign_id = 0
contest_measure_id = 0
star_item_manager = StarItemManager()
return star_item_manager.toggle_voter_starred_item(
voter_id, star_status, candidate_campaign_id, contest_office_id, contest_measure_id)
def toggle_on_voter_starred_measure(self, voter_id, contest_measure_id):
star_status = ITEM_STARRED
candidate_campaign_id = 0
contest_office_id = 0
star_item_manager = StarItemManager()
return star_item_manager.toggle_voter_starred_item(
voter_id, star_status, candidate_campaign_id, contest_office_id, contest_measure_id)
# STAR OFF
def toggle_off_voter_starred_candidate(self, voter_id, candidate_campaign_id):
star_status = ITEM_NOT_STARRED
contest_office_id = 0
contest_measure_id = 0
star_item_manager = StarItemManager()
return star_item_manager.toggle_voter_starred_item(
voter_id, star_status, candidate_campaign_id, contest_office_id, contest_measure_id)
def toggle_off_voter_starred_office(self, voter_id, contest_office_id):
star_status = ITEM_NOT_STARRED
candidate_campaign_id = 0
contest_measure_id = 0
star_item_manager = StarItemManager()
return star_item_manager.toggle_voter_starred_item(
voter_id, star_status, candidate_campaign_id, contest_office_id, contest_measure_id)
def toggle_off_voter_starred_measure(self, voter_id, contest_measure_id):
star_status = ITEM_NOT_STARRED
candidate_campaign_id = 0
contest_office_id = 0
star_item_manager = StarItemManager()
return star_item_manager.toggle_voter_starred_item(
voter_id, star_status, candidate_campaign_id, contest_office_id, contest_measure_id)
def toggle_voter_starred_item(
self, voter_id, star_status, candidate_campaign_id=0, contest_office_id=0, contest_measure_id=0,
contest_office_we_vote_id='', candidate_campaign_we_vote_id='', contest_measure_we_vote_id=''):
# Does a star_item entry exist from this voter already exist?
star_item_manager = StarItemManager()
star_item_id = 0
results = star_item_manager.retrieve_star_item(
star_item_id, voter_id,
contest_office_id, candidate_campaign_id, contest_measure_id)
star_item_on_stage_found = False
star_item_on_stage_id = 0
star_item_on_stage = StarItem()
if results['star_item_found']:
star_item_on_stage = results['star_item']
# Update this star_item entry with new values - we do not delete because we might be able to reuse it later
try:
star_item_on_stage.star_status = star_status
# We don't need to update date_last_changed here because we set auto_now=True in the field
star_item_on_stage.save()
star_item_on_stage_id = star_item_on_stage.id
star_item_on_stage_found = True
status = 'UPDATE ' + star_status
except Exception as e:
status = 'FAILED_TO_UPDATE ' + star_status
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
elif results['MultipleObjectsReturned']:
logger.warn("star_item: delete all but one and take it over?")
status = 'TOGGLE_ITEM_STARRED MultipleObjectsReturned ' + star_status
elif results['DoesNotExist']:
try:
# Create new star_item entry
if candidate_campaign_id and not candidate_campaign_we_vote_id:
candidate_campaign_manager = CandidateCampaignManager()
candidate_campaign_we_vote_id = \
candidate_campaign_manager.fetch_candidate_campaign_we_vote_id_from_id(candidate_campaign_id)
if contest_measure_id and not contest_measure_we_vote_id:
contest_measure_manager = ContestMeasureManager()
contest_measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(
contest_measure_id)
if contest_office_id and not contest_office_we_vote_id:
contest_office_manager = ContestOfficeManager()
contest_office_we_vote_id = contest_office_manager.fetch_contest_office_we_vote_id_from_id(
contest_office_id)
# NOTE: For speed purposes, we are not validating the existence of the items being starred
# although we could if the we_vote_id is not returned.
star_item_on_stage = StarItem(
voter_id=voter_id,
candidate_campaign_id=candidate_campaign_id,
candidate_campaign_we_vote_id=candidate_campaign_we_vote_id,
contest_office_id=contest_office_id,
contest_office_we_vote_id=contest_office_we_vote_id,
contest_measure_id=contest_measure_id,
contest_measure_we_vote_id=contest_measure_we_vote_id,
star_status=star_status,
# We don't need to update date_last_changed here because we set auto_now=True in the field
)
star_item_on_stage.save()
star_item_on_stage_id = star_item_on_stage.id
star_item_on_stage_found = True
status = 'CREATE ' + star_status
except Exception as e:
status = 'FAILED_TO_CREATE ' + star_status
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
status = results['status']
results = {
'success': True if star_item_on_stage_found else False,
'status': status,
'star_item_found': star_item_on_stage_found,
'star_item_id': star_item_on_stage_id,
'star_item': star_item_on_stage,
}
return results
def retrieve_star_item(self, star_item_id, voter_id, contest_office_id, candidate_campaign_id, contest_measure_id):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
star_item_on_stage = StarItem()
star_item_on_stage_id = 0
try:
if positive_value_exists(star_item_id):
star_item_on_stage = StarItem.objects.get(id=star_item_id)
star_item_on_stage_id = star_item_on_stage.id
status = 'STAR_ITEM_FOUND_WITH_ID'
success = True
elif positive_value_exists(voter_id) and positive_value_exists(candidate_campaign_id):
star_item_on_stage = StarItem.objects.get(
voter_id=voter_id,
candidate_campaign_id=candidate_campaign_id)
star_item_on_stage_id = star_item_on_stage.id
status = 'STAR_ITEM_FOUND_WITH_VOTER_ID_AND_CANDIDATE_ID'
success = True
elif positive_value_exists(voter_id) and positive_value_exists(contest_office_id):
star_item_on_stage = StarItem.objects.get(
voter_id=voter_id,
contest_office_id=contest_office_id)
star_item_on_stage_id = star_item_on_stage.id
status = 'STAR_ITEM_FOUND_WITH_VOTER_ID_AND_OFFICE_ID'
success = True
elif positive_value_exists(voter_id) and positive_value_exists(contest_measure_id):
star_item_on_stage = StarItem.objects.get(
voter_id=voter_id,
contest_measure_id=contest_measure_id)
star_item_on_stage_id = star_item_on_stage.id
status = 'STAR_ITEM_FOUND_WITH_VOTER_ID_AND_MEASURE_ID'
success = True
else:
status = 'STAR_ITEM_NOT_FOUND-MISSING_VARIABLES'
success = False
except StarItem.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
status = 'STAR_ITEM_NOT_FOUND_MultipleObjectsReturned'
success = False
except StarItem.DoesNotExist:
error_result = False
exception_does_not_exist = True
status = 'STAR_ITEM_NOT_FOUND_DoesNotExist'
success = True
star_item_on_stage_found = True if star_item_on_stage_id > 0 else False
results = {
'status': status,
'success': success,
'star_item_found': star_item_on_stage_found,
'star_item_id': star_item_on_stage_id,
'star_item': star_item_on_stage,
'is_starred': star_item_on_stage.is_starred(),
'is_not_starred': star_item_on_stage.is_not_starred(),
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
}
return results
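# --- Illustrative sketch (not part of the original We Vote code) ---
# Hypothetical view-level usage of StarItemManager; the ids are placeholders.
# toggle_on_voter_starred_candidate returns the results dict assembled in
# toggle_voter_starred_item above.
def example_toggle_candidate_star(voter_id, candidate_campaign_id):
    star_item_manager = StarItemManager()
    results = star_item_manager.toggle_on_voter_starred_candidate(voter_id, candidate_campaign_id)
    return results['success'], results['star_item_id']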
class StarItemList(models.Model):
"""
A way to retrieve all of the star_item information
"""
def retrieve_star_item_list_for_voter(self, voter_id):
# Retrieve a list of star_item entries for this voter
star_item_list_found = False
star_item_list = []
try:
star_item_list = StarItem.objects.all()
star_item_list = star_item_list.filter(voter_id=voter_id)
if len(star_item_list):
star_item_list_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if star_item_list_found:
results = {
'status': "STAR_ITEMS_FOUND",
'success': True,
'star_item_list': star_item_list,
}
return results
else:
results = {
'status': "STAR_ITEMS_NOT_FOUND",
'success': True,
'star_item_list': [],
}
return results
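# --- Illustrative sketch (not part of the original We Vote code) ---
# Hypothetical consumer of StarItemList showing the shape of the results dict
# returned by retrieve_star_item_list_for_voter above.
def example_starred_ballot_item_we_vote_ids(voter_id):
    star_item_list_manager = StarItemList()
    results = star_item_list_manager.retrieve_star_item_list_for_voter(voter_id)
    return [star_item.ballot_item_we_vote_id() for star_item in results['star_item_list']]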
|
the-stack_0_17824 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import re
import time
import uuid
from datetime import datetime
from distutils.version import LooseVersion
from ..core import Backend
from ...expr.expressions import *
from ...expr.arithmetic import Power
from ...expr.reduction import GroupedSequenceReduction, GroupedCount, Count, \
GroupedCat, Cat, NUnique, GroupedNUnique, ToList, GroupedToList, Quantile, \
GroupedQuantile
from ...expr.merge import JoinCollectionExpr
from ...expr.datetimes import DTScalar
from ...expr.collections import PivotCollectionExpr
from ...expr import arithmetic, element, composites
from ...utils import traverse_until_source
from ....dag import DAG
from ..errors import CompileError
from ..utils import refresh_dynamic
from . import types
from ... import types as df_types
from ....models import FileResource, TableResource, Schema
from .... import compat
from ....lib.xnamedtuple import xnamedtuple
from ....compat import lzip
try:
import numpy as np
import pandas as pd
except ImportError:
pd = None
np = None
if pd is not None:
PD_APPLY_HAS_RESULT_TYPE = LooseVersion(pd.__version__) >= '0.23.0'
else:
PD_APPLY_HAS_RESULT_TYPE = False
BINARY_OP_TO_PANDAS = {
'Add': operator.add,
'Substract': operator.sub,
'Multiply': operator.mul,
'Divide': operator.div if six.PY2 else operator.truediv,
'Mod': operator.mod,
'FloorDivide': operator.floordiv,
'Power': operator.pow,
'Greater': operator.gt,
'GreaterEqual': operator.ge,
'Less': operator.lt,
'LessEqual': operator.le,
'Equal': operator.eq,
'NotEqual': operator.ne,
'And': operator.and_,
'Or': operator.or_
}
UNARY_OP_TO_PANDAS = {
'Negate': operator.neg,
'Invert': operator.inv,
'Abs': operator.abs
}
if pd:
SORT_CUM_WINDOW_OP_TO_PANDAS = {
'CumSum': lambda s: s.expanding(min_periods=1).sum(),
'CumMean': lambda s: s.expanding(min_periods=1).mean(),
'CumMedian': lambda s: s.expanding(min_periods=1).median(),
'CumStd': lambda s: s.expanding(min_periods=1).std(),
'CumMin': lambda s: s.expanding(min_periods=1).min(),
'CumMax': lambda s: s.expanding(min_periods=1).max(),
'CumCount': lambda s: s.expanding(min_periods=1).count(),
}
if np:
CUM_WINDOW_OP_TO_PANDAS = {
'CumSum': np.sum,
'CumMean': np.mean,
'CumMedian': np.median,
'CumStd': np.std,
'CumMin': np.min,
'CumMax': np.max,
'CumCount': lambda x: len(x),
}
JOIN_DICT = {
'INNER': 'inner',
'LEFT OUTER': 'left',
'RIGHT OUTER': 'right',
'FULL OUTER': 'outer'
}
def _explode(obj):
if obj and isinstance(obj, tuple):
obj = obj[0]
if obj is None:
return
if isinstance(obj, dict):
for k, v in six.iteritems(obj):
yield k, v
else:
for v in obj:
yield v
def _pos_explode(obj):
if obj and isinstance(obj, tuple):
obj = obj[0]
if obj is None:
return
for idx, v in enumerate(obj):
yield idx, v
def _filter_none(col):
import numpy as np
if hasattr(col, 'dropna'):
col = col.dropna()
else:
try:
col = col[~np.isnan(col)]
except TypeError:
col = col[np.fromiter((v is not None for v in col), np.bool_)]
return col
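# --- Illustrative sketch (not part of the original PyODPS source) ---
# Quick demonstration of _filter_none above: None/NaN entries are dropped from
# a pandas Series (a numpy array without dropna() is handled by the fallback).
def _example_filter_none_usage():
    """Illustrative only: returns a Series containing just 1.0 and 2.0."""
    return _filter_none(pd.Series([1, None, 2]))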
BUILTIN_FUNCS = {
'EXPLODE': _explode,
'POSEXPLODE': _pos_explode,
}
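# --- Illustrative sketch (not part of the original PyODPS source) ---
# Small self-check showing what the builtin EXPLODE/POSEXPLODE helpers yield
# for sample row values (a one-element tuple wrapping a list, and a dict).
def _example_builtin_explode_usage():
    assert list(_explode((['a', 'b'],))) == ['a', 'b']
    assert list(_explode({'k': 1})) == [('k', 1)]
    assert list(_pos_explode((['a', 'b'],))) == [(0, 'a'), (1, 'b')]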
class PandasCompiler(Backend):
"""
PandasCompiler will compile an Expr into a DAG
in which each node is a pair of <expr, function>.
"""
def __init__(self, expr_dag):
self._dag = DAG()
self._expr_to_dag_node = dict()
self._expr_dag = expr_dag
self._callbacks = list()
def compile(self, expr):
try:
return self._compile(expr)
finally:
self._cleanup()
def _cleanup(self):
for callback in self._callbacks:
callback()
self._callbacks = list()
def _compile(self, expr, traversed=None):
if traversed is None:
traversed = set()
root = self._retrieve_until_find_root(expr)
if root is not None and id(root) not in traversed:
self._compile_join_node(root, traversed)
traversed.add(id(root))
for node in traverse_until_source(expr):
if id(node) not in traversed:
node.accept(self)
traversed.add(id(node))
return self._dag
def _compile_join_node(self, expr, traversed):
nodes = []
self._compile(expr._lhs, traversed)
nodes.append(expr._lhs)
self._compile(expr._rhs, traversed)
nodes.append(expr._rhs)
for node in expr._predicate:
nodes.append(node._lhs)
self._compile(node._lhs, traversed)
nodes.append(node._rhs)
self._compile(node._rhs, traversed)
expr.accept(self)
for node in nodes:
self._dag.add_edge(self._expr_to_dag_node[node], self._expr_to_dag_node[expr])
cached_args = expr.args
def cb():
for arg_name, arg in zip(expr._args, cached_args):
setattr(expr, arg_name, arg)
self._callbacks.append(cb)
for arg_name in expr._args:
setattr(expr, arg_name, None)
@classmethod
def _retrieve_until_find_root(cls, expr):
for node in traverse_until_source(expr, top_down=True, unique=True):
if isinstance(node, JoinCollectionExpr):
return node
def _add_node(self, expr, handle):
children = expr.children()
node = (expr, handle)
self._dag.add_node(node)
self._expr_to_dag_node[expr] = node
# skip any dependencies that do not exist in self._expr_to_dag_node yet
predecessors = [self._expr_to_dag_node[child] for child in children
if child in self._expr_to_dag_node]
[self._dag.add_edge(p, node) for p in predecessors]
def visit_source_collection(self, expr):
df = next(expr.data_source())
if not isinstance(df, pd.DataFrame):
raise ValueError('Expr data must be a pandas DataFrame.')
# make a copy to avoid modifying the source DataFrame
handle = lambda _: df.rename(columns=dict(zip(df.columns, expr.schema.names)))
self._add_node(expr, handle)
@classmethod
def _get_children_vals(cls, kw, expr=None, children=None):
children = children or expr.children()
return [kw.get(child) for child in children]
@classmethod
def _merge_values(cls, exprs, kw):
fields = [kw.get(expr) for expr in exprs]
size = max(len(f) for f, e in zip(fields, exprs) if isinstance(e, SequenceExpr))
fields = [pd.Series([f] * size) if isinstance(e, Scalar) else f
for f, e in zip(fields, exprs)]
return pd.concat(fields, axis=1, keys=[e.name for e in exprs])
def visit_project_collection(self, expr):
def handle(kw):
children = expr.children()
fields = self._get_children_vals(kw, children=children)[1:]
names = expr.schema.names
if isinstance(expr, Summary):
size = 1
else:
size = max(len(f) for f, e in zip(fields, expr._fields)
if isinstance(e, SequenceExpr))
for i in range(len(fields)):
if not isinstance(fields[i], pd.Series):
fields[i] = pd.Series([fields[i]] * size)
return pd.concat(fields, axis=1, keys=names)
self._add_node(expr, handle)
def visit_filter_partition_collection(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
df, predicate = children_vals[0:2]
return df[predicate][expr.schema.names]
self._add_node(expr, handle)
def visit_filter_collection(self, expr):
def handle(kw):
df, predicate = tuple(self._get_children_vals(kw, expr))
return df[predicate]
self._add_node(expr, handle)
def visit_slice_collection(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
df = children_vals[0]
start, end, step = expr.start, expr.stop, expr.step
return df[start: end: step]
self._add_node(expr, handle)
def visit_element_op(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
input, args = children_vals[0], children_vals[1:]
if isinstance(expr.input, Scalar):
input = pd.Series([input])
def run():
if isinstance(expr, element.IsNull):
return input.isnull()
elif isinstance(expr, element.NotNull):
return input.notnull()
elif isinstance(expr, element.FillNa):
return input.fillna(args[0])
elif isinstance(expr, element.IsIn):
if isinstance(expr._values[0], SequenceExpr):
return input.isin(list(args[0]))
else:
return input.isin(args)
elif isinstance(expr, element.NotIn):
if isinstance(expr._values[0], SequenceExpr):
return ~input.isin(list(args[0]))
else:
return ~input.isin(args)
elif isinstance(expr, element.IfElse):
return pd.Series(np.where(input, args[0], args[1]), name=expr.name, index=input.index)
elif isinstance(expr, element.Switch):
case = None if expr.case is None else kw.get(expr.case)
default = None if expr.default is None else kw.get(expr.default)
conditions = [kw.get(it) for it in expr.conditions]
thens = [kw.get(it) for it in expr.thens]
if case is not None:
conditions = [case == condition for condition in conditions]
condition_exprs = [expr.case == cond for cond in expr.conditions]
else:
condition_exprs = expr.conditions
size = max(len(val) for e, val in zip(condition_exprs + expr.thens, conditions + thens)
if isinstance(e, SequenceExpr))
curr = pd.Series([None] * size)
for condition, then in zip(conditions, thens):
curr = curr.where(-condition, then)
if default is not None:
return curr.fillna(default)
return curr
elif isinstance(expr, element.Between):
return input.between(*args)
elif isinstance(expr, element.Cut):
bins = [bin.value for bin in expr.bins]
if expr.include_under:
bins.insert(0, -float('inf'))
if expr.include_over:
bins.append(float('inf'))
labels = [l.value for l in expr.labels]
return pd.cut(input, bins, right=expr.right, labels=labels,
include_lowest=expr.include_lowest)
if isinstance(expr.input, Scalar):
return run()[0]
else:
return run()
self._add_node(expr, handle)
def visit_binary_op(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
if expr.lhs.dtype == df_types.datetime and expr.rhs.dtype == df_types.datetime:
return ((pd.to_datetime(children_vals[0]) - pd.to_datetime(children_vals[1])) /
np.timedelta64(1, 'ms')).astype(np.int64)
op = BINARY_OP_TO_PANDAS[expr.node_name]
if isinstance(expr, Power) and isinstance(expr.dtype, df_types.Integer):
return op(*children_vals).astype(types.df_type_to_np_type(expr.dtype))
return op(*children_vals)
self._add_node(expr, handle)
def visit_unary_op(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
op = UNARY_OP_TO_PANDAS[expr.node_name]
return op(*children_vals)
self._add_node(expr, handle)
def visit_math(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
if isinstance(expr, math.Log) and expr._base is not None:
base = expr._base.value
return np.log(children_vals[0]) / np.log(base)
elif isinstance(expr, math.Trunc):
decimals = expr._decimals.value
order = 10 ** decimals
return np.trunc(children_vals[0] * order) / order
else:
op = getattr(np, expr.node_name.lower())
return op(*children_vals)
self._add_node(expr, handle)
def visit_string_op(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
input = children_vals[0]
if isinstance(expr.input, Scalar):
input = pd.Series([input])
assert len(expr._args) == len(expr.args)
kv = dict((name.lstrip('_'), self._get(arg, kw))
for name, arg in zip(expr._args[1:], expr.args[1:]))
op = expr.node_name.lower()
if op == 'get':
res = getattr(getattr(input, 'str'), op)(children_vals[1])
elif op == 'strptime':
res = input.map(lambda x: datetime.strptime(x, children_vals[1]))
elif op == 'extract':
def extract(x, pat, flags, group):
regex = re.compile(pat, flags=flags)
m = regex.match(x)
if m:
return m.group(group)
df = self._merge_values([expr.input, expr._pat, expr._flags, expr._group], kw)
return pd.Series([extract(*r[1]) for r in df.iterrows()])
elif op == 'split':
return input.apply(lambda v: v.split(kv['pat'], kv['n']) if v is not None else None)
elif op == 'stringtodict':
def _parse_dict(x):
return dict(it.split(kv['kv_delim'], 1) for it in x.split(kv['item_delim']))
return input.apply(lambda v: _parse_dict(v) if v is not None else None)
else:
if op == 'slice':
kv['stop'] = kv.pop('end', None)
elif op == 'replace':
assert 'regex' in kv
if kv['regex']:
kv.pop('regex')
else:
kv['pat'] = re.escape(kv['pat'])
kv.pop('regex')
res = getattr(getattr(input, 'str'), op)(**kv)
if isinstance(expr.input, Scalar):
return res[0]
else:
return res
self._add_node(expr, handle)
def visit_datetime_op(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
input = children_vals[0]
if isinstance(expr.input, Scalar):
input = pd.Series([input])
assert len(children_vals) == len(expr.args)
kv = dict(zip([arg.lstrip('_') for arg in expr._args[1:]],
children_vals[1:]))
op = expr.node_name.lower()
res = getattr(getattr(input, 'dt'), op)
if not isinstance(res, pd.Series):
res = res(**kv)
if isinstance(expr.input, Scalar):
return res[0]
else:
return res
self._add_node(expr, handle)
def visit_groupby(self, expr):
def handle(kw):
fields_exprs = expr._fields or expr._by + expr._aggregations
fields = [[kw.get(field), ] if isinstance(field, Scalar) else kw.get(field)
for field in fields_exprs]
length = max(len(it) for it in fields)
for i in range(len(fields)):
bys = self._get_compiled_bys(kw, expr._by, length)
if isinstance(fields_exprs[i], SequenceExpr):
is_reduction = False
for n in itertools.chain(*(fields_exprs[i].all_path(expr.input))):
if isinstance(n, GroupedSequenceReduction):
is_reduction = True
break
if not is_reduction:
fields[i] = fields[i].groupby(bys).first()
elif len(fields[i]) == 1:
fields[i] = pd.Series(fields[i] * length,
name=fields_exprs[i].name).groupby(bys).first()
df = pd.concat(fields, axis=1)
if expr._having is not None:
having = kw.get(expr._having)
if all(not isinstance(e, GroupedSequenceReduction)
for e in itertools.chain(*expr._having.all_path(expr.input))):
# the having comes from the by fields, we need to do Series.groupby explicitly.
bys = self._get_compiled_bys(kw, expr._by, len(having))
having = having.groupby(bys).first()
df = df[having]
return pd.DataFrame(
df.values, columns=[f.name for f in fields_exprs])[expr.schema.names]
self._add_node(expr, handle)
def visit_mutate(self, expr):
def handle(kw):
bys = self._get_compiled_bys(kw, expr._by, len(kw.get(expr.input)))
bys = pd.concat(bys)
bys.sort_values(inplace=True)
wins = [kw.get(f) for f in expr._window_fields]
return pd.DataFrame(pd.concat([bys] + wins, axis=1).values,
columns=expr.schema.names)
self._add_node(expr, handle)
def visit_value_counts(self, expr):
def handle(kw):
by = kw.get(expr._by)
sort = kw.get(expr._sort)
ascending = kw.get(expr._ascending)
dropna = kw.get(expr._dropna)
df = by.value_counts(sort=sort, ascending=ascending, dropna=dropna).to_frame()
df.reset_index(inplace=True)
return pd.DataFrame(df.values, columns=expr.schema.names)
self._add_node(expr, handle)
def visit_sort(self, expr):
def handle(kw):
input = kw.get(expr.input)
names = expr.schema.names
sorted_columns = OrderedDict()
for field in expr._sorted_fields:
name = str(uuid.uuid4())
sorted_columns[name] = kw.get(field)
input = input.assign(**sorted_columns)
return input.sort_values(list(six.iterkeys(sorted_columns)),
ascending=expr._ascending)[names]
self._add_node(expr, handle)
def visit_sort_column(self, expr):
def handle(kw):
input = kw.get(expr.input)
if isinstance(expr.input, CollectionExpr):
return input[expr._source_name]
else:
return input
self._add_node(expr, handle)
def visit_distinct(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
fields = children_vals[1:]
ret = pd.concat(fields, axis=1, keys=expr.schema.names).drop_duplicates()
ret.reset_index(drop=True, inplace=True)
return ret
self._add_node(expr, handle)
def _get(self, item, kw):
if item is None:
return
if isinstance(item, (list, tuple, set)):
return type(item)(kw.get(it) for it in item)
return kw.get(item)
def visit_sample(self, expr):
def handle(kw):
input = self._get(expr.input, kw)
parts = self._get(expr._parts, kw)
i = self._get(expr._i, kw)
n = self._get(expr._n, kw)
frac = self._get(expr._frac, kw)
replace = self._get(expr._replace, kw)
weights = self._get(expr._weights, kw)
strata = self._get(expr._strata, kw)
random_state = self._get(expr._random_state, kw)
if expr._sampled_fields:
collection = pd.DataFrame(
pd.concat([kw.get(e) for e in expr._sampled_fields], axis=1).values,
columns=[str(uuid.uuid4()) for _ in expr._sampled_fields])
else:
collection = input
if parts is not None and frac is None:
frac = 1 / float(parts)
if i is not None and (len(i) != 1 or i[0] > 0):
raise NotImplementedError
if not strata:
sampled = collection.sample(n=n, frac=frac, replace=replace, weights=weights,
random_state=random_state)
else:
frames = []
frac = json.loads(frac) if expr._frac else dict()
n = json.loads(n) if expr._n else dict()
for val in itertools.chain(six.iterkeys(frac), six.iterkeys(n)):
v_frac = frac.get(val)
v_n = n.get(val)
filtered = collection[collection[strata].astype(str) == val]
sampled = filtered.sample(n=v_n, frac=v_frac, replace=replace, random_state=random_state)
frames.append(sampled)
if frames:
sampled = pd.concat(frames)
else:
sampled = pd.DataFrame(columns=collection.columns)
if expr._sampled_fields:
return pd.concat([input, sampled], axis=1, join='inner')[
[n for n in input.columns.tolist()]]
return sampled
self._add_node(expr, handle)
def _get_names(self, x, force_list=False):
if x is None:
return x
res = [it.name for it in x]
if not force_list and len(res) == 1:
return res[0]
return res
def _get_pivot_handler(self, expr):
def handle(kw):
df = self._merge_values(expr._group + expr._columns + expr._values, kw)
pivoted = df.pivot(index=self._get_names(expr._group),
columns=self._get_names(expr._columns))
columns = pivoted.columns.levels
pivoted.reset_index(inplace=True)
names = self._get_names(expr._group, True)
tps = [g.dtype for g in expr._group]
if len(columns[0]) == 1:
tp = expr._values[0].dtype
for name in columns[1]:
names.append(name)
tps.append(tp)
else:
for value_name, value_col in zip(columns[0], expr._values):
for name in columns[1]:
names.append('{0}_{1}'.format(name, value_name))
tps.append(value_col.dtype)
expr._schema = Schema.from_lists(names, tps)
res = pd.DataFrame(pivoted.values, columns=names)
to_sub = CollectionExpr(_source_data=res, _schema=expr._schema)
self._expr_dag.substitute(expr, to_sub)
# trigger refresh of dynamic operations
def func(expr):
for c in traverse_until_source(expr, unique=True):
if c not in self._expr_to_dag_node:
c.accept(self)
refresh_dynamic(to_sub, self._expr_dag, func=func)
return to_sub, res
return handle
def _get_pivot_table_handler(self, expr):
from ...expr.query import ExprVisitor
class WrappedNumpyFunction(object):
def __init__(self, fun):
self._fun = fun
def __call__(self, *args, **kwargs):
return self._fun(*args, **kwargs)
class AggFuncVisitor(ExprVisitor):
def __init__(self, np_object, env):
super(AggFuncVisitor, self).__init__(env)
self.np_object = np_object
def get_named_object(self, obj_name):
if obj_name == 'count':
return WrappedNumpyFunction(np.size)
elif obj_name == 'nunique':
return WrappedNumpyFunction(lambda x: np.size(np.unique(x)))
elif obj_name == 'quantile':
return WrappedNumpyFunction(lambda x, prob: np.percentile(x, prob * 100))
else:
return WrappedNumpyFunction(getattr(np, obj_name))
def visit_Call(self, node):
func = self.visit(node.func)
args = [self.visit(n) for n in node.args]
if isinstance(func, WrappedNumpyFunction):
args = [self.np_object] + args
kwargs = OrderedDict([(kw.arg, self.visit(kw.value)) for kw in node.keywords])
return func(*args, **kwargs)
def get_real_aggfunc(aggfunc):
if isinstance(aggfunc, six.string_types):
if aggfunc == 'count':
return getattr(np, 'size')
if aggfunc == 'nunique':
return lambda x: np.size(np.unique(x))
if hasattr(np, aggfunc):
return getattr(np, aggfunc)
def agg_eval(x):
visitor = AggFuncVisitor(x, {})
return visitor.eval(aggfunc, rewrite=False)
return agg_eval
if inspect.isclass(aggfunc):
aggfunc = aggfunc()
def func(x):
buffer = aggfunc.buffer()
for it in x:
aggfunc(buffer, it)
return aggfunc.getvalue(buffer)
return func
return aggfunc
def handle(kw):
columns = expr._columns if expr._columns else []
df = self._merge_values(expr._group + columns + expr._values, kw)
pivoted = df.pivot_table(index=self._get_names(expr._group),
columns=self._get_names(expr._columns),
values=self._get_names(expr._values),
aggfunc=[get_real_aggfunc(f) for f in expr._agg_func],
fill_value=expr.fill_value)
levels = pivoted.columns.levels if isinstance(pivoted.columns, pd.MultiIndex) \
else [pivoted.columns]
pivoted.reset_index(inplace=True)
names = self._get_names(expr._group, True)
tps = [g.dtype for g in expr._group]
columns_values = levels[-1] if expr._columns else [None, ]
for agg_func_name in expr._agg_func_names:
for value_col in expr._values:
for col in columns_values:
base = '{0}_'.format(col) if col is not None else ''
name = '{0}{1}_{2}'.format(base, value_col.name, agg_func_name)
names.append(name)
tps.append(value_col.dtype)
if expr._columns:
expr._schema = Schema.from_lists(names, tps)
res = pd.DataFrame(pivoted.values, columns=names)
to_sub = CollectionExpr(_source_data=res, _schema=expr._schema)
self._expr_dag.substitute(expr, to_sub)
# trigger refresh of dynamic operations
def func(expr):
for c in traverse_until_source(expr, unique=True):
if c not in self._expr_to_dag_node:
c.accept(self)
refresh_dynamic(to_sub, self._expr_dag, func=func)
return to_sub, res
return handle
def visit_pivot(self, expr):
if isinstance(expr, PivotCollectionExpr):
handle = self._get_pivot_handler(expr)
else:
handle = self._get_pivot_table_handler(expr)
self._add_node(expr, handle)
def _get_compiled_bys(self, kw, by_exprs, length):
bys = [[kw.get(by), ] if isinstance(by, Scalar) else kw.get(by)
for by in by_exprs]
if any(isinstance(e, SequenceExpr) for e in by_exprs):
size = max(len(by) for by, e in zip(bys, by_exprs)
if isinstance(e, SequenceExpr))
else:
size = length
return [(by * size if len(by) == 1 else by) for by in bys]
def _compile_grouped_reduction(self, kw, expr):
if isinstance(expr, GroupedCount) and isinstance(expr._input, CollectionExpr):
df = kw.get(expr.input)
bys = [[kw.get(by), ] if isinstance(by, Scalar) else kw.get(by)
for by in expr._by]
if any(isinstance(e, SequenceExpr) for e in expr._by):
size = max(len(by) for by, e in zip(bys, expr._by)
if isinstance(e, SequenceExpr))
else:
size = len(df)
bys = [(by * size if len(by) == 1 else by) for by in bys]
return df.groupby(bys).size()
if isinstance(expr, GroupedNUnique):
input_df = pd.concat([kw.get(ip) for ip in expr.inputs], axis=1)
bys = self._get_compiled_bys(kw, expr._by, len(input_df))
return input_df.groupby(bys).apply(lambda x: pd.Series([len(x.drop_duplicates())]))[0]
series = kw.get(expr.input) if isinstance(expr.input, SequenceExpr) \
else pd.Series([kw.get(expr.input)], name=expr.input.name)
bys = self._get_compiled_bys(kw, expr._by, len(series))
if isinstance(expr.input, Scalar):
series = pd.Series(series.repeat(len(bys[0])).values, index=bys[0].index)
if isinstance(expr, GroupedCat):
return series.groupby(bys).apply(lambda x: kw.get(expr._sep).join(x))
if isinstance(expr, GroupedToList):
if expr._unique:
return series.groupby(bys).apply(lambda x: list(set(x)))
else:
return series.groupby(bys).apply(list)
kv = dict()
if hasattr(expr, '_ddof'):
kv['ddof'] = expr._ddof
op = expr.node_name.lower()
op = 'size' if op == 'count' else op
return getattr(series.groupby(bys), op)(**kv)
def visit_reduction(self, expr):
def handle(kw):
if isinstance(expr, GroupedSequenceReduction):
return self._compile_grouped_reduction(kw, expr)
children_vals = self._get_children_vals(kw, expr)
kv = dict()
if hasattr(expr, '_ddof'):
kv['ddof'] = expr._ddof
op = expr.node_name.lower()
op = 'size' if op == 'count' else op
if isinstance(expr, NUnique):
inputs = children_vals[:len(expr.inputs)]
if len(expr.inputs) == 1:
inputs[0] = _filter_none(inputs[0])
return len(pd.concat(inputs, axis=1).drop_duplicates())
input = children_vals[0]
if getattr(expr, '_unique', False):
input = input.unique()
if isinstance(expr, Count):
if isinstance(expr.input, CollectionExpr):
return len(input)
elif isinstance(expr.input, SequenceExpr):
return len(_filter_none(input))
input = _filter_none(input)
if isinstance(expr, (Cat, GroupedCat)):
kv['sep'] = expr._sep.value if isinstance(expr._sep, Scalar) else expr._sep
kv['na_rep'] = expr._na_rep.value \
if isinstance(expr._na_rep, Scalar) else expr._na_rep
return getattr(getattr(input, 'str'), 'cat')(**kv)
elif isinstance(expr, (ToList, GroupedToList)):
return list(input)
elif isinstance(expr, (Quantile, GroupedQuantile)):
if isinstance(expr._prob, (list, set)):
return [np.percentile(input, p * 100) for p in expr._prob]
else:
return np.percentile(input, expr._prob * 100)
return getattr(input, op)(**kv)
self._add_node(expr, handle)
def visit_user_defined_aggregator(self, expr):
def handle(kw):
resources = self._get_resources(expr, kw)
input = self._merge_values(expr._inputs, kw)
func = expr._aggregator
args = expr._func_args
kwargs = expr._func_kwargs or dict()
if resources:
if not args and not kwargs:
agg = func(resources)
else:
kwargs['resources'] = resources
agg = func(*args, **kwargs)
else:
agg = func(*args, **kwargs)
if isinstance(expr, GroupedSequenceReduction):
bys = [[kw.get(by), ] if isinstance(by, Scalar) else kw.get(by)
for by in expr._by]
else:
bys = [[1, ]]
if expr._by and any(isinstance(e, SequenceExpr) for e in expr._by):
size = max(len(by) for by, e in zip(bys, expr._by)
if isinstance(e, SequenceExpr))
else:
size = len(input)
bys = [(by * size if len(by) == 1 else by) for by in bys]
def iterrows(x):
if getattr(expr, '_unique', False):
vset = set()
for it in x.iterrows():
if bytes(it[1].values.data) not in vset:
yield it
vset.add(bytes(it[1].values.data))
else:
for it in x.iterrows():
yield it
def f(x):
buffer = agg.buffer()
for it in iterrows(x):
agg(buffer, *it[1])
ret = agg.getvalue(buffer)
np_type = types.df_type_to_np_type(expr.dtype)
return np.array([ret,], dtype=np_type)[0]
res = input.groupby(bys).apply(f)
if isinstance(expr, Scalar):
return res.iloc[0]
return res
self._add_node(expr, handle)
def visit_column(self, expr):
def handle(kw):
children_vals = self._get_children_vals(kw, expr)
# FIXME: consider the name which is unicode
return children_vals[0][expr._source_name]
self._add_node(expr, handle)
def _get_resources(self, expr, kw):
if not expr._resources:
return
res = []
collection_idx = 0
for resource in expr._resources:
if isinstance(resource, FileResource):
res.append(resource.open())
elif isinstance(resource, TableResource):
def gen():
table = resource.get_source_table()
named_args = xnamedtuple('NamedArgs', table.schema.names)
partition = resource.get_source_table_partition()
with table.open_reader(partition=partition) as reader:
for r in reader:
yield named_args(*r.values)
res.append(gen())
else:
resource = expr._collection_resources[collection_idx]
collection_idx += 1
df = kw.get(resource)
def gen():
named_args = xnamedtuple('NamedArgs', resource.schema.names)
for r in df.iterrows():
yield named_args(*r[1])
res.append(gen())
return res
def visit_function(self, expr):
def handle(kw):
resources = self._get_resources(expr, kw)
if not expr._multiple:
input = self._get_children_vals(kw, expr)[0]
if isinstance(expr.inputs[0], Scalar):
input = pd.Series([input])
func = expr._func
args = expr._func_args
kwargs = expr._func_kwargs
if args is not None and len(args) > 0:
raise NotImplementedError
if kwargs is not None and len(kwargs) > 0:
raise NotImplementedError
if inspect.isclass(func):
if resources:
func = func(resources)
else:
func = func()
else:
if resources:
func = func(resources)
res = input.map(func)
if isinstance(expr.inputs[0], Scalar):
return res[0]
return res
else:
input = self._merge_values(expr.inputs, kw)
def func(s):
names = [f.name for f in expr.inputs]
t = xnamedtuple('NamedArgs', names)
row = t(*s.tolist())
if not inspect.isfunction(expr._func):
if resources:
f = expr._func(resources)
else:
f = expr._func()
else:
if resources:
f = expr._func(resources)
else:
f = expr._func
res = f(row, *expr._func_args, **expr._func_kwargs)
if not inspect.isgeneratorfunction(f):
return res
return next(res)
if PD_APPLY_HAS_RESULT_TYPE:
return input.apply(func, axis=1, result_type='reduce',
args=expr._func_args, **expr._func_kwargs)
else:
return input.apply(func, axis=1, reduce=True,
args=expr._func_args, **expr._func_kwargs)
self._add_node(expr, handle)
def visit_reshuffle(self, expr):
def handle(kw):
if expr._sort_fields is not None:
input = kw.get(expr._input)
names = []
for sort in expr._sort_fields:
name = str(uuid.uuid4())
input[name] = kw.get(sort)
names.append(name)
input = input.sort_values(
names, ascending=[f._ascending for f in expr._sort_fields])
return input[expr.schema.names]
return kw.get(expr._input)
self._add_node(expr, handle)
def _check_output_types(self, pd_df, expect_df_types):
for field, expect_df_type in zip(pd_df.columns, expect_df_types):
arr = pd_df[field].values
try:
df_type = types.np_type_to_df_type(pd_df[field].dtype, arr=arr)
except TypeError:
# all element is None
continue
if not expect_df_type.can_implicit_cast(df_type):
raise TypeError('Field(%s) has wrong type, expect %s, got %s' % (
field, expect_df_type, df_type
))
return pd_df
def visit_apply_collection(self, expr):
def conv(l):
if isinstance(l, tuple):
l = list(l)
elif not isinstance(l, list):
l = [l, ]
return l
def handle(kw):
resources = self._get_resources(expr, kw)
input = self._merge_values(expr.fields, kw)
names = [f.name for f in expr.fields]
t = xnamedtuple('NamedArgs', names)
expr._func_args = expr._func_args or ()
expr._func_kwargs = expr._func_kwargs or {}
func = expr._func
if isinstance(func, six.string_types) and func.upper() in BUILTIN_FUNCS:
func = BUILTIN_FUNCS[func.upper()]
if inspect.isfunction(func):
if resources:
func = func(resources)
is_generator_function = inspect.isgeneratorfunction(func)
close_func = None
is_close_generator_function = False
elif hasattr(func, '__call__'):
if resources:
func = func(resources)
else:
func = func()
is_generator_function = inspect.isgeneratorfunction(func.__call__)
close_func = getattr(func, 'close', None)
is_close_generator_function = inspect.isgeneratorfunction(close_func)
else:
raise NotImplementedError
rows = []
indices = []
idx = 0
for s in input.iterrows():
row = t(*s[1])
res = func(row, *expr._func_args, **expr._func_kwargs)
expand_num = 0
if is_generator_function:
for l in res:
rows.append(conv(l))
expand_num += 1
else:
if res:
rows.append(conv(res))
expand_num += 1
if expand_num == 0 and expr._keep_nulls:
rows.append([None] * len(names))
expand_num += 1
indices.extend([s[0]] * expand_num)
idx = max(idx, s[0] + 1)
if close_func:
expand_num = 0
if is_close_generator_function:
for l in close_func(*expr._func_args, **expr._func_kwargs):
rows.append(conv(l))
expand_num += 1
else:
rows.append(close_func(*expr._func_args, **expr._func_kwargs))
expand_num += 1
indices.extend([idx] * expand_num)
if expr._lateral_view:
out_df = pd.DataFrame(rows, columns=expr.schema.names,
index=pd.Int64Index(indices))
else:
out_df = pd.DataFrame(rows, columns=expr.schema.names)
return self._check_output_types(out_df, expr.schema.types)
self._add_node(expr, handle)
def visit_lateral_view(self, expr):
def handle(kw):
lv_sources = dict()
for lv in expr.lateral_views:
for col_name in lv.schema.names:
lv_sources[col_name] = lv
children = expr.children()
fields = self._get_children_vals(kw, children=children)[1:len(expr._fields) + 1]
names = expr.schema.names
idx = reduce(operator.and_, (set(f.index.tolist()) for f, e in zip(fields, expr._fields)
if isinstance(e, SequenceExpr)))
idx = pd.Int64Index(sorted(idx))
result = pd.DataFrame(index=idx)
lv_visited = set()
for i in range(len(fields)):
f = fields[i]
if names[i] in lv_sources:
lv_src = lv_sources[names[i]]
if lv_src in lv_visited:
continue
lv_visited.add(lv_src)
f = kw[lv_src]
elif not isinstance(f, pd.Series):
f = pd.Series([f] * len(idx), index=idx, name=names[i])
result = result.join(f)
return result
self._add_node(expr, handle)
def visit_composite_op(self, expr):
def handle(kw):
def _zip_args(fields):
zip_args = []
seq_index = None
for it in fields:
if isinstance(it, SequenceExpr):
zip_args.append(kw[it])
seq_index = kw[it].index
else:
zip_args.append(itertools.repeat(kw[it]))
return seq_index, zip_args
children_vals = self._get_children_vals(kw, expr)
_input = children_vals[0]
if isinstance(expr, composites.ListDictLength):
return _input.apply(lambda v: len(v) if v is not None else None)
elif isinstance(expr, composites.ListDictGetItem):
def _get_list_item(l, x):
try:
return l[x] if l is not None else None
except IndexError:
return None
_value = children_vals[1]
if isinstance(expr.input.dtype, df_types.List):
item_fun = _get_list_item
else:
item_fun = lambda s, k: s.get(k) if s is not None else None
if isinstance(expr, Scalar):
return item_fun(_input, _value)
else:
if isinstance(expr.input, Scalar):
return _value.apply(lambda v: item_fun(_input, v))
if isinstance(expr._key, Scalar):
return _input.apply(lambda v: item_fun(v, _value))
seq_values = [item_fun(k, v) for k, v in compat.izip(_input, _value)]
return pd.Series(seq_values, index=_input.index, name=expr.name)
elif isinstance(expr, composites.ListContains):
_value = children_vals[1]
contains_fun = lambda s, k: k in s if s is not None else None
if isinstance(expr, Scalar):
return contains_fun(_input, _value)
else:
if isinstance(expr.input, Scalar):
return _value.apply(lambda v: contains_fun(_input, v))
if isinstance(expr._value, Scalar):
return _input.apply(lambda v: contains_fun(v, _value))
seq_values = [contains_fun(k, v) for k, v in compat.izip(_input, _value)]
return pd.Series(seq_values, index=_input.index, name=expr.name)
elif isinstance(expr, composites.ListSort):
return _input.apply(lambda l: sorted(l) if l is not None else None)
elif isinstance(expr, composites.DictKeys):
return _input.apply(lambda d: list(six.iterkeys(d)) if d is not None else None)
elif isinstance(expr, composites.DictValues):
return _input.apply(lambda d: list(six.itervalues(d)) if d is not None else None)
elif isinstance(expr, composites.ListBuilder):
if isinstance(expr, Scalar):
return [kw[v] for v in expr._values]
else:
seq_index, zip_args = _zip_args(expr._values)
seq_values = []
for r in compat.izip(*zip_args):
seq_values.append(list(r))
return pd.Series(seq_values, index=seq_index, name=expr.name)
elif isinstance(expr, composites.DictBuilder):
if isinstance(expr, Scalar):
return OrderedDict((kw[k], kw[v]) for k, v in compat.izip(expr._keys, expr._values))
else:
seq_index, zip_args = _zip_args(expr._keys + expr._values)
seq_values = []
dict_len = len(expr._values)
for r in zip(*zip_args):
seq_values.append(OrderedDict((k, v) for k, v in compat.izip(r[:dict_len], r[dict_len:])))
return pd.Series(seq_values, index=seq_index, name=expr.name)
else:
raise NotImplementedError
self._add_node(expr, handle)
def visit_sequence(self, expr):
raise NotImplementedError
def visit_cum_window(self, expr):
if expr.preceding is not None or expr.following is not None:
raise NotImplementedError
def handle(kw):
input = kw.get(expr.input)
bys = self._get_compiled_bys(kw, expr.partition_by, len(input))
grouped = input.groupby(bys)
if expr.order_by:
sort = [kw.get(e) for e in expr.order_by]
ascendings = [e._ascending for e in expr.order_by]
for s in sort:
sort_name = str(uuid.uuid4())
s.name = sort_name
else:
sort = None
ascendings = None
def f(x):
if sort:
df = pd.concat([x] + sort, join='inner', axis=1)
df.sort_values([s.name for s in sort], ascending=ascendings, inplace=True)
series = df[x.name]
if expr.node_name in SORT_CUM_WINDOW_OP_TO_PANDAS:
return SORT_CUM_WINDOW_OP_TO_PANDAS[expr.node_name](series)
elif expr.node_name == 'NthValue':
values = [None] * len(series)
if expr._skip_nulls:
new_series = _filter_none(series)
else:
new_series = series
if expr._nth < len(new_series):
values[expr._nth:] = [new_series.iloc[expr._nth]] * (len(series) - expr._nth)
return pd.Series(values, index=series.index)
else:
raise NotImplementedError
else:
if expr.distinct:
new_x = x.drop_duplicates()
else:
new_x = x
if expr.node_name in CUM_WINDOW_OP_TO_PANDAS:
val = CUM_WINDOW_OP_TO_PANDAS[expr.node_name](new_x)
elif expr.node_name == 'NthValue':
if expr._skip_nulls:
new_series = _filter_none(x)
else:
new_series = x
if expr._nth < len(new_series):
val = new_series.iloc[expr._nth]
else:
val = None
else:
raise NotImplementedError
return pd.Series([val] * len(x), index=x.index)
res = grouped.apply(f)
if sort:
for _ in bys:
res = res.reset_index(level=0, drop=True)
return res
self._add_node(expr, handle)
def visit_rank_window(self, expr):
def handle(kw):
input = kw.get(expr.input)
sort = [kw.get(e) * (1 if e._ascending else -1)
for e in expr.order_by]
bys = self._get_compiled_bys(kw, expr.partition_by, len(input))
sort_names = [str(uuid.uuid4()) for _ in sort]
by_names = [str(uuid.uuid4()) for _ in bys]
input_names = [input.name] if isinstance(input, pd.Series) else input.columns.tolist()
df = pd.DataFrame(pd.concat([input] + sort + [pd.Series(b) for b in bys], axis=1).values,
columns=input_names + sort_names + by_names,
index=input.index)
df.sort_values(sort_names, inplace=True)
grouped = df.groupby(by_names)
try:
pd_fast_zip = pd._libs.lib.fast_zip
except AttributeError:
pd_fast_zip = pd.lib.fast_zip
def f(x):
s_df = pd.Series(pd_fast_zip([x[s].values for s in sort_names]), index=x.index)
if expr.node_name == 'Rank':
return s_df.rank(method='min')
elif expr.node_name == 'DenseRank':
return s_df.rank(method='dense')
elif expr.node_name == 'RowNumber':
return pd.Series(compat.lrange(1, len(s_df) + 1), index=s_df.index)
elif expr.node_name == 'PercentRank':
if len(s_df) == 1:
return pd.Series([0.0, ], index=s_df.index)
return (s_df.rank(method='min') - 1) / (len(s_df) - 1)
elif expr.node_name == 'CumeDist':
return pd.Series([v * 1.0 / len(s_df) for v in compat.irange(1, len(s_df) + 1)],
index=s_df.index)
elif expr.node_name == 'QCut':
if len(s_df) <= 1:
return pd.Series([0] * len(s_df), index=s_df.index, dtype=np.int64)
return pd.Series(pd.qcut(compat.irange(1, len(s_df) + 1), expr._bins, labels=False),
index=s_df.index, dtype=np.int64)
else:
raise NotImplementedError
res = grouped.apply(f)
if isinstance(res, pd.DataFrame):
res = res.iloc[0]
else:
for _ in bys:
res = res.reset_index(level=0, drop=True)
return res
self._add_node(expr, handle)
def visit_shift_window(self, expr):
def handle(kw):
input = kw.get(expr.input)
bys = self._get_compiled_bys(kw, expr.partition_by, len(input))
grouped = input.groupby(bys)
if expr.order_by:
sort = [kw.get(e) for e in expr.order_by]
ascendings = [e._ascending for e in expr.order_by]
for s in sort:
sort_name = str(uuid.uuid4())
s.name = sort_name
else:
sort = None
ascendings = None
if expr.node_name == 'Lag':
shift = kw.get(expr.offset)
else:
assert expr.node_name == 'Lead'
shift = -kw.get(expr.offset)
default = kw.get(expr.default)
def f(x):
if sort:
df = pd.concat([x] + sort, join='inner', axis=1)
df.sort_values([s.name for s in sort], ascending=ascendings, inplace=True)
series = df[x.name]
else:
series = x
res = series.shift(shift)
if default is not None:
return res.fillna(default)
return res
res = grouped.apply(f)
if sort:
for _ in bys:
res = res.reset_index(level=0, drop=True)
return res
self._add_node(expr, handle)
def visit_scalar(self, expr):
def handle(_):
if isinstance(expr, DTScalar):
arg_name = type(expr).__name__.lower()[:-6] + 's'
value = expr.value
if arg_name == 'milliseconds':
arg_name = 'microseconds'
value *= 1000
return pd.DateOffset(**{arg_name: value})
if expr.value is not None:
return expr.value
return None
self._add_node(expr, handle)
def visit_cast(self, expr):
def handle(kw):
dtype = types.df_type_to_np_type(expr.dtype)
input = self._get_children_vals(kw, expr)[0]
if isinstance(expr._input, Scalar):
return pd.Series([input]).astype(dtype)[0]
return input.astype(dtype)
self._add_node(expr, handle)
@classmethod
def _find_all_equalizations(cls, predicate, lhs, rhs):
return [eq for eq in traverse_until_source(predicate, top_down=True, unique=True)
if isinstance(eq, arithmetic.Equal) and
eq.is_ancestor(lhs) and eq.is_ancestor(rhs)]
def visit_join(self, expr):
def handle(kw):
left = kw.get(expr._lhs)
right = kw.get(expr._rhs)
eqs = expr._predicate
left_ons = []
right_ons = []
on_same_names = set()
for eq in eqs:
if isinstance(eq._lhs, Column) and isinstance(eq._rhs, Column) and \
eq._lhs.source_name == eq._rhs.source_name:
left_ons.append(eq._lhs.source_name)
right_ons.append(eq._rhs.source_name)
on_same_names.add(eq._lhs.source_name)
continue
left_name = str(uuid.uuid4())
left[left_name] = kw.get(eq._lhs)
left_ons.append(left_name)
right_name = str(uuid.uuid4())
right[right_name] = kw.get(eq._rhs)
right_ons.append(right_name)
for idx, collection in enumerate([left, right]):
collection_expr = (expr._lhs, expr._rhs)[idx]
for field_name in collection_expr.schema.names:
if field_name in expr._renamed_columns and field_name in on_same_names:
new_name = expr._renamed_columns[field_name][idx]
collection[new_name] = collection[field_name]
merged = left.merge(right, how=JOIN_DICT[expr._how], left_on=left_ons,
right_on=right_ons,
suffixes=(expr._left_suffix, expr._right_suffix))
cols = []
for name in expr.schema.names:
if name in merged:
cols.append(merged[name])
else:
cols.append(merged[expr._column_origins[name][1]])
return pd.concat(cols, axis=1, keys=expr.schema.names)
# Just add node, shouldn't add edge here
node = (expr, handle)
self._dag.add_node(node)
self._expr_to_dag_node[expr] = node
def visit_extract_kv(self, expr):
def handle(kw):
from ... import types
_input = kw.get(expr._input)
columns = [getattr(_input, c.name) for c in expr._columns]
kv_delim = kw.get(expr._kv_delimiter)
item_delim = kw.get(expr._item_delimiter)
default = kw.get(expr._default)
kv_slot_map = dict()
app_col_names = []
def validate_kv(v):
parts = v.split(kv_delim)
if len(parts) != 2:
raise ValueError('Malformed KV pair: %s' % v)
return parts[0]
for col in columns:
kv_slot_map[col.name] = dict()
keys = col.apply(lambda s: [validate_kv(kv) for kv in s.split(item_delim)])
for k in sorted(compat.reduce(lambda a, b: set(a) | set(b), keys, set())):
app_col_names.append('%s_%s' % (col.name, k))
kv_slot_map[col.name][k] = len(app_col_names) - 1
type_adapter = None
if isinstance(expr._column_type, types.Float):
type_adapter = float
elif isinstance(expr._column_type, types.Integer):
type_adapter = int
append_grid = [[default] * len(app_col_names) for _ in compat.irange(len(_input))]
for col in columns:
series = getattr(_input, col.name)
for idx, v in enumerate(series):
for kv_item in v.split(item_delim):
k, v = kv_item.split(kv_delim)
if type_adapter:
v = type_adapter(v)
append_grid[idx][kv_slot_map[col.name][k]] = v
intact_names = [c.name for c in expr._intact]
intact_types = [c.dtype for c in expr._intact]
intact_df = _input[intact_names]
append_df = pd.DataFrame(append_grid, columns=app_col_names)
expr._schema = Schema.from_lists(
intact_names + app_col_names,
intact_types + [expr._column_type] * len(app_col_names),
)
res = pd.concat([intact_df, append_df], axis=1)
to_sub = CollectionExpr(_source_data=res, _schema=expr._schema)
self._expr_dag.substitute(expr, to_sub)
# trigger refresh of dynamic operations
def func(expr):
for c in traverse_until_source(expr, unique=True):
if c not in self._expr_to_dag_node:
c.accept(self)
refresh_dynamic(to_sub, self._expr_dag, func=func)
return to_sub, res
self._add_node(expr, handle)
def visit_union(self, expr):
if expr._distinct:
raise CompileError("Distinct union is not supported here.")
def handle(kw):
left = kw.get(expr._lhs)
right = kw.get(expr._rhs)
merged = pd.concat([left, right])
return merged[expr.schema.names]
self._add_node(expr, handle)
def visit_concat(self, expr):
def handle(kw):
left = kw.get(expr._lhs)
right = kw.get(expr._rhs)
merged = pd.concat([left, right], axis=1)
return merged[expr.schema.names]
self._add_node(expr, handle)
def visit_append_id(self, expr):
def handle(kw):
_input = kw.get(expr._input)
id_col = kw.get(expr._id_col)
id_seq = pd.DataFrame(compat.lrange(len(_input)), columns=[id_col])
return pd.concat([id_seq, _input], axis=1)
self._add_node(expr, handle)
def visit_split(self, expr):
def handle(kw):
_input = kw.get(expr._input)
frac = kw.get(expr._frac)
seed = kw.get(expr._seed) if expr._seed else None
split_id = kw.get(expr._split_id)
if seed is not None:
np.random.seed(seed)
cols = list(_input.columns)
factor_col = 'rand_factor_%d' % int(time.time())
factor_df = pd.DataFrame(np.random.rand(len(_input)), columns=[factor_col])
concated_df = pd.concat([factor_df, _input], axis=1)
if split_id == 0:
return concated_df[concated_df[factor_col] <= frac][cols]
else:
return concated_df[concated_df[factor_col] > frac][cols]
self._add_node(expr, handle)
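# --- Illustrative sketch (not part of the original PyODPS source) ---
# PandasCompiler.compile() returns a DAG whose nodes are (expr, handle) pairs;
# the real execution engine lives elsewhere in PyODPS, but conceptually it
# evaluates the DAG roughly like the hypothetical helper below: walk the nodes
# in topological order and hand each handle the dict of already-computed
# results keyed by expr.  The topological_sort() call is an assumption about
# the DAG API made for illustration only.
def _example_execute_compiled_dag(dag):
    results = dict()
    for expr, handle in dag.topological_sort():
        results[expr] = handle(results)
    return results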
|
the-stack_0_17826 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import parlai.utils.testing as testing_utils
BATCH_SIZE = 8
class TestHred(unittest.TestCase):
"""
Checks that Hred can learn some very basic tasks.
"""
def test_generation(self):
valid, test = testing_utils.train_model(
dict(
task="integration_tests:multiturn_candidate",
model="hred",
batchsize=BATCH_SIZE,
num_epochs=10,
embeddingsize=16,
hiddensize=32,
numlayers=1,
dropout=0.0,
skip_generation=True,
)
)
self.assertLess(valid["ppl"], 2)
@testing_utils.retry(ntries=3)
def test_greedy(self):
"""
Test a simple multiturn task.
"""
valid, test = testing_utils.eval_model(
dict(
task="integration_tests:multiturn_candidate",
model="hred",
model_file="zoo:unittest/hred_model/model",
dict_file="zoo:unittest/hred_model/model.dict",
skip_generation=False,
inference="greedy",
numlayers=1,
embeddingsize=16,
hiddensize=32,
batchsize=BATCH_SIZE,
)
)
self.assertLess(valid["ppl"], 1.2)
self.assertLess(test["ppl"], 1.2)
@testing_utils.retry(ntries=3)
def test_beamsearch(self):
"""
Ensures beam search can generate the correct response.
"""
valid, test = testing_utils.eval_model(
dict(
task="integration_tests:multiturn_candidate",
model="hred",
model_file="zoo:unittest/hred_model/model",
dict_file="zoo:unittest/hred_model/model.dict",
skip_generation=False,
numlayers=1,
embeddingsize=16,
hiddensize=32,
batchsize=8,
inference="beam",
beam_size=5,
)
)
self.assertGreater(valid["accuracy"], 0.95)
self.assertGreater(test["accuracy"], 0.95)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_17829 | import PyQt5
from PyQt5 import QtWidgets, uic
from PyQt5.uic import loadUi
from PyQt5.QtWidgets import QApplication,QMainWindow,QDialog,QWidget
import arcpy
import os
import sys
class WelcomeWindow(QMainWindow):
def __init__(self):
super(WelcomeWindow,self).__init__()
loadUi("shops.ui",self)
self.ENTER.clicked.connect(self.SetDirectory)
self.pushButton.clicked.connect(self.Extraction)
# Set the workspace from the chosen directory and populate the layer combo boxes
def SetDirectory(self):
Working_Directory= self.plainTextEdit.toPlainText()
arcpy.env.workspace=(Working_Directory)
Featurelist=arcpy.ListFeatureClasses()
self.country_comb.addItems(Featurelist)
self.comboBox_2.addItems(Featurelist)
# Extract all shops of the chosen type that fall within the selected country
def Extraction(self):
countries =self.country_comb.currentText()
shop = self.comboBox_2.currentText()
Country = self.lineEdit_4.text()
shopType = self.lineEdit_5.text()
outpath = self.lineEdit.text()
s_layer=arcpy.MakeFeatureLayer_management(shop, 'shops_layer')
c_layer=arcpy.MakeFeatureLayer_management(countries, 'countries_layer')
#selection by attribute
country=arcpy.SelectLayerByAttribute_management(c_layer,'NEW_SELECTION',"NAME= '" + Country + "'")
#selection by location
# Process: Select Layer By Location
shops_out = arcpy.SelectLayerByLocation_management(s_layer, "INTERSECT", country, "", "NEW_SELECTION", "NOT_INVERT")
#selection by attribute
SHOPS=arcpy.SelectLayerByAttribute_management(s_layer,'SUBSET_SELECTION',"shop= '" + shopType + "'")
# Writing the selected features to the output location
output=arcpy.FeatureClassToFeatureClass_conversion(SHOPS, outpath, 'shops')
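# --- Illustrative sketch (not part of the original script) ---
# The Extraction() chain above selects a country by attribute, intersects the
# shops layer with that selection, then filters by shop type.  The hypothetical
# helper below only shows how the two where clauses are built, using made-up
# example inputs.
def example_where_clauses(country_name='Kenya', shop_type='supermarket'):
    country_clause = "NAME= '" + country_name + "'"
    shop_clause = "shop= '" + shop_type + "'"
    return country_clause, shop_clause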
app = QtWidgets.QApplication([])
Welcome = WelcomeWindow()
Welcome.show()
sys.exit(app.exec())
|
the-stack_0_17830 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Molecule Utils Module."""
from __future__ import print_function
import contextlib
import fnmatch
import jinja2
import os
import re
import sys
try:
from subprocess import check_output # noqa 401
from subprocess import run # noqa 401
except ImportError:
from subprocess32 import check_output # noqa 401
from subprocess32 import run # noqa 401
try:
from collections.abc import Mapping
except ImportError: # Python 2 compatibility
from collections import Mapping
try:
from functools import lru_cache # noqa
except ImportError:
from backports.functools_lru_cache import lru_cache # noqa
import colorama
import yaml
from molecule.logger import get_logger
LOG = get_logger(__name__)
class SafeDumper(yaml.SafeDumper):
"""SafeDumper YAML Class."""
def increase_indent(self, flow=False, indentless=False):
return super(SafeDumper, self).increase_indent(flow, False)
def print_debug(title, data):
"""Print debug information."""
title = 'DEBUG: {}'.format(title)
title = [
colorama.Back.WHITE,
colorama.Style.BRIGHT,
colorama.Fore.BLACK,
title,
colorama.Fore.RESET,
colorama.Back.RESET,
colorama.Style.RESET_ALL,
]
print(''.join(title))
data = [
colorama.Fore.BLACK,
colorama.Style.BRIGHT,
data,
colorama.Style.RESET_ALL,
colorama.Fore.RESET,
]
print(''.join(data))
def print_environment_vars(env):
"""
Print ``Ansible`` and ``Molecule`` environment variables and returns None.
:param env: A dict containing the shell's environment as collected by
``os.environ``.
:return: None
"""
ansible_env = {k: v for (k, v) in env.items() if 'ANSIBLE_' in k}
print_debug('ANSIBLE ENVIRONMENT', safe_dump(ansible_env))
molecule_env = {k: v for (k, v) in env.items() if 'MOLECULE_' in k}
print_debug('MOLECULE ENVIRONMENT', safe_dump(molecule_env))
combined_env = ansible_env.copy()
combined_env.update(molecule_env)
print_debug(
'SHELL REPLAY',
" ".join(["{}={}".format(k, v) for (k, v) in sorted(combined_env.items())]),
)
print()
def sysexit(code=1):
"""Performs a system exit with given code, default 1."""
sys.exit(code)
def sysexit_with_message(msg, code=1):
"""Exits with an error message."""
LOG.critical(msg)
sysexit(code)
def run_command(cmd, debug=False):
"""
    Execute the given command and return the ``sh`` result.
:param cmd: A ``sh.Command`` object to execute.
:param debug: An optional bool to toggle debug output.
:return: ``sh`` object
"""
if debug:
# WARN(retr0h): Uses an internal ``sh`` data structure to dig
# the environment out of the ``sh.command`` object.
print_environment_vars(cmd._partial_call_args.get('env', {}))
print_debug('COMMAND', str(cmd))
print()
return cmd(_truncate_exc=False)
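# Illustrative call (hedged sketch): `cmd` is expected to be a baked ``sh`` command;
# the playbook name below is only an example, not part of this module.
#
#   import sh
#   cmd = sh.Command('ansible-playbook').bake('site.yml')
#   run_command(cmd, debug=True)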
def os_walk(directory, pattern, excludes=[]):
"""Navigate recursively and retried files based on pattern."""
for root, dirs, files in os.walk(directory, topdown=True):
dirs[:] = [d for d in dirs if d not in excludes]
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
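# Illustrative usage (the directory path is hypothetical): yield every '*.yml' file
# under a tree while skipping a '.git' directory.
#
#   for path in os_walk('/tmp/project', '*.yml', excludes=['.git']):
#       print(path)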
def render_template(template, **kwargs):
"""Render a jinaj2 template."""
t = jinja2.Environment()
t = t.from_string(template)
return t.render(kwargs)
def write_file(filename, content):
"""
Writes a file with the given filename and content and returns None.
:param filename: A string containing the target filename.
:param content: A string containing the data to be written.
:return: None
"""
with open_file(filename, 'w') as f:
f.write(content)
file_prepender(filename)
def molecule_prepender(content):
"""Return molecule identification header."""
return '# Molecule managed\n\n' + content
def file_prepender(filename):
"""
Prepend an informational header on files managed by Molecule and returns \
None.
:param filename: A string containing the target filename.
:return: None
"""
with open_file(filename, 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write(molecule_prepender(content))
def safe_dump(data):
"""
Dump the provided data to a YAML document and returns a string.
:param data: A string containing an absolute path to the file to parse.
:return: str
"""
# TODO(retr0h): Do we need to encode?
# yaml.dump(data) produces the document as a str object in both python
# 2 and 3.
return yaml.dump(
data, Dumper=SafeDumper, default_flow_style=False, explicit_start=True
)
def safe_load(string):
"""
    Parse the provided string and returns a dict.
:param string: A string to be parsed.
:return: dict
"""
try:
return yaml.safe_load(string) or {}
except yaml.scanner.ScannerError as e:
sysexit_with_message(str(e))
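# Illustrative behaviour, based on the yaml.safe_load call above:
#
#   safe_load('foo: bar')  # -> {'foo': 'bar'}
#   safe_load('')          # -> {}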
def safe_load_file(filename):
"""
Parse the provided YAML file and returns a dict.
:param filename: A string containing an absolute path to the file to parse.
:return: dict
"""
with open_file(filename) as stream:
return safe_load(stream)
@contextlib.contextmanager
def open_file(filename, mode='r'):
"""
    Open the provided file safely and returns a file type.
:param filename: A string containing an absolute path to the file to open.
:param mode: A string describing the way in which the file will be used.
:return: file type
"""
with open(filename, mode) as stream:
yield stream
def instance_with_scenario_name(instance_name, scenario_name):
"""Formats instance name that includes scenario."""
return '{}-{}'.format(instance_name, scenario_name)
def strip_ansi_escape(string):
"""Removeall ANSI escapes from string."""
return re.sub(r'\x1b[^m]*m', '', string)
def strip_ansi_color(s):
"""Removes ANSI colors from string."""
# Taken from tabulate
invisible_codes = re.compile(r'\x1b\[\d*m')
return re.sub(invisible_codes, '', s)
def verbose_flag(options):
"""Return computed verbosity flag."""
verbose = 'v'
verbose_flag = []
for i in range(0, 3):
if options.get(verbose):
verbose_flag = ['-{}'.format(verbose)]
del options[verbose]
if options.get('verbose'):
del options['verbose']
break
verbose = verbose + 'v'
return verbose_flag
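# Illustrative behaviour: the flag is derived from a 'v'/'vv'/'vvv' key and that key
# is removed from the options dict.
#
#   opts = {'vvv': True}
#   verbose_flag(opts)  # -> ['-vvv']; opts is left empty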
def filter_verbose_permutation(options):
"""Cleans verbose information."""
return {k: options[k] for k in options if not re.match('^[v]+$', k)}
def title(word):
"""Formats title."""
return ' '.join(x.capitalize() or '_' for x in word.split('_'))
def abs_path(path):
"""Return absolute path."""
if path:
return os.path.abspath(path)
def camelize(string):
"""Formats string as camel-case."""
# NOTE(retr0h): Taken from jpvanhal/inflection
# https://github.com/jpvanhal/inflection
return re.sub(r"(?:^|_)(.)", lambda m: m.group(1).upper(), string)
def underscore(string):
"""Formats string to underlined notation."""
# NOTE(retr0h): Taken from jpvanhal/inflection
# https://github.com/jpvanhal/inflection
string = re.sub(r"([A-Z]+)([A-Z][a-z])", r'\1_\2', string)
string = re.sub(r"([a-z\d])([A-Z])", r'\1_\2', string)
string = string.replace("-", "_")
return string.lower()
def merge_dicts(a, b):
"""
Merges the values of b into a and returns a new dict.
This function uses the same algorithm as Ansible's `combine(recursive=True)` filter.
:param a: the target dictionary
:param b: the dictionary to import
:return: dict
"""
result = a.copy()
for k, v in b.items():
if k in a and isinstance(a[k], Mapping) and isinstance(v, Mapping):
result[k] = merge_dicts(a[k], v)
else:
result[k] = v
return result
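# Illustrative behaviour: nested mappings are merged recursively and scalar values
# from ``b`` win over those in ``a``.
#
#   merge_dicts({'x': {'a': 1}, 'y': 1}, {'x': {'b': 2}, 'y': 2})
#   # -> {'x': {'a': 1, 'b': 2}, 'y': 2}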
def validate_parallel_cmd_args(cmd_args):
"""Prevents use of options incompatible with parallel mode."""
if cmd_args.get('parallel') and cmd_args.get('destroy') == 'never':
msg = 'Combining "--parallel" and "--destroy=never" is not supported'
sysexit_with_message(msg)
def _parallelize_platforms(config, run_uuid):
def parallelize(platform):
platform['name'] = '{}-{}'.format(platform['name'], run_uuid)
return platform
return [parallelize(platform) for platform in config['platforms']]
|
the-stack_0_17831 | # Tic Tac Toe
from os import system, name
import random
import time
def screen_clear():
if name == 'nt':
_ = system('cls')
else:
_ = system('clear')
def display_board(board):
screen_clear()
print(board[1]+'|'+board[2]+'|'+board[3])
print(board[4]+'|'+board[5]+'|'+board[6])
print(board[7]+'|'+board[8]+'|'+board[9])
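# Positions as presented to the players (board indexes 1-9, left to right, top to bottom):
#   1|2|3
#   4|5|6
#   7|8|9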
def player_input():
marker = ''
while marker != 'X' and marker != 'O':
marker = input("Player 1: Choose X or O: ").upper()
if marker == 'X':
return('X', 'O')
else:
return('O', 'X')
def place_marker(board, marker, position):
board[position] = marker
def win_check(board, mark):
return((board[1] == mark and board[2] == mark and board[3] == mark)or
(board[4] == mark and board[5] == mark and board[6] == mark)or
(board[7] == mark and board[8] == mark and board[9] == mark)or
(board[1] == mark and board[4] == mark and board[7] == mark)or
(board[2] == mark and board[5] == mark and board[8] == mark)or
(board[3] == mark and board[6] == mark and board[9] == mark)or
(board[1] == mark and board[5] == mark and board[9] == mark)or
(board[3] == mark and board[5] == mark and board[7] == mark))
def choose_first():
flip = random.randint(0, 1)
if flip == 0:
return "Player 1"
else:
return "Player 2"
def space_check(board, position):
return board[position] == '.'
def full_board_check(board):
for i in range(1, 10):
if space_check(board, i):
return False
return True
def player_choice(board, turn):
position = int(input(turn + ":Choose a position: (1-9) "))
return position
def replay():
choice = input("Play Again: Type Yes or No? ")
    return choice.lower() == 'yes'
# Main Code
blank = []
print("Welcome to Tic Tac Toe")
while True:
    the_board = ['.']*10
    blank = []  # reset the list of filled positions for each new game
player1_marker, player2_marker = player_input()
turn = choose_first()
print(turn + ' will go first.')
play_game = input("Ready to play? y or n? ")
if play_game == 'y':
game_on = True
else:
game_on = False
while game_on:
if turn == 'Player 1':
display_board(the_board)
position = player_choice(the_board, turn)
if position in blank:
print("Enter unique number.This block has been filled")
time.sleep(5)
turn = 'Player 1'
else:
blank.append(position)
place_marker(the_board, player1_marker, position)
if win_check(the_board, player1_marker):
display_board(the_board)
print('Player 1 has Won!!!')
game_on = False
else:
if full_board_check(the_board):
display_board(the_board)
print("Tie Game...")
game_on = False
else:
turn = 'Player 2'
else:
display_board(the_board)
position = player_choice(the_board, turn)
if position in blank:
print("Enter unique number.This block has been filled")
time.sleep(5)
turn = 'Player 2'
else:
blank.append(position)
place_marker(the_board, player2_marker, position)
# display_board(the_board)
if win_check(the_board, player2_marker):
display_board(the_board)
print('Player 2 has Won!!!')
game_on = False
else:
if full_board_check(the_board):
display_board(the_board)
print("Tie Game...")
game_on = False
else:
turn = 'Player 1'
if not replay():
break
|
the-stack_0_17833 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2017 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from collections import namedtuple
import discord.abc
from .utils import snowflake_time, _bytes_to_base64_data, parse_time
from .enums import DefaultAvatar, RelationshipType, UserFlags, HypeSquadHouse, PremiumType, try_enum
from .errors import ClientException
from .colour import Colour
from .asset import Asset
class Profile(namedtuple('Profile', 'flags user mutual_guilds connected_accounts premium_since')):
__slots__ = ()
@property
def nitro(self):
return self.premium_since is not None
premium = nitro
def _has_flag(self, o):
v = o.value
return (self.flags & v) == v
@property
def staff(self):
return self._has_flag(UserFlags.staff)
@property
def partner(self):
return self._has_flag(UserFlags.partner)
@property
def bug_hunter(self):
return self._has_flag(UserFlags.bug_hunter)
@property
def early_supporter(self):
return self._has_flag(UserFlags.early_supporter)
@property
def hypesquad(self):
return self._has_flag(UserFlags.hypesquad)
@property
def hypesquad_houses(self):
flags = (UserFlags.hypesquad_bravery, UserFlags.hypesquad_brilliance, UserFlags.hypesquad_balance)
return [house for house, flag in zip(HypeSquadHouse, flags) if self._has_flag(flag)]
_BaseUser = discord.abc.User
class BaseUser(_BaseUser):
__slots__ = ('name', 'id', 'discriminator', 'avatar', 'bot', '_state')
def __init__(self, *, state, data):
self._state = state
self._update(data)
def __str__(self):
return '{0.name}#{0.discriminator}'.format(self)
def __eq__(self, other):
return isinstance(other, _BaseUser) and other.id == self.id
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self.id >> 22
def _update(self, data):
if self._state is not None:
data_cache = self._state.user_data_cache
else:
data_cache = ('username', 'avatar', 'bot')
self.name = data['username'] if 'username' in data_cache else None
self.id = int(data['id'])
self.discriminator = data['discriminator']
self.avatar = data['avatar'] if 'avatar' in data_cache else None
self.bot = data.get('bot', False) if 'bot' in data_cache else None
@classmethod
def _copy(cls, user):
self = cls.__new__(cls) # bypass __init__
self.name = user.name
self.id = user.id
self.discriminator = user.discriminator
self.avatar = user.avatar
self.bot = user.bot
self._state = user._state
return self
def _to_minimal_user_json(self):
return {
'name': self.name,
'id': self.id,
'avatar': self.avatar,
'discriminator': self.discriminator,
'bot': self.bot,
}
@property
def avatar_url(self):
"""Returns an :class:`Asset` for the avatar the user has.
If the user does not have a traditional avatar, an asset for
the default avatar is returned instead.
This is equivalent to calling :meth:`avatar_url_as` with
the default parameters (i.e. webp/gif detection and a size of 1024).
"""
return self.avatar_url_as(format=None, size=1024)
def is_avatar_animated(self):
"""Indicates if the user has an animated avatar."""
return bool(self.avatar and self.avatar.startswith('a_'))
def avatar_url_as(self, *, format=None, static_format='webp', size=1024):
"""Returns an :class:`Asset` for the avatar the user has.
If the user does not have a traditional avatar, an asset for
the default avatar is returned instead.
The format must be one of 'webp', 'jpeg', 'jpg', 'png' or 'gif', and
'gif' is only valid for animated avatars. The size must be a power of 2
between 16 and 4096.
Parameters
-----------
format: Optional[:class:`str`]
The format to attempt to convert the avatar to.
If the format is ``None``, then it is automatically
detected into either 'gif' or static_format depending on the
avatar being animated or not.
static_format: Optional[:class:`str`]
Format to attempt to convert only non-animated avatars to.
Defaults to 'webp'
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or ``static_format``, or
invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
"""
return Asset._from_avatar(self._state, self, format=format, static_format=static_format, size=size)
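    # Illustrative usage (hedged; the format and size values are only examples):
    #   url = user.avatar_url_as(format='png', size=256)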
@property
def default_avatar(self):
""":class:`DefaultAvatar`: Returns the default avatar for a given user. This is calculated by the user's discriminator."""
return try_enum(DefaultAvatar, int(self.discriminator) % len(DefaultAvatar))
@property
def default_avatar_url(self):
""":class:`Asset`: Returns a URL for a user's default avatar."""
return Asset(self._state, 'https://cdn.discordapp.com/embed/avatars/{}.png'.format(self.default_avatar.value))
@property
def colour(self):
""":class:`Colour`: A property that returns a colour denoting the rendered colour
for the user. This always returns :meth:`Colour.default`.
There is an alias for this named :meth:`color`.
"""
return Colour.default()
@property
def color(self):
""":class:`Colour`: A property that returns a color denoting the rendered color
for the user. This always returns :meth:`Colour.default`.
There is an alias for this named :meth:`colour`.
"""
return self.colour
@property
def mention(self):
""":class:`str`: Returns a string that allows you to mention the given user."""
return '<@{0.id}>'.format(self)
def permissions_in(self, channel):
"""An alias for :meth:`abc.GuildChannel.permissions_for`.
Basically equivalent to:
.. code-block:: python3
channel.permissions_for(self)
Parameters
-----------
channel: :class:`abc.GuildChannel`
The channel to check your permissions for.
"""
return channel.permissions_for(self)
@property
def created_at(self):
""":class:`datetime.datetime`: Returns the user's creation time in UTC.
This is when the user's Discord account was created."""
return snowflake_time(self.id)
@property
def display_name(self):
""":class:`str`: Returns the user's display name.
For regular users this is just their username, but
if they have a guild specific nickname then that
is returned instead.
"""
return self.name
def mentioned_in(self, message):
"""Checks if the user is mentioned in the specified message.
Parameters
-----------
message: :class:`Message`
The message to check if you're mentioned in.
"""
if message.mention_everyone:
return True
for user in message.mentions:
if user.id == self.id:
return True
return False
class ClientUser(BaseUser):
"""Represents your Discord user.
.. container:: operations
.. describe:: x == y
Checks if two users are equal.
.. describe:: x != y
Checks if two users are not equal.
.. describe:: hash(x)
Return the user's hash.
.. describe:: str(x)
Returns the user's name with discriminator.
Attributes
-----------
name: :class:`str`
The user's username.
id: :class:`int`
The user's unique ID.
discriminator: :class:`str`
The user's discriminator. This is given when the username has conflicts.
avatar: Optional[:class:`str`]
The avatar hash the user has. Could be None.
bot: :class:`bool`
Specifies if the user is a bot account.
verified: :class:`bool`
Specifies if the user is a verified account.
email: Optional[:class:`str`]
The email the user used when registering.
locale: Optional[:class:`str`]
The IETF language tag used to identify the language the user is using.
mfa_enabled: :class:`bool`
Specifies if the user has MFA turned on and working.
premium: :class:`bool`
Specifies if the user is a premium user (e.g. has Discord Nitro).
premium_type: :class:`PremiumType`
Specifies the type of premium a user has (e.g. Nitro or Nitro Classic). Could be None if the user is not premium.
"""
__slots__ = BaseUser.__slots__ + \
('email', 'locale', '_flags', 'verified', 'mfa_enabled',
'premium', 'premium_type', '_relationships', '__weakref__')
def __init__(self, *, state, data):
super().__init__(state=state, data=data)
self._relationships = {}
def __repr__(self):
return '<ClientUser id={0.id} name={0.name!r} discriminator={0.discriminator!r}' \
' bot={0.bot} verified={0.verified} mfa_enabled={0.mfa_enabled}>'.format(self)
def _update(self, data):
super()._update(data)
# There's actually an Optional[str] phone field as well but I won't use it
self.verified = data.get('verified', False)
self.email = data.get('email')
self.locale = data.get('locale')
self._flags = data.get('flags', 0)
self.mfa_enabled = data.get('mfa_enabled', False)
self.premium = data.get('premium', False)
self.premium_type = try_enum(PremiumType, data.get('premium_type', None))
def get_relationship(self, user_id):
"""Retrieves the :class:`Relationship` if applicable.
.. note::
This only applies to non-bot accounts.
Parameters
-----------
user_id: :class:`int`
The user ID to check if we have a relationship with them.
Returns
--------
Optional[:class:`Relationship`]
The relationship if available or ``None``.
"""
return self._relationships.get(user_id)
@property
def relationships(self):
"""List[:class:`User`]: Returns all the relationships that the user has.
.. note::
This only applies to non-bot accounts.
"""
return list(self._relationships.values())
@property
def friends(self):
r"""List[:class:`User`]: Returns all the users that the user is friends with.
.. note::
This only applies to non-bot accounts.
"""
return [r.user for r in self._relationships.values() if r.type is RelationshipType.friend]
@property
def blocked(self):
r"""List[:class:`User`]: Returns all the users that the user has blocked.
.. note::
This only applies to non-bot accounts.
"""
return [r.user for r in self._relationships.values() if r.type is RelationshipType.blocked]
async def edit(self, **fields):
"""|coro|
Edits the current profile of the client.
If a bot account is used then a password field is optional,
otherwise it is required.
.. note::
To upload an avatar, a :term:`py:bytes-like object` must be passed in that
represents the image being uploaded. If this is done through a file
then the file must be opened via ``open('some_filename', 'rb')`` and
the :term:`py:bytes-like object` is given through the use of ``fp.read()``.
The only image formats supported for uploading is JPEG and PNG.
Parameters
-----------
password: :class:`str`
The current password for the client's account.
Only applicable to user accounts.
new_password: :class:`str`
The new password you wish to change to.
Only applicable to user accounts.
email: :class:`str`
The new email you wish to change to.
Only applicable to user accounts.
house: Optional[:class:`HypeSquadHouse`]
The hypesquad house you wish to change to.
Could be ``None`` to leave the current house.
Only applicable to user accounts.
username: :class:`str`
The new username you wish to change to.
avatar: :class:`bytes`
A :term:`py:bytes-like object` representing the image to upload.
Could be ``None`` to denote no avatar.
Raises
------
HTTPException
Editing your profile failed.
InvalidArgument
Wrong image format passed for ``avatar``.
ClientException
Password is required for non-bot accounts.
House field was not a HypeSquadHouse.
"""
try:
avatar_bytes = fields['avatar']
except KeyError:
avatar = self.avatar
else:
if avatar_bytes is not None:
avatar = _bytes_to_base64_data(avatar_bytes)
else:
avatar = None
not_bot_account = not self.bot
password = fields.get('password')
if not_bot_account and password is None:
raise ClientException('Password is required for non-bot accounts.')
args = {
'password': password,
'username': fields.get('username', self.name),
'avatar': avatar
}
if not_bot_account:
args['email'] = fields.get('email', self.email)
if 'new_password' in fields:
args['new_password'] = fields['new_password']
http = self._state.http
if 'house' in fields:
house = fields['house']
if house is None:
await http.leave_hypesquad_house()
elif not isinstance(house, HypeSquadHouse):
raise ClientException('`house` parameter was not a HypeSquadHouse')
else:
value = house.value
await http.change_hypesquad_house(value)
data = await http.edit_profile(**args)
if not_bot_account:
self.email = data['email']
try:
http._token(data['token'], bot=False)
except KeyError:
pass
self._update(data)
async def create_group(self, *recipients):
r"""|coro|
Creates a group direct message with the recipients
provided. These recipients must be have a relationship
of type :attr:`RelationshipType.friend`.
.. note::
This only applies to non-bot accounts.
Parameters
-----------
\*recipients: :class:`User`
An argument :class:`list` of :class:`User` to have in
your group.
Raises
-------
HTTPException
Failed to create the group direct message.
ClientException
Attempted to create a group with only one recipient.
This does not include yourself.
Returns
-------
:class:`GroupChannel`
The new group channel.
"""
from .channel import GroupChannel
if len(recipients) < 2:
raise ClientException('You must have two or more recipients to create a group.')
users = [str(u.id) for u in recipients]
data = await self._state.http.start_group(self.id, users)
return GroupChannel(me=self, data=data, state=self._state)
async def edit_settings(self, **kwargs):
"""|coro|
Edits the client user's settings.
.. note::
This only applies to non-bot accounts.
Parameters
-------
afk_timeout: :class:`int`
How long (in seconds) the user needs to be AFK until Discord
sends push notifications to your mobile device.
animate_emojis: :class:`bool`
Whether or not to animate emojis in the chat.
convert_emoticons: :class:`bool`
Whether or not to automatically convert emoticons into emojis.
e.g. :-) -> 😃
default_guilds_restricted: :class:`bool`
Whether or not to automatically disable DMs between you and
members of new guilds you join.
detect_platform_accounts: :class:`bool`
Whether or not to automatically detect accounts from services
like Steam and Blizzard when you open the Discord client.
developer_mode: :class:`bool`
Whether or not to enable developer mode.
disable_games_tab: :class:`bool`
Whether or not to disable the showing of the Games tab.
enable_tts_command: :class:`bool`
Whether or not to allow tts messages to be played/sent.
explicit_content_filter: :class:`UserContentFilter`
The filter for explicit content in all messages.
friend_source_flags: :class:`FriendFlags`
Who can add you as a friend.
gif_auto_play: :class:`bool`
Whether or not to automatically play gifs that are in the chat.
guild_positions: List[:class:`abc.Snowflake`]
A list of guilds in order of the guild/guild icons that are on
the left hand side of the UI.
inline_attachment_media: :class:`bool`
Whether or not to display attachments when they are uploaded in chat.
inline_embed_media: :class:`bool`
Whether or not to display videos and images from links posted in chat.
locale: :class:`str`
The :rfc:`3066` language identifier of the locale to use for the language
of the Discord client.
message_display_compact: :class:`bool`
Whether or not to use the compact Discord display mode.
render_embeds: :class:`bool`
Whether or not to render embeds that are sent in the chat.
render_reactions: :class:`bool`
Whether or not to render reactions that are added to messages.
restricted_guilds: List[:class:`abc.Snowflake`]
A list of guilds that you will not receive DMs from.
show_current_game: :class:`bool`
Whether or not to display the game that you are currently playing.
status: :class:`Status`
The clients status that is shown to others.
theme: :class:`Theme`
The theme of the Discord UI.
timezone_offset: :class:`int`
The timezone offset to use.
Raises
-------
HTTPException
Editing the settings failed.
Forbidden
The client is a bot user and not a user account.
Returns
-------
:class:`dict`
The client user's updated settings.
"""
payload = {}
content_filter = kwargs.pop('explicit_content_filter', None)
if content_filter:
payload.update({'explicit_content_filter': content_filter.value})
friend_flags = kwargs.pop('friend_source_flags', None)
if friend_flags:
dicts = [{}, {'mutual_guilds': True}, {'mutual_friends': True},
{'mutual_guilds': True, 'mutual_friends': True}, {'all': True}]
payload.update({'friend_source_flags': dicts[friend_flags.value]})
guild_positions = kwargs.pop('guild_positions', None)
if guild_positions:
guild_positions = [str(x.id) for x in guild_positions]
payload.update({'guild_positions': guild_positions})
restricted_guilds = kwargs.pop('restricted_guilds', None)
if restricted_guilds:
restricted_guilds = [str(x.id) for x in restricted_guilds]
payload.update({'restricted_guilds': restricted_guilds})
status = kwargs.pop('status', None)
if status:
payload.update({'status': status.value})
theme = kwargs.pop('theme', None)
if theme:
payload.update({'theme': theme.value})
payload.update(kwargs)
data = await self._state.http.edit_settings(**payload)
return data
class User(BaseUser, discord.abc.Messageable):
"""Represents a Discord user.
.. container:: operations
.. describe:: x == y
Checks if two users are equal.
.. describe:: x != y
Checks if two users are not equal.
.. describe:: hash(x)
Return the user's hash.
.. describe:: str(x)
Returns the user's name with discriminator.
Attributes
-----------
name: :class:`str`
The user's username.
id: :class:`int`
The user's unique ID.
discriminator: :class:`str`
The user's discriminator. This is given when the username has conflicts.
avatar: Optional[:class:`str`]
The avatar hash the user has. Could be None.
bot: :class:`bool`
Specifies if the user is a bot account.
"""
__slots__ = BaseUser.__slots__ + ('__weakref__',)
def __repr__(self):
return '<User id={0.id} name={0.name!r} discriminator={0.discriminator!r} bot={0.bot}>'.format(self)
async def _get_channel(self):
ch = await self.create_dm()
return ch
@property
def dm_channel(self):
"""Optional[:class:`DMChannel`]: Returns the channel associated with this user if it exists.
If this returns ``None``, you can create a DM channel by calling the
:meth:`create_dm` coroutine function.
"""
return self._state._get_private_channel_by_user(self.id)
async def create_dm(self):
"""Creates a :class:`DMChannel` with this user.
This should be rarely called, as this is done transparently for most
people.
"""
found = self.dm_channel
if found is not None:
return found
state = self._state
data = await state.http.start_private_message(self.id)
return state.add_dm_channel(data)
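    # Illustrative usage inside a coroutine (hedged sketch):
    #   channel = await user.create_dm()
    #   await channel.send('hello')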
@property
def relationship(self):
"""Returns the :class:`Relationship` with this user if applicable, ``None`` otherwise.
.. note::
This only applies to non-bot accounts.
"""
return self._state.user.get_relationship(self.id)
async def mutual_friends(self):
"""|coro|
Gets all mutual friends of this user.
.. note::
This only applies to non-bot accounts.
Raises
-------
Forbidden
Not allowed to get mutual friends of this user.
HTTPException
Getting mutual friends failed.
Returns
-------
List[:class:`User`]
The users that are mutual friends.
"""
state = self._state
mutuals = await state.http.get_mutual_friends(self.id)
return [User(state=state, data=friend) for friend in mutuals]
def is_friend(self):
"""Checks if the user is your friend.
.. note::
This only applies to non-bot accounts.
"""
r = self.relationship
if r is None:
return False
return r.type is RelationshipType.friend
def is_blocked(self):
"""Checks if the user is blocked.
.. note::
This only applies to non-bot accounts.
"""
r = self.relationship
if r is None:
return False
return r.type is RelationshipType.blocked
async def block(self):
"""|coro|
Blocks the user.
.. note::
This only applies to non-bot accounts.
Raises
-------
Forbidden
Not allowed to block this user.
HTTPException
Blocking the user failed.
"""
await self._state.http.add_relationship(self.id, type=RelationshipType.blocked.value)
async def unblock(self):
"""|coro|
Unblocks the user.
.. note::
This only applies to non-bot accounts.
Raises
-------
Forbidden
Not allowed to unblock this user.
HTTPException
Unblocking the user failed.
"""
await self._state.http.remove_relationship(self.id)
async def remove_friend(self):
"""|coro|
Removes the user as a friend.
.. note::
This only applies to non-bot accounts.
Raises
-------
Forbidden
Not allowed to remove this user as a friend.
HTTPException
Removing the user as a friend failed.
"""
await self._state.http.remove_relationship(self.id)
async def send_friend_request(self):
"""|coro|
Sends the user a friend request.
.. note::
This only applies to non-bot accounts.
Raises
-------
Forbidden
Not allowed to send a friend request to the user.
HTTPException
Sending the friend request failed.
"""
await self._state.http.send_friend_request(username=self.name, discriminator=self.discriminator)
async def profile(self):
"""|coro|
Gets the user's profile.
.. note::
This only applies to non-bot accounts.
Raises
-------
Forbidden
Not allowed to fetch profiles.
HTTPException
Fetching the profile failed.
Returns
--------
:class:`Profile`
The profile of the user.
"""
state = self._state
data = await state.http.get_user_profile(self.id)
def transform(d):
return state._get_guild(int(d['id']))
since = data.get('premium_since')
mutual_guilds = list(filter(None, map(transform, data.get('mutual_guilds', []))))
return Profile(flags=data['user'].get('flags', 0),
premium_since=parse_time(since),
mutual_guilds=mutual_guilds,
user=self,
connected_accounts=data['connected_accounts'])
|
the-stack_0_17834 | __author__ = 'a_medelyan'
import os
# class to hold our test instance (document plus its correct manual keywords)
class TestDoc:
def __init__(self, name):
self.name = name
self.text = ''
self.keywords = []
# reading documents and their keywords from a directory
def read_data(input_dir):
test_set = {}
for doc in os.listdir(input_dir):
file_reader = open(os.path.join(input_dir,doc), 'r')
file_name = doc[:-4]
if file_name not in test_set:
d = TestDoc(file_name)
else:
d = test_set[file_name]
if not doc.endswith(".txt"):
continue
# get document text
text = file_reader.read()
d.text = text
# get document keywords
file_reader = open(os.path.join(input_dir,file_name + ".key"), 'r')
manual_keywords = file_reader.read()
for line in manual_keywords.split('\n'):
line = line.rstrip().lower()
if len(line) > 0:
if '\t' in line:
d.keywords.append(line[0:line.find('\t')])
else:
d.keywords.append(line)
# add document to test set
test_set[file_name] = d
return test_set |
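# Illustrative usage (the corpus directory is hypothetical):
#   test_set = read_data('data/keyword_corpus')
#   for name, doc in test_set.items():
#       print(name, len(doc.keywords), 'manual keywords')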
the-stack_0_17835 | """Fltabledb module."""
# -*- coding: utf-8 -*-
from PyQt6 import QtCore, QtGui, QtWidgets # type: ignore[import]
from pineboolib.application.database import pnsqlcursor
from pineboolib.application.metadata import pnfieldmetadata, pnrelationmetadata
from pineboolib.application.qsatypes import sysbasetype
from pineboolib.q3widgets import qtable
from pineboolib.core.utils import utils_base
from pineboolib.core import decorators, settings
from pineboolib import application
from pineboolib import logging
from . import (
fldatatable,
flformsearchdb,
flutil,
flformrecorddb,
fldoublevalidator,
fluintvalidator,
flintvalidator,
flcheckbox,
fltimeedit,
fldateedit,
flspinbox,
)
from pineboolib.fllegacy.aqsobjects import aqods
from typing import Any, Optional, List, Union, cast, TYPE_CHECKING
if TYPE_CHECKING:
from pineboolib.interfaces import isqlcursor # pragma: no cover
from pineboolib.application.metadata import pntablemetadata # pragma: no cover
LOGGER = logging.get_logger(__name__)
DEBUG = False
class FLTableDB(QtWidgets.QWidget):
"""
    PLUGIN that contains a database table.
    This object contains everything needed to handle
    the data in a table. It also provides the ability to
    search the table by a field, using filters.
    For this plugin to be functional, one of its
    parents or ancestors must be an FLFormDB object.
@author InfoSiAL S.L.
"""
"""
    Filter condition types
"""
_all: int = 0
_contains: int = 1
_starts: int = 2
_end: int = 3
_equal: int = 4
_dist: int = 5
_greater: int = 6
_less: int = 7
_from_to: int = 8
_null: int = 9
_not_null: int = 10
_parent: "QtWidgets.QWidget"
_name: str
_tdb_filter: Optional[Any]
_pb_data: "QtWidgets.QPushButton"
_pb_filter: "QtWidgets.QPushButton"
_pb_odf: "QtWidgets.QPushButton"
_combo_box_field_to_search_1: "QtWidgets.QComboBox"
_combo_box_field_to_search_2: "QtWidgets.QComboBox"
_line_edit_search: "QtWidgets.QLineEdit"
_tab_data_layout: "QtWidgets.QVBoxLayout"
_tab_control_layout: "QtWidgets.QHBoxLayout"
_data_layout: "QtWidgets.QHBoxLayout"
_tab_data: "QtWidgets.QFrame"
_tab_filter: "QtWidgets.QFrame"
_buttons_layout: "QtWidgets.QVBoxLayout"
_master_layout: "QtWidgets.QVBoxLayout"
_tab_filter_loader: bool
_loaded: bool
"""
    Default icon size
"""
_icon_size: Optional[Any]
"""
    Component used to display the records
"""
_table_records: Optional["fldatatable.FLDataTable"]
"""
    Name of the table this component is associated with.
"""
_table_name: Optional[str]
"""
    Name of the foreign field
"""
_foreign_field: Optional[str]
"""
    Name of the relation field
"""
_field_relation: Optional[str]
"""
    Cursor holding the source data for the component
"""
cursor_: "isqlcursor.ISqlCursor"
"""
    Internal auxiliary cursor used to store the records of the table
    related to the source one
"""
_cursor_aux: Optional["isqlcursor.ISqlCursor"]
"""
    Keeps a reference to the parent window
"""
_top_widget: Optional["QtWidgets.QWidget"]
"""
    Indicates that the window has already been shown once
"""
_showed: bool
"""
    Holds the table filter
"""
"""
    Stores whether the component is in read-only mode
"""
_read_only: bool
_req_read_only: bool
"""
    Stores whether the component is in edit-only mode
"""
_edit_only: bool
_req_edit_only: bool
"""
    Indicates whether the component only allows adding records (insert-only mode)
"""
_insert_only: bool
_req_insert_only: bool
"""
    Indicates that no database operations are performed (opening forms). "Table only" mode.
"""
_only_table: bool
_req_only_table: bool
"""
    Stores the metadata of the field the table is currently sorted by
"""
_sort_field_1: Optional["pnfieldmetadata.PNFieldMetaData"]
"""
    Stores the metadata of the field the table is currently sorted by, as a secondary criterion
    @author Silix - dpinelo
"""
_sort_field_2: Optional["pnfieldmetadata.PNFieldMetaData"]
"""
    Internal timer
"""
_timer: Optional["QtCore.QTimer"]
"""
    Initial search filter
"""
_init_search: Optional[str]
"""
    Indicates that the selection column is enabled
"""
_check_column_enabled: bool
"""
    Header label text for the selection column
"""
_alias_check_column: str
"""
    Name used to create a pseudo-field in the cursor for the selection column
"""
_field_name_check_column: str
"""
    Indicates that the selection column is visible
"""
_check_column_visible: bool
"""
    Number of the column used to sort the records
"""
_sort_column_1: int
"""
    Number of the column used to sort the records
    @author Silix - dpinelo
"""
_sort_column_2: int
"""
    Number of the column used to sort the records
    @author Silix
"""
_sort_column_3: int
"""
    Indicates whether the current sort order of the records is ascending or descending
"""
_order_asc_1: bool
"""
    Indicates whether the current sort order of the records is ascending or descending
    @author Silix - dpinelo
"""
_order_asc_2: bool
"""
    Indicates whether the current sort order of the records is ascending or descending
    @author Silix
"""
_order_asc_3: bool
"""
    Indicates whether the first column should automatically be set as the sort column
"""
_auto_sort_column: bool
"""
    Stores the last filter clause applied on refresh
"""
_tdb_filter_last_where: str
"""
    Dictionary mapping the descriptive literals of a filter condition
    to their enumeration
"""
_map_cond_type: List[str]
"""
    Indicates whether the search frame is hidden
"""
_find_hidden: bool
"""
    Indicates whether the frame for switching between data and filter is hidden
"""
_filter_hidden: bool
"""
    Indicates whether pixmap-type fields are shown on all rows
"""
_show_all_pixmaps: bool
"""
    Name of the script function to invoke to obtain the colour and style of rows and cells.
    The function name must take the form 'object.function_name' or 'function_name';
    in the second case, where no 'object' is specified, the name of the form in which the
    FLTableDB component is initialised is automatically prepended, followed by a dot.
    This way, if the same form is used for several actions, e.g. master.ui, we can control
    whether a different colour function is used for each action (different form names) or
    a single common function for all actions.
    E.g. setting 'tdbGetColor': if the component is initialised in the customers master form,
    'formclientes.tdbGetColor' is used; if it is initialised in the suppliers master form,
    'formproveedores.tdbGetColor' is used, and so on. If we set 'flfactppal.tdbGetColor', that
    function is always called regardless of the form in which the component is initialised.
    When a cell is being painted, that function is called with five parameters:
    - Name of the field corresponding to the cell
    - Value of the cell's field
    - Cursor of the table positioned on the record corresponding to the row being
    painted. WARNING: at this point the buffer values are undefined; refreshBuffer is not called
    for efficiency reasons
    - Type of the field, see flutil.FLUtilInterface::Type in FLObjectFactory.h
    - Selected. If TRUE, the cell to paint is in the highlighted/selected row.
    Cells in the selected row are usually coloured differently from the rest.
    The function must return an array with four strings;
    [ "background_colour", "pen_colour", "background_style", "pen_style" ]
    For the first two, the colour, any value accepted by QColor::setNamedColor can be used, for example;
    "green"
    "#44ADDB"
    For the last two, the style, the values accepted by QBrush::setStyle and QPen::setStyle can be used;
    see the nametoBrushStyle and nametoPenStyle functions in fldatatable.FLDataTable.cpp, for example;
    "SolidPattern"
    "DiagCrossPattern"
    "DotLine"
    "SolidLine"
    If any of the array values is empty "", the default colours or styles are used.
"""
_function_get_color: Optional[str]
"""
    Fake editor
"""
_fake_editor: Optional[Any] = None
_tabledb_filter_records_function_name: Optional[str]
def __init__(self, parent: Optional["QtWidgets.QWidget"] = None, name: str = "") -> None:
"""
Inicialize.
"""
if parent is None:
return
super().__init__(parent)
self._top_widget = parent
self._table_records = None
self._table_name = None
self._foreign_field = None
self._field_relation = None
self._cursor_aux = None
self._show_all_pixmaps = True
self._showed = False
self._filter = ""
self._sort_column_1 = 0
self._sort_column_2 = 1
self._sort_column_3 = 2
self._sort_field_1 = None
self._init_search = None
self._auto_sort_column = True
self._order_asc_1 = True
self._order_asc_2 = True
self._order_asc_3 = True
self._read_only = False
self._edit_only = False
self._only_table = False
self._insert_only = False
self._req_read_only = False
self._req_edit_only = False
self._req_insert_only = False
self._req_only_table = False
self._tab_filter_loader = False
self._field_name_check_column = ""
self._alias_check_column = ""
self._timer_1 = QtCore.QTimer(self)
if name:
self.setObjectName(name)
self._check_column_visible = False
self._check_column_enabled = False
self._tdb_filter_last_where = ""
self._icon_size = []
self._icon_size = application.PROJECT.DGI.icon_size()
self._tab_control_layout = QtWidgets.QHBoxLayout()
        self._tab_filter = QtWidgets.QFrame()  # contains the filter controls
        self._tab_filter.setObjectName("tdbFilter")
        self._tab_data = QtWidgets.QFrame()  # contains the data table
self._tab_data.setObjectName("tabTable")
self._function_get_color = None
from . import flformdb
while not isinstance(self._top_widget, flformdb.FLFormDB):
self._top_widget = self._top_widget.parentWidget()
if not self._top_widget:
break
self._loaded = False
self.createFLTableDBWidget()
# def __getattr__(self, name):
# return DefFun(self, name)
def load(self) -> None:
"""Initialize the cursor and controls."""
# Es necesario pasar a modo interactivo lo antes posible
# Sino, creamos un bug en el cierre de ventana: se recarga toda la tabla para saber el tamaño
# print("FLTableDB(%s): setting columns in interactive mode" % self._tableName))
if self.loaded():
return
if self._top_widget is not None:
if not self._top_widget.cursor():
LOGGER.warning(
"FLTableDB : Uno de los padres o antecesores de FLTableDB deber ser de la clase FLFormDB o heredar de ella"
)
return
self.cursor_ = cast(pnsqlcursor.PNSqlCursor, self._top_widget.cursor())
self.initCursor()
# self.setFont(QtWidgets.QApplication.font())
if not self.objectName():
self.setObjectName("FLTableDB")
self._timer = QtCore.QTimer(self)
self._timer.timeout.connect(self.refreshDelayed) # type: ignore [attr-defined] # noqa: F821
        # FIXME: The problem of the wrong record appearing when editing comes from initCursor() being loaded twice.
        # ...... When showWidget is launched and it has _initCursorWhenLoad, it runs initCursor and then runs it again.
        # ...... This double load causes the error and leaves the original cursor in the form.
self._map_cond_type = []
self._loaded = True
self.showWidget()
if DEBUG:
LOGGER.warning(
"**FLTableDB::name: %r cursor: %r", self.objectName(), self.cursor().curName()
)
def loaded(self) -> bool:
"""Return if the control is inicilized."""
return self._loaded
def initCursor(self) -> None:
"""
Start the cursor according to this field either from the source table or from a related table.
"""
if not self._top_widget or not hasattr(self, "cursor_"):
return
if not self.cursor().private_cursor.metadata_:
return
table_metadata: Optional["pntablemetadata.PNTableMetaData"] = self.cursor().metadata()
if self._sort_field_1 is None:
if table_metadata is not None:
self._sort_field_1 = table_metadata.field(table_metadata.primaryKey())
own_table_metadata = None
if self._table_name:
if DEBUG:
LOGGER.warning(
"**FLTableDB::name: %r tableName: %r", self.objectName(), self._table_name
)
if not self.cursor().db().connManager().manager().existsTable(self._table_name):
own_table_metadata = True
table_metadata = (
self.cursor().db().connManager().manager().createTable(self._table_name)
)
else:
own_table_metadata = True
manager_tmd = self.cursor().db().connManager().manager().metadata(self._table_name)
if not manager_tmd or isinstance(manager_tmd, bool):
return
table_metadata = manager_tmd
# if table_metadata is None:
# return
if not self._foreign_field or not self._field_relation:
if not self.cursor().metadata():
if (
own_table_metadata
and table_metadata is not None
and not table_metadata.inCache()
):
del table_metadata
return
if not self.cursor().metadata().name() == self._table_name:
ctxt = self.cursor().context()
self.cursor_ = pnsqlcursor.PNSqlCursor(
self._table_name,
True,
self.cursor().db().connectionName(),
None,
None,
self,
)
if self.cursor():
self.cursor().setContext(ctxt)
self._cursor_aux = None
if own_table_metadata and table_metadata and not table_metadata.inCache():
del table_metadata
return
else:
cursor_top_widget = cast(pnsqlcursor.PNSqlCursor, self._top_widget.cursor())
if cursor_top_widget and cursor_top_widget.metadata().name() != self._table_name:
self.cursor_ = cursor_top_widget
if (
not self._table_name
or not self._foreign_field
or not self._field_relation
or self._cursor_aux
):
if own_table_metadata and table_metadata and not table_metadata.inCache():
del table_metadata
return
self._cursor_aux = self.cursor()
cursor_name = self.cursor().metadata().name()
relation_metadata = (
self.cursor()
.metadata()
.relation(self._foreign_field, self._field_relation, self._table_name)
)
test_m1 = (
table_metadata.relation(self._field_relation, self._foreign_field, cursor_name)
if table_metadata is not None
else None
)
check_integrity = False
if not relation_metadata:
if test_m1:
if test_m1.cardinality() == pnrelationmetadata.PNRelationMetaData.RELATION_M1:
check_integrity = True
field_metadata = self.cursor().metadata().field(self._foreign_field)
if field_metadata is not None:
tmd_aux_ = self.cursor().db().connManager().manager().metadata(self._table_name)
if not tmd_aux_ or tmd_aux_.isQuery():
check_integrity = False
if tmd_aux_ and not tmd_aux_.inCache():
del tmd_aux_
relation_metadata = pnrelationmetadata.PNRelationMetaData(
self._table_name,
self._field_relation,
pnrelationmetadata.PNRelationMetaData.RELATION_1M,
False,
False,
check_integrity,
)
field_metadata.addRelationMD(relation_metadata)
LOGGER.warning(
"FLTableDB : La relación entre la tabla del formulario %s y esta tabla %s de este campo no existe, "
"pero sin embargo se han indicado los campos de relación( %s, %s )",
cursor_name,
self._table_name,
self._field_relation,
self._foreign_field,
)
LOGGER.trace(
"FLTableDB : Creando automáticamente %s.%s --1M--> %s.%s",
cursor_name,
self._foreign_field,
self._table_name,
self._field_relation,
)
else:
LOGGER.warning(
"FLTableDB : El campo ( %s ) indicado en la propiedad foreignField no se encuentra en la tabla ( %s )",
self._foreign_field,
cursor_name,
)
pass
relation_metadata = test_m1
if not relation_metadata and table_metadata is not None:
field_metadata = table_metadata.field(self._field_relation)
if field_metadata is not None:
relation_metadata = pnrelationmetadata.PNRelationMetaData(
cursor_name,
self._foreign_field,
pnrelationmetadata.PNRelationMetaData.RELATION_1M,
False,
False,
False,
)
field_metadata.addRelationMD(relation_metadata)
if DEBUG:
LOGGER.trace(
"FLTableDB : Creando automáticamente %s.%s --1M--> %s.%s",
self._table_name,
self._field_relation,
cursor_name,
self._foreign_field,
)
else:
if DEBUG:
LOGGER.warning(
"FLTableDB : El campo ( %s ) indicado en la propiedad fieldRelation no se encuentra en la tabla ( %s )",
self._field_relation,
self._table_name,
)
self.cursor_ = pnsqlcursor.PNSqlCursor(
self._table_name,
True,
self.cursor().db().connectionName(),
self._cursor_aux,
relation_metadata,
self,
)
if not self.cursor():
self.cursor_ = self._cursor_aux
self._cursor_aux = None
else:
self.cursor().setContext(self._cursor_aux.context())
if self._showed:
try:
self._cursor_aux.newBuffer.disconnect(self.refresh)
except Exception:
pass
self._cursor_aux.newBuffer.connect(self.refresh)
        # If there is a cursor_top_widget, do not overwrite _top_widget's cursor
if (
self._cursor_aux
and isinstance(self._top_widget, flformsearchdb.FLFormSearchDB)
and not cursor_top_widget
):
self._top_widget.setWindowTitle(self.cursor().metadata().alias())
self._top_widget.setCursor(self.cursor())
if own_table_metadata or table_metadata and not table_metadata.inCache():
del table_metadata
def cursor(self) -> "isqlcursor.ISqlCursor": # type: ignore [override] # noqa F821
"""
Return the cursor used by the component.
return pnsqlcursor.PNSqlCursor object with the cursor containing the records to be used in the form
"""
# if not self.cursor().buffer():
# self.cursor().refreshBuffer()
return self.cursor_
def tableName(self) -> str:
"""
Return the name of the associated table.
@return Name of the associated table
"""
if not self._table_name:
raise Exception("_table_name is empty!")
return self._table_name
def setTableName(self, table_name: str) -> None:
"""
Set the name of the associated table.
@param table_name Name of the table
"""
self._table_name = table_name
if self._top_widget:
self.initCursor()
else:
self.initFakeEditor()
def foreignField(self) -> Optional[str]:
"""
Return the name of the foreign field.
@return Field Name
"""
return self._foreign_field
def setForeignField(self, foreign_field: str) -> None:
"""
Set the name of the foreign field.
@param foreign_field Name of the associated field.
"""
self._foreign_field = foreign_field
if self._top_widget:
self.initCursor()
else:
self.initFakeEditor()
def fieldRelation(self) -> Optional[str]:
"""
Return the name of the related field.
@return Field Name
"""
return self._field_relation
def setFieldRelation(self, field_name: str) -> None:
"""
To set the name of the related field.
@param field_name Field name
"""
self._field_relation = field_name
if self._top_widget:
self.initCursor()
else:
self.initFakeEditor()
def setReadOnly(self, mode: bool) -> None:
"""
Set if the component is in read-only mode or not.
"""
if self._table_records:
self._read_only = mode
self._table_records.setFLReadOnly(mode)
self.readOnlyChanged.emit(mode)
self._req_read_only = mode
def readOnly(self) -> bool:
"""Return if the control is in read only mode."""
return self._req_read_only
def setEditOnly(self, mode: bool) -> None:
"""
Set if the component is in edit only mode or not.
"""
if self._table_records:
self._edit_only = mode
self._table_records.setEditOnly(mode)
self.editOnlyChanged.emit(mode)
self._req_edit_only = mode
def editOnly(self) -> bool:
"""Return if the control is in edit only mode."""
return self._req_edit_only
def setInsertOnly(self, mode: bool) -> None:
"""
Set the component to insert only or not.
"""
if self._table_records:
self._insert_only = mode
self._table_records.setInsertOnly(mode)
self.insertOnlyChanged.emit(mode)
self._req_insert_only = mode
def insertOnly(self) -> bool:
"""Return if the control is in insert only mode."""
return self._req_insert_only
def setInitSearch(self, init_search: str) -> None:
"""
Set the initial search filter.
"""
self._init_search = init_search
@decorators.beta_implementation
def setOrderCols(self, fields: List[str]):
"""
Set the order of the columns in the table.
@param fields List of the names of the fields sorted as you wish them to appear in the table from left to right
"""
if not self.cursor() or not self._table_records:
return
table_metadata = self.cursor().metadata()
if not table_metadata:
return
if not self._showed:
self.showWidget()
fields_list: List[str] = []
for num, field_name in enumerate(fields):
field_metadata = table_metadata.field(field_name)
if field_metadata is not None:
if field_metadata.visibleGrid():
fields_list.append(field_name)
if len(fields_list) > self.cursor().model().columnCount():
return
_index = self._table_records.logical_index_to_visual_index(
self._table_records.column_name_to_column_index(field_name)
)
self.moveCol(_index, num)
if not self._line_edit_search:
raise Exception("_line_edit_search is not defined!")
self.setSortOrder(True)
text_search = self._line_edit_search.text()
self.refresh(True)
if text_search:
self.refresh(False, True)
try:
self._line_edit_search.textChanged.disconnect( # type: ignore [attr-defined] # noqa: F821
self.filterRecords
)
except Exception:
pass
self._line_edit_search.setText(text_search)
self._line_edit_search.textChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.filterRecords
)
self._line_edit_search.selectAll()
# self.seekCursor()
QtCore.QTimer.singleShot(0, self._table_records.ensureRowSelectedVisible)
else:
self.refreshDelayed()
@decorators.beta_implementation
def orderCols(self) -> List[str]:
"""
Return the list of fields sorted by their columns in the table from left to right.
"""
list_: List[str] = []
if not self.cursor():
return list_
table_metadata = self.cursor().metadata()
if not table_metadata:
return list_
if not self._showed:
self.showWidget()
model = self.cursor().model()
if model:
if not self._table_records:
raise Exception("_table_records is not defined!")
for column in range(model.columnCount()):
alias_ = self._table_records.model().headerData(
self._table_records.visual_index_to_metadata_index(column),
QtCore.Qt.Orientation.Horizontal,
QtCore.Qt.ItemDataRole.DisplayRole,
)
list_.append(table_metadata.fieldAliasToName(alias_) or "")
return list_
def setFilter(self, filter: str) -> None:
"""
Set the table filter.
@param filter Where statement setting the filter
"""
self._filter = filter
def filter(self) -> str:
"""
Return the table filter.
@return Filter
"""
return self._filter
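    # Illustrative usage from a form script (hedged; assumes `table_db` is an FLTableDB
    # instance on a loaded form and the filter expression is only an example):
    #   table_db.setFilter("codcliente = '000001'")
    #   table_db.refresh()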
def findFilter(self) -> Optional[str]:
"""
Return the filter of the table imposed in the Find.
@return Filter
"""
return self._tdb_filter_last_where
def checkColumnEnabled(self) -> bool:
"""
Return if the selection column is activated.
"""
return self._check_column_enabled
def setCheckColumnEnabled(self, value: bool) -> None:
"""
Set the activation status of the selection column.
The change of status will not be effective until the next refresh.
"""
self._check_column_enabled = value
@decorators.beta_implementation
def aliasCheckColumn(self) -> Optional[str]:
"""
Obtain the header label text for the selection column.
"""
if not self._table_records:
raise Exception("_table_records is not defined!")
return self._table_records.model().headerData(
# self._table_records.selectionModel().selectedColumns(),
self._table_records.currentColumn(),
QtCore.Qt.Orientation.Horizontal,
QtCore.Qt.ItemDataRole.DisplayRole,
)
def setAliasCheckColumn(self, alias: str) -> None:
"""
Set the text of the header tag for the selection column.
The change of the label text will not be effective until the next refresh
"""
self._alias_check_column = alias
def findHidden(self) -> bool:
"""
Get if the search frame is hidden.
"""
return self._find_hidden
@decorators.deprecated
def setFindHidden(self, value: bool) -> None:
"""
Hide or show the search frame.
@param h TRUE hides it, FALSE shows it.
"""
# if self._find_hidden is not h:
# self._find_hidden = h
# if h:
# self._tab_control_layout.hide()
# else:
# self._tab_control_layout.show()
pass
def filterHidden(self) -> bool:
"""
Return if the frame for switching between data and filter is hidden.
"""
return self._filter_hidden
@decorators.deprecated
def setFilterHidden(self, value: bool) -> None:
"""
Hide or show the frame to switch between data and filter.
@param value TRUE hides it, FALSE shows it
"""
# if self._filter_hidden is not h:
# self._filter_hidden = h
# if h:
# self._tab_filter.hide()
# else:
# self._tab_filter.show()
pass
def showAllPixmaps(self) -> bool:
"""
Return if images of unselected lines are displayed.
"""
return self._show_all_pixmaps
def setShowAllPixmaps(self, value: bool) -> None:
"""
Set if images of unselected lines are displayed.
"""
self._show_all_pixmaps = value
def functionGetColor(self) -> Optional[str]:
"""
Return the function that calculates the color of the cell.
"""
return self._function_get_color
def setFunctionGetColor(self, function_get_color: str) -> None:
"""
Set the function that calculates the color of the cell.
"""
self._function_get_color = function_get_color
# if self._table_records is not None:
# self.tableRecords().setFunctionGetColor("%s.%s" % (self._top_widget.name(), f))
def setFilterRecordsFunction(self, function_filter_record: str) -> None:
"""
Assign the function name to call when the filter changes.
"""
self._tabledb_filter_records_function_name = function_filter_record
def setOnlyTable(self, value: bool = True) -> None:
"""
Enable table only mode.
"""
if self._table_records:
self._only_table = value
self._table_records.setOnlyTable(value)
self._req_only_table = value
def onlyTable(self) -> bool:
"""
Return if the control is in table only mode.
"""
return self._req_only_table
@decorators.not_implemented_warn
def setAutoSortColumn(self, value: bool = True):
"""
Set auto sort mode.
"""
self._auto_sort_column = value
def autoSortColumn(self) -> bool:
"""Return if auto sort mode is enabled."""
return self._auto_sort_column
def eventFilter(self, obj_: "QtCore.QObject", event: "QtCore.QEvent") -> bool:
"""
Process user events.
"""
if (
not self._table_records
or not self._line_edit_search
or not self._combo_box_field_to_search_1
or not self._combo_box_field_to_search_2
or not self.cursor()
):
return super().eventFilter(obj_, event)
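        # Keyboard handling: F2 opens the first search-field combo; inside the
        # search box, Enter/Return and Down move focus to the grid, Up moves it
        # back to the combo, and quote characters are swallowed.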
if event.type() == QtCore.QEvent.Type.KeyPress:
key = cast(QtGui.QKeyEvent, event)
if isinstance(obj_, fldatatable.FLDataTable):
if key.key() == cast(int, QtCore.Qt.Key.Key_F2):
self._combo_box_field_to_search_1.showPopup()
return True
# if event.type() == QtCore.QEvent.WindowUnblocked and isinstance(obj_, fldatatable.FLDataTable):
# self.refreshDelayed()
# return True
elif isinstance(obj_, QtWidgets.QLineEdit):
if key.key() in (
cast(int, QtCore.Qt.Key.Key_Enter),
cast(int, QtCore.Qt.Key.Key_Return),
):
self._table_records.setFocus()
return True
elif key.key() == cast(int, QtCore.Qt.Key.Key_Up):
self._combo_box_field_to_search_1.setFocus()
return True
elif key.key() == cast(int, QtCore.Qt.Key.Key_Down):
self._table_records.setFocus()
return True
elif key.key() == cast(int, QtCore.Qt.Key.Key_F2):
self._combo_box_field_to_search_1.showPopup()
return True
elif key.text() in ["'", "\\"]:
return True
if obj_ in (self._table_records, self._line_edit_search):
return False
else:
return super().eventFilter(obj_, event)
def showEvent(self, event: "QtGui.QShowEvent") -> None:
"""
        Process the show event.
"""
super().showEvent(event)
self.load()
if not self.loaded():
self.showWidget()
def showWidget(self) -> None:
"""
Show the widget.
"""
if self._showed:
return
if not self._top_widget:
self.initFakeEditor()
self._showed = True
return
if not self.cursor():
return
self._showed = True
# own_tmd = bool(self._table_name)
if self._table_name:
if not self.cursor().db().connManager().manager().existsTable(self._table_name):
table_metadata = (
self.cursor().db().connManager().manager().createTable(self._table_name)
)
else:
table_metadata = (
self.cursor().db().connManager().manager().metadata(self._table_name)
)
if not table_metadata:
return
self.tableRecords()
if not self._cursor_aux:
if not self._init_search:
self.refresh(True, True)
# if self._table_records:
# QtCore.QTimer.singleShot(0, self._table_records.ensureRowSelectedVisible)
else:
self.refresh(True)
if self._table_records and self._table_records.numRows() <= 0:
self.refresh(False, True)
else:
self.refreshDelayed()
if (
not isinstance(self._top_widget, flformrecorddb.FLFormRecordDB)
and self._line_edit_search is not None
):
self._line_edit_search.setFocus()
if self._cursor_aux:
if (
isinstance(self._top_widget, flformrecorddb.FLFormRecordDB)
and self._cursor_aux.modeAccess() == pnsqlcursor.PNSqlCursor.Browse
):
self.cursor().setEdition(False)
self.setReadOnly(True)
if self._init_search:
self.refresh(True, True)
if self._table_records:
QtCore.QTimer.singleShot(0, self._table_records.ensureRowSelectedVisible)
else:
# self.refresh(True)
# if self._table_records and self._table_records.numRows() <= 0:
# self.refresh(False, True)
# else:
self.refreshDelayed()
elif (
isinstance(self._top_widget, flformrecorddb.FLFormRecordDB)
and self.cursor().modeAccess() == pnsqlcursor.PNSqlCursor.Browse
and table_metadata
and not table_metadata.isQuery()
):
self.cursor().setEdition(False)
self.setReadOnly(True)
# if own_table_metadata and table_metadata and not table_metadata.inCache():
# del table_metadata
def createFLTableDBWidget(self) -> None:
"""Create all controls."""
size_policy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Minimum
)
size_policy.setHeightForWidth(True)
size_policy_clean = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Fixed
)
size_policy_clean.setHeightForWidth(True)
size_policy_group_box = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding
)
        self._data_layout = QtWidgets.QHBoxLayout()  # Holds _tab_data and _tab_filter
# self._data_layout.setContentsMargins(0, 0, 0, 0)
# self._data_layout.setSizeConstraint(0)
self._tab_data_layout = QtWidgets.QVBoxLayout()
filter_layout = QtWidgets.QVBoxLayout()
filter_layout.setSpacing(2)
filter_layout.setContentsMargins(1, 2, 1, 2)
if self._tab_data:
self._tab_data.setSizePolicy(size_policy_group_box)
self._tab_data.setLayout(self._tab_data_layout)
if self._tab_filter:
self._tab_filter.setSizePolicy(size_policy_group_box)
self._tab_filter.setLayout(filter_layout)
        # Fix to bring the lineEdit closer to the fltable
# self._tab_data.setContentsMargins(0, 0, 0, 0)
# self._tab_filter.setContentsMargins(0, 0, 0, 0)
self._tab_data_layout.setContentsMargins(0, 0, 0, 0)
# filter_layout.setContentsMargins(0, 0, 0, 0)
        # Holds the side buttons (data, filters, odf)
self._buttons_layout = QtWidgets.QVBoxLayout()
        self._master_layout = QtWidgets.QVBoxLayout()  # Holds all the layouts
self._pb_data = QtWidgets.QPushButton(self)
self._pb_data.setSizePolicy(size_policy)
if self._icon_size is not None:
self._pb_data.setMinimumSize(self._icon_size)
self._pb_data.setFocusPolicy(QtCore.Qt.FocusPolicy.NoFocus)
self._pb_data.setIcon(
QtGui.QIcon(utils_base.filedir("./core/images/icons", "fltable-data.png"))
)
self._pb_data.setText("")
self._pb_data.setToolTip("Mostrar registros")
self._pb_data.setWhatsThis("Mostrar registros")
self._buttons_layout.addWidget(self._pb_data)
self._pb_data.clicked.connect( # type: ignore [attr-defined] # noqa: F821
self.activeTabData
)
self._pb_filter = QtWidgets.QPushButton(self)
self._pb_filter.setSizePolicy(size_policy)
if self._icon_size is not None:
self._pb_filter.setMinimumSize(self._icon_size)
self._pb_filter.setFocusPolicy(QtCore.Qt.FocusPolicy.NoFocus)
self._pb_filter.setIcon(
QtGui.QIcon(utils_base.filedir("./core/images/icons", "fltable-filter.png"))
)
self._pb_filter.setText("")
self._pb_filter.setToolTip("Mostrar filtros")
self._pb_filter.setWhatsThis("Mostrar filtros")
self._buttons_layout.addWidget(self._pb_filter)
self._pb_filter.clicked.connect( # type: ignore [attr-defined] # noqa: F821
self.activeTabFilter
)
self._pb_odf = QtWidgets.QPushButton(self)
self._pb_odf.setSizePolicy(size_policy)
if self._icon_size is not None:
self._pb_odf.setMinimumSize(self._icon_size)
self._pb_odf.setFocusPolicy(QtCore.Qt.FocusPolicy.NoFocus)
self._pb_odf.setIcon(
QtGui.QIcon(utils_base.filedir("./core/images/icons", "fltable-odf.png"))
)
self._pb_odf.setText("")
self._pb_odf.setToolTip("Exportar a hoja de cálculo")
self._pb_odf.setWhatsThis("Exportar a hoja de cálculo")
self._buttons_layout.addWidget(self._pb_odf)
self._pb_odf.clicked.connect(self.exportToOds) # type: ignore [attr-defined] # noqa: F821
if settings.CONFIG.value("ebcomportamiento/FLTableExport2Calc", "false") == "true":
self._pb_odf.setDisabled(True)
self.pb_clean = QtWidgets.QPushButton(self)
self.pb_clean.setSizePolicy(size_policy_clean)
if self._icon_size is not None:
self.pb_clean.setMinimumSize(self._icon_size)
self.pb_clean.setFocusPolicy(QtCore.Qt.FocusPolicy.NoFocus)
self.pb_clean.setIcon(
QtGui.QIcon(utils_base.filedir("./core/images/icons", "fltable-clean.png"))
)
self.pb_clean.setText("")
self.pb_clean.setToolTip("Limpiar filtros")
self.pb_clean.setWhatsThis("Limpiar filtros")
filter_layout.addWidget(self.pb_clean)
self.pb_clean.clicked.connect( # type: ignore [attr-defined] # noqa: F821
self.tdbFilterClear
)
spacer = QtWidgets.QSpacerItem(
20, 20, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding
)
self._buttons_layout.addItem(spacer)
self._combo_box_field_to_search_1 = QtWidgets.QComboBox()
self._combo_box_field_to_search_2 = QtWidgets.QComboBox()
# self._combo_box_field_to_search_1.addItem("*")
# self._combo_box_field_to_search_2.addItem("*")
self._line_edit_search = QtWidgets.QLineEdit()
self._line_edit_search.textChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.filterRecords
)
label1 = QtWidgets.QLabel()
label2 = QtWidgets.QLabel()
label1.setStyleSheet("border: 0px")
label2.setStyleSheet("border: 0px")
label1.setText("Buscar")
label2.setText("en")
if self._tab_control_layout is not None:
control_frame = QtWidgets.QFrame()
lay = QtWidgets.QHBoxLayout()
control_frame.setFrameStyle(cast(int, QtWidgets.QFrame.Shadow.Raised.value))
control_frame.setStyleSheet("QFrame { border: 1px solid black; }")
lay.setContentsMargins(2, 2, 2, 2)
lay.setSpacing(2)
lay.addWidget(label1)
lay.addWidget(self._line_edit_search)
lay.addWidget(label2)
lay.addWidget(self._combo_box_field_to_search_1)
lay.addWidget(self._combo_box_field_to_search_2)
control_frame.setLayout(lay)
self._tab_control_layout.addWidget(control_frame)
self._master_layout.addLayout(self._tab_control_layout)
self._master_layout.addLayout(self._data_layout)
self._master_layout.setSpacing(2)
self._master_layout.setContentsMargins(1, 2, 1, 2)
self.setLayout(self._master_layout)
        # Add the data tab, the filter tab and the button bar
if self._tab_data is not None:
self._data_layout.addWidget(self._tab_data)
if self._tab_filter is not None:
self._data_layout.addWidget(self._tab_filter)
self._tab_filter.hide()
self._data_layout.addLayout(self._buttons_layout)
self._combo_box_field_to_search_1.currentIndexChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.putFirstCol
)
self._combo_box_field_to_search_2.currentIndexChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.putSecondCol
)
self._tdb_filter = qtable.QTable()
filter_layout.addWidget(self._tdb_filter)
def tableRecords(self) -> "fldatatable.FLDataTable":
"""
        Get the records table component.
"""
if self._table_records is None:
self._table_records = fldatatable.FLDataTable(self._tab_data, "tableRecords")
if self._table_records is not None:
self._table_records.setFocusPolicy(QtCore.Qt.FocusPolicy.StrongFocus)
self.setFocusProxy(self._table_records)
if self._tab_data_layout is not None:
self._tab_data_layout.addWidget(self._table_records)
self.setTabOrder(self._table_records, self._line_edit_search)
self.setTabOrder(self._line_edit_search, self._combo_box_field_to_search_1)
self.setTabOrder(
self._combo_box_field_to_search_1, self._combo_box_field_to_search_2
)
if self._line_edit_search is not None:
self._line_edit_search.installEventFilter(self)
self._table_records.installEventFilter(self)
if self._auto_sort_column:
self._table_records.header().sectionClicked.connect(self.switchSortOrder)
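            # Only (re)assign the cursor when the component's cursor is bound to a
            # different table than the one the grid is currently using.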
t_cursor = self._table_records.cursor_
if (
self.cursor()
and self.cursor() is not t_cursor
and self.cursor().private_cursor.metadata_ is not None
and (
not t_cursor
or (
t_cursor
and t_cursor.metadata()
and t_cursor.metadata().name() != self.cursor().metadata().name()
)
)
):
self.setTableRecordsCursor()
return self._table_records
def setTableRecordsCursor(self) -> None:
"""
Assign the current cursor of the component to the record table.
"""
if self._table_records is None:
self._table_records = fldatatable.FLDataTable(self._tab_data, "tableRecords")
if self._table_records is not None:
self._table_records.setFocusPolicy(QtCore.Qt.FocusPolicy.StrongFocus)
self.setFocusProxy(self._table_records)
if self._tab_data_layout is not None:
self._tab_data_layout.addWidget(self._table_records)
self.setTabOrder(self._table_records, self._line_edit_search)
self.setTabOrder(self._line_edit_search, self._combo_box_field_to_search_1)
self.setTabOrder(
self._combo_box_field_to_search_1, self._combo_box_field_to_search_2
)
self._table_records.installEventFilter(self)
if self._line_edit_search is not None:
self._line_edit_search.installEventFilter(self)
if self._check_column_enabled:
try:
self._table_records.clicked.disconnect( # type: ignore [attr-defined] # noqa: F821
self._table_records.setChecked
)
except Exception:
LOGGER.warning("setTableRecordsCursor: Error disconnecting setChecked signal")
self._table_records.clicked.connect( # type: ignore [attr-defined] # noqa: F821
self._table_records.setChecked
)
t_cursor = self._table_records.cursor_
if t_cursor is not self.cursor():
self._table_records.setFLSqlCursor(self.cursor())
if t_cursor:
self._table_records.recordChoosed.disconnect( # type: ignore [attr-defined] # noqa: F821
self.recordChoosedSlot
)
t_cursor.newBuffer.disconnect( # type: ignore [attr-defined] # noqa: F821
self.currentChangedSlot
)
self._table_records.recordChoosed.connect( # type: ignore [attr-defined] # noqa: F821
self.recordChoosedSlot
)
self.cursor().newBuffer.connect( # type: ignore [attr-defined] # noqa: F821
self.currentChangedSlot
)
@decorators.pyqt_slot()
def recordChoosedSlot(self) -> None:
"""Perform operations when selecting a record."""
if (
isinstance(self._top_widget, flformsearchdb.FLFormSearchDB)
and self._top_widget._in_exec
):
self._top_widget.accept()
else:
self.cursor().chooseRecord()
@decorators.pyqt_slot()
def currentChangedSlot(self) -> None:
"""Emit current changed signal."""
self.currentChanged.emit()
def currentRow(self) -> int:
"""Return current row index."""
return self.cursor().at()
def refreshTabData(self) -> None:
"""
Refresh the data tab by applying the filter.
"""
if self._filter and self._tdb_filter_last_where:
self._filter = self._filter.replace(self._tdb_filter_last_where, "")
self._tdb_filter_last_where = self.tdbFilterBuildWhere()
self.refresh(False, True)
def refreshTabFilter(self) -> None:
"""
Refresh the filter tab.
"""
if self._tab_filter_loader:
return
hori_header = self.tableRecords().horizontalHeader()
if not hori_header:
return
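        # Number of grid columns that can get a filter row; when the check column
        # is visible, _sort_column_1 is 1 and that column is skipped.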
hori_count = hori_header.count() - self._sort_column_1
if self._tdb_filter and self.cursor():
table_metadata = self.cursor().metadata()
if table_metadata is None:
return
field = None
# type = None
# len = None
part_integer = None
part_decimal = None
rx_ = None
self._tdb_filter.setSelectionMode(QtWidgets.QTableWidget.SelectionMode.NoSelection)
self._tdb_filter.setNumCols(5)
not_visibles = 0
for field in table_metadata.fieldList():
if not field.visibleGrid():
not_visibles += 1
self._tdb_filter.setNumRows(hori_count - not_visibles)
self._tdb_filter.setColumnReadOnly(0, True)
util = flutil.FLUtil()
self._tdb_filter.setColumnLabels(",", self.tr("Campo,Condición,Valor,Desde,Hasta"))
self._map_cond_type.insert(self._all, self.tr("Todos"))
self._map_cond_type.insert(self._contains, self.tr("Contiene Valor"))
self._map_cond_type.insert(self._starts, self.tr("Empieza por Valor"))
self._map_cond_type.insert(self._end, self.tr("Acaba por Valor"))
self._map_cond_type.insert(self._equal, self.tr("Igual a Valor"))
self._map_cond_type.insert(self._dist, self.tr("Distinto de Valor"))
self._map_cond_type.insert(self._greater, self.tr("Mayor que Valor"))
self._map_cond_type.insert(self._less, self.tr("Menor que Valor"))
self._map_cond_type.insert(self._from_to, self.tr("Desde - Hasta"))
self._map_cond_type.insert(self._null, self.tr("Vacío"))
self._map_cond_type.insert(self._not_null, self.tr("No Vacío"))
idx_i = 0
# for headT in hori_count:
_linea = 0
while idx_i < hori_count:
_label = (
self.cursor()
.model()
.headerData(
idx_i + self._sort_column_1,
QtCore.Qt.Orientation.Horizontal,
QtCore.Qt.ItemDataRole.DisplayRole,
)
)
_alias = table_metadata.fieldAliasToName(_label)
if _alias is None:
idx_i += 1
continue
field = table_metadata.field(_alias)
if field is None:
idx_i += 1
continue
if not field.visibleGrid():
idx_i += 1
continue
self._tdb_filter.setText(_linea, 0, _label)
type_ = field.type()
len_ = field.length()
part_integer = field.partInteger()
part_decimal = field.partDecimal()
rx_ = field.regExpValidator()
has_option_list = field.hasOptionsList()
cond = QtWidgets.QComboBox(self)
if not type_ == "pixmap":
cond_list = [
self.tr("Todos"),
self.tr("Igual a Valor"),
self.tr("Distinto de Valor"),
self.tr("Vacío"),
self.tr("No Vacío"),
]
if not type_ == "bool":
cond_list = [
self.tr("Todos"),
self.tr("Igual a Valor"),
self.tr("Distinto de Valor"),
self.tr("Vacío"),
self.tr("No Vacío"),
self.tr("Contiene Valor"),
self.tr("Empieza por Valor"),
self.tr("Acaba por Valor"),
self.tr("Mayor que Valor"),
self.tr("Menor que Valor"),
self.tr("Desde - Hasta"),
]
cond.insertItems(len(cond_list), cond_list)
self._tdb_filter.setCellWidget(_linea, 1, cond)
idx_j = 2
while idx_j < 5:
if type_ in (
"uint",
"int",
"double",
"string",
"stringlist",
"timestamp",
"json",
):
if has_option_list:
editor_qcb = QtWidgets.QComboBox(self)
option_list_translated = []
option_list_not_transalated = field.optionsList()
for item in option_list_not_transalated:
option_list_translated.append(util.translate("Metadata", item))
editor_qcb.insertItems(
len(option_list_translated), option_list_translated
)
self._tdb_filter.setCellWidget(_linea, idx_j, editor_qcb)
else:
editor_le = QtWidgets.QLineEdit(self)
if type_ == "double":
editor_le.setValidator(
fldoublevalidator.FLDoubleValidator(
0, pow(10, part_integer) - 1, part_decimal, editor_le
)
)
editor_le.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight)
elif type_ in ("uint", "int"):
if type_ == "uint":
editor_le.setValidator(
fluintvalidator.FLUIntValidator(
0, pow(10, part_integer) - 1, editor_le
)
)
else:
editor_le.setValidator(
flintvalidator.FLIntValidator(
pow(10, part_integer) - 1 * (-1),
pow(10, part_integer) - 1,
editor_le,
)
)
editor_le.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight)
else: # string, stringlist, timestamp
if len_ > 0:
editor_le.setMaxLength(len_)
if rx_:
editor_le.setValidator(
QtGui.QRegularExpressionValidator(
QtCore.QRegularExpression(rx_), editor_le
)
)
editor_le.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeft)
self._tdb_filter.setCellWidget(_linea, idx_j, editor_le)
elif type_ == "serial":
editor_se = flspinbox.FLSpinBox()
editor_se.setMaxValue(pow(10, part_integer) - 1)
self._tdb_filter.setCellWidget(_linea, idx_j, editor_se)
elif type_ == "pixmap":
editor_px = QtWidgets.QLineEdit(self)
self._tdb_filter.setRowReadOnly(idx_i, True)
self._tdb_filter.setCellWidget(_linea, idx_j, editor_px)
elif type_ == "date":
editor_de = fldateedit.FLDateEdit(self, _label)
editor_de.setOrder(fldateedit.FLDateEdit.DMY)
editor_de.setAutoAdvance(True)
editor_de.setCalendarPopup(True)
editor_de.setSeparator("-")
editor_de.setDate(QtCore.QDate().currentDate())
self._tdb_filter.setCellWidget(_linea, idx_j, editor_de)
elif type_ == "time":
editor_te = fltimeedit.FLTimeEdit(self)
time_now = QtCore.QTime.currentTime()
editor_te.setTime(time_now)
self._tdb_filter.setCellWidget(_linea, idx_j, editor_te)
elif type_ in (pnfieldmetadata.PNFieldMetaData.Unlock, "bool"):
editor_cb = flcheckbox.FLCheckBox(self)
self._tdb_filter.setCellWidget(_linea, idx_j, editor_cb)
idx_j += 1
idx_i += 1
_linea += 1
idx_k = 0
while idx_k < 5:
if self._tdb_filter:
self._tdb_filter.adjustColumn(idx_k)
idx_k += 1
        self._tab_filter_loader = True  # This prevents reloading and rewriting the filter again
def decodeCondType(self, cond_type: str) -> int:
"""
Obtain the enumeration corresponding to a condition for the filter from its literal.
"""
for num, value in enumerate(self._map_cond_type):
if cond_type == value:
return num
return self._all
def tdbFilterBuildWhere(self) -> str:
"""
Build the filter clause in SQL from the contents of the values defined in the filter tab.
"""
if not self._top_widget:
return ""
if self._tdb_filter is None:
return ""
rows_count = self._tdb_filter.numRows()
# rows_count = self.cursor().model.columnCount()
if not rows_count or not self.cursor():
return ""
table_metadata = self.cursor().metadata()
if not table_metadata:
return ""
where = ""
for idx in range(rows_count):
if self._tdb_filter is None:
break
field_name = table_metadata.fieldAliasToName(self._tdb_filter.text(idx, 0))
if field_name is None:
raise Exception("field_name could not be resolved!")
field = table_metadata.field(field_name)
if field is None:
continue
cond = self._tdb_filter.cellWidget(idx, 1)
if cond is None:
continue
cond_type = self.decodeCondType(cond.currentText())
if cond_type == self._all:
continue
if table_metadata.isQuery():
qry = (
self.cursor()
.db()
.connManager()
.manager()
.query(self.cursor().metadata().query())
)
if qry is not None:
for qry_field in qry.fieldList():
if qry_field.endswith(".%s" % field_name):
break
field_name = qry_field
else:
field_name = table_metadata.name() + "." + field_name
_field_arg = field_name or ""
arg2 = ""
arg4 = ""
type_ = field.type()
has_option_list = field.hasOptionsList()
if type_ in ("string", "stringlist", "timestamp"):
_field_arg = "UPPER(%s)" % field_name
if type_ in ("uint", "int", "double", "string", "stringlist", "timestamp", "json"):
if has_option_list:
if cond_type == self._from_to:
editor_op_1 = self._tdb_filter.cellWidget(idx, 3)
editor_op_2 = self._tdb_filter.cellWidget(idx, 4)
arg2 = (
self.cursor()
.db()
.connManager()
.manager()
                            .formatValue(type_, editor_op_1.currentText(), True)
)
arg4 = (
self.cursor()
.db()
.connManager()
.manager()
                            .formatValue(type_, editor_op_2.currentText(), True)
)
else:
editor_op_1 = self._tdb_filter.cellWidget(idx, 2)
arg2 = (
self.cursor()
.db()
.connManager()
.manager()
                            .formatValue(type_, editor_op_1.currentText(), True)
)
else:
if cond_type == self._from_to:
editor_op_1 = self._tdb_filter.cellWidget(idx, 3)
editor_op_2 = self._tdb_filter.cellWidget(idx, 4)
arg2 = (
self.cursor()
.db()
.connManager()
.manager()
.formatValue(type_, editor_op_1.text(), True)
)
arg4 = (
self.cursor()
.db()
.connManager()
.manager()
.formatValue(type_, editor_op_2.text(), True)
)
else:
editor_op_1 = self._tdb_filter.cellWidget(idx, 2)
arg2 = (
self.cursor()
.db()
.connManager()
.manager()
.formatValue(type_, editor_op_1.text(), True)
)
if type_ == "serial":
if cond_type == self._from_to:
editor_op_1 = self._tdb_filter.cellWidget(idx, 3)
editor_op_2 = self._tdb_filter.cellWidget(idx, 4)
arg2 = editor_op_1.value()
arg4 = editor_op_2.value()
else:
editor_op_1 = flspinbox.FLSpinBox(self._tdb_filter.cellWidget(idx, 2))
arg2 = editor_op_1.value()
if type_ == "date":
util = flutil.FLUtil()
if cond_type == self._from_to:
editor_op_1 = self._tdb_filter.cellWidget(idx, 3)
editor_op_2 = self._tdb_filter.cellWidget(idx, 4)
arg2 = (
self.cursor()
.db()
.connManager()
.manager()
.formatValue(type_, util.dateDMAtoAMD(str(editor_op_1.text())))
)
arg4 = (
self.cursor()
.db()
.connManager()
.manager()
.formatValue(type_, util.dateDMAtoAMD(str(editor_op_2.text())))
)
else:
editor_op_1 = self._tdb_filter.cellWidget(idx, 2)
arg2 = (
self.cursor()
.db()
.connManager()
.manager()
.formatValue(type_, util.dateDMAtoAMD(str(editor_op_1.text())))
)
if type_ == "time":
if cond_type == self._from_to:
editor_op_1 = self._tdb_filter.cellWidget(idx, 3)
editor_op_2 = self._tdb_filter.cellWidget(idx, 4)
arg2 = (
self.cursor()
.db()
.connManager()
.manager()
.formatValue(
type_, editor_op_1.time().toString(QtCore.Qt.DateFormat.ISODate)
)
)
arg4 = (
self.cursor()
.db()
.connManager()
.manager()
.formatValue(
type_, editor_op_2.time().toString(QtCore.Qt.DateFormat.ISODate)
)
)
else:
editor_op_1 = self._tdb_filter.cellWidget(idx, 2)
arg2 = (
self.cursor()
.db()
.connManager()
.manager()
.formatValue(
type_, editor_op_1.time().toString(QtCore.Qt.DateFormat.ISODate)
)
)
if type_ in ("unlock", "bool"):
editor_op_1 = self._tdb_filter.cellWidget(idx, 2)
checked_ = False
if editor_op_1.isChecked():
checked_ = True
arg2 = self.cursor().db().connManager().manager().formatValue(type_, checked_)
if where:
where += " AND"
cond_val = " " + _field_arg
if arg2 is None:
arg2 = ""
if cond_type == self._contains:
cond_val += " LIKE '%" + arg2.replace("'", "") + "%'"
elif cond_type == self._starts:
cond_val += " LIKE '" + arg2.replace("'", "") + "%'"
elif cond_type == self._end:
cond_val += " LIKE '%%" + arg2.replace("'", "") + "'"
elif cond_type == self._equal:
cond_val += " = " + str(arg2)
elif cond_type == self._dist:
cond_val += " <> " + str(arg2)
elif cond_type == self._greater:
cond_val += " > " + str(arg2)
elif cond_type == self._less:
cond_val += " < " + str(arg2)
elif cond_type == self._from_to:
cond_val += " >= " + str(arg2) + " AND " + _field_arg + " <= " + str(arg4)
elif cond_type == self._null:
cond_val += " IS NULL "
elif cond_type == self._not_null:
cond_val += " IS NOT NULL "
where += cond_val
return where
@decorators.beta_implementation
def initFakeEditor(self) -> None:
"""
        Initialize a fake, non-functional editor.
        This is used when the form is being edited with the designer and the
        real editor cannot be displayed because there is no database connection.
        It creates a very schematic preview of the editor, but enough to
        see the position and approximate size of the real editor.
"""
if not self._fake_editor:
self._fake_editor = QtWidgets.QTextEdit(self._tab_data)
size_policy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding
)
size_policy.setHeightForWidth(True)
self._fake_editor.setSizePolicy(size_policy)
self._fake_editor.setTabChangesFocus(True)
self._fake_editor.setFocusPolicy(QtCore.Qt.FocusPolicy.StrongFocus)
self.setFocusProxy(self._fake_editor)
if not self._tab_data_layout:
raise Exception("self._tab_data_layout is not defined!")
self._tab_data_layout.addWidget(self._fake_editor)
self.setTabOrder(self._fake_editor, self._line_edit_search)
self.setTabOrder(self._fake_editor, self._combo_box_field_to_search_1)
self._fake_editor.show()
prty = ""
if self._table_name:
prty = prty + "tableName: %s\n" % self._table_name
if self._foreign_field:
prty = prty + "foreignField: %s\n" % self._foreign_field
if self._field_relation:
prty = prty + "fieldRelation: %s\n" % self._field_relation
self._fake_editor.setText(prty)
@decorators.pyqt_slot()
@decorators.pyqt_slot(bool)
@decorators.pyqt_slot(bool, bool)
def refresh(self, *args) -> None:
"""
Update the recordset.
"""
refresh_head: bool = False
refresh_data: bool = True
if len(args) == 1:
if isinstance(args[0], list):
refresh_head = args[0][0]
refresh_data = args[0][1]
else:
refresh_head = args[0]
elif len(args) == 2:
refresh_head = args[0]
refresh_data = args[1]
if not self.cursor() or not self._table_records:
return
table_metadata = self.cursor().private_cursor.metadata_
if not table_metadata:
return
if not self._table_name:
self._table_name = table_metadata.name()
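        # When the selection (check) column is enabled, a virtual "check" field is
        # injected into the metadata, moved to the first visual position, and the
        # sort/search columns are shifted one place to the right.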
if self._check_column_enabled:
if not self._check_column_visible:
field_check = table_metadata.field(self._field_name_check_column)
if field_check is None:
self._field_name_check_column = "%s_check_column" % table_metadata.name()
if self._field_name_check_column not in table_metadata.fieldNames():
field_check = pnfieldmetadata.PNFieldMetaData(
self._field_name_check_column,
self.tr(self._alias_check_column),
True,
False,
pnfieldmetadata.PNFieldMetaData.Check,
0,
False,
True,
True,
0,
0,
False,
False,
False,
None,
False,
None,
True,
False,
False,
)
table_metadata.addFieldMD(field_check)
else:
field_check = table_metadata.field(self._field_name_check_column)
if field_check is None:
raise Exception("field_check is empty!")
self.tableRecords().cur.model().updateColumnsCount()
self.tableRecords().header().reset()
self.tableRecords().header().swapSections(
self.tableRecords().column_name_to_column_index(field_check.name()),
self._sort_column_1,
)
self._check_column_visible = True
self.setTableRecordsCursor()
self._sort_column_1 = 1
self._sort_column_2 = 2
self._sort_column_3 = 3
# for i in enumerate(buffer_.count()):
# buffer_.setGenerated(i, True)
else:
self.setTableRecordsCursor()
self._sort_column_1 = 0
self._sort_column_2 = 1
self._sort_column_3 = 2
self._check_column_visible = False
if self._function_get_color:
            self._table_records.setFunctionGetColor(  # FIXME: do not use top_widget
self._function_get_color, getattr(self._top_widget, "iface", None)
)
if refresh_head:
if not self.tableRecords().header().isHidden():
self.tableRecords().header().hide()
model = self.cursor().model()
for column in range(model.columnCount()):
field = model.metadata().indexFieldObject(column)
if not field.visibleGrid() or (
field.type() == "check" and not self._check_column_enabled
):
self._table_records.setColumnHidden(column, True)
else:
self._table_records.setColumnHidden(column, False)
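            # Build the ORDER BY list from the three sort columns; a per-table QSA
            # hook (<module>.tableDB_setSort_<table>) may override it.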
if self._auto_sort_column:
sort_list = []
field_1 = self._table_records.visual_index_to_field(self._sort_column_1)
field_2 = self._table_records.visual_index_to_field(self._sort_column_2)
field_3 = self._table_records.visual_index_to_field(self._sort_column_3)
if field_1 is not None:
sort_list.append(
"%s %s" % (field_1.name(), "ASC" if self._order_asc_1 else "DESC")
)
if field_2 is not None:
sort_list.append(
"%s %s" % (field_2.name(), "ASC" if self._order_asc_2 else "DESC")
)
if field_3 is not None:
sort_list.append(
"%s %s" % (field_3.name(), "ASC" if self._order_asc_3 else "DESC")
)
id_mod = (
self.cursor()
.db()
.connManager()
.managerModules()
.idModuleOfFile("%s.mtd" % self.cursor().metadata().name())
)
function_qsa = "%s.tableDB_setSort_%s" % (id_mod, self.cursor().metadata().name())
vars_: List[Any] = []
vars_.append(sort_list)
if field_1:
vars_.append(field_1.name())
vars_.append(self._order_asc_1)
if field_2:
vars_.append(field_2.name())
vars_.append(self._order_asc_2)
if field_3:
vars_.append(field_3.name())
vars_.append(self._order_asc_3)
ret = application.PROJECT.call(function_qsa, vars_, None, False)
LOGGER.debug("functionQsa: %s -> %r" % (function_qsa, ret))
if ret and not isinstance(ret, bool):
if isinstance(ret, str):
ret = [ret]
if isinstance(ret, list):
sort_list = ret
self._table_records.setSort(", ".join(sort_list))
if model:
if self._combo_box_field_to_search_1 is None:
raise Exception("comboBoxFieldSearch is not defined!")
if self._combo_box_field_to_search_2 is None:
raise Exception("comboBoxFieldSearch2 is not defined!")
try:
self._combo_box_field_to_search_1.currentIndexChanged.disconnect( # type: ignore [attr-defined] # noqa: F821
self.putFirstCol
)
self._combo_box_field_to_search_2.currentIndexChanged.disconnect( # type: ignore [attr-defined] # noqa: F821
self.putSecondCol
)
except Exception:
LOGGER.error("Se ha producido un problema al desconectar")
return
self._combo_box_field_to_search_1.clear()
self._combo_box_field_to_search_2.clear()
# cb1 = None
# cb2 = None
for column in range(model.columnCount()):
visual_column = self._table_records.header().logicalIndex(column)
if visual_column is not None:
field = model.metadata().indexFieldObject(visual_column)
if not field.visibleGrid():
continue
# self._table_records.setColumnHidden(column, True)
# else:
self._combo_box_field_to_search_1.addItem(
model.headerData(
visual_column,
QtCore.Qt.Orientation.Horizontal,
QtCore.Qt.ItemDataRole.DisplayRole,
)
)
self._combo_box_field_to_search_2.addItem(
model.headerData(
visual_column,
QtCore.Qt.Orientation.Horizontal,
QtCore.Qt.ItemDataRole.DisplayRole,
)
)
self._combo_box_field_to_search_1.addItem("*")
self._combo_box_field_to_search_2.addItem("*")
self._combo_box_field_to_search_1.setCurrentIndex(self._sort_column_1)
self._combo_box_field_to_search_2.setCurrentIndex(self._sort_column_2)
self._combo_box_field_to_search_1.currentIndexChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.putFirstCol
)
self._combo_box_field_to_search_2.currentIndexChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.putSecondCol
)
else:
self._combo_box_field_to_search_1.addItem("*")
self._combo_box_field_to_search_2.addItem("*")
self._table_records.header().show()
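        # The persistent filter is the base filter combined (AND) with the WHERE
        # clause built from the filter tab.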
if refresh_data or self.sender():
final_filter = self._filter
if self._tdb_filter_last_where:
if not final_filter:
final_filter = self._tdb_filter_last_where
else:
final_filter = "%s AND %s" % (final_filter, self._tdb_filter_last_where)
self._table_records.setPersistentFilter(final_filter)
self._table_records.setShowAllPixmaps(self._show_all_pixmaps)
self._table_records.refresh()
if self._init_search:
try:
self._line_edit_search.textChanged.disconnect( # type: ignore [attr-defined] # noqa: F821
self.filterRecords
)
except Exception:
pass
self._line_edit_search.setText(self._init_search)
self._line_edit_search.textChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.filterRecords
)
self._line_edit_search.selectAll()
self._init_search = None
# self.seekCursor()
if not self._read_only == self._req_read_only or (
self._table_records and not self._read_only == self._table_records.flReadOnly()
):
self.setReadOnly(self._req_read_only)
if not self._edit_only == self._req_edit_only or (
self._table_records and not self._edit_only == self._table_records.editOnly()
):
self.setEditOnly(self._req_edit_only)
if not self._insert_only == self._req_insert_only or (
self._table_records and not self._insert_only == self._table_records.insertOnly()
):
self.setInsertOnly(self._req_insert_only)
if not self._only_table == self._req_only_table or (
self._table_records and not self._only_table == self._table_records.onlyTable()
):
self.setOnlyTable(self._req_only_table)
if self._table_records and self._table_records.isHidden():
self._table_records.show()
# QtCore.QTimer.singleShot(50, self.setSortOrder)
def refreshDelayed(self, msec: int = 5, refresh_data: bool = True) -> None:
"""
        Update the recordset with a delay.
        Accepts a time lapse in milliseconds, starting the internal timer that
        performs the final refresh once that lapse has elapsed.
        @param msec Length of the time lapse, in milliseconds.
"""
self._refresh_data = refresh_data
QtCore.QTimer.singleShot(msec, self.refreshDelayed2)
# self.seekCursor()
def refreshDelayed2(self) -> None:
"""Refresh the data when the time ends."""
row = self.currentRow()
self.refresh(True, self._refresh_data)
self._refresh_data = False
if row > -1:
self.setCurrentRow(row)
@decorators.pyqt_slot(bool)
def insertRecord(self, wait: bool = True) -> None:
"""Call method FLSqlCursor.insertRecord."""
widget = cast(QtWidgets.QWidget, self.sender())
relation_lock = False
cur_relation = self.cursor().cursorRelation()
if cur_relation is not None:
relation_lock = cur_relation.isLocked()
if widget and (
not self.cursor()
or self._req_read_only
or self._req_edit_only
or self._req_only_table
or relation_lock
):
widget.setDisabled(True)
return
if self.cursor():
self.cursor().insertRecord(wait)
@decorators.pyqt_slot(bool)
def editRecord(self, wait: bool = True) -> None:
"""
Call method FLSqlCursor.editRecord.
"""
widget = cast(QtWidgets.QWidget, self.sender())
cur_relation = self.cursor().cursorRelation()
if (
widget
and not isinstance(widget, fldatatable.FLDataTable)
and (
not self.cursor()
or self._req_read_only
or self._req_insert_only
or self._req_only_table
or (cur_relation is not None and cur_relation.isLocked())
)
):
widget.setDisabled(True)
return
if self.cursor():
self.cursor().editRecord()
@decorators.pyqt_slot(bool)
def browseRecord(self, wait: bool = True) -> None:
"""
Call method FLSqlCursor.browseRecord.
"""
widget = cast(QtWidgets.QWidget, self.sender())
if (
widget
and not isinstance(widget, fldatatable.FLDataTable)
and (not self.cursor() or self._req_only_table)
):
widget.setDisabled(True)
return
if self.cursor():
self.cursor().browseRecord(wait)
@decorators.pyqt_slot(bool)
def deleteRecord(self, wait: bool = True) -> None:
"""
Call method FLSqlCursor.deleteRecord.
"""
widget = cast(QtWidgets.QWidget, self.sender())
cur_relation = self.cursor().cursorRelation()
if (
widget
and not isinstance(widget, fldatatable.FLDataTable)
and (
not self.cursor()
or self._req_read_only
or self._req_insert_only
or self._req_edit_only
or self._req_only_table
or (cur_relation and cur_relation.isLocked())
)
):
widget.setDisabled(True)
return
if self.cursor():
self.cursor().deleteRecord(wait)
@decorators.pyqt_slot()
def copyRecord(self) -> None:
"""
Call method FLSqlCursor.copyRecord.
"""
widget = cast(QtWidgets.QWidget, self.sender())
cur_relation = self.cursor().cursorRelation()
if (
widget
and not isinstance(widget, fldatatable.FLDataTable)
and (
not self.cursor()
or self._req_read_only
or self._req_edit_only
or self._req_only_table
or (cur_relation and cur_relation.isLocked())
)
):
widget.setDisabled(True)
return
if self.cursor():
self.cursor().copyRecord()
@decorators.pyqt_slot(int)
@decorators.pyqt_slot(str)
def putFirstCol(self, col: Union[int, str]) -> None:
"""
Place the column first by passing the name of the field.
This slot is connected to the search combo box
of the component. When we select a field it is placed
as the first column and the table is rearranged with this column.
In this way we will always have the table sorted by
the field in which we want to search.
        @param col Field name or column index; this column exchanges its position with the first column
@return False if the field does not exist
@author [email protected]
@author InfoSiAL, S.L.
"""
if not self._table_records:
raise Exception("_table_records is not defined!")
col_index_: int
if isinstance(col, str):
col_index_ = self._table_records.logical_index_to_visual_index(
self._table_records.column_name_to_column_index(col)
)
else:
col_index_ = col
_index = self._table_records.visual_index_to_column_index(col_index_)
if _index is None or _index < 0:
return
self.moveCol(_index, self._sort_column_1)
self._table_records.sortByColumn(
self._sort_column_1,
QtCore.Qt.SortOrder.AscendingOrder
if self._order_asc_1
else QtCore.Qt.SortOrder.DescendingOrder,
)
@decorators.pyqt_slot(int)
@decorators.pyqt_slot(str)
def putSecondCol(self, col: Union[int, str]) -> None:
"""
Place the column as second by passing the name of the field.
@author Silix - dpinelo
"""
if not self._table_records:
raise Exception("_table_records is not defined!")
col_index_: int
if isinstance(col, str):
col_index_ = self._table_records.logical_index_to_visual_index(
self._table_records.column_name_to_column_index(col)
)
else:
col_index_ = col
_index = self._table_records.visual_index_to_column_index(col_index_)
if _index is None or _index < 0:
return
self.moveCol(_index, self._sort_column_2)
@decorators.beta_implementation
def moveCol(self, from_: int, to_: int, first_search: bool = True) -> None:
"""
Move a column from one source field to the column in another destination field.
        @param from_ Name of the source column field
        @param to_ Name of the destination column field
        @param first_search dpinelo: Indicates whether the columns are moved taking into account that
            this function was (or was not) called from the main search and filtering combo
"""
if from_ < 0 or to_ < 0:
return
table_metadata = self.cursor().metadata()
if not table_metadata:
return
if not self._table_records:
raise Exception("_table_records is not defined!")
self._table_records.hide()
text_search = self._line_edit_search.text()
field = self.cursor().metadata().indexFieldObject(to_)
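        # The search combos are temporarily disconnected while their indexes are
        # updated, to avoid re-triggering putFirstCol/putSecondCol recursively.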
        if to_ == 0:  # If the first column has changed
try:
self._combo_box_field_to_search_1.currentIndexChanged.disconnect( # type: ignore [attr-defined] # noqa: F821
self.putFirstCol
)
except Exception:
LOGGER.error("Se ha producido un problema al desconectar")
return
self._combo_box_field_to_search_1.setCurrentIndex(from_)
self._combo_box_field_to_search_1.currentIndexChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.putFirstCol
)
            # Update the second combo
try:
self._combo_box_field_to_search_2.currentIndexChanged.disconnect( # type: ignore [attr-defined] # noqa: F821
self.putSecondCol
)
except Exception:
pass
            # Needs improvement
if (
self._combo_box_field_to_search_1.currentIndex()
== self._combo_box_field_to_search_2.currentIndex()
):
self._combo_box_field_to_search_2.setCurrentIndex(
self._table_records._h_header.logicalIndex(self._sort_column_1)
)
self._combo_box_field_to_search_2.currentIndexChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.putSecondCol
)
        if to_ == 1:  # If it is the second column ...
try:
self._combo_box_field_to_search_2.currentIndexChanged.disconnect( # type: ignore [attr-defined] # noqa: F821
self.putSecondCol
)
except Exception:
pass
self._combo_box_field_to_search_2.setCurrentIndex(from_)
self._combo_box_field_to_search_2.currentIndexChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.putSecondCol
)
if (
self._combo_box_field_to_search_1.currentIndex()
== self._combo_box_field_to_search_2.currentIndex()
):
try:
self._combo_box_field_to_search_1.currentIndexChanged.disconnect( # type: ignore [attr-defined] # noqa: F821
self.putFirstCol
)
except Exception:
pass
if (
self._combo_box_field_to_search_1.currentIndex()
== self._combo_box_field_to_search_2.currentIndex()
):
self._combo_box_field_to_search_1.setCurrentIndex(
self._table_records._h_header.logicalIndex(self._sort_column_2)
)
self._combo_box_field_to_search_1.currentIndexChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.putFirstCol
)
if not text_search:
text_search = self.cursor().valueBuffer(field.name())
# self.refresh(True)
if text_search:
self.refresh(False, True)
try:
self._line_edit_search.textChanged.disconnect( # type: ignore [attr-defined] # noqa: F821
self.filterRecords
)
except Exception:
pass
self._line_edit_search.setText(str(text_search))
self._line_edit_search.textChanged.connect( # type: ignore [attr-defined] # noqa: F821
self.filterRecords
)
self._line_edit_search.selectAll()
# self.seekCursor()
QtCore.QTimer.singleShot(0, self._table_records.ensureRowSelectedVisible)
else:
self.refreshDelayed()
self._table_records.header().swapSections(from_, to_)
self.refresh(True, False)
def setEnabled(self, enabled: bool) -> None:
"""
Set read only True or False.
"""
self.setReadOnly(not enabled)
def setColumnWidth(self, field: str, weight: int) -> None:
"""
Set the width of a column.
@param field Name of the database field corresponding to the column
        @param weight Column width
"""
if self._table_records:
self._table_records.setColWidth(field, weight)
def setCurrentRow(self, row: int) -> None:
"""
Select the indicated row.
@param row Index of the row to select
"""
if self._table_records:
self._table_records.selectRow(row)
self._table_records.scrollTo(self._table_records.cur.model().index(row, 0))
@decorators.not_implemented_warn
def columnWidth(self, col: int) -> None:
"""
        Return the column width.
"""
pass
@decorators.not_implemented_warn
def setRowHeight(self, row: int, height: int) -> None:
"""
Set the height of a row.
@param row Row order number, starting at 0
        @param height Row height
"""
pass
@decorators.not_implemented_warn
def rowHeight(self, row: int) -> None:
"""
        Return the height of the row.
"""
pass
def exportToOds(self) -> None:
"""
Export to an ODS spreadsheet and view it.
"""
if not self.cursor() or self.cursor().private_cursor.metadata_ is None:
return
cursor = pnsqlcursor.PNSqlCursor(self.cursor().curName())
_filter = self.cursor().curFilter()
if not _filter:
_filter = "1 = 1"
if self.cursor().sort():
_filter += " ORDER BY %s" % self.cursor().sort()
cursor.select(_filter)
ods_enabled = True
if settings.CONFIG.value("ebcomportamiento/FLTableExport2Calc", False):
ods_enabled = False
global_function_qsa = "flfactppal.exportFLTablesGranted"
ret = application.PROJECT.call(global_function_qsa, [], None, False, None)
if isinstance(ret, bool):
ods_enabled = ret
id_module = (
self.cursor_.db()
.managerModules()
.idModuleOfFile("%s.mtd" % self.cursor_.metadata().name())
)
function_qsa = "%s.exportFLTableGranted_%s" % (id_module, self.cursor_.metadata().name())
ret = application.PROJECT.call(function_qsa, [], None, False, None)
if isinstance(ret, bool):
ods_enabled = ret
if not ods_enabled:
QtWidgets.QMessageBox.information(
QtWidgets.QApplication.activeModalWidget(),
self.tr("Opción deshabilitada"),
self.tr("Esta opción ha sido deshabilitada."),
QtWidgets.QMessageBox.StandardButton.Ok,
)
return
metadata = cursor.metadata()
if not metadata:
return
table_records = self.tableRecords()
if not hasattr(table_records, "cursor"):
return
# hor_header = table_records.horizontalHeader()
title_style = [aqods.AQOdsStyle.Align_center, aqods.AQOdsStyle.Text_bold]
border_bot = aqods.AQOdsStyle.Border_bottom
border_right = aqods.AQOdsStyle.Border_right
border_left = aqods.AQOdsStyle.Border_left
italic = aqods.AQOdsStyle.Text_italic
ods_gen = aqods.AQOdsGenerator()
spread_sheet = aqods.AQOdsSpreadSheet(ods_gen)
sheet = aqods.AQOdsSheet(spread_sheet, metadata.alias())
tdb_num_rows = cursor.size()
tdb_num_cols = len(metadata.fieldNames())
util = flutil.FLUtil()
id_pix = 0
progress_dialog = util.createProgressDialog("Procesando", tdb_num_rows)
util.setProgress(1)
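        # Header row: one shaded, bold, centred cell per visible grid column.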
row = aqods.AQOdsRow(sheet)
row.addBgColor(aqods.aq_ods_color(0xE7E7E7))
for idx in range(tdb_num_cols):
field = metadata.indexFieldObject(table_records.visual_index_to_metadata_index(idx))
if field is not None and field.visibleGrid():
row.opIn(title_style)
row.opIn(border_bot)
row.opIn(border_left)
row.opIn(border_right)
row.opIn(field.alias())
row.close()
# cur = table_records.cursor()
# cur_row = table_records.currentRow()
cursor.first()
for idx_row in range(tdb_num_rows):
if progress_dialog.wasCanceled():
break
row = aqods.AQOdsRow(sheet)
for idx_col in range(tdb_num_cols):
                # idx = table_records.indexOf(c)  # checks whether the column is visible
# if idx == -1:
# continue
field = metadata.indexFieldObject(
table_records.visual_index_to_metadata_index(idx_col)
)
if field is not None and field.visibleGrid():
val = cursor.valueBuffer(field.name())
if field.type() == "double":
row.setFixedPrecision(metadata.fieldPartDecimal(field.name()))
row.opIn(float(val))
elif field.type() == "date":
if val is not None:
val = str(val)
if val.find("T") > -1:
val = val[0 : val.find("T")]
row.opIn(val)
else:
row.coveredCell()
elif field.type() in ("bool", "unlock"):
str_ = self.tr("Sí") if val else self.tr("No")
row.opIn(italic)
row.opIn(str_)
elif field.type() == "pixmap":
if val:
if val.find("cacheXPM") > -1:
pix = QtGui.QPixmap(val)
if not pix.isNull():
pix_name = "pix%s_" % id_pix
id_pix += 1
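                                    # Image size: pixels converted to centimetres
                                    # (assuming ~98 DPI); the trailing *20 factor is
                                    # presumably the unit scale AQOdsImage expects.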
row.opIn(
aqods.AQOdsImage(
pix_name,
round((pix.width() * 2.54) / 98, 2) * 20,
round((pix.height() * 2.54) / 98, 2) * 20,
0,
0,
val,
)
)
else:
row.coveredCell()
else:
row.coveredCell()
else:
row.coveredCell()
else:
if isinstance(val, list):
val = ",".join(val)
if val:
row.opIn(str(val))
else:
row.coveredCell()
row.close()
if not idx_row % 4:
util.setProgress(idx_row)
cursor.next()
# cur.seek(cur_row)
sheet.close()
spread_sheet.close()
util.setProgress(tdb_num_rows)
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.CursorShape.WaitCursor)
file_name = "%s/%s%s.ods" % (
application.PROJECT.tmpdir,
metadata.name(),
QtCore.QDateTime.currentDateTime().toString("ddMMyyyyhhmmsszzz"),
)
ods_gen.generateOds(file_name)
if not application.PROJECT.debug_level == 1000: # test
sysbasetype.SysBaseType.openUrl(file_name)
QtWidgets.QApplication.restoreOverrideCursor()
util.destroyProgressDialog()
def switchSortOrder(self, col: int = 0) -> None:
"""
Switch the direction of the table records sorting, from ascending to descending and vice versa.
        Records are always sorted by the first column,
        provided the autoSortColumn property is TRUE.
"""
if not self._auto_sort_column:
return
if self._table_records:
if self._table_records.logical_index_to_visual_index(
col
) == self._table_records.visual_index_to_column_index(self._sort_column_1):
self._order_asc_1 = not self._order_asc_1
self.setSortOrder(self._order_asc_1, self._sort_column_1)
@decorators.pyqt_slot(str)
def filterRecords(self, chr_: str) -> None:
"""
Filter the records in the table using the first field, according to the given pattern.
        This slot is connected to the component's search text box,
        taking its content as the filtering pattern.
        @param chr_ Character string with the filtering pattern
"""
if not self.cursor().model:
return
base_filter: Any = None
if not self._table_records:
LOGGER.warning("FLTableDB %s has no tablerecords defined!", self.objectName())
return
refresh_data = False
msec_refresh = 400
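        # Build a LIKE condition on the field selected in the first search combo;
        # a per-table QSA hook (<module>.tableDB_filterRecords_<table>) may
        # replace the generated filter.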
colidx = self._table_records.visual_index_to_metadata_index(self._sort_column_1)
if colidx is None:
raise Exception("Unexpected: Column not found")
field = self.cursor().model().metadata().indexFieldObject(colidx)
base_filter = (
self.cursor().db().connManager().manager().formatAssignValueLike(field, chr_, True)
)
id_module = (
self.cursor()
.db()
.connManager()
.managerModules()
.idModuleOfFile("%s.mtd" % self.cursor().metadata().name())
)
function_qsa = id_module + ".tableDB_filterRecords_" + self.cursor().metadata().name()
vargs = []
vargs.append(self.cursor().metadata().name())
vargs.append(chr_)
vargs.append(field.name())
vargs.append(base_filter)
if function_qsa:
msec_refresh = 200
ret = None
try:
ret = application.PROJECT.call(function_qsa, vargs, None, False)
LOGGER.debug("function_qsa:%s:", function_qsa)
except Exception:
pass
else:
                if ret and not isinstance(ret, bool):
base_filter = ret
else:
if not chr_:
base_filter = None
self.refreshDelayed(msec_refresh, refresh_data)
self._filter = base_filter or ""
def setSortOrder(
self, ascending: Union[bool, int] = True, col_order: Optional[int] = None
) -> None:
"""Set sort columns order."""
if isinstance(ascending, int):
ascending = ascending == 1
order = (
QtCore.Qt.SortOrder.AscendingOrder if ascending else QtCore.Qt.SortOrder.DescendingOrder
)
col = col_order if col_order is not None else self._sort_column_1
if col == 0:
self._order_asc_1 = ascending
elif col == 1:
self._order_asc_2 = ascending
elif col == 2:
self._order_asc_3 = ascending
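        # Sort by the first visible column at or after the requested position.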
if self._table_records:
while True:
column = self._table_records.header().logicalIndex(col)
if not self._table_records.isColumnHidden(column):
break
col += 1
self._table_records.sortByColumn(column, order)
def isSortOrderAscending(self) -> bool:
"""Return if the order of the first column is ascending."""
return self._order_asc_1
def setActionName(self, name: str):
"""Set action Name to the cursor (deprecated)."""
pass
def activeTabData(self) -> None:
"""
Activate the data table.
"""
if self._tab_filter is not None:
self._tab_filter.hide()
if self._tab_data is not None:
self._tab_data.show()
self.refreshTabData()
def activeTabFilter(self) -> None:
"""
Activate the filter table.
"""
if self._tab_data is not None:
self._tab_data.hide()
if self._tab_filter is not None:
self._tab_filter.show()
self.refreshTabFilter()
def tdbFilterClear(self) -> None:
"""
Clean and initialize the filter.
"""
if not self._top_widget:
return
self._tab_filter_loader = False
self.refreshTabFilter()
"""
Señal emitida cuando se refresca por cambio de filtro
"""
refreshed = QtCore.pyqtSignal()
"""
Señal emitida cuando se establece si el componente es o no de solo lectura.
"""
readOnlyChanged = QtCore.pyqtSignal(bool)
"""
Señal emitida cuando se establece si el componente es o no de solo edición.
"""
editOnlyChanged = QtCore.pyqtSignal(bool)
"""
Señal emitida cuando se establece si el componente es o no de solo inserción.
"""
insertOnlyChanged = QtCore.pyqtSignal(bool)
"""
Señal emitida cuando se establece cambia el registro seleccionado.
"""
currentChanged = QtCore.pyqtSignal()
def primarysKeysChecked(self) -> List[Any]:
"""Return a list of the primary keys checked."""
return self.tableRecords().primarysKeysChecked()
def clearChecked(self) -> None:
"""Empty the list of primary keys checked."""
self.tableRecords().clearChecked()
def setPrimaryKeyChecked(self, name: str, checked: bool) -> None:
"""Set a primary key cheked and add to the cheked list."""
self.tableRecords().setPrimaryKeyChecked(name, checked)
|
the-stack_0_17836 | leyNums = []
end = input("Pick a number. ")
def Leyland(q,w):
num = pow(q,w) + pow(w,q)
    return num
for x in range(2, int(end)):
#print("W = {}".format(x))
for y in range(2, int(end)):
#print("Q = {}".format(y))
if x >= y:
leyNums.append(Leyland(x,y))
leyNums = sorted(leyNums)
print("The first {} Leyland Numbers are: {}".format(len(leyNums), leyNums))
leyPrimes = []
primes = []
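# Trial-division primality check for every number up to the largest Leyland
# number, then keep the primes that are also Leyland numbers (very slow for
# large inputs).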
for x in range(2, leyNums[-1] + 1):
primality = 1
for y in range(2, x+1):
        if (x % y == 0) and (x != y):
primality = 0
break
    if (x == y) and (primality == 1):
primes.append(y)
for item in primes:
for thing in leyNums:
if item == thing:
leyPrimes.append(thing)
leyPrimes = sorted(leyPrimes)
print(leyPrimes)
#print("The first {} Leyland Primes are: {}".format(len(leyPrimes), leyPrimes)) |
the-stack_0_17837 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.vision_v1.types import image_annotator
from .transports.base import ImageAnnotatorTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ImageAnnotatorGrpcAsyncIOTransport
from .client import ImageAnnotatorClient
class ImageAnnotatorAsyncClient:
"""Service that performs Google Cloud Vision API detection tasks
over client images, such as face, landmark, logo, label, and
text detection. The ImageAnnotator service returns detected
entities from the images.
"""
_client: ImageAnnotatorClient
DEFAULT_ENDPOINT = ImageAnnotatorClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ImageAnnotatorClient.DEFAULT_MTLS_ENDPOINT
product_path = staticmethod(ImageAnnotatorClient.product_path)
parse_product_path = staticmethod(ImageAnnotatorClient.parse_product_path)
product_set_path = staticmethod(ImageAnnotatorClient.product_set_path)
parse_product_set_path = staticmethod(ImageAnnotatorClient.parse_product_set_path)
common_billing_account_path = staticmethod(
ImageAnnotatorClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
ImageAnnotatorClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(ImageAnnotatorClient.common_folder_path)
parse_common_folder_path = staticmethod(
ImageAnnotatorClient.parse_common_folder_path
)
common_organization_path = staticmethod(
ImageAnnotatorClient.common_organization_path
)
parse_common_organization_path = staticmethod(
ImageAnnotatorClient.parse_common_organization_path
)
common_project_path = staticmethod(ImageAnnotatorClient.common_project_path)
parse_common_project_path = staticmethod(
ImageAnnotatorClient.parse_common_project_path
)
common_location_path = staticmethod(ImageAnnotatorClient.common_location_path)
parse_common_location_path = staticmethod(
ImageAnnotatorClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ImageAnnotatorAsyncClient: The constructed client.
"""
return ImageAnnotatorClient.from_service_account_info.__func__(ImageAnnotatorAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ImageAnnotatorAsyncClient: The constructed client.
"""
return ImageAnnotatorClient.from_service_account_file.__func__(ImageAnnotatorAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> ImageAnnotatorTransport:
"""Return the transport used by the client instance.
Returns:
ImageAnnotatorTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(ImageAnnotatorClient).get_transport_class, type(ImageAnnotatorClient)
)
def __init__(
self,
*,
credentials: credentials.Credentials = None,
transport: Union[str, ImageAnnotatorTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the image annotator client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ImageAnnotatorTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ImageAnnotatorClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def batch_annotate_images(
self,
request: image_annotator.BatchAnnotateImagesRequest = None,
*,
requests: Sequence[image_annotator.AnnotateImageRequest] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> image_annotator.BatchAnnotateImagesResponse:
r"""Run image detection and annotation for a batch of
images.
Args:
request (:class:`google.cloud.vision_v1.types.BatchAnnotateImagesRequest`):
The request object. Multiple image annotation requests
are batched into a single service call.
requests (:class:`Sequence[google.cloud.vision_v1.types.AnnotateImageRequest]`):
Required. Individual image annotation
requests for this batch.
This corresponds to the ``requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.vision_v1.types.BatchAnnotateImagesResponse:
Response to a batch image annotation
request.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = image_annotator.BatchAnnotateImagesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if requests:
request.requests.extend(requests)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_annotate_images,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def batch_annotate_files(
self,
request: image_annotator.BatchAnnotateFilesRequest = None,
*,
requests: Sequence[image_annotator.AnnotateFileRequest] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> image_annotator.BatchAnnotateFilesResponse:
r"""Service that performs image detection and annotation
for a batch of files. Now only "application/pdf",
"image/tiff" and "image/gif" are supported.
This service will extract at most 5 (customers can
specify which 5 in AnnotateFileRequest.pages) frames
(gif) or pages (pdf or tiff) from each file provided and
perform detection and annotation for each image
extracted.
Args:
request (:class:`google.cloud.vision_v1.types.BatchAnnotateFilesRequest`):
The request object. A list of requests to annotate files
using the BatchAnnotateFiles API.
requests (:class:`Sequence[google.cloud.vision_v1.types.AnnotateFileRequest]`):
Required. The list of file annotation
requests. Right now we support only one
AnnotateFileRequest in
BatchAnnotateFilesRequest.
This corresponds to the ``requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.vision_v1.types.BatchAnnotateFilesResponse:
A list of file annotation responses.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = image_annotator.BatchAnnotateFilesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if requests:
request.requests.extend(requests)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_annotate_files,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def async_batch_annotate_images(
self,
request: image_annotator.AsyncBatchAnnotateImagesRequest = None,
*,
requests: Sequence[image_annotator.AnnotateImageRequest] = None,
output_config: image_annotator.OutputConfig = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Run asynchronous image detection and annotation for a list of
images.
Progress and results can be retrieved through the
``google.longrunning.Operations`` interface.
``Operation.metadata`` contains ``OperationMetadata``
(metadata). ``Operation.response`` contains
``AsyncBatchAnnotateImagesResponse`` (results).
This service will write image annotation outputs to json files
in customer GCS bucket, each json file containing
BatchAnnotateImagesResponse proto.
Args:
request (:class:`google.cloud.vision_v1.types.AsyncBatchAnnotateImagesRequest`):
The request object. Request for async image annotation
for a list of images.
requests (:class:`Sequence[google.cloud.vision_v1.types.AnnotateImageRequest]`):
Required. Individual image annotation
requests for this batch.
This corresponds to the ``requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
output_config (:class:`google.cloud.vision_v1.types.OutputConfig`):
Required. The desired output location
and metadata (e.g. format).
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.vision_v1.types.AsyncBatchAnnotateImagesResponse`
Response to an async batch image annotation request.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([requests, output_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = image_annotator.AsyncBatchAnnotateImagesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if output_config is not None:
request.output_config = output_config
if requests:
request.requests.extend(requests)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.async_batch_annotate_images,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
image_annotator.AsyncBatchAnnotateImagesResponse,
metadata_type=image_annotator.OperationMetadata,
)
# Done; return the response.
return response
async def async_batch_annotate_files(
self,
request: image_annotator.AsyncBatchAnnotateFilesRequest = None,
*,
requests: Sequence[image_annotator.AsyncAnnotateFileRequest] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Run asynchronous image detection and annotation for a list of
generic files, such as PDF files, which may contain multiple
pages and multiple images per page. Progress and results can be
retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains ``OperationMetadata``
(metadata). ``Operation.response`` contains
``AsyncBatchAnnotateFilesResponse`` (results).
Args:
request (:class:`google.cloud.vision_v1.types.AsyncBatchAnnotateFilesRequest`):
The request object. Multiple async file annotation
requests are batched into a single service call.
requests (:class:`Sequence[google.cloud.vision_v1.types.AsyncAnnotateFileRequest]`):
Required. Individual async file
annotation requests for this batch.
This corresponds to the ``requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.vision_v1.types.AsyncBatchAnnotateFilesResponse`
Response to an async batch file annotation request.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = image_annotator.AsyncBatchAnnotateFilesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if requests:
request.requests.extend(requests)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.async_batch_annotate_files,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
image_annotator.AsyncBatchAnnotateFilesResponse,
metadata_type=image_annotator.OperationMetadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-vision",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ImageAnnotatorAsyncClient",)
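# Illustrative usage sketch (not part of the generated client): a minimal
# label-detection call through the async client defined above. The image path
# and the use of application-default credentials are assumptions; it could be
# driven with e.g. asyncio.run(_example_label_detection("path/to/image.jpg")).
async def _example_label_detection(image_path: str):
    client = ImageAnnotatorAsyncClient()
    with open(image_path, "rb") as image_file:
        content = image_file.read()
    request = image_annotator.AnnotateImageRequest(
        image=image_annotator.Image(content=content),
        features=[
            image_annotator.Feature(
                type_=image_annotator.Feature.Type.LABEL_DETECTION
            )
        ],
    )
    response = await client.batch_annotate_images(requests=[request])
    # Each AnnotateImageResponse carries the label annotations for one input image.
    return [
        label.description for label in response.responses[0].label_annotations
    ]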
|
the-stack_0_17838 | #coding=utf-8
import numpy as np
import cv2
import time
from matplotlib import pyplot as plt
import math
from scipy.ndimage import filters
#
# def strokeFiter():
# pass;
def angle(x,y):
return int(math.atan2(float(y),float(x))*180.0/3.1415)
def h_rot(src, angle, scale=1.0):
w = src.shape[1]
h = src.shape[0]
rangle = np.deg2rad(angle)
nw = (abs(np.sin(rangle)*h) + abs(np.cos(rangle)*w))*scale
nh = (abs(np.cos(rangle)*h) + abs(np.sin(rangle)*w))*scale
rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale)
rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5,0]))
rot_mat[0,2] += rot_move[0]
rot_mat[1,2] += rot_move[1]
return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4)
pass
def v_rot(img, angel, shape, max_angel):
size_o = [shape[1],shape[0]]
size = (shape[1]+ int(shape[0]*np.cos((float(max_angel )/180) * 3.14)),shape[0])
interval = abs( int( np.sin((float(angel) /180) * 3.14)* shape[0]))
pts1 = np.float32([[0,0],[0,size_o[1]],[size_o[0],0],[size_o[0],size_o[1]]])
if(angel>0):
pts2 = np.float32([[interval,0],[0,size[1] ],[size[0],0 ],[size[0]-interval,size_o[1]]])
else:
pts2 = np.float32([[0,0],[interval,size[1] ],[size[0]-interval,0 ],[size[0],size_o[1]]])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst = cv2.warpPerspective(img,M,size)
return dst,M
def skew_detection(image_gray):
h, w = image_gray.shape[:2]
eigen = cv2.cornerEigenValsAndVecs(image_gray,12, 5)
angle_sur = np.zeros(180,np.uint)
eigen = eigen.reshape(h, w, 3, 2)
flow = eigen[:,:,2]
vis = image_gray.copy()
vis[:] = (192 + np.uint32(vis)) / 2
d = 12
points = np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2)
for x, y in points:
vx, vy = np.int32(flow[int(y), int(x)]*d)
# cv2.line(rgb, (x-vx, y-vy), (x+vx, y+vy), (0, 355, 0), 1, cv2.LINE_AA)
ang = angle(vx,vy)
angle_sur[(ang+180)%180] +=1
# torr_bin = 30
    angle_sur = angle_sur.astype(np.float64)  # np.float was removed in newer NumPy releases
angle_sur = (angle_sur-angle_sur.min())/(angle_sur.max()-angle_sur.min())
angle_sur = filters.gaussian_filter1d(angle_sur,5)
skew_v_val = angle_sur[20:180-20].max()
skew_v = angle_sur[30:180-30].argmax() + 30
skew_h_A = angle_sur[0:30].max()
skew_h_B = angle_sur[150:180].max()
skew_h = 0
if (skew_h_A > skew_v_val*0.3 or skew_h_B > skew_v_val*0.3):
if skew_h_A>=skew_h_B:
skew_h = angle_sur[0:20].argmax()
else:
skew_h = - angle_sur[160:180].argmax()
return skew_h,skew_v
def fastDeskew(image):
image_gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
skew_h,skew_v = skew_detection(image_gray)
print("校正角度 h ",skew_h,"v",skew_v)
deskew,M = v_rot(image,int((90-skew_v)*1.5),image.shape,60)
return deskew,M
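# Illustrative usage sketch (assumption: an OpenCV-readable BGR image at the
# given path, such as the './dataset/0.jpg' sample used in __main__ below).
def demo_fast_deskew(path='./dataset/0.jpg'):
    img = cv2.imread(path)
    deskewed, M = fastDeskew(img)
    return deskewed, M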
if __name__ == '__main__':
fn = './dataset/0.jpg'
img = cv2.imread(fn)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
skew_h,skew_v = skew_detection(gray)
    img, M = v_rot(img, (90 - skew_v), img.shape, 60)
# img = h_rot(img,skew_h)
# if img.shape[0]>img.shape[1]:
# img = h_rot(img, -90)
plt.show()
cv2.waitKey()
|
the-stack_0_17839 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from setuptools import find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('CHANGELOG.rst') as changelog_file:
changelog = changelog_file.read()
with open('requirements.txt') as requirements_file:
requirements = requirements_file.read().splitlines()
with open('testing_requirements.txt') as test_requirements_file:
test_requirements = test_requirements_file.read().splitlines()
setup(
name='GroupyAPI',
version='0.10.3',
description='The simple yet powerful wrapper for the GroupMe API',
long_description=readme + '\n\n' + changelog,
author='Robert Grant',
author_email='[email protected]',
url='https://github.com/rhgrant10/Groupy',
packages=find_packages(),
package_dir={'groupy': 'groupy'},
include_package_data=True,
install_requires=requirements,
license="Apache Software License, Version 2.0",
keywords=['api', 'GroupMe'],
classifiers=[
# 'Development Status :: 1 - Planning',
# 'Development Status :: 2 - Pre-Alpha',
# 'Development Status :: 3 - Alpha',
# 'Development Status :: 4 - Beta',
'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# 'Development Status :: 7 - Inactive',
'Operating System :: OS Independent',
'Topic :: Communications :: Chat',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements
)
|
the-stack_0_17840 | """ PyTorch implementation of DualPathNetworks
Ported to PyTorch by [Ross Wightman](https://github.com/rwightman/pytorch-dpn-pretrained)
Based on original MXNet implementation https://github.com/cypw/DPNs with
many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs.
This implementation is compatible with the pretrained weights
from cypw's MXNet implementation.
"""
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
__all__ = ['DPN', 'dpn68', 'dpn68b', 'dpn92', 'dpn98', 'dpn131', 'dpn107']
pretrained_settings = {
'dpn68': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn68-66bebafa7.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
},
'dpn68b': {
'imagenet+5k': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn68b_extra-84854c156.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
},
'dpn92': {
# 'imagenet': {
# 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn68-66bebafa7.pth',
# 'input_space': 'RGB',
# 'input_size': [3, 224, 224],
# 'input_range': [0, 1],
# 'mean': [124 / 255, 117 / 255, 104 / 255],
# 'std': [1 / (.0167 * 255)] * 3,
# 'num_classes': 1000
# },
'imagenet+5k': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn92_extra-b040e4a9b.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
},
'dpn98': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn98-5b90dec4d.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
},
'dpn131': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn131-71dfe43e0.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
},
'dpn107': {
'imagenet+5k': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn107_extra-1ac7121e2.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
}
}
def dpn68(num_classes=1000, pretrained='imagenet'):
model = DPN(
small=True, num_init_features=10, k_r=128, groups=32,
k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64),
num_classes=num_classes, test_time_pool=True)
if pretrained:
settings = pretrained_settings['dpn68'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model
def dpn68b(num_classes=1000, pretrained='imagenet+5k'):
model = DPN(
small=True, num_init_features=10, k_r=128, groups=32,
b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64),
num_classes=num_classes, test_time_pool=True)
if pretrained:
settings = pretrained_settings['dpn68b'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model
def dpn92(in_channels=3, num_classes=1000, pretrained='imagenet+5k'):
    # NOTE: in_channels is currently unused (InputBlock is hard-wired to RGB input);
    # a default is provided so dpn92() can be called without arguments, as in __main__ below.
model = DPN(
num_init_features=64, k_r=96, groups=32,
k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128),
num_classes=num_classes, test_time_pool=True)
if pretrained:
settings = pretrained_settings['dpn92'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model
def dpn98(num_classes=1000, pretrained='imagenet'):
model = DPN(
num_init_features=96, k_r=160, groups=40,
k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128),
num_classes=num_classes, test_time_pool=True)
if pretrained:
settings = pretrained_settings['dpn98'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model
def dpn131(num_classes=1000, pretrained='imagenet'):
model = DPN(
num_init_features=128, k_r=160, groups=40,
k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128),
num_classes=num_classes, test_time_pool=True)
if pretrained:
settings = pretrained_settings['dpn131'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model
def dpn107(num_classes=1000, pretrained='imagenet+5k'):
model = DPN(
num_init_features=128, k_r=200, groups=50,
k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128),
num_classes=num_classes, test_time_pool=True)
if pretrained:
settings = pretrained_settings['dpn107'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model
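# Illustrative sketch (not part of the original port): constructing a DPN-68
# from the factory above without pretrained weights and running a dummy forward
# pass; the 1x3x224x224 input matches the pretrained_settings above.
def _dpn_smoke_test():
    model = dpn68(num_classes=1000, pretrained=None)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    return out.shape  # expected torch.Size([1, 1000])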
class CatBnAct(nn.Module):
def __init__(self, in_chs, activation_fn=nn.ReLU(inplace=True)):
super(CatBnAct, self).__init__()
self.bn = nn.BatchNorm2d(in_chs, eps=0.001)
self.act = activation_fn
def forward(self, x):
x = torch.cat(x, dim=1) if isinstance(x, tuple) else x
return self.act(self.bn(x))
class BnActConv2d(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size, stride,
padding=0, groups=1, activation_fn=nn.ReLU(inplace=True)):
super(BnActConv2d, self).__init__()
self.bn = nn.BatchNorm2d(in_chs, eps=0.001)
self.act = activation_fn
self.conv = nn.Conv2d(in_chs, out_chs, kernel_size, stride, padding, groups=groups, bias=False)
def forward(self, x):
return self.conv(self.act(self.bn(x)))
class InputBlock(nn.Module):
def __init__(self, num_init_features, kernel_size=7,
padding=3, activation_fn=nn.ReLU(inplace=True)):
super(InputBlock, self).__init__()
self.conv = nn.Conv2d(
3, num_init_features, kernel_size=kernel_size, stride=2, padding=padding, bias=False)
self.bn = nn.BatchNorm2d(num_init_features, eps=0.001)
self.act = activation_fn
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
x = self.pool(x)
return x
class DualPathBlock(nn.Module):
def __init__(
self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False):
super(DualPathBlock, self).__init__()
self.num_1x1_c = num_1x1_c
self.inc = inc
self.b = b
        if block_type == 'proj':
self.key_stride = 1
self.has_proj = True
        elif block_type == 'down':
self.key_stride = 2
self.has_proj = True
else:
            assert block_type == 'normal'
self.key_stride = 1
self.has_proj = False
if self.has_proj:
# Using different member names here to allow easier parameter key matching for conversion
if self.key_stride == 2:
self.c1x1_w_s2 = BnActConv2d(
in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2)
else:
self.c1x1_w_s1 = BnActConv2d(
in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1)
self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1)
self.c3x3_b = BnActConv2d(
in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3,
stride=self.key_stride, padding=1, groups=groups)
if b:
self.c1x1_c = CatBnAct(in_chs=num_3x3_b)
self.c1x1_c1 = nn.Conv2d(num_3x3_b, num_1x1_c, kernel_size=1, bias=False)
self.c1x1_c2 = nn.Conv2d(num_3x3_b, inc, kernel_size=1, bias=False)
else:
self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1)
def forward(self, x):
x_in = torch.cat(x, dim=1) if isinstance(x, tuple) else x
if self.has_proj:
if self.key_stride == 2:
x_s = self.c1x1_w_s2(x_in)
else:
x_s = self.c1x1_w_s1(x_in)
x_s1 = x_s[:, :self.num_1x1_c, :, :]
x_s2 = x_s[:, self.num_1x1_c:, :, :]
else:
x_s1 = x[0]
x_s2 = x[1]
x_in = self.c1x1_a(x_in)
x_in = self.c3x3_b(x_in)
if self.b:
x_in = self.c1x1_c(x_in)
out1 = self.c1x1_c1(x_in)
out2 = self.c1x1_c2(x_in)
else:
x_in = self.c1x1_c(x_in)
out1 = x_in[:, :self.num_1x1_c, :, :]
out2 = x_in[:, self.num_1x1_c:, :, :]
resid = x_s1 + out1
dense = torch.cat([x_s2, out2], dim=1)
return resid, dense
class DPN(nn.Module):
def __init__(self, small=False, num_init_features=64, k_r=96, groups=32,
b=False, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128),
num_classes=1000, test_time_pool=False):
super(DPN, self).__init__()
self.test_time_pool = test_time_pool
self.b = b
bw_factor = 1 if small else 4
self.k_sec = k_sec
self.out_channels = []
self.blocks = OrderedDict()
# conv1
if small:
self.blocks['conv1_1'] = InputBlock(num_init_features, kernel_size=3, padding=1)
else:
self.blocks['conv1_1'] = InputBlock(num_init_features, kernel_size=7, padding=3)
self.out_channels.append(num_init_features)
# conv2
bw = 64 * bw_factor
inc = inc_sec[0]
r = (k_r * bw) // (64 * bw_factor)
self.blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[0] + 1):
self.blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.out_channels.append(in_chs)
# conv3
bw = 128 * bw_factor
inc = inc_sec[1]
r = (k_r * bw) // (64 * bw_factor)
self.blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[1] + 1):
self.blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.out_channels.append(in_chs)
# conv4
bw = 256 * bw_factor
inc = inc_sec[2]
r = (k_r * bw) // (64 * bw_factor)
self.blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[2] + 1):
self.blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.out_channels.append(in_chs)
# conv5
bw = 512 * bw_factor
inc = inc_sec[3]
r = (k_r * bw) // (64 * bw_factor)
self.blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[3] + 1):
self.blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.blocks['conv5_bn_ac'] = CatBnAct(in_chs)
self.out_channels.append(in_chs)
self.features = nn.Sequential(self.blocks)
# Using 1x1 conv for the FC layer to allow the extra pooling scheme
self.classifier = nn.Conv2d(in_chs, num_classes, kernel_size=1, bias=True)
def logits(self, features):
if not self.training and self.test_time_pool:
x = F.avg_pool2d(features, kernel_size=7, stride=1)
out = self.classifier(x)
# The extra test time pool should be pooling an img_size//32 - 6 size patch
out = adaptive_avgmax_pool2d(out, pool_type='avgmax')
else:
x = adaptive_avgmax_pool2d(features, pool_type='avg')
out = self.classifier(x)
return out.view(out.size(0), -1)
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
""" PyTorch selectable adaptive pooling
Adaptive pooling with the ability to select the type of pooling from:
* 'avg' - Average pooling
* 'max' - Max pooling
* 'avgmax' - Sum of average and max pooling re-scaled by 0.5
* 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim
Both a functional and a nn.Module version of the pooling is provided.
Author: Ross Wightman (rwightman)
"""
def pooling_factor(pool_type='avg'):
return 2 if pool_type == 'avgmaxc' else 1
def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
"""Selectable global pooling function with dynamic input kernel size
"""
if pool_type == 'avgmaxc':
x = torch.cat([
F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
], dim=1)
elif pool_type == 'avgmax':
x_avg = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
x = 0.5 * (x_avg + x_max)
elif pool_type == 'max':
x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
else:
if pool_type != 'avg':
print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
x = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
return x
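# Illustrative sketch (not part of the original file): 'avg' keeps the channel
# count while 'avgmaxc' concatenates average- and max-pooled maps, doubling it,
# which is what pooling_factor() reports.
def _adaptive_pool_demo():
    feats = torch.randn(2, 8, 7, 7)
    pooled_avg = adaptive_avgmax_pool2d(feats, pool_type='avg')      # (2, 8, 1, 1)
    pooled_cat = adaptive_avgmax_pool2d(feats, pool_type='avgmaxc')  # (2, 16, 1, 1)
    return pooled_avg.shape, pooled_cat.shape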
class AdaptiveAvgMaxPool2d(torch.nn.Module):
"""Selectable global pooling layer with dynamic input kernel size
"""
def __init__(self, output_size=1, pool_type='avg'):
super(AdaptiveAvgMaxPool2d, self).__init__()
self.output_size = output_size
self.pool_type = pool_type
if pool_type == 'avgmaxc' or pool_type == 'avgmax':
self.pool = nn.ModuleList([nn.AdaptiveAvgPool2d(output_size), nn.AdaptiveMaxPool2d(output_size)])
elif pool_type == 'max':
self.pool = nn.AdaptiveMaxPool2d(output_size)
else:
if pool_type != 'avg':
print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
self.pool = nn.AdaptiveAvgPool2d(output_size)
def forward(self, x):
if self.pool_type == 'avgmaxc':
x = torch.cat([p(x) for p in self.pool], dim=1)
elif self.pool_type == 'avgmax':
x = 0.5 * torch.sum(torch.stack([p(x) for p in self.pool]), 0).squeeze(dim=0)
else:
x = self.pool(x)
return x
def factor(self):
return pooling_factor(self.pool_type)
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ 'output_size=' + str(self.output_size) \
+ ', pool_type=' + self.pool_type + ')'
if __name__ == "__main__":
model = dpn92()
print(model.features, len(model.features))
print(model.features[2])
|
the-stack_0_17841 | """
Write a Python program that accepts a positive number and repeatedly subtracts
the sum of the number's digits from it, for as long as the number stays positive,
counting how many subtractions are performed.
"""
def repeat_times(n):
s = 0
    n_str = str(n)
    while n > 0:
        n -= sum(int(digit) for digit in n_str)
        n_str = str(n)
        s += 1
return s
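# Worked example for the function above: repeat_times(21) performs
# 21 - (2+1) = 18, 18 - (1+8) = 9, 9 - 9 = 0, i.e. three subtractions,
# so the second call below prints 3 (and repeat_times(9) prints 1).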
print(repeat_times(9))
print(repeat_times(21)) |
the-stack_0_17842 | """
This file contains helpful utility functions
for neural network parameter initialization schemes.
"""
from __future__ import division
import math
import deepscoop.autograd.tensor_library as tl
import deepscoop.autograd.tensor_library.random as tlr
def get_in_out_dims(tensor):
"""
Gets the fan-in and fan-out
dimensions of the provided
tensor.
"""
dims = tensor.dims
if dims < 2:
raise ValueError("Tensor must have at least 2 dimensions.")
if dims == 2:
nin = tensor.shape[1]
nout = tensor.shape[0]
else:
num_input_fmaps = tensor.shape[1]
num_output_fmaps = tensor.shape[0]
receptive_field_size = 1
if dims > 2:
receptive_field_size = tensor[0][0]._data.size
nin = num_input_fmaps * receptive_field_size
nout = num_output_fmaps * receptive_field_size
return nin, nout
def normalized(nin, nout):
"""
See Glorot and Bengio (2010)
Pg. 253, Eq. (16)
"""
# nin, nout = get_in_out_dims(tensor)
high = tl.sqrt(6 / float(nin + nout))
low = -high
return tlr.uniform(low, high, size=(nin, nout))
def glorot_uniform(nin, nout):
# nin, nout = get_in_out_dims(tensor)
high = tl.sqrt(3 / nin)
low = -high
return tlr.uniform(low, high, size=(nin, nout))
def xavier_normal(nin, nout):
# nin, nout = get_in_out_dims(tensor)
std = tl.sqrt(2.0 / float(nin + nout))
return tlr.normal(scale=std, size=(nin, nout))
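# Illustrative sketch (not part of the original utilities): drawing a 256x128
# weight matrix from each of the schemes defined above; the fan-in/fan-out
# values are passed explicitly, mirroring what get_in_out_dims() would supply.
def _init_demo(nin=256, nout=128):
    return (
        normalized(nin, nout),
        glorot_uniform(nin, nout),
        xavier_normal(nin, nout),
    )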
def sparse(tensor, sparsity, std=0.01):
"""
Initialization method described in
"Deep Learning via Hessian-Free Optimization"
by Martens, J. (2010).
"""
if tensor.dims != 2:
raise ValueError("This initialization only works with Tensors having 2 dimensions.")
rows, cols = tensor.shape
num_zeros = int(math.ceil(sparsity * rows))
    X = tlr.normal(loc=0, scale=std, size=(rows, cols))  # draw the full matrix; a scalar draw cannot be indexed below
for col_idx in range(cols):
row_idxs = tlr.permutation(rows)
zero_idxs = row_idxs[:num_zeros]
X[zero_idxs, col_idx] = 0.
return X
|
the-stack_0_17843 | # orm/loading.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
"""private module containing functions used to convert database
rows into object instances and associated state.
the functions here are called primarily by Query, Mapper,
as well as some of the attribute loading strategies.
"""
from __future__ import annotations
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from sqlalchemy.orm.context import FromStatement
from . import attributes
from . import exc as orm_exc
from . import path_registry
from .base import _DEFER_FOR_STATE
from .base import _RAISE_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .base import PassiveFlag
from .util import _none_set
from .util import state_str
from .. import exc as sa_exc
from .. import future
from .. import util
from ..engine import result_tuple
from ..engine.result import ChunkedIteratorResult
from ..engine.result import FrozenResult
from ..engine.result import SimpleResultMetaData
from ..sql import util as sql_util
from ..sql.selectable import ForUpdateArg
from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from ..sql.selectable import SelectState
if TYPE_CHECKING:
from ._typing import _IdentityKeyType
from .base import LoaderCallableStatus
from .context import QueryContext
from .interfaces import ORMOption
from .mapper import Mapper
from .query import Query
from .session import Session
from .state import InstanceState
from ..engine.cursor import CursorResult
from ..engine.interfaces import _ExecuteOptions
from ..engine.result import Result
from ..sql import Select
_T = TypeVar("_T", bound=Any)
_O = TypeVar("_O", bound=object)
_new_runid = util.counter()
_PopulatorDict = Dict[str, List[Tuple[str, Any]]]
def instances(cursor: CursorResult[Any], context: QueryContext) -> Result[Any]:
"""Return a :class:`.Result` given an ORM query context.
:param cursor: a :class:`.CursorResult`, generated by a statement
which came from :class:`.ORMCompileState`
:param context: a :class:`.QueryContext` object
:return: a :class:`.Result` object representing ORM results
.. versionchanged:: 1.4 The instances() function now uses
:class:`.Result` objects and has an all new interface.
"""
context.runid = _new_runid()
context.post_load_paths = {}
compile_state = context.compile_state
filtered = compile_state._has_mapper_entities
single_entity = (
not context.load_options._only_return_tuples
and len(compile_state._entities) == 1
and compile_state._entities[0].supports_single_entity
)
try:
(process, labels, extra) = list(
zip(
*[
query_entity.row_processor(context, cursor)
for query_entity in context.compile_state._entities
]
)
)
if context.yield_per and (
context.loaders_require_buffering
or context.loaders_require_uniquing
):
raise sa_exc.InvalidRequestError(
"Can't use yield_per with eager loaders that require uniquing "
"or row buffering, e.g. joinedload() against collections "
"or subqueryload(). Consider the selectinload() strategy "
"for better flexibility in loading objects."
)
except Exception:
with util.safe_reraise():
cursor.close()
def _no_unique(entry):
raise sa_exc.InvalidRequestError(
"Can't use the ORM yield_per feature in conjunction with unique()"
)
def _not_hashable(datatype):
def go(obj):
raise sa_exc.InvalidRequestError(
"Can't apply uniqueness to row tuple containing value of "
"type %r; this datatype produces non-hashable values"
% datatype
)
return go
if context.load_options._legacy_uniquing:
unique_filters = [
_no_unique
if context.yield_per
else id
if (
ent.use_id_for_hash
or ent._non_hashable_value
or ent._null_column_type
)
else None
for ent in context.compile_state._entities
]
else:
unique_filters = [
_no_unique
if context.yield_per
else _not_hashable(ent.column.type) # type: ignore
if (not ent.use_id_for_hash and ent._non_hashable_value)
else id
if ent.use_id_for_hash
else None
for ent in context.compile_state._entities
]
row_metadata = SimpleResultMetaData(
labels, extra, _unique_filters=unique_filters
)
def chunks(size): # type: ignore
while True:
yield_per = size
context.partials = {}
if yield_per:
fetch = cursor.fetchmany(yield_per)
if not fetch:
break
else:
fetch = cursor._raw_all_rows()
if single_entity:
proc = process[0]
rows = [proc(row) for row in fetch]
else:
rows = [
tuple([proc(row) for proc in process]) for row in fetch
]
for path, post_load in context.post_load_paths.items():
post_load.invoke(context, path)
yield rows
if not yield_per:
break
if context.execution_options.get("prebuffer_rows", False):
# this is a bit of a hack at the moment.
# I would rather have some option in the result to pre-buffer
# internally.
_prebuffered = list(chunks(None))
def chunks(size):
return iter(_prebuffered)
result = ChunkedIteratorResult(
row_metadata,
chunks,
source_supports_scalars=single_entity,
raw=cursor,
dynamic_yield_per=cursor.context._is_server_side,
)
# filtered and single_entity are used to indicate to legacy Query that the
# query has ORM entities, so legacy deduping and scalars should be called
# on the result.
result._attributes = result._attributes.union(
dict(filtered=filtered, is_single_entity=single_entity)
)
# multi_row_eager_loaders OTOH is specific to joinedload.
if context.compile_state.multi_row_eager_loaders:
def require_unique(obj):
raise sa_exc.InvalidRequestError(
"The unique() method must be invoked on this Result, "
"as it contains results that include joined eager loads "
"against collections"
)
result._unique_filter_state = (None, require_unique)
if context.yield_per:
result.yield_per(context.yield_per)
return result
@util.preload_module("sqlalchemy.orm.context")
def merge_frozen_result(session, statement, frozen_result, load=True):
"""Merge a :class:`_engine.FrozenResult` back into a :class:`_orm.Session`,
returning a new :class:`_engine.Result` object with :term:`persistent`
objects.
See the section :ref:`do_orm_execute_re_executing` for an example.
.. seealso::
:ref:`do_orm_execute_re_executing`
:meth:`_engine.Result.freeze`
:class:`_engine.FrozenResult`
"""
querycontext = util.preloaded.orm_context
if load:
# flush current contents if we expect to load data
session._autoflush()
ctx = querycontext.ORMSelectCompileState._create_entities_collection(
statement, legacy=False
)
autoflush = session.autoflush
try:
session.autoflush = False
mapped_entities = [
i
for i, e in enumerate(ctx._entities)
if isinstance(e, querycontext._MapperEntity)
]
keys = [ent._label_name for ent in ctx._entities]
keyed_tuple = result_tuple(
keys, [ent._extra_entities for ent in ctx._entities]
)
result = []
for newrow in frozen_result.rewrite_rows():
for i in mapped_entities:
if newrow[i] is not None:
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load,
_recursive={},
_resolve_conflict_map={},
)
result.append(keyed_tuple(newrow))
return frozen_result.with_new_rows(result)
finally:
session.autoflush = autoflush
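# Illustrative sketch (adapted from the documented "do_orm_execute" caching
# recipe, not part of this module): FrozenResult objects are kept in a plain
# dict and replayed through merge_frozen_result(). The cache dict and the
# "my_cache_key" execution option are placeholder names; the handler would be
# registered with event.listens_for(Session, "do_orm_execute").
_example_frozen_cache: Dict[Any, FrozenResult] = {}

def _example_do_orm_execute(orm_execute_state):
    cache_key = orm_execute_state.execution_options.get("my_cache_key")
    if cache_key is None:
        return None
    frozen = _example_frozen_cache.get(cache_key)
    if frozen is None:
        # first execution: run the statement and freeze its result for reuse
        frozen = orm_execute_state.invoke_statement().freeze()
        _example_frozen_cache[cache_key] = frozen
    # replay the frozen rows into the current Session; the trailing call
    # turns the merged FrozenResult back into an iterable Result
    return merge_frozen_result(
        orm_execute_state.session,
        orm_execute_state.statement,
        frozen,
        load=False,
    )()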
@util.became_legacy_20(
":func:`_orm.merge_result`",
alternative="The function as well as the method on :class:`_orm.Query` "
"is superseded by the :func:`_orm.merge_frozen_result` function.",
)
@util.preload_module("sqlalchemy.orm.context")
def merge_result(
query: Query[Any],
iterator: Union[FrozenResult, Iterable[Sequence[Any]], Iterable[object]],
load: bool = True,
) -> Union[FrozenResult, Iterable[Any]]:
"""Merge a result into the given :class:`.Query` object's Session.
See :meth:`_orm.Query.merge_result` for top-level documentation on this
function.
"""
querycontext = util.preloaded.orm_context
session = query.session
if load:
# flush current contents if we expect to load data
session._autoflush()
# TODO: need test coverage and documentation for the FrozenResult
# use case.
if isinstance(iterator, FrozenResult):
frozen_result = iterator
iterator = iter(frozen_result.data)
else:
frozen_result = None
ctx = querycontext.ORMSelectCompileState._create_entities_collection(
query, legacy=True
)
autoflush = session.autoflush
try:
session.autoflush = False
single_entity = not frozen_result and len(ctx._entities) == 1
if single_entity:
if isinstance(ctx._entities[0], querycontext._MapperEntity):
result = [
session._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load,
_recursive={},
_resolve_conflict_map={},
)
for instance in iterator
]
else:
result = list(iterator)
else:
mapped_entities = [
i
for i, e in enumerate(ctx._entities)
if isinstance(e, querycontext._MapperEntity)
]
result = []
keys = [ent._label_name for ent in ctx._entities]
keyed_tuple = result_tuple(
keys, [ent._extra_entities for ent in ctx._entities]
)
for row in iterator:
newrow = list(row)
for i in mapped_entities:
if newrow[i] is not None:
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load,
_recursive={},
_resolve_conflict_map={},
)
result.append(keyed_tuple(newrow))
if frozen_result:
return frozen_result.with_new_rows(result)
else:
return iter(result)
finally:
session.autoflush = autoflush
def get_from_identity(
session: Session,
mapper: Mapper[_O],
key: _IdentityKeyType[_O],
passive: PassiveFlag,
) -> Union[LoaderCallableStatus, Optional[_O]]:
"""Look up the given key in the given session's identity map,
check the object for expired state if found.
"""
instance = session.identity_map.get(key)
if instance is not None:
state = attributes.instance_state(instance)
if mapper.inherits and not state.mapper.isa(mapper):
return attributes.PASSIVE_CLASS_MISMATCH
# expired - ensure it still exists
if state.expired:
if not passive & attributes.SQL_OK:
# TODO: no coverage here
return attributes.PASSIVE_NO_RESULT
elif not passive & attributes.RELATED_OBJECT_OK:
# this mode is used within a flush and the instance's
# expired state will be checked soon enough, if necessary.
# also used by immediateloader for a mutually-dependent
# o2m->m2m load, :ticket:`6301`
return instance
try:
state._load_expired(state, passive)
except orm_exc.ObjectDeletedError:
session._remove_newly_deleted([state])
return None
return instance
else:
return None
def load_on_ident(
session: Session,
statement: Union[Select, FromStatement],
key: Optional[_IdentityKeyType],
*,
load_options: Optional[Sequence[ORMOption]] = None,
refresh_state: Optional[InstanceState[Any]] = None,
with_for_update: Optional[ForUpdateArg] = None,
only_load_props: Optional[Iterable[str]] = None,
no_autoflush: bool = False,
bind_arguments: Mapping[str, Any] = util.EMPTY_DICT,
execution_options: _ExecuteOptions = util.EMPTY_DICT,
):
"""Load the given identity key from the database."""
if key is not None:
ident = key[1]
identity_token = key[2]
else:
ident = identity_token = None
return load_on_pk_identity(
session,
statement,
ident,
load_options=load_options,
refresh_state=refresh_state,
with_for_update=with_for_update,
only_load_props=only_load_props,
identity_token=identity_token,
no_autoflush=no_autoflush,
bind_arguments=bind_arguments,
execution_options=execution_options,
)
def load_on_pk_identity(
session: Session,
statement: Union[Select, FromStatement],
primary_key_identity: Optional[Tuple[Any, ...]],
*,
load_options: Optional[Sequence[ORMOption]] = None,
refresh_state: Optional[InstanceState[Any]] = None,
with_for_update: Optional[ForUpdateArg] = None,
only_load_props: Optional[Iterable[str]] = None,
identity_token: Optional[Any] = None,
no_autoflush: bool = False,
bind_arguments: Mapping[str, Any] = util.EMPTY_DICT,
execution_options: _ExecuteOptions = util.EMPTY_DICT,
):
"""Load the given primary key identity from the database."""
query = statement
q = query._clone()
assert not q._is_lambda_element
# TODO: fix these imports ....
from .context import QueryContext, ORMCompileState
if load_options is None:
load_options = QueryContext.default_load_options
if (
statement._compile_options
is SelectState.default_select_compile_options
):
compile_options = ORMCompileState.default_compile_options
else:
compile_options = statement._compile_options
if primary_key_identity is not None:
mapper = query._propagate_attrs["plugin_subject"]
(_get_clause, _get_params) = mapper._get_clause
# None present in ident - turn those comparisons
# into "IS NULL"
if None in primary_key_identity:
nones = set(
[
_get_params[col].key
for col, value in zip(
mapper.primary_key, primary_key_identity
)
if value is None
]
)
_get_clause = sql_util.adapt_criterion_to_null(_get_clause, nones)
if len(nones) == len(primary_key_identity):
util.warn(
"fully NULL primary key identity cannot load any "
"object. This condition may raise an error in a future "
"release."
)
q._where_criteria = (
sql_util._deep_annotate(_get_clause, {"_orm_adapt": True}),
)
params = dict(
[
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(
primary_key_identity, mapper.primary_key
)
]
)
else:
params = None
if with_for_update is not None:
version_check = True
q._for_update_arg = with_for_update
elif query._for_update_arg is not None:
version_check = True
q._for_update_arg = query._for_update_arg
else:
version_check = False
if refresh_state and refresh_state.load_options:
compile_options += {"_current_path": refresh_state.load_path.parent}
q = q.options(*refresh_state.load_options)
new_compile_options, load_options = _set_get_options(
compile_options,
load_options,
version_check=version_check,
only_load_props=only_load_props,
refresh_state=refresh_state,
identity_token=identity_token,
)
q._compile_options = new_compile_options
q._order_by = None
if no_autoflush:
load_options += {"_autoflush": False}
execution_options = util.EMPTY_DICT.merge_with(
execution_options, {"_sa_orm_load_options": load_options}
)
result = (
session.execute(
q,
params=params,
execution_options=execution_options,
bind_arguments=bind_arguments,
)
.unique()
.scalars()
)
try:
return result.one()
except orm_exc.NoResultFound:
return None
def _set_get_options(
compile_opt,
load_opt,
populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None,
identity_token=None,
):
compile_options = {}
load_options = {}
if version_check:
load_options["_version_check"] = version_check
if populate_existing:
load_options["_populate_existing"] = populate_existing
if refresh_state:
load_options["_refresh_state"] = refresh_state
compile_options["_for_refresh_state"] = True
if only_load_props:
compile_options["_only_load_props"] = frozenset(only_load_props)
if identity_token:
load_options["_refresh_identity_token"] = identity_token
if load_options:
load_opt += load_options
if compile_options:
compile_opt += compile_options
return compile_opt, load_opt
def _setup_entity_query(
compile_state,
mapper,
query_entity,
path,
adapter,
column_collection,
with_polymorphic=None,
only_load_props=None,
polymorphic_discriminator=None,
**kw,
):
if with_polymorphic:
poly_properties = mapper._iterate_polymorphic_properties(
with_polymorphic
)
else:
poly_properties = mapper._polymorphic_properties
quick_populators = {}
path.set(compile_state.attributes, "memoized_setups", quick_populators)
# for the lead entities in the path, e.g. not eager loads, and
# assuming a user-passed aliased class, e.g. not a from_self() or any
# implicit aliasing, don't add columns to the SELECT that aren't
# in the thing that's aliased.
check_for_adapt = adapter and len(path) == 1 and path[-1].is_aliased_class
for value in poly_properties:
if only_load_props and value.key not in only_load_props:
continue
value.setup(
compile_state,
query_entity,
path,
adapter,
only_load_props=only_load_props,
column_collection=column_collection,
memoized_populators=quick_populators,
check_for_adapt=check_for_adapt,
**kw,
)
if (
polymorphic_discriminator is not None
and polymorphic_discriminator is not mapper.polymorphic_on
):
if adapter:
pd = adapter.columns[polymorphic_discriminator]
else:
pd = polymorphic_discriminator
column_collection.append(pd)
def _warn_for_runid_changed(state):
util.warn(
"Loading context for %s has changed within a load/refresh "
"handler, suggesting a row refresh operation took place. If this "
"event handler is expected to be "
"emitting row refresh operations within an existing load or refresh "
"operation, set restore_load_context=True when establishing the "
"listener to ensure the context remains unchanged when the event "
"handler completes." % (state_str(state),)
)
def _instance_processor(
query_entity,
mapper,
context,
result,
path,
adapter,
only_load_props=None,
refresh_state=None,
polymorphic_discriminator=None,
_polymorphic_from=None,
):
"""Produce a mapper level row processor callable
which processes rows into mapped instances."""
# note that this method, most of which exists in a closure
# called _instance(), resists being broken out, as
# attempts to do so tend to add significant function
# call overhead. _instance() is the most
# performance-critical section in the whole ORM.
identity_class = mapper._identity_class
compile_state = context.compile_state
# look for "row getter" functions that have been assigned along
# with the compile state that were cached from a previous load.
# these are operator.itemgetter() objects that each will extract a
# particular column from each row.
getter_key = ("getters", mapper)
getters = path.get(compile_state.attributes, getter_key, None)
if getters is None:
# no getters, so go through a list of attributes we are loading for,
# and the ones that are column based will have already put information
# for us in another collection "memoized_setups", which represents the
# output of the LoaderStrategy.setup_query() method. We can just as
# easily call LoaderStrategy.create_row_processor for each, but by
# getting it all at once from setup_query we save another method call
# per attribute.
props = mapper._prop_set
if only_load_props is not None:
props = props.intersection(
mapper._props[k] for k in only_load_props
)
quick_populators = path.get(
context.attributes, "memoized_setups", _none_set
)
todo = []
cached_populators = {
"new": [],
"quick": [],
"deferred": [],
"expire": [],
"delayed": [],
"existing": [],
"eager": [],
}
if refresh_state is None:
# we can also get the "primary key" tuple getter function
pk_cols = mapper.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
primary_key_getter = result._tuple_getter(pk_cols)
else:
primary_key_getter = None
getters = {
"cached_populators": cached_populators,
"todo": todo,
"primary_key_getter": primary_key_getter,
}
for prop in props:
if prop in quick_populators:
# this is an inlined path just for column-based attributes.
col = quick_populators[prop]
if col is _DEFER_FOR_STATE:
cached_populators["new"].append(
(prop.key, prop._deferred_column_loader)
)
elif col is _SET_DEFERRED_EXPIRED:
# note that in this path, we are no longer
# searching in the result to see if the column might
# be present in some unexpected way.
cached_populators["expire"].append((prop.key, False))
elif col is _RAISE_FOR_STATE:
cached_populators["new"].append(
(prop.key, prop._raise_column_loader)
)
else:
getter = None
if adapter:
# this logic had been removed for all 1.4 releases
# up until 1.4.18; the adapter here is particularly
# the compound eager adapter which isn't accommodated
# in the quick_populators right now. The "fallback"
# logic below instead took over in many more cases
# until issue #6596 was identified.
# note there is still an issue where this codepath
# produces no "getter" for cases where a joined-inh
# mapping includes a labeled column property, meaning
# KeyError is caught internally and we fall back to
# _getter(col), which works anyway. The adapter
# here for joined inh without any aliasing might not
# be useful. Tests which see this include
# test.orm.inheritance.test_basic ->
# EagerTargetingTest.test_adapt_stringency
# OptimizedLoadTest.test_column_expression_joined
# PolymorphicOnNotLocalTest.test_polymorphic_on_column_prop # noqa: E501
#
adapted_col = adapter.columns[col]
if adapted_col is not None:
getter = result._getter(adapted_col, False)
if not getter:
getter = result._getter(col, False)
if getter:
cached_populators["quick"].append((prop.key, getter))
else:
# fall back to the ColumnProperty itself, which
# will iterate through all of its columns
# to see if one fits
prop.create_row_processor(
context,
query_entity,
path,
mapper,
result,
adapter,
cached_populators,
)
else:
# loader strategies like subqueryload, selectinload,
# joinedload, basically relationships, these need to interact
# with the context each time to work correctly.
todo.append(prop)
path.set(compile_state.attributes, getter_key, getters)
cached_populators = getters["cached_populators"]
populators = {key: list(value) for key, value in cached_populators.items()}
for prop in getters["todo"]:
prop.create_row_processor(
context, query_entity, path, mapper, result, adapter, populators
)
propagated_loader_options = context.propagated_loader_options
load_path = (
context.compile_state.current_path + path
if context.compile_state.current_path.path
else path
)
session_identity_map = context.session.identity_map
populate_existing = context.populate_existing or mapper.always_refresh
load_evt = bool(mapper.class_manager.dispatch.load)
refresh_evt = bool(mapper.class_manager.dispatch.refresh)
persistent_evt = bool(context.session.dispatch.loaded_as_persistent)
if persistent_evt:
loaded_as_persistent = context.session.dispatch.loaded_as_persistent
instance_state = attributes.instance_state
instance_dict = attributes.instance_dict
session_id = context.session.hash_key
runid = context.runid
identity_token = context.identity_token
version_check = context.version_check
if version_check:
version_id_col = mapper.version_id_col
if version_id_col is not None:
if adapter:
version_id_col = adapter.columns[version_id_col]
version_id_getter = result._getter(version_id_col)
else:
version_id_getter = None
if not refresh_state and _polymorphic_from is not None:
key = ("loader", path.path)
if key in context.attributes and context.attributes[key].strategy == (
("selectinload_polymorphic", True),
):
selectin_load_via = mapper._should_selectin_load(
context.attributes[key].local_opts["entities"],
_polymorphic_from,
)
else:
selectin_load_via = mapper._should_selectin_load(
None, _polymorphic_from
)
if selectin_load_via and selectin_load_via is not _polymorphic_from:
# only_load_props goes w/ refresh_state only, and in a refresh
# we are a single row query for the exact entity; polymorphic
# loading does not apply
assert only_load_props is None
callable_ = _load_subclass_via_in(context, path, selectin_load_via)
PostLoad.callable_for_path(
context,
load_path,
selectin_load_via.mapper,
selectin_load_via,
callable_,
selectin_load_via,
)
post_load = PostLoad.for_context(context, load_path, only_load_props)
if refresh_state:
refresh_identity_key = refresh_state.key
if refresh_identity_key is None:
# super-rare condition; a refresh is being called
# on a non-instance-key instance; this is meant to only
# occur within a flush()
refresh_identity_key = mapper._identity_key_from_state(
refresh_state
)
else:
refresh_identity_key = None
primary_key_getter = getters["primary_key_getter"]
if mapper.allow_partial_pks:
is_not_primary_key = _none_set.issuperset
else:
is_not_primary_key = _none_set.intersection
def _instance(row):
# determine the state that we'll be populating
if refresh_identity_key:
# fixed state that we're refreshing
state = refresh_state
instance = state.obj()
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = True
loaded_instance = False
else:
# look at the row, see if that identity is in the
# session, or we have to create a new one
identitykey = (
identity_class,
primary_key_getter(row),
identity_token,
)
instance = session_identity_map.get(identitykey)
if instance is not None:
# existing instance
state = instance_state(instance)
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = not isnew
loaded_instance = False
if version_check and version_id_getter and not currentload:
_validate_version_id(
mapper, state, dict_, row, version_id_getter
)
else:
# create a new instance
# check for non-NULL values in the primary key columns,
# else no entity is returned for the row
if is_not_primary_key(identitykey[1]):
return None
isnew = True
currentload = True
loaded_instance = True
instance = mapper.class_manager.new_instance()
dict_ = instance_dict(instance)
state = instance_state(instance)
state.key = identitykey
state.identity_token = identity_token
# attach instance to session.
state.session_id = session_id
session_identity_map._add_unpresent(state, identitykey)
effective_populate_existing = populate_existing
if refresh_state is state:
effective_populate_existing = True
# populate. this looks at whether this state is new
# for this load or was existing, and whether or not this
# row is the first row with this identity.
if currentload or effective_populate_existing:
# full population routines. Objects here are either
# just created, or we are doing a populate_existing
# be conservative about setting load_path when populate_existing
# is in effect; want to maintain options from the original
# load. see test_expire->test_refresh_maintains_deferred_options
if isnew and (
propagated_loader_options or not effective_populate_existing
):
state.load_options = propagated_loader_options
state.load_path = load_path
_populate_full(
context,
row,
state,
dict_,
isnew,
load_path,
loaded_instance,
effective_populate_existing,
populators,
)
if isnew:
# state.runid should be equal to context.runid / runid
# here, however for event checks we are being more conservative
# and checking against existing run id
# assert state.runid == runid
existing_runid = state.runid
if loaded_instance:
if load_evt:
state.manager.dispatch.load(state, context)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
if persistent_evt:
loaded_as_persistent(context.session, state)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
elif refresh_evt:
state.manager.dispatch.refresh(
state, context, only_load_props
)
if state.runid != runid:
_warn_for_runid_changed(state)
if effective_populate_existing or state.modified:
if refresh_state and only_load_props:
state._commit(dict_, only_load_props)
else:
state._commit_all(dict_, session_identity_map)
if post_load:
post_load.add_state(state, True)
else:
# partial population routines, for objects that were already
# in the Session, but a row matches them; apply eager loaders
# on existing objects, etc.
unloaded = state.unloaded
isnew = state not in context.partials
if not isnew or unloaded or populators["eager"]:
# state is having a partial set of its attributes
# refreshed. Populate those attributes,
# and add to the "context.partials" collection.
to_load = _populate_partial(
context,
row,
state,
dict_,
isnew,
load_path,
unloaded,
populators,
)
if isnew:
if refresh_evt:
existing_runid = state.runid
state.manager.dispatch.refresh(state, context, to_load)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
state._commit(dict_, to_load)
if post_load and context.invoke_all_eagers:
post_load.add_state(state, False)
return instance
if mapper.polymorphic_map and not _polymorphic_from and not refresh_state:
# if we are doing polymorphic, dispatch to a different _instance()
# method specific to the subclass mapper
def ensure_no_pk(row):
identitykey = (
identity_class,
primary_key_getter(row),
identity_token,
)
if not is_not_primary_key(identitykey[1]):
return identitykey
else:
return None
_instance = _decorate_polymorphic_switch(
_instance,
context,
query_entity,
mapper,
result,
path,
polymorphic_discriminator,
adapter,
ensure_no_pk,
)
return _instance
def _load_subclass_via_in(context, path, entity):
mapper = entity.mapper
zero_idx = len(mapper.base_mapper.primary_key) == 1
if entity.is_aliased_class:
q, enable_opt, disable_opt = mapper._subclass_load_via_in(entity)
else:
q, enable_opt, disable_opt = mapper._subclass_load_via_in_mapper
def do_load(context, path, states, load_only, effective_entity):
orig_query = context.query
options = (enable_opt,) + orig_query._with_options + (disable_opt,)
q2 = q.options(*options)
q2._compile_options = context.compile_state.default_compile_options
q2._compile_options += {"_current_path": path.parent}
if context.populate_existing:
q2 = q2.execution_options(populate_existing=True)
context.session.execute(
q2,
dict(
primary_keys=[
state.key[1][0] if zero_idx else state.key[1]
for state, load_attrs in states
]
),
).unique().scalars().all()
return do_load
def _populate_full(
context,
row,
state,
dict_,
isnew,
load_path,
loaded_instance,
populate_existing,
populators,
):
if isnew:
# first time we are seeing a row with this identity.
state.runid = context.runid
for key, getter in populators["quick"]:
dict_[key] = getter(row)
if populate_existing:
for key, set_callable in populators["expire"]:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
else:
for key, set_callable in populators["expire"]:
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
populator(state, dict_, row)
elif load_path != state.load_path:
# new load path, e.g. object is present in more than one
# column position in a series of rows
state.load_path = load_path
# if we have data, and the data isn't in the dict, OK, let's put
# it in.
for key, getter in populators["quick"]:
if key not in dict_:
dict_[key] = getter(row)
# otherwise treat like an "already seen" row
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: allow "existing" populator to know this is
# a new path for the state:
# populator(state, dict_, row, new_path=True)
else:
# have already seen rows with this identity in this same path.
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: same path
# populator(state, dict_, row, new_path=False)
def _populate_partial(
context, row, state, dict_, isnew, load_path, unloaded, populators
):
if not isnew:
to_load = context.partials[state]
for key, populator in populators["existing"]:
if key in to_load:
populator(state, dict_, row)
else:
to_load = unloaded
context.partials[state] = to_load
for key, getter in populators["quick"]:
if key in to_load:
dict_[key] = getter(row)
for key, set_callable in populators["expire"]:
if key in to_load:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["eager"]:
if key not in unloaded:
populator(state, dict_, row)
return to_load
def _validate_version_id(mapper, state, dict_, row, getter):
if mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col
) != getter(row):
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
"does not match database-loaded version id '%s'."
% (
state_str(state),
mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col
),
getter(row),
)
)
def _decorate_polymorphic_switch(
instance_fn,
context,
query_entity,
mapper,
result,
path,
polymorphic_discriminator,
adapter,
ensure_no_pk,
):
if polymorphic_discriminator is not None:
polymorphic_on = polymorphic_discriminator
else:
polymorphic_on = mapper.polymorphic_on
if polymorphic_on is None:
return instance_fn
if adapter:
polymorphic_on = adapter.columns[polymorphic_on]
def configure_subclass_mapper(discriminator):
try:
sub_mapper = mapper.polymorphic_map[discriminator]
except KeyError:
raise AssertionError(
"No such polymorphic_identity %r is defined" % discriminator
)
else:
if sub_mapper is mapper:
return None
elif not sub_mapper.isa(mapper):
return False
return _instance_processor(
query_entity,
sub_mapper,
context,
result,
path,
adapter,
_polymorphic_from=mapper,
)
polymorphic_instances = util.PopulateDict(configure_subclass_mapper)
getter = result._getter(polymorphic_on)
def polymorphic_instance(row):
discriminator = getter(row)
if discriminator is not None:
_instance = polymorphic_instances[discriminator]
if _instance:
return _instance(row)
elif _instance is False:
identitykey = ensure_no_pk(row)
if identitykey:
raise sa_exc.InvalidRequestError(
"Row with identity key %s can't be loaded into an "
"object; the polymorphic discriminator column '%s' "
"refers to %s, which is not a sub-mapper of "
"the requested %s"
% (
identitykey,
polymorphic_on,
mapper.polymorphic_map[discriminator],
mapper,
)
)
else:
return None
else:
return instance_fn(row)
else:
identitykey = ensure_no_pk(row)
if identitykey:
raise sa_exc.InvalidRequestError(
"Row with identity key %s can't be loaded into an "
"object; the polymorphic discriminator column '%s' is "
"NULL" % (identitykey, polymorphic_on)
)
else:
return None
return polymorphic_instance
class PostLoad:
"""Track loaders and states for "post load" operations."""
__slots__ = "loaders", "states", "load_keys"
def __init__(self):
self.loaders = {}
self.states = util.OrderedDict()
self.load_keys = None
def add_state(self, state, overwrite):
# the states for a polymorphic load here are all shared
# within a single PostLoad object among multiple subtypes.
# Filtering of callables on a per-subclass basis needs to be done at
# the invocation level
self.states[state] = overwrite
def invoke(self, context, path):
if not self.states:
return
path = path_registry.PathRegistry.coerce(path)
for token, limit_to_mapper, loader, arg, kw in self.loaders.values():
states = [
(state, overwrite)
for state, overwrite in self.states.items()
if state.manager.mapper.isa(limit_to_mapper)
]
if states:
loader(context, path, states, self.load_keys, *arg, **kw)
self.states.clear()
@classmethod
def for_context(cls, context, path, only_load_props):
pl = context.post_load_paths.get(path.path)
if pl is not None and only_load_props:
pl.load_keys = only_load_props
return pl
@classmethod
    def path_exists(cls, context, path, key):
return (
path.path in context.post_load_paths
and key in context.post_load_paths[path.path].loaders
)
@classmethod
def callable_for_path(
cls, context, path, limit_to_mapper, token, loader_callable, *arg, **kw
):
if path.path in context.post_load_paths:
pl = context.post_load_paths[path.path]
else:
pl = context.post_load_paths[path.path] = PostLoad()
pl.loaders[token] = (token, limit_to_mapper, loader_callable, arg, kw)
def load_scalar_attributes(mapper, state, attribute_names, passive):
"""initiate a column-based attribute refresh operation."""
# assert mapper is _state_mapper(state)
session = state.session
if not session:
raise orm_exc.DetachedInstanceError(
"Instance %s is not bound to a Session; "
"attribute refresh operation cannot proceed" % (state_str(state))
)
has_key = bool(state.key)
result = False
no_autoflush = bool(passive & attributes.NO_AUTOFLUSH)
# in the case of inheritance, particularly concrete and abstract
# concrete inheritance, the class manager might have some keys
# of attributes on the superclass that we didn't actually map.
# These could be mapped as "concrete, don't load" or could be completely
# excluded from the mapping and we know nothing about them. Filter them
# here to prevent them from coming through.
if attribute_names:
attribute_names = attribute_names.intersection(mapper.attrs.keys())
if mapper.inherits and not mapper.concrete:
statement = mapper._optimized_get_statement(state, attribute_names)
if statement is not None:
from .query import FromStatement
# undefer() isn't needed here because statement has the
# columns needed already, this implicitly undefers that column
stmt = FromStatement(mapper, statement)
result = load_on_ident(
session,
stmt,
None,
only_load_props=attribute_names,
refresh_state=state,
no_autoflush=no_autoflush,
)
if result is False:
if has_key:
identity_key = state.key
else:
# this codepath is rare - only valid when inside a flush, and the
# object is becoming persistent but hasn't yet been assigned
# an identity_key.
# check here to ensure we have the attrs we need.
pk_attrs = [
mapper._columntoproperty[col].key for col in mapper.primary_key
]
if state.expired_attributes.intersection(pk_attrs):
raise sa_exc.InvalidRequestError(
"Instance %s cannot be refreshed - it's not "
" persistent and does not "
"contain a full primary key." % state_str(state)
)
identity_key = mapper._identity_key_from_state(state)
if (
_none_set.issubset(identity_key) and not mapper.allow_partial_pks
) or _none_set.issuperset(identity_key):
util.warn_limited(
"Instance %s to be refreshed doesn't "
"contain a full primary key - can't be refreshed "
"(and shouldn't be expired, either).",
state_str(state),
)
return
result = load_on_ident(
session,
future.select(mapper).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
identity_key,
refresh_state=state,
only_load_props=attribute_names,
no_autoflush=no_autoflush,
)
# if instance is pending, a refresh operation
# may not complete (even if PK attributes are assigned)
if has_key and result is None:
raise orm_exc.ObjectDeletedError(state)
|
the-stack_0_17844 | # -*- coding: utf-8 -*-
import sys
from .proxies.cnproxy import tapp
__version__ = '2.5.2.1'
global shared_app
shared_app = tapp
global batch
batch = False
global reopen
reopen = False
def getBatch():
global batch
return batch
def setBatch(is_batch):
global batch
batch = is_batch
def getReopen():
global reopen
return reopen
def setReopen(is_reopen):
global reopen
reopen = is_reopen
def app():
"""
The global cadnano application object.
"""
global shared_app
return shared_app
def initAppWithGui(app_args=None, do_exec=True):
"""
    Initializes the CadnanoQt object with arguments app_args, and then starts the
application main event loop if do_exec is True. We may want to delay
exec for checking user-provided args, or running automated tests.
See: https://doc.qt.io/qt-5/qapplication.html#exec
Args:
app_args (string): see util.py :func:`~cadnano.util.parse_args`
do_exec (bool): don't invoke exec yet
Returns:
shared_app (CadnanoQt): instance of app.
"""
global shared_app
from cadnano.cadnanoqt import CadnanoQt
# 1. Create the application object
shared_app = CadnanoQt(app_args)
# 2. Use the object to finish importing and creating
# application wide objects
shared_app.finishInit()
if do_exec:
sys.exit(shared_app.exec_())
return shared_app
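# Example (sketch): start the app without entering the Qt event loop right away,
# e.g. to inspect state or run automated checks first; only `do_exec` is taken from
# the signature above, the rest is illustrative.
#   app = initAppWithGui(do_exec=False)
#   ...
#   sys.exit(app.exec_())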
|
the-stack_0_17846 | import click
from kfp import client
from kfp.cli import output
from kfp.cli.utils import parsing
from kfp_server_api.models.api_experiment import ApiExperiment
@click.group()
def experiment():
"""Manage experiment resources."""
pass
@experiment.command()
@click.option(
'-d',
'--description',
help=parsing.get_param_descr(client.Client.create_experiment,
'description'))
@click.argument('name')
@click.pass_context
def create(ctx: click.Context, description: str, name: str):
"""Create an experiment."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
experiment = client_obj.create_experiment(name, description=description)
output.print_output(
experiment,
output.ModelType.EXPERIMENT,
output_format,
)
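# Example (sketch): assuming the package's standard `kfp` console entry point,
# this command would be invoked as something like:
#   kfp experiment create my-experiment -d "ad-hoc runs"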
@experiment.command()
@click.option(
'--page-token',
default='',
help=parsing.get_param_descr(client.Client.list_experiments, 'page_token'))
@click.option(
'-m',
'--max-size',
default=100,
help=parsing.get_param_descr(client.Client.list_experiments, 'page_size'))
@click.option(
'--sort-by',
default='created_at desc',
help=parsing.get_param_descr(client.Client.list_experiments, 'sort_by'))
@click.option(
'--filter',
help=parsing.get_param_descr(client.Client.list_experiments, 'filter'))
@click.pass_context
def list(ctx: click.Context, page_token: str, max_size: int, sort_by: str,
filter: str):
"""List experiments."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
response = client_obj.list_experiments(
page_token=page_token,
page_size=max_size,
sort_by=sort_by,
filter=filter)
output.print_output(
response.experiments or [],
output.ModelType.EXPERIMENT,
output_format,
)
@experiment.command()
@click.argument('experiment-id')
@click.pass_context
def get(ctx: click.Context, experiment_id: str):
"""Get information about an experiment."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
experiment = client_obj.get_experiment(experiment_id)
output.print_output(
experiment,
output.ModelType.EXPERIMENT,
output_format,
)
@experiment.command()
@click.argument('experiment-id')
@click.pass_context
def delete(ctx: click.Context, experiment_id: str):
"""Delete an experiment."""
confirmation = 'Caution. The RunDetails page could have an issue' \
' when it renders a run that has no experiment.' \
' Do you want to continue?'
if not click.confirm(confirmation):
return
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
client_obj.delete_experiment(experiment_id)
output.print_deleted_text('experiment', experiment_id, output_format)
either_option_required = 'Either --experiment-id or --experiment-name is required.'
@experiment.command()
@click.option(
'--experiment-id',
default=None,
help=parsing.get_param_descr(client.Client.archive_experiment,
'experiment_id') + ' ' + either_option_required
)
@click.option(
'--experiment-name',
default=None,
help='Name of the experiment.' + ' ' + either_option_required)
@click.pass_context
def archive(ctx: click.Context, experiment_id: str, experiment_name: str):
"""Archive an experiment."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
if (experiment_id is None) == (experiment_name is None):
raise ValueError(either_option_required)
if not experiment_id:
experiment = client_obj.get_experiment(experiment_name=experiment_name)
experiment_id = experiment.id
client_obj.archive_experiment(experiment_id=experiment_id)
if experiment_id:
experiment = client_obj.get_experiment(experiment_id=experiment_id)
else:
experiment = client_obj.get_experiment(experiment_name=experiment_name)
output.print_output(
experiment,
output.ModelType.EXPERIMENT,
output_format,
)
@experiment.command()
@click.option(
'--experiment-id',
default=None,
help=parsing.get_param_descr(client.Client.unarchive_experiment,
'experiment_id') + ' ' + either_option_required
)
@click.option(
'--experiment-name',
default=None,
help='Name of the experiment.' + ' ' + either_option_required)
@click.pass_context
def unarchive(ctx: click.Context, experiment_id: str, experiment_name: str):
"""Unarchive an experiment."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
if (experiment_id is None) == (experiment_name is None):
raise ValueError(either_option_required)
if not experiment_id:
experiment = client_obj.get_experiment(experiment_name=experiment_name)
experiment_id = experiment.id
client_obj.unarchive_experiment(experiment_id=experiment_id)
if experiment_id:
experiment = client_obj.get_experiment(experiment_id=experiment_id)
else:
experiment = client_obj.get_experiment(experiment_name=experiment_name)
output.print_output(
experiment,
output.ModelType.EXPERIMENT,
output_format,
)
|
the-stack_0_17847 | import os
import math
import torch
import numpy as np
from typing import *
from tqdm.autonotebook import tqdm
from cftool.misc import update_dict
from cftool.misc import shallow_copy_dict
from cfdata.tabular import task_type_type
from cftool.ml.utils import collate_fn_type
from cftool.ml.utils import Metrics
from torch.nn.functional import one_hot
from .basic import *
from ..misc.toolkit import *
from .register import register_metric
from ..types import data_type
from ..pipeline import Pipeline
from ..protocol import DataProtocol
class EnsembleResults(NamedTuple):
data: DataProtocol
pipelines: List[Pipeline]
pattern_weights: Optional[np.ndarray]
predict_config: Optional[Dict[str, Any]]
@property
def pattern(self) -> EnsemblePattern:
predict_config = self.predict_config or {}
patterns = [m.to_pattern(**predict_config) for m in self.pipelines]
return Ensemble.stacking(patterns, pattern_weights=self.pattern_weights)
class MetricsPlaceholder(NamedTuple):
config: Dict[str, Any]
class Ensemble:
def __init__(
self,
task_type: task_type_type,
config: Optional[Dict[str, Any]] = None,
):
self.task_type = task_type
if config is None:
config = {}
self.config = shallow_copy_dict(config)
@staticmethod
def stacking(
patterns: List[ModelPattern],
*,
pattern_weights: Optional[np.ndarray] = None,
ensemble_method: Optional[Union[str, collate_fn_type]] = None,
) -> EnsemblePattern:
if ensemble_method is None:
if pattern_weights is None:
ensemble_method = "default"
else:
if abs(pattern_weights.sum() - 1.0) > 1e-4:
raise ValueError("`pattern_weights` should sum to 1.0")
pattern_weights = pattern_weights.reshape([-1, 1, 1])
def ensemble_method(
arrays: List[np.ndarray],
requires_prob: bool,
) -> np.ndarray:
shape = [len(arrays), len(arrays[0]), -1]
predictions = np.array(arrays).reshape(shape)
if requires_prob or not is_int(predictions):
return (predictions * pattern_weights).sum(axis=0)
encodings = one_hot(to_torch(predictions).to(torch.long).squeeze())
encodings = encodings.to(torch.float32)
weighted = (encodings * pattern_weights).sum(dim=0)
return to_numpy(weighted.argmax(1)).reshape([-1, 1])
return EnsemblePattern(patterns, ensemble_method)
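    # Example (sketch): equal-weight stacking of three trained patterns, where `patterns`
    # would typically come from [m.to_pattern() for m in pipelines]:
    #   ensemble = Ensemble.stacking(patterns, pattern_weights=np.array([1 / 3] * 3))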
def bagging(
self,
x: data_type,
y: data_type = None,
*,
k: int = 10,
num_jobs: int = 1,
model: str = "fcnn",
model_configs: Optional[Dict[str, Dict[str, Any]]] = None,
predict_config: Optional[Dict[str, Any]] = None,
sequential: Optional[bool] = None,
temp_folder: str = "__tmp__",
return_patterns: bool = True,
use_tqdm: bool = True,
) -> EnsembleResults:
repeat_result = repeat_with(
x,
y,
models=model,
model_configs=model_configs,
predict_config=predict_config,
sequential=sequential,
num_jobs=num_jobs,
num_repeat=k,
temp_folder=temp_folder,
return_patterns=return_patterns,
use_tqdm=use_tqdm,
**self.config,
)
data = repeat_result.data
pipelines = repeat_result.pipelines
assert data is not None and pipelines is not None
return EnsembleResults(data, pipelines[model], None, predict_config)
def adaboost(
self,
x: data_type,
y: data_type = None,
*,
k: int = 10,
eps: float = 1e-12,
model: str = "fcnn",
temp_folder: str = "__tmp__",
predict_config: Optional[Dict[str, Any]] = None,
increment_config: Optional[Dict[str, Any]] = None,
sample_weights: Optional[np.ndarray] = None,
) -> EnsembleResults:
if increment_config is None:
increment_config = {}
config = shallow_copy_dict(self.config)
update_dict(increment_config, config)
config["cv_split"] = 0.0
config.setdefault("use_tqdm", False)
config.setdefault("use_binary_threshold", False)
config.setdefault("verbose_level", 0)
@register_metric("adaboost_error", -1, False)
def adaboost_error(
self_: Union[Metrics, MetricsPlaceholder],
target_: np.ndarray,
predictions_: np.ndarray,
) -> float:
target_ = target_.astype(np.float32)
predictions_ = predictions_.astype(np.float32)
sample_weights_ = self_.config.get("sample_weights")
errors = (target_ != predictions_).ravel()
if sample_weights_ is None:
e_ = errors.mean()
else:
e_ = sample_weights_[errors].sum() / len(errors)
return e_.item()
data = None
pipelines = []
patterns, pattern_weights = [], []
for i in tqdm(list(range(k))):
cfg = shallow_copy_dict(config)
cfg["logging_folder"] = os.path.join(temp_folder, str(i))
metric_config = {"sample_weights": sample_weights}
if sample_weights is not None:
cfg["metric_config"] = {
"types": "adaboost_error",
"adaboost_error_config": metric_config,
}
m = make(model=model, **cfg)
m.fit(x, y, sample_weights=sample_weights)
metrics_placeholder = MetricsPlaceholder(metric_config)
predictions: np.ndarray = m.predict(x, contains_labels=True)
predictions = predictions.astype(np.float32)
target = m.data.processed.y.astype(np.float32)
e = adaboost_error(metrics_placeholder, target, predictions)
em = min(max(e, eps), 1.0 - eps)
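            # standard AdaBoost stage weight: am grows as the clipped error em shrinks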
am = 0.5 * math.log(1.0 / em - 1.0)
if sample_weights is None:
sample_weights = np.ones_like(predictions).ravel()
target[target == 0.0] = predictions[predictions == 0.0] = -1.0
sample_weights *= np.exp(-am * target * predictions).ravel()
sample_weights /= np.mean(sample_weights)
patterns.append(m.to_pattern())
pattern_weights.append(am)
if data is None:
data = m.data
pipelines.append(m)
weights_array = np.array(pattern_weights, np.float32)
weights_array /= weights_array.sum()
assert data is not None
return EnsembleResults(data, pipelines, weights_array, predict_config)
__all__ = [
"Ensemble",
"EnsembleResults",
]
|
the-stack_0_17848 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from paddle import nn
import paddle.nn.functional as F
from paddleseg.cvlibs import manager
@manager.LOSSES.add_component
class MixedLoss(nn.Layer):
"""
    Weighted combination of multiple losses.
    The advantage is that mixed-loss training can be achieved without changing the networking code.
    Args:
        losses (list of nn.Layer): A list of loss layers to combine.
        coef (list of float|int): Weighting coefficient for each loss.
Returns:
A callable object of MixedLoss.
"""
def __init__(self, losses, coef):
super(MixedLoss, self).__init__()
if not isinstance(losses, list):
raise TypeError('`losses` must be a list!')
if not isinstance(coef, list):
raise TypeError('`coef` must be a list!')
len_losses = len(losses)
len_coef = len(coef)
if len_losses != len_coef:
raise ValueError(
'The length of `losses` should equal to `coef`, but they are {} and {}.'
.format(len_losses, len_coef))
self.losses = losses
self.coef = coef
def forward(self, logits, labels):
loss_list = []
final_output = 0
for i, loss in enumerate(self.losses):
output = loss(logits, labels)
final_output += output * self.coef[i]
return final_output
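# Example (sketch): a 0.8/0.2 mix of two losses; CrossEntropyLoss and DiceLoss are
# assumed to be available from paddleseg.models.losses.
#   mixed = MixedLoss(losses=[CrossEntropyLoss(), DiceLoss()], coef=[0.8, 0.2])
#   loss = mixed(logits, labels)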
|
the-stack_0_17853 | import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
import Tkinter as tk
import ttk
LARGE_FONT= ("Verdana", 12)
style.use("ggplot")
f = Figure(figsize=(5,5), dpi=100)
a = f.add_subplot(111)
def animate(i):
pullData = open("matplot-text.txt","r").read()
dataList = pullData.split('\n')
xList = []
yList = []
for eachLine in dataList:
if len(eachLine) > 1:
x, y = eachLine.split(',')
xList.append(int(x))
yList.append(float(y))
a.clear()
a.plot(xList, yList)
class RNclient(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
# tk.Tk.iconbitmap(self, default="clienticon.ico")
tk.Tk.wm_title(self, "RN client")
container = tk.Frame(self)
container.pack(side="top", fill="both", expand = True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
self.frames = {}
frame = PageThree(container, self)
self.frames[PageThree] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame(PageThree)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
class PageThree(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Error rate for a perceptron", font=LARGE_FONT)
label.pack(pady=10,padx=10)
canvas = FigureCanvasTkAgg(f, self)
canvas.show()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
toolbar = NavigationToolbar2TkAgg(canvas, self)
toolbar.update()
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
app = RNclient()
ani = animation.FuncAnimation(f, animate, frames=20, interval=1000)
app.mainloop()
|
the-stack_0_17854 | import numpy as np
import itertools
def get_pad_shape(auto_pad, input_spatial_shape, kernel_spatial_shape, strides_spatial, output_spatial_shape):
pad_shape = [0] * len(input_spatial_shape)
if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):
for i in range(len(input_spatial_shape)):
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial[i] + kernel_spatial_shape[i] - \
input_spatial_shape[i]
elif auto_pad == 'VALID':
pass
return pad_shape
def get_output_shape(auto_pad, input_spatial_shape, kernel_spatial_shape, strides_spatial):
out_shape = [0] * len(input_spatial_shape)
if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):
for i in range(len(input_spatial_shape)):
out_shape[i] = int(np.ceil(float(input_spatial_shape[i]) / float(strides_spatial[i])))
elif auto_pad == 'VALID':
for i in range(len(input_spatial_shape)):
out_shape[i] = int(
np.ceil(float(input_spatial_shape[i] - (kernel_spatial_shape[i] - 1)) / float(strides_spatial[i])))
return out_shape
def pool(padded, x_shape, kernel_shape, strides_shape, out_shape, pad_shape, pooling_type):
spatial_size = len(x_shape) - 2
y = np.zeros([x_shape[0], x_shape[1]] + list(out_shape))
for shape in itertools.product(range(x_shape[0]),
range(x_shape[1]),
*[range(
int((x_shape[i + 2] + pad_shape[i] - kernel_shape[i]) / strides_shape[i] + 1))
for i in range(spatial_size)]):
window = padded[shape[0], shape[1]]
window_vals = np.array([window[i] for i in list(
itertools.product(
*[range(strides_shape[i] * shape[i + 2], strides_shape[i] * shape[i + 2] + kernel_shape[i]) for i in
range(spatial_size)])
)])
if pooling_type == 'AVG':
f = np.average
elif pooling_type == 'MAX':
f = np.max
else:
            raise NotImplementedError('Pooling type {} is not supported. Should be AVG or MAX.'.format(pooling_type))
y[shape] = f(window_vals[np.where(~np.isnan(window_vals))])
return y.astype(np.float32)
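# Example (sketch): 2x2 MAX pooling with stride 2 and VALID padding on a 1x1x4x4 input;
# with VALID padding no actual padding is needed, so the raw input is passed as `padded`.
#   x = np.random.rand(1, 1, 4, 4).astype(np.float32)
#   out_shape = get_output_shape('VALID', x.shape[2:], [2, 2], [2, 2])
#   pad_shape = get_pad_shape('VALID', x.shape[2:], [2, 2], [2, 2], out_shape)
#   y = pool(x, x.shape, [2, 2], [2, 2], out_shape, pad_shape, 'MAX')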
|
the-stack_0_17855 | import sys
import esp
import machine
from time import sleep
# copy with
# ampy --port /dev/ttyUSB0 put minimal/minimal-app.py humtemp.py
#
# or
# mpy-cross -O2 -o minimal/humtemp.mpy minimal/minimal-app.py
# ampy --port /dev/ttyUSB0 put minimal/humtemp.mpy humtemp.mpy
def print_mpy_version():
sys_mpy = sys.implementation.mpy
arch = [None, 'x86', 'x64',
'armv6', 'armv6m', 'armv7m', 'armv7em', 'armv7emsp', 'armv7emdp',
'xtensa', 'xtensawin'][sys_mpy >> 10]
print('mpy version:', sys_mpy & 0xff)
print('mpy flags:', end='')
if arch:
print(' -march=' + arch, end='')
if sys_mpy & 0x100:
print(' -mcache-lookup-bc', end='')
if not sys_mpy & 0x200:
print(' -mno-unicode', end='')
print()
def start():
print("starting")
fwok = esp.check_fw()
print("fw ok: "+ str(fwok))
ledpin = machine.Pin(0, machine.Pin.OUT)
def led_on():
ledpin.value(0)
def led_off():
ledpin.value(1)
def blink(cycles = 2):
for i in range(cycles):
led_on()
sleep(0.3)
led_off()
if i != cycles - 1:
sleep(0.3)
print("blinking 10")
blink(10)
print("blinking 10")
blink(10)
print("done")
print_mpy_version()
start()
|
the-stack_0_17857 | import re
from typing import Dict
from yarl import URL
import aiohttp
from aiohttp.web import (Application,
HTTPException,
Request, Response, get,
json_response, middleware, post, HTTPSeeOther)
from aiohttp.client import ClientSession
from injector import inject, singleton, Injector, provider
from backup.time import Time
from backup.logger import getLogger
from backup.server import Server
from tests.faketime import FakeTime
from backup.module import BaseModule
from backup.config import Config, Setting
from .http_exception import HttpMultiException
from .simulated_google import SimulatedGoogle
from .base_server import BaseServer
from .ports import Ports
from .request_interceptor import RequestInterceptor
from .simulated_supervisor import SimulatedSupervisor
from .apiingress import APIIngress
import aiorun
logger = getLogger(__name__)
mimeTypeQueryPattern = re.compile("^mimeType='.*'$")
parentsQueryPattern = re.compile("^'.*' in parents$")
bytesPattern = re.compile("^bytes \\d+-\\d+/\\d+$")
resumeBytesPattern = re.compile("^bytes \\*/\\d+$")
intPattern = re.compile("\\d+")
rangePattern = re.compile("bytes=\\d+-\\d+")
@singleton
class SimulationServer(BaseServer):
@inject
def __init__(self, ports: Ports, time: Time, session: ClientSession, authserver: Server, config: Config, google: SimulatedGoogle, supervisor: SimulatedSupervisor, api_ingress: APIIngress, interceptor: RequestInterceptor):
self.interceptor = interceptor
self.google = google
self.supervisor = supervisor
self.config = config
self.id_counter = 0
self.files: Dict[str, bytearray] = {}
self._port = ports.server
self._time: FakeTime = time
self.urls = []
self.relative = True
self._authserver = authserver
self._api_ingress = api_ingress
def wasUrlRequested(self, pattern):
for url in self.urls:
if pattern in url:
return True
return False
def blockSnapshots(self):
self.block_snapshots = True
def unBlockSnapshots(self):
self.block_snapshots = False
async def uploadfile(self, request: Request):
name: str = str(request.query.get("name", "test"))
self.files[name] = await self.readAll(request)
return Response(text="")
async def readFile(self, request: Request):
return self.serve_bytes(request, self.files[request.query.get("name", "test")])
async def slugRedirect(self, request: Request):
raise HTTPSeeOther("https://localhost:" + str(self.config.get(Setting.INGRESS_PORT)))
@middleware
async def error_middleware(self, request: Request, handler):
self.urls.append(str(request.url))
resp = await self.interceptor.checkUrl(request)
if resp is not None:
return resp
try:
resp = await handler(request)
return resp
except Exception as ex:
await self.readAll(request)
if isinstance(ex, HttpMultiException):
return Response(status=ex.status_code)
elif isinstance(ex, HTTPException):
raise
else:
logger.printException(ex)
return json_response(str(ex), status=500)
def createApp(self):
app = Application(middlewares=[self.error_middleware])
app.add_routes(self.routes())
self._authserver.buildApp(app)
return app
async def start(self, port):
self.runner = aiohttp.web.AppRunner(self.createApp())
await self.runner.setup()
site = aiohttp.web.TCPSite(self.runner, "0.0.0.0", port=port)
await site.start()
async def stop(self):
await self.runner.shutdown()
await self.runner.cleanup()
def routes(self):
return [
get('/readfile', self.readFile),
post('/uploadfile', self.uploadfile),
get('/ingress/self_slug', self.slugRedirect),
get('/debug/config', self.debug_config)
] + self.google.routes() + self.supervisor.routes() + self._api_ingress.routes()
async def debug_config(self, request: Request):
return json_response(self.supervisor._options)
class SimServerModule(BaseModule):
def __init__(self, base_url: URL):
super().__init__(override_dns=False)
self._base_url = base_url
@provider
@singleton
def getConfig(self) -> Config:
return Config.withOverrides({
Setting.DRIVE_AUTHORIZE_URL: str(self._base_url.with_path("o/oauth2/v2/auth")),
Setting.AUTHENTICATE_URL: str(self._base_url.with_path("drive/authorize")),
Setting.DRIVE_TOKEN_URL: str(self._base_url.with_path("token")),
Setting.DRIVE_REFRESH_URL: str(self._base_url.with_path("oauth2/v4/token")),
Setting.INGRESS_PORT: 56152
})
@provider
@singleton
def getPorts(self) -> Ports:
return Ports(56153, 56151, 56152)
async def main():
port = 56153
base = URL("http://localhost").with_port(port)
injector = Injector(SimServerModule(base))
server = injector.get(SimulationServer)
# start the server
runner = aiohttp.web.AppRunner(server.createApp())
await runner.setup()
site = aiohttp.web.TCPSite(runner, "0.0.0.0", port=port)
await site.start()
print("Server started on port " + str(port))
print("Open a browser at http://localhost:" + str(port))
if __name__ == '__main__':
aiorun.run(main())
|
the-stack_0_17858 | #!/usr/bin/env python
# Edom Moges (ESDL)
# ## This module contains the TE code translation from Matlab
#
# (MatLab version written by Laurel L. and modified by Dino B. Translation to Python by Edom M.)
#
# The following functions are included in this module:
#
# 1. Mutual information
#
# 1. mutinfo_new(M, nbins) - Calculates mutual information I(x,y).
#
#
# 2. Tranfer entropy
#
# 1. transen_new(M, lag, nbins) - Calculates transfer entropy TE(x->y) from x to y, where x is the source M[:,0] and y is the sink M[:,1].
#
#
# 3. Intermediate functions
#
# 1. LagData_new - shifts a matrix so that it is rearranged to be ready for TE calculation as in Knutt et al., 2005
# 2. jointentropy_new(M, nbins) - Calculates the joint entropy H(x,y)
# 3. jointentropy3_new(M, nbins) - Calculates the joint entropy for three variables H(x,y,z)
# 4. shuffle( M ) - shuffles the entries of the matrix M in time while keeping NaNs (blank data values) as NaNs, so that Monte Carlo significance testing is possible
# 5. transenshuffle_new(M, lag, nbins) - Calculates the transfer entropy for a shuffled time series that has already been lined up with LagData
#
#
# 4. Monte Carlo analysis of mutual information and transfer entropy
#
# 1. mutinfo_crit_new( M, nbins, alpha, numiter) - Finds the critical value of the mutual information statistic that needs to be exceeded for statistical significance
# 2. transen_crit_new( M, lag, alpha, numiter, nbins) - Finds the critical value of the transfer entropy statistic that needs to be exceeded for statistical significance
#
#
# 5. All in one code
# RunNewTE2VarsPar(DataMatrix, LabelCell, shift, SinkNodes=None, SourceNodes=None,
# maxLag=100, minSamples=200, numShuffles = 500, sigLevel=0.05, numBins=[11,11,11], ncores=4) - runs everything together in parallel mode.
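#
# A minimal usage sketch (assumptions: a two-column array M = [source, sink];
# `precip` and `flow` below are hypothetical series):
#
#   M = np.column_stack((precip, flow))
#   I, n = mutinfo_new(M, nbins=[11, 11, 11])                              # normalized mutual information
#   T, N, M4short = transen_new2(M, shift=[-5, 0, -1], nbins=[11, 11, 11]) # TE at source lag 5, sink lag 1
#   Tcrit = transen_crit_new2(M, [-5, 0, -1], alpha=0.05, numiter=500, nbins=[11, 11, 11], ncores=4)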
#get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import copy
import os
from joblib import Parallel, delayed
from tqdm import tqdm
np.random.seed(50)
# In[2]:
def checkMakeDir2(dirName): #
result = dirName
result2 = dirName*2
return result, result2
# ### Mutual information
# In[8]:
def computeEntropy(M,nbin):
N, binEdges1d=np.histogram(M[np.isfinite(M)],bins=nbin) #Which bin the data column is in
p2 = N/sum(N)
# Shanon entropy
p2gt0 = p2[p2>0] # py
log2p2gt0 = np.log2(p2gt0)
H = (-sum(p2gt0*log2p2gt0))
return H
def mutinfo_new(M2, nbins):
# Calculates mutual information with the sink variable's H used as a normalization
# M is an array with two columns [ source, sink]
# nbins list of number of bins in 1D, 2D and 3D, with three elements
ths = 10e-4
M = M2[~np.isnan(M2).any(axis=1)] # clears the nans at both columns
counts1, binEdges1=np.histogram(M[:,0][np.isfinite(M[:,0])],bins=nbins[1]) # Source Variable
binEdges1[0] = binEdges1[0]-ths
binEdges1[len(binEdges1)-1]=binEdges1[len(binEdges1)-1]+ths
col1cat = np.digitize(M[:,0], binEdges1, right=False)
col1cat[col1cat==nbins[1]+1] = 0. # change the bin index of nan to zero. np assigns it to nbins[1]+1 @@@
counts2, binEdges2=np.histogram(M[:,1][np.isfinite(M[:,1])],bins=nbins[1]) # Sink Variable
binEdges2[0] = binEdges2[0]-ths
binEdges2[len(binEdges2)-1]=binEdges2[len(binEdges2)-1]+ths
col2cat = np.digitize(M[:,1], binEdges2, right=False) # which bin (ID) is the data located
col2cat[col2cat==nbins[1]+1] = 0. # change the bin index of nan to zero. np assigns it to nbins[1]+1 @@@
col1cat[col2cat==0.]=0. # If there is an NaN for any row, assign the other column in that row to the NaN bin too @@@
col2cat[col1cat==0.]=0. # See comment above. @@@
#print(col1cat)
# convert 1D histogram to a 2D histogram
    jointentcat = (col1cat-1)*nbins[1]+col2cat # This classifies the joint entropy bin into a number between 1 and nbins^2. 0 is assigned to rows with missing data.
nbins_2 = nbins[1]**2
#N = np.bincount(jointentcat)[1:] # Number of datapoints within each joint entropy bin.
N = np.bincount(jointentcat[jointentcat>0])[1:] # Number of datapoints within each joint entropy bin. @@@
p = N/sum(N); # Vector of probabilities
# 1D probability/histogram
N1, binEdges1d1=np.histogram(M[:,0][np.isfinite(M[:,0])],bins=nbins[0]) # Which bin the first data column is in
N2, binEdges1d2=np.histogram(M[:,1][np.isfinite(M[:,1])],bins=nbins[0]) #Which bin the second data column is in
p1 = N1/sum(N1)
p2 = N2/sum(N2)
# Shanon entropy
pgt0 = p[p>0] # px,y
p1gt0 = p1[p1>0] # px
p2gt0 = p2[p2>0] # py
log2p2gt0 = np.log2(p2gt0)
#Shannon entropy of the sink variable. Used to normalize mutual informaiton in the next line.
Hy = (-sum(p2gt0*log2p2gt0))
# Mutual information, in bits. Joint entropy is scaled to the number of bins in a single dimension.
I = ( (-sum(p1gt0*np.log2(p1gt0)) - sum(p2gt0*log2p2gt0) ) + (sum(pgt0*np.log2(pgt0)))*np.log2(nbins[0])/np.log2(nbins[1]))/Hy
# double integral in the last component is done as a 1D.
#return nbins_2, jointentcat,p , sum(N), I, Hy
return I, sum(N)
def mutinfo_newRel(M2, nbins): # MI normalized by max(Hx, Hy) rather than by the sink entropy used in mutinfo_new
# Calculates mutual information with the max H used as a normalization.
# M is an array with two columns [ source, sink]
# nbins list of number of bins in 1D, 2D and 3D, with three elements
ths = 10e-4
M = M2[~np.isnan(M2).any(axis=1)] # clears the nans at both columns
counts1, binEdges1=np.histogram(M[:,0][np.isfinite(M[:,0])],bins=nbins[1]) # Source Variable
binEdges1[0] = binEdges1[0]-ths
binEdges1[len(binEdges1)-1]=binEdges1[len(binEdges1)-1]+ths
col1cat = np.digitize(M[:,0], binEdges1, right=False)
col1cat[col1cat==nbins[1]+1] = 0. # change the bin index of nan to zero. np assigns it to nbins[1]+1 @@@
counts2, binEdges2=np.histogram(M[:,1][np.isfinite(M[:,1])],bins=nbins[1]) # Sink Variable
binEdges2[0] = binEdges2[0]-ths
binEdges2[len(binEdges2)-1]=binEdges2[len(binEdges2)-1]+ths
col2cat = np.digitize(M[:,1], binEdges2, right=False) # which bin (ID) is the data located
col2cat[col2cat==nbins[1]+1] = 0. # change the bin index of nan to zero. np assigns it to nbins[1]+1 @@@
col1cat[col2cat==0.]=0. # If there is an NaN for any row, assign the other column in that row to the NaN bin too @@@
col2cat[col1cat==0.]=0. # See comment above. @@@
#print(col1cat)
# convert 1D histogram to a 2D histogram
jointentcat = (col1cat-1)*nbins[1]+col2cat #This classifies the joint entropy bin into a number between 1 and nbins^2. 0 is assigned to rows with misisng data.
nbins_2 = nbins[1]**2
#N = np.bincount(jointentcat)[1:] # Number of datapoints within each joint entropy bin.
N = np.bincount(jointentcat[jointentcat>0])[1:] # Number of datapoints within each joint entropy bin. @@@
p = N/sum(N); # Vector of probabilities
# 1D probability/histogram
N1, binEdges1d1=np.histogram(M[:,0][np.isfinite(M[:,0])],bins=nbins[0]) # Which bin the first data column is in
N2, binEdges1d2=np.histogram(M[:,1][np.isfinite(M[:,1])],bins=nbins[0]) #Which bin the second data column is in
p1 = N1/sum(N1)
p2 = N2/sum(N2)
# Shanon entropy
pgt0 = p[p>0] # px,y
p1gt0 = p1[p1>0] # px
p2gt0 = p2[p2>0] # py
log2p2gt0 = np.log2(p2gt0)
log2p1gt0 = np.log2(p1gt0)
#Shannon entropy of the sink variable. Used to normalize mutual informaiton in the next line.
Hy = (-sum(p2gt0*log2p2gt0))
Hx = (-sum(p1gt0*log2p1gt0))
maxH = np.max(np.array([Hx,Hy]))
# Mutual information, in bits. Joint entropy is scaled to the number of bins in a single dimension.
I = ( (-sum(p1gt0*np.log2(p1gt0)) - sum(p2gt0*log2p2gt0) ) + (sum(pgt0*np.log2(pgt0)))*np.log2(nbins[0])/np.log2(nbins[1]))/maxH
# double integral in the last component is done as a 1D.
#return nbins_2, jointentcat,p , sum(N), I, Hy
return I, sum(N)
# ## Intermediate functions
# In[10]:
def LagData_new( M_unlagged, shift ):
# LagData Shifts two time-series so that a matrix is generated that allows easy computation of Knutt et al 2005 based TE computation
# M_unlagged is a matrix [X Y..n], where X and Y are column vectors of the
# variables to be compared. shift is a row vector that says how much each
# variable in M_unlagged is to be shifted by.
nR,nC = np.shape(M_unlagged)
maxShift = max(shift)
minShift = min(shift)
newlength = nR - maxShift + minShift
M_lagged = np.nan*np.ones([newlength, nC]) #[source_lagged(1:n-lag), sink_unlagged(lag:n), sink_lagged(1:n-lag)]
#@@@@@@@@@@@@@@######## Dino's verson uses shift of [0, 0, -lag ] for the shuffle case of transfer entropy (transenshuffle_new)
for ii in range(np.shape(M_lagged)[1]):
M_lagged[:,ii] = M_unlagged[(shift[ii]-minShift):(np.shape(M_unlagged)[0]-maxShift+shift[ii]), ii]
return M_lagged
# Alternatively
# lag = np.abs(shift[0])
# M_lagged[:,0] = M_unlagged[0:(nR-lag), 0]
# M_lagged[:,1] = M_unlagged[lag:(nR),1]
# M_lagged[:,2] = M_unlagged[0:(nR-lag),2]
# return M_lagged
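# Worked example (sketch): with shift = [-2, 0, -2] and a 10-row M_unlagged,
# column 0 keeps rows 0..7, column 1 keeps rows 2..9 and column 2 keeps rows 0..7,
# i.e. the rows line up X(t-2), Y(t), Y(t-2) as needed for the TE terms.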
# In[12]:
def jointentropy_new(M2, nbins):
# Calculates the joint entropy H(x,y)
# M is two dimensional column matrix for which joint entropy is to be computed
# H is the normalized joint entropy
# nvalidpoints is the number of rows (samples) used to calculate the joint entropy
ths = 10e-4
M = M2[~np.isnan(M2).any(axis=1)] # clears the nans at both columns
counts1, binEdges1=np.histogram(M[:,0][np.isfinite(M[:,0])],bins=nbins) # Source Variable [ ]
binEdges1[0] = binEdges1[0]-ths
binEdges1[len(binEdges1)-1]=binEdges1[len(binEdges1)-1]+ths
col1cat = np.digitize(M[:,0], binEdges1, right=False)
col1cat[col1cat==nbins+1] = 0. # change the bin index of nan to zero. np assigns it to nbins[1]+1 @@@
counts2, binEdges2=np.histogram(M[:,1][np.isfinite(M[:,1])],bins=nbins) # Sink Variable
binEdges2[0] = binEdges2[0]-ths
binEdges2[len(binEdges2)-1]=binEdges2[len(binEdges2)-1]+ths
col2cat = np.digitize(M[:,1], binEdges2, right=False) # which bin (ID) is the data located
col2cat[col2cat==nbins+1] = 0. # change the bin index of nan to zero. np assigns it to nbins[1]+1 @@@
col1cat[col2cat==0.]=0. # If there is an NaN for any row, assign the other column in that row to the NaN bin too @@@
col2cat[col1cat==0.]=0. # See comment above. @@@
#print(col1cat)
# convert 1D histogram to a 2D histogram
    jointentcat = (col1cat-1)*nbins+col2cat # This classifies the joint entropy bin into a number between 1 and nbins^2. 0 is assigned to rows with missing data.
nbins_2 = nbins**2
N = np.bincount(jointentcat[jointentcat > 0])[1:] # Number of datapoints within each joint entropy bin. @@@
#N = np.bincount(jointentcat)[1:] # Number of datapoints within each joint entropy bin.
p = N/sum(N); # Vector of probabilities
pgt0 = p[p>0] # p(x,y)
H = -sum(pgt0*np.log2(pgt0))
nvalidpoints = sum(N)
return H, nvalidpoints
# In[14]:
def jointentropy3_new(M2, nbins):
# Calculates the joint entropy for three variables H(x,y,z)
# M is a three-column matrix that contains the input vectors of data.
# nvalidpoints is the number of rows (samples) used to calculate the joint entropy
ths = 10e-4
M = M2[~np.isnan(M2).any(axis=1)] # clears the nans at both columns
counts1, binEdges1=np.histogram(M[:,0][np.isfinite(M[:,0])],bins=nbins) # Source Variable [ ]
binEdges1[0] = binEdges1[0]-ths
binEdges1[len(binEdges1)-1]=binEdges1[len(binEdges1)-1]+ths
col1cat = np.digitize(M[:,0], binEdges1, right=False)
col1cat[col1cat==nbins+1] = 0. # change the bin index of nan to zero. np assigns it to nbins[1]+1 @@@
counts2, binEdges2=np.histogram(M[:,1][np.isfinite(M[:,1])],bins=nbins) # Sink Variable
binEdges2[0] = binEdges2[0]-ths
binEdges2[len(binEdges2)-1]=binEdges2[len(binEdges2)-1]+ths
col2cat = np.digitize(M[:,1], binEdges2, right=False) # which bin (ID) is the data located
col2cat[col2cat==nbins+1] = 0. # change the bin index of nan to zero. np assigns it to nbins[1]+1 @@@
counts3, binEdges3=np.histogram(M[:,2][np.isfinite(M[:,2])],bins=nbins) # Source Variable [ ]
binEdges3[0] = binEdges3[0]-ths
binEdges3[len(binEdges3)-1]=binEdges3[len(binEdges3)-1]+ths
col3cat = np.digitize(M[:,2], binEdges3, right=False)
col3cat[col3cat==nbins+1] = 0. # change the bin index of nan to zero. np assigns it to nbins[1]+1 @@@
# Assign 0 to NAN values
col1cat[col2cat==0.] = 0. # If there is an NaN for any row, assign the other column in that row to the NaN bin too @@@
col1cat[col3cat==0.] = 0. # See comment above. @@@
col2cat[col1cat==0.] = 0. # @@@
col3cat[col1cat==0.] = 0. # @@@
    # This classifies the joint entropy bin into a number between 1 and nbins^3. 0 is assigned to rows with missing data.
jointentcat = (col1cat-1)*nbins**2 + (col2cat-1)*nbins + col3cat
#print(np.asarray((jointentcat,col1cat,col2cat, col3cat)).T)
nbins_3 = nbins**3
#N = np.bincount(jointentcat)[1:] # Number of datapoints within each joint entropy bin.
N = np.bincount(jointentcat[jointentcat>0])[1:] # Number of datapoints within each joint entropy bin. @@@
sumN = sum(N)
p = N/sumN # Vector of probabilities
pgt0 = p[p>0]
H = -sum(pgt0*np.log2(pgt0))
nvalidpoints = sumN
return H, nvalidpoints
# In[16]:
def shuffle( M ):
# shuffles the entries of the matrix M in time while keeping NaNs (blank data values) NaNs.
# M is the matrix where the columns are individual variables and the rows are entries in time
Mss = np.ones(np.shape(M))*np.nan # Initialize
for n in range(np.shape(M)[1]): # Columns are shuffled separately
notnans = np.argwhere(~np.isnan(M[:,n]))
R = np.random.rand(np.shape(notnans)[0],1) #np.random.rand(5,1)
I = np.argsort(R,axis=0)
#print(notnans[:,0])
#print(notnans[I,0])
#print('a',M[notnans[:,0],n])
Mss[notnans[:,0],n] = M[notnans[I[:],0],n].reshape(np.shape(M[notnans[I[:],0],n])[0],)
return Mss
# ## Transfer entropy
# In[20]:
def transen_new2(M, shift, nbins): # with shift as an input, different lags between source and sink are possible
    # shift = [-lag of source, 0, -lag of sink], with the sink lag usually being 1
    # Calculates transfer entropy
# M is an array with two columns [ source, sink]
# nbins list of number of bins in 1D, 2D and 3D, with three elements
# lag is the time lag of interest.
# M4 is the lagged subset of data transfer entropy was run on.
    MQ = LagData_new(np.column_stack((M, M[:,1])), shift) # input columns are [source, sink, sink]
    M4 = MQ[~np.isnan(MQ).any(axis=1)] # drops rows with a NaN in any column
# M4 becomes [source_lagged(1:n-lag), sink_unlagged(lag:n), sink_lagged(1:n-lag)] => H(Xt-T, Yt, Yt-T)
M4[np.argwhere(np.isnan(np.sum(M4,axis=1))), :] = np.nan # Reset rows with any NaN entry to NaN.
M4short = M4[np.argwhere(~np.isnan(np.sum(M4,axis=1))),:] # Time series without NaN that will be passed on for shuffling.
M1 = M4[:,(0,2)] # [source_lagged(1:n-lag), sink_lagged(1:n-lag)] =>H(Xt-T,Yt-T)
M2 = M4[:,(1,2)] # [sink_unlagged(lag:n), sink_lagged(1:n-lag)] =>H(Yt,Yt-T)
    #@@@@@@@@@@@@@@######## Dino's version predicts the unlagged sink M4[:,1]
    M3 = M4[:,2] # lagged sink [sink_lagged(1:n-lag)]. Dino used the unlagged sink H(Yt); the correction is to use H(Yt-T), i.e. M4[:,2]
    # Knutt et al. indicate the lagged sink H(Yt-T) should be used. Thus, M4[:,2]
# Now calculate the joint and marginal entropy components:
T1, n_valid_pairs1 = jointentropy_new(M1,nbins[1])
T2, n_valid_pairs2 = jointentropy_new(M2,nbins[1])
# Entropy for the single predictor
    n3, valueatn = np.histogram(M3[np.isfinite(M3)], nbins[0]) # returns the counts n3 and the corresponding bin edges
n3gt0 = n3[n3>0]
sumn3gt0 = sum(n3gt0)
T3 = -sum((n3gt0/sumn3gt0)*(np.log2(n3gt0/sumn3gt0))) # Nonnormalized Shannon entropy of variable Y
# Three variable entropy
T4, n_valid_pairs4 = jointentropy3_new(M4,nbins[2])
Tn = T3 # This is the Shannon entropy of Y, used to normalize the value of transfer entropy obtained below.
log2nbins1 = np.log2(nbins[0])
log2nbins2 = np.log2(nbins[1])
log2nbins3 = np.log2(nbins[2])
log2nbins1_2 = log2nbins1/log2nbins2
log2nbins1_3 = log2nbins1/log2nbins3
T1 = T1*log2nbins1_2
T2 = T2*log2nbins1_2
T4 = T4*log2nbins1_3
T = (T1+T2-T3-T4)/Tn # Knuth formulation of transfer entropy
N = min([n_valid_pairs1, n_valid_pairs2, n_valid_pairs4]) # Number of valid matched pairs used in the calculation
return T, N, M4short
# In[22]:
# In[24]:
def transenshuffle_new2(M, shift, nbins):
# Calculates the transfer entropy for a shuffled time series that has already been lined up with LagData
# Calculates the transfer entropy of X>Y, the amount by which knowledge
# of variable X at a time lag reduces the uncertainty in variable Y. M =
# [X Y], and lag is the time lag of interest. nbins is the number of bins
# used to discretize the probability distributions.
Minput = shuffle(M[:,(0,1)])
    T, N, c = transen_new2(Minput, shift, nbins)  # note: it is unclear why the original used [0, 0, -lag] instead of [-lag, 0, -lag]
return T
# ## Critical values of Mutual information and Transfer entropy
# In[30]:
def mutualshuffle_new2(M,nbins):
Mss = shuffle(M)
    MIss, _ = mutinfo_new(Mss,nbins)  # mutinfo_new returns (I, n); keep only the mutual information value
    return MIss
# In[ ]:
def mutinfo_crit_newPar( M, nbins, alpha, numiter,ncores):
MIss = Parallel(n_jobs=ncores)(delayed(mutualshuffle_new2)(M, nbins) for ii in range(numiter))
MIss = np.sort(MIss)
    MIcrit = MIss[round((1-alpha)*numiter)] # pick the (1-alpha) quantile (e.g. 95% for alpha = 0.05) as the significance threshold
return MIcrit
# In[44]:
def transen_crit_new2( M, shift, alpha, numiter, nbins,ncores):
# Finds the critical value of the transfer entropy statistic
# that needs to be exceeded for statistical signficance.
# M = matrix of unshifted variables, e.g., [X Y] for calculating the X>Y transfer entropy.
# lag = time lag.
# alpha = significance level.
# numiter = number of Monte Carlo shufflings to perform.
# nbins = number of bins to use to discretize the probability distributions.
# Serial
# Tss = np.ones([numiter])*np.nan # Initializing shuffled transfer entropy table
# #print(Tss)
# for ii in range(numiter):
# Tss[ii] = transenshuffle_new2(M, shift, nbins) # Calculates TE for each Monte Carlo Shuffling
# parallel
Tss = Parallel(n_jobs=ncores)(delayed(transenshuffle_new2)(M, shift, nbins) for ii in range(numiter))
## print(Tss)
Tss = np.sort(Tss)
Tcrit = Tss[round((1-alpha)*numiter)] # develop a histogram and peaks the 1-aplpha (95%) quantile significance level with alpha (= 0.05)
return Tcrit
# ## Parallel TE & I calculator
# The number of Monte Carlo shuffles dominates run time - going from 100 to 1000 is very time consuming, hence the parallel version.
# maxLag (the number of lags considered, e.g. 3*365) also adds substantial cost.
# number of source variables -- 20
def RunNewTE2VarsPar(DataMatrix, LabelCell, shift, SinkNodes=None, SourceNodes=None,
maxLag=100, minSamples=1000, numShuffles = 500, sigLevel=0.05, numBins=[11,11,11],ncores=4):
    # computes TE; assumes a data matrix with time in the first column and variables in the others
# Inputs
# DataMatrix - data matrix with time in the first column
# LabelCell - variable name of each data matrix entry
# shift - shift for sink variable (-1,0,-1) for self optimality
# Source_nodes - array of column indices for source variables [2]
    # Sink_nodes - array of column indices for sink variables [3:end]
# resultsDir - directory for results ./Results/
# maxLag - maximum lag (3*365) 3 years
# minSamples - minimum number of valid samples for TE (suggestion 200)
    # numShuffles - number of Monte Carlo shuffle iterations (suggestion 500)
# sigLevel - significance level (suggested 0.05)
# numBins - number of bins to use in 1, 2, and 3 dimensions default [11,11,11]
# ncores - pc cores to use
# Outputs
# Imat - mutual information
# Icritmat - significance threshold
# Tfirstmat - first T > Tcrit
# Tbiggestmat - Tmax for T > Tcrit
# Tcube_store - all T for all sink, source, lag combinations
# Tcritcube_store - all Tcrits for all sink, source, lag combinations
if DataMatrix.size == 0:
return 'no dataMatrix'
if LabelCell.size == 0:
return 'no variable names'
if SourceNodes is None:
SourceNodes = np.arange(2,np.shape(DataMatrix)[1])
if SinkNodes is None:
SinkNodes = np.array([1])
nSources = len(SourceNodes)
nSinks = len(SinkNodes)
# Start clock
# print('Beginning 2-variable analysis (serial) ...')
# Tot = tic
# print(SourceNodes,SinkNodes)
# =========================================
## Shrink input matrices to include only variables that are used
# now the order is time, sinks, sources
#@@@@@@@@@@@@@@@@@@@@@
# from Pd to np.array
dataMat = np.column_stack((DataMatrix[:,0], DataMatrix[:,SinkNodes], DataMatrix[:,SourceNodes])) # date, sink, sources
labCell = np.r_[np.array([LabelCell[0]]), np.array(LabelCell[SinkNodes]), np.array(LabelCell[SourceNodes])]
#np.r_[np.array(LabelCell[0]), np.array(LabelCell[1]), np.array(LabelCell[[2,3,4]])]
#Or labCell = np.column_stack((LabelCell[:,0], LabelCell[:,SinkNodes], LabelCell[:,SourceNodes]))
del DataMatrix # or set it to empty DataMatrix = []
del LabelCell
# =============================================
# Initialize output matrices
# mutual information between sources and sinks
# the sink is daily mean Q, and all pairwise interactions are evaluated
Imat = np.ones([nSinks,nSources])*np.nan # row value = # sink vars, col values = # source vars;
# significance threshold
Icritmat = copy.deepcopy(Imat)
# first T > Tcrit
Tfirstmat = copy.deepcopy(Imat)
# Tmax for T > Tcrit
Tbiggestmat = copy.deepcopy(Imat)
# All T for all sink, source, lag combinations
Tcube_store = np.ones([nSinks,nSources,maxLag])*np.nan
# All Tcrits for all sink, source, lag combinations
Tcritcube_store = copy.deepcopy(Tcube_store)
# =============================================
# LOOP OVER ALL PAIRS OF SOURCE AND SINK VARIABLES TO CALCULATE MI and TE
for mySinkIter in range(nSinks): # loop over Sink nodes (information receivers) [ 0]
mySinkNum = SinkNodes[mySinkIter]
mySinkInd = 1 + mySinkIter # exclude time
# extract sub-matrices for the ease of computation
Ivec = Imat[mySinkIter,:]
Icritvec = Icritmat[mySinkIter,:]
Tfirstvec = Tfirstmat[mySinkIter,:]
Tbiggestvec = Tbiggestmat[mySinkIter,:]
Tmat_store = np.reshape(Tcube_store[mySinkIter,:,:],[nSources,maxLag])
Tcritmat_store = np.reshape(Tcritcube_store[mySinkIter,:,:], [nSources,maxLag])
sinkName = labCell[mySinkInd] # Text name of the Sink variable
MmySink = dataMat[:,mySinkInd].astype(float) # Select the sink variable to run
#print('sink node = ', mySinkIter)
for mySourceIter in range(nSources): # Loop over the source nodes
#print(mySourceIter)
mySourceNum = SourceNodes[mySourceIter]
mySourceInd = 1 + nSinks + mySourceIter
Mmysource = dataMat[:,mySourceInd].astype(float) # Select source variables
sourceName = labCell[mySourceInd] # Name of the source variable
#print('Source node ', mySourceNum-1, sourceName, ':=>', 'Sink node ', mySinkNum, sinkName)
#print('Lag ', 'Sink', 'Source')
M = np.column_stack((Mmysource, MmySink)) # Source followed by Sink
M = M.astype(float)
#print(M.shape)
# MUTUAL INFORMATION
II,validN = mutinfo_new(M,numBins) # computes mutual information
#print(II,validN,M[500,:])
if validN >= minSamples: # enough length to compute MI
I = II
                Icrit = mutinfo_crit_newPar(M=M, alpha=sigLevel, nbins=numBins, numiter=numShuffles, ncores=ncores)  # returns a single critical MI value
else:
I = np.nan
Icrit = np.nan
# print(M.shape, II, I,validN)
Ivec[mySourceIter] = I # save it in a matrix
Icritvec[mySourceIter] = Icrit
# TRANSFER ENTROPY
T = np.ones([maxLag])*np.nan # intialize the TE vector over the range of lags examined
Tcrit = copy.deepcopy(T) # Initialize the vector of the critical TE
#for lag in tqdm(range(maxLag)): #[0 to 364] in a year i.e., no lag day
for lag in range(maxLag): #[0 to 364] in a year i.e., no lag day
t, N, Mshort = transen_new2(M=M, shift=[-lag,shift[1],shift[2]], nbins=numBins) # Computes TE for at a given lag of 'lag'
#print(Mshort, type(Mshort),Mshort.shape)
Mshort = Mshort.reshape(Mshort.shape[0],Mshort.shape[2])
if N >= minSamples: # enough length to compute TE
T[lag] = t # save TE computed
Tcrit[lag] = transen_crit_new2(M=M, shift=[-lag,shift[1],shift[2]], alpha= sigLevel,nbins=numBins,numiter=numShuffles,ncores=ncores) # TE critical
#print(lag, mySinkIter, mySourceIter)
# Save the first and biggest value of T over the significance threshold
TgTcrit = np.argwhere(T >= Tcrit) # np.argwhere(np.array([5,6,9,18]) > np.array([3,9,2,9]))
            if TgTcrit.size > 0:  # at least one lag where T exceeds Tcrit (size check avoids the falsy [0] edge case of builtin any())
Tfirstvec[mySourceIter] = T[TgTcrit[0,0]]
Tbiggestvec[mySourceIter] = max(T[TgTcrit[:,0]]) # @@@@@ Should be T-Tcrit biggest!!!!!!
#print(Tcrit.shape, T.shape, Tcritcube_store.shape)
Tmat_store[mySourceIter,:] = T
Tcritmat_store[mySourceIter,:] = Tcrit
# replace column vectors from source iterations into matrices
Imat[mySinkIter, :] = Ivec
Icritmat[mySinkIter, :] = Icritvec
Tfirstmat[mySinkIter,:] = Tfirstvec
Tbiggestmat[mySinkIter,:] = Tbiggestvec
Tcube_store[mySinkIter,:,:] = Tmat_store
Tcritcube_store[mySinkIter,:,:] = Tcritmat_store
# save results (modify to save just relevant variables)
# save([resultsDir 'TE_analysis_workspace.mat'], '-v7.3');
# Stop clock
#print('Finished 2-variable analysis (Parallel)!');
return Imat, Icritmat, Tfirstmat, Tbiggestmat, Tcube_store, Tcritcube_store # | sink | source | lag |
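# --- Minimal usage sketch (added for illustration; not part of the original script) ---
# It relies on the helper functions defined earlier in this script (mutinfo_new, shuffle,
# transen_new2, ...) and builds a synthetic matrix whose first column is a time index,
# second column the sink and remaining columns the sources, matching the defaults
# documented in RunNewTE2VarsPar. All names and parameter values below are illustrative.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    demo = np.column_stack((np.arange(2000), rng.rand(2000), rng.rand(2000), rng.rand(2000)))
    demo_labels = np.array(['date', 'sink_var', 'source_a', 'source_b'])
    Imat, Icritmat, Tfirst, Tbiggest, Tcube, Tcritcube = RunNewTE2VarsPar(
        demo, demo_labels, shift=[-1, 0, -1], maxLag=5, minSamples=200,
        numShuffles=20, sigLevel=0.05, ncores=2)
    print('Mutual information:', Imat)
    print('First significant TE per source:', Tfirst)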
|
the-stack_0_17859 | # Copyright 2021 AI Redefined Inc. <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import time
from collections import namedtuple
from cogment.session import ActorInfo, EventType, RecvAction, RecvEvent
SentReward = namedtuple("SentReward", ["value", "confidence", "to", "tick_id", "user_data"])
SentEvent = namedtuple(
"SentEvent",
["tick_id", "done", "observations", "rewards", "messages", "error"],
defaults=(0, False, [], [], [], None),
)
# Make it explicit we reexport ActorInfo
# pylint: disable=self-assigning-variable
ActorInfo = ActorInfo
class ActionData:
def __init__(self, tick_id, timestamp):
self.tick_id = tick_id
self.timestamp = timestamp
class MockEnvironmentSession:
def __init__(self, trial_id, environment_config, actor_infos, environment_impl):
self.config = environment_config
self._trial_id = trial_id
self._actor_infos = actor_infos
self._tick_id = 0
self._done = False
self._to_send_rewards = []
self._to_send_messages = []
self._sent_events_queue = asyncio.Queue()
self._recv_events_queue = asyncio.Queue()
self._environment_impl_error = None
async def environment_impl_worker():
try:
await environment_impl(self)
except asyncio.CancelledError as cancelled_error:
# Raising cancellation
raise cancelled_error
except Exception as err:
self._sent_events_queue.put_nowait(SentEvent(tick_id=self._tick_id, error=err))
self._impl_task = asyncio.create_task(environment_impl_worker())
async def terminate(self):
self._impl_task.cancel()
try:
await self._impl_task
except asyncio.CancelledError:
pass
self._impl_task = None
def _produce_observations(self, observations, done):
# Assuming there's exactly one call to `produce_observations`
# Send what's been accumulating up until now alongside the observation
sent_event = SentEvent(
tick_id=self._tick_id,
done=done,
observations=observations,
rewards=self._to_send_rewards,
messages=self._to_send_messages,
)
self._done = done
self._sent_events_queue.put_nowait(sent_event)
self._tick_id += 1
self._to_send_rewards = []
self._to_send_messages = []
def start(self, observations):
self._produce_observations(observations, done=False)
def add_reward(self, value, confidence, to, tick_id=-1, user_data=None):
self._to_send_rewards.append(
SentReward(value=value, confidence=confidence, to=to, tick_id=tick_id, user_data=user_data)
)
def produce_observations(self, observations):
self._produce_observations(observations, done=self._done)
def end(self, observations):
self._produce_observations(observations, done=True)
async def event_loop(self):
while not self._done:
event = await self._recv_events_queue.get()
self._done = (
event.type == EventType.ENDING
) # Will make sure the next call to produce_observations behave as `end`
yield event
def get_trial_id(self):
return self._trial_id
def get_tick_id(self):
return self._tick_id
def is_trial_over(self):
return self._done
def get_active_actors(self):
return self._actor_infos
async def receive_events(self):
event = await self._sent_events_queue.get()
if event.error:
raise RuntimeError("Error occured while executing the environment session") from event.error
return event
# pylint: disable=dangerous-default-value
def send_events(self, etype=EventType.ACTIVE, actions=[]):
# No support for messages yet, to be added later
event = RecvEvent(etype)
action_data = ActionData(self._tick_id, time.time())
event.actions = [
RecvAction(actor_index=i, action_data=action_data, action=action) for i, action in enumerate(actions)
]
self._recv_events_queue.put_nowait(event)
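# --- Hypothetical usage sketch (comments only; added for illustration) ---
# An environment implementation can be exercised without a real orchestrator by wrapping
# it in MockEnvironmentSession and alternating send/receive inside an async test, e.g.:
#
#   session = MockEnvironmentSession("trial-0", config, actor_infos, my_env_impl)
#   first = await session.receive_events()      # observations produced by start()
#   session.send_events(actions=[some_action])  # feed actions back to the implementation
#   await session.terminate()
#
# "config", "actor_infos", "my_env_impl" and "some_action" are placeholders, not names
# defined in this module.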
|
the-stack_0_17860 | # Given an integer x, return true if x is palindrome integer.
# An integer is a palindrome when it reads the same backward as forward. For example, 121 is palindrome while 123 is not.
class Solution:
def isPalindrome(self, x: int) -> bool:
s_str = str(x)
print(s_str)
loop_count = int(len(s_str)/2)
result = True
for i in range(loop_count):
if s_str[i] != s_str[len(s_str) - i - 1]:
result = False
return result
def main():
    x = 121   # palindrome example (immediately overridden below)
    x = -121  # non-palindrome example actually tested (negatives are never palindromes)
S = Solution()
return S.isPalindrome(x)
if __name__ == '__main__':
result = main()
print(result) |
the-stack_0_17861 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Alphacon Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends ALP to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more ALP to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import AlphaconTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
Call = enum.Enum("Call", "single multi")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
"""Call one key import RPC."""
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
"""Verify that getbalance/listtransactions return expected values."""
balance = self.node.getbalance(self.label, 0, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["account"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], confirmations)
assert_equal("trusted" not in tx, True)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(tx["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in tx, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(AlphaconTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount and label for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
sync_blocks(self.nodes)
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
|
the-stack_0_17862 | # Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.objects import base
from designate.objects import fields
@base.DesignateRegistry.register
class Blacklist(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject):
fields = {
'pattern': fields.StringFields(maxLength=255),
'description': fields.StringFields(maxLength=160, nullable=True),
}
STRING_KEYS = [
'id', 'pattern'
]
@base.DesignateRegistry.register
class BlacklistList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = Blacklist
fields = {
'objects': fields.ListOfObjectsField('Blacklist'),
}
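# Hypothetical usage sketch (comments only; assumes the standard DesignateObject
# keyword-argument constructor, which is defined outside this file):
#
#   blacklist = Blacklist(pattern='^blocked\\.example\\.com\\.$',
#                         description='zone names matching this pattern are rejected')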
|
the-stack_0_17863 | import torch
from dataclasses import dataclass
from omegaconf import DictConfig
from torch import nn
from src.toxic.modelling.encoder import WideCNNEncoder
from src.toxic.modelling.classifier import SentenceClassifier
@dataclass
class ModelResult:
embeddings: torch.Tensor
logits: torch.Tensor
class Model(nn.Module):
"""A module that contains an encoder and a classification head"""
def __init__(self, config: DictConfig):
super().__init__()
self.encoder = WideCNNEncoder(**config.model.encoder)
self.classifier = SentenceClassifier(
**config.model.classifier,
n_classes=len(config.data.labels)
)
def forward(self, inputs) -> ModelResult:
embeddings = self.encoder(inputs)
logits = self.classifier(embeddings)
return ModelResult(embeddings, logits)
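# Hypothetical usage sketch (comments only; the config keys and input format below are
# assumptions about this project's Hydra/OmegaConf setup, not confirmed by this file):
#
#   config = OmegaConf.load('configs/train.yaml')  # must define model.encoder,
#                                                  # model.classifier and data.labels
#   model = Model(config)
#   result = model(batch)                          # -> ModelResult(embeddings, logits)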
|
the-stack_0_17865 | import warnings
class Intervention(object):
''' Represents an intervention with a start date, and maybe an end date.
Multiple interventions can be composed within a project.
Parameters
----------
start_date : datetime.datetime
Must be timezone aware
end_date : datetime.datetime or None, default None
Must be timezone aware. If None, intervention is assumed to be ongoing.
'''
def __init__(self, start_date, end_date=None):
self.start_date = self._validate_start_date(start_date)
self.end_date = self._validate_end_date(end_date)
def __repr__(self):
return (
"Intervention(start_date={}, end_date={})"
.format(self.start_date, self.end_date)
)
def _validate_start_date(self, dt):
if dt is None:
message = 'Intervention `start_date` cannot be None.'
raise ValueError(message)
if not self._is_tz_aware(dt):
message = 'Given datetime is not tz-aware: {}'.format(dt)
raise ValueError(message)
return dt
def _validate_end_date(self, dt):
if dt is None:
return None
if not self._is_tz_aware(dt):
message = 'Given datetime is not tz-aware: {}'.format(dt)
raise ValueError(message)
if self.start_date > dt:
message = (
'Ignoring end_date because it is before start_date: '
'start_date={} > end_date={}'
.format(self.start_date, dt)
)
warnings.warn(message)
return None
return dt
@staticmethod
def _is_tz_aware(dt):
return dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None
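if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original module).
    import datetime
    utc = datetime.timezone.utc
    ongoing = Intervention(start_date=datetime.datetime(2020, 1, 1, tzinfo=utc))
    finished = Intervention(start_date=datetime.datetime(2020, 1, 1, tzinfo=utc),
                            end_date=datetime.datetime(2020, 6, 1, tzinfo=utc))
    print(ongoing)   # end_date=None means the intervention is treated as ongoing
    print(finished)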
|
the-stack_0_17866 | """Test the debate app."""
from django.test import TestCase
from debate.models import Debate, ArgumentsFor, ArgumentsAgainst
from django.contrib.auth.models import User
class DebateTest(TestCase):
"""Test the debate functionality."""
def setUp(self):
"""Setup database with debate and arguments."""
user = User(password='potatoes',
username='zach',
email='[email protected]')
user.save()
new_debate = Debate(title='Should we have stricter gun laws?', created_by=user)
new_debate.save()
one_argument_for = ArgumentsFor(argument='If its harder to get your hands on a gun it will allow people to have more time to think before doing something reckless.',
debate=new_debate,
created_by=user)
one_argument_for.save()
two_argument_for = ArgumentsFor(argument='Another argument for stricter gun laws.',
debate=new_debate,
created_by=user)
two_argument_for.save()
one_argument_against = ArgumentsAgainst(argument='One argument against.',
debate=new_debate,
created_by=user)
one_argument_against.save()
two_argument_against = ArgumentsAgainst(argument='Another argument against.',
debate=new_debate,
created_by=user)
two_argument_against.save()
def test_debate_is_created_and_title_added(self):
"""Test that a debate model instance is created by setup."""
one_debate = Debate.objects.get(id=1)
debate_title = one_debate.title
self.assertEqual(debate_title, 'Should we have stricter gun laws?')
def test_debate_multiple_arguments_for(self):
"""Test the one to many relationship for a debate and arguments for."""
a_debate = Debate.objects.get(id=3)
arguments_for = a_debate.argumentsfor_set.all()
self.assertEqual(len(arguments_for), 2)
def test_debate_multiple_arguments_against(self):
"""Test the one to many relationship for a debate and arguments for."""
a_debate = Debate.objects.get(id=2)
arguments_against = a_debate.argumentsagainst_set.all()
self.assertEqual(len(arguments_against), 2)
def test_user_can_create_multiple_arguments(self):
"""Test that a user can create multiple arguments for a debate."""
a_user = User.objects.get(username='zach')
arguments_for = a_user.argumentsfor_set.all()
self.assertEqual(len(arguments_for), 2)
|
the-stack_0_17867 | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class SamplingConfigStoppingCondition(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'n_samples': 'float',
'min_distance': 'float'
}
attribute_map = {
'n_samples': 'nSamples',
'min_distance': 'minDistance'
}
def __init__(self, n_samples=None, min_distance=None, _configuration=None): # noqa: E501
"""SamplingConfigStoppingCondition - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._n_samples = None
self._min_distance = None
self.discriminator = None
if n_samples is not None:
self.n_samples = n_samples
if min_distance is not None:
self.min_distance = min_distance
@property
def n_samples(self):
"""Gets the n_samples of this SamplingConfigStoppingCondition. # noqa: E501
How many samples/images should be used for the sampling. 0-1 represents a percentage of all. 1-N are absolute numbers # noqa: E501
:return: The n_samples of this SamplingConfigStoppingCondition. # noqa: E501
:rtype: float
"""
return self._n_samples
@n_samples.setter
def n_samples(self, n_samples):
"""Sets the n_samples of this SamplingConfigStoppingCondition.
How many samples/images should be used for the sampling. 0-1 represents a percentage of all. 1-N are absolute numbers # noqa: E501
:param n_samples: The n_samples of this SamplingConfigStoppingCondition. # noqa: E501
:type: float
"""
self._n_samples = n_samples
@property
def min_distance(self):
"""Gets the min_distance of this SamplingConfigStoppingCondition. # noqa: E501
The minimum distance sampled images should have. Before the distance would fall below, the sampling is stopped. # noqa: E501
:return: The min_distance of this SamplingConfigStoppingCondition. # noqa: E501
:rtype: float
"""
return self._min_distance
@min_distance.setter
def min_distance(self, min_distance):
"""Sets the min_distance of this SamplingConfigStoppingCondition.
The minimum distance sampled images should have. Before the distance would fall below, the sampling is stopped. # noqa: E501
:param min_distance: The min_distance of this SamplingConfigStoppingCondition. # noqa: E501
:type: float
"""
self._min_distance = min_distance
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SamplingConfigStoppingCondition, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SamplingConfigStoppingCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SamplingConfigStoppingCondition):
return True
return self.to_dict() != other.to_dict()
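if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the generated client).
    # Per the property docstrings above, nSamples in [0, 1] is a fraction of the dataset
    # and values above 1 are an absolute count.
    condition = SamplingConfigStoppingCondition(n_samples=0.5, min_distance=0.1)
    print(condition.to_dict())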
|
the-stack_0_17868 | #coding:utf-8
from scrapy.dupefilters import RFPDupeFilter
class URLFilter(RFPDupeFilter):
"""根据url过滤"""
def __init__(self, path=None,debug=False):
self.urls_seen = set()
RFPDupeFilter.__init__(self, path)
def request_seen(self, request):
if request.url in self.urls_seen:
return True
else:
self.urls_seen.add(request.url)
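# Usage sketch (illustrative; the dotted path below is an assumption about the project
# layout): enable this filter through Scrapy's built-in DUPEFILTER_CLASS setting in
# settings.py, e.g.
#
#   DUPEFILTER_CLASS = 'myproject.filters.URLFilter'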
'''
from scrapy.dupefilters import RFPDupeFilter
from w3lib.util.url import canonicalize_url
class URLSha1Filter(RFPDupeFilter):
"""根据urlsha1过滤"""
def __init__(self, path=None,debug=False):
self.urls_seen = set()
RFPDupeFilter.__init__(self, path)
def request_seen(self, request):
fp = hashlib.sha1()
fp.update(canonicalize_url(request.url))
url_sha1 = fp.hexdigest()
if url_sha1 in self.urls_seen:
return True
else:
self.urls_seen.add(url_sha1)
'''
'''
class URLBloomFilter(RFPDupeFilter):
"""根据urlhash_bloom过滤"""
def __init__(self, path=None,debug=False):
self.urls_sbf = ScalableBloomFilter(mode=ScalableBloomFilter.SMALL_SET_GROWTH)
RFPDupeFilter.__init__(self, path)
def request_seen(self, request):
fp = hashlib.sha1()
fp.update(canonicalize_url(request.url))
url_sha1 = fp.hexdigest()
if url_sha1 in self.urls_sbf:
return True
else:
self.urls_sbf.add(url_sha1)
''' |
the-stack_0_17869 | #------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 10/07/2004
#
#------------------------------------------------------------------------------
""" Exports the symbols defined by the traits.ui package.
"""
#-------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------
from __future__ import absolute_import
from .basic_editor_factory import BasicEditorFactory
from .context_value import CV, CVFloat, CVInt, CVStr, CVType, ContextValue
from .editor import Editor
from .editor_factory import EditorFactory
try:
from .editors.api import ArrayEditor
except ImportError:
# ArrayEditor depends on numpy, so ignore if numpy is not present.
pass
from .editors.api import (
BooleanEditor,
ButtonEditor,
CheckListEditor,
CodeEditor,
ColorEditor,
CompoundEditor,
CustomEditor,
CSVListEditor,
DNDEditor,
StyledDateEditor,
DateEditor,
DateRangeEditor,
DefaultOverride,
DirectoryEditor,
DropEditor,
EnumEditor,
FileEditor,
FontEditor,
HTMLEditor,
HistoryEditor,
ImageEditor,
ImageEnumEditor,
InstanceEditor,
KeyBindingEditor,
ListEditor,
ListStrEditor,
NullEditor,
PopupEditor,
ProgressEditor,
RGBColorEditor,
RangeEditor,
ScrubberEditor,
SearchEditor,
SetEditor,
ShellEditor,
TableEditor,
TabularEditor,
TextEditor,
TimeEditor,
TitleEditor,
TreeEditor,
TupleEditor,
ValueEditor)
from .group import (Group, HFlow, HGroup, HSplit, Tabbed, VFlow, VFold, VGrid,
VGroup, VSplit)
from .handler import Controller, Handler, ModelView, ViewHandler, default_handler
from .help import on_help_call
from .help_template import help_template
from .include import Include
from .item import (Custom, Heading, Item, Label, Readonly, Spring, UCustom,
UItem, UReadonly, spring)
from .menu import (
Action,
ActionGroup,
ApplyButton,
CancelButton,
CloseAction,
HelpAction,
HelpButton,
LiveButtons,
Menu,
MenuBar,
ModalButtons,
NoButton,
NoButtons,
OKButton,
OKCancelButtons,
PyFaceAction,
RedoAction,
RevertAction,
RevertButton,
Separator,
StandardMenuBar,
ToolBar,
UndoAction,
UndoButton)
from .message import auto_close_message, error, message
from .table_column import (ExpressionColumn, ListColumn, NumericColumn,
ObjectColumn, TableColumn)
from .table_filter import (EvalTableFilter, MenuTableFilter, RuleTableFilter,
TableFilter)
from .toolkit import toolkit
from .toolkit_traits import ColorTrait, FontTrait, RGBColorTrait
from .tree_node import (ITreeNode, ITreeNodeAdapter, MultiTreeNode,
ObjectTreeNode, TreeNode, TreeNodeObject)
from .ui import UI
from .ui_info import UIInfo
from .ui_traits import (Border, HasBorder, HasMargin, Image, Margin,
StatusItem)
from .undo import (AbstractUndoItem, ListUndoItem, UndoHistory,
UndoHistoryUndoItem, UndoItem)
from .view import View
from .view_element import ViewElement, ViewSubElement
from . import view_elements
_constants = toolkit().constants()
WindowColor = _constants.get('WindowColor', 0xFFFFFF)
def raise_to_debug():
""" When we would otherwise silently swallow an exception, call this instead
to allow people to set the TRAITS_DEBUG environment variable and get the
exception.
"""
import os
if os.getenv('TRAITS_DEBUG') is not None:
raise
|