max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars)
---|---|---|---|---|
libcity/data/batch.py | moghadas76/test_bigcity | 221 | 11113707 |
import torch
import numpy as np
class Batch(object):
def __init__(self, feature_name, pad_item=None, pad_max_len=None):
"""Summary of class here
Args:
feature_name (dict): key is the corresponding feature's name, and
the value is the feature's data type
pad_item (dict): key is the feature name, and value is the padding
value. We will just padding the feature in pad_item
pad_max_len (dict): key is the feature name, and value is the max
length of padded feature. use this parameter to truncate the
feature.
"""
self.data = {}
# By default, pad each feature to the longest length in the batch; if a feature's length exceeds pad_max_len, truncate it.
self.pad_len = {}
self.origin_len = {} # records the original (pre-padding) length of each trajectory
self.pad_max_len = pad_max_len if pad_max_len is not None else {}
self.pad_item = pad_item if pad_item is not None else {}
self.feature_name = feature_name
for key in feature_name:
self.data[key] = []
if key in self.pad_item:
self.pad_len[key] = 0
self.origin_len[key] = []
def __getitem__(self, key):
if key in self.data:
return self.data[key]
else:
raise KeyError('{} is not in the batch'.format(key))
def __setitem__(self, key, value):
if key in self.data:
self.data[key] = value
else:
raise KeyError('{} is not in the batch'.format(key))
def append(self, item):
"""
append a new item into the batch
Args:
item (list): one group of inputs, in the same order as feature_name; feature_name gives the name of each input
"""
if len(item) != len(self.feature_name):
raise KeyError(
'when appending to a batch, the item length does not match \
feature_name')
for i, key in enumerate(self.feature_name):
# The order of features in item must match the order of feature_name given at initialization.
self.data[key].append(item[i])
if key in self.pad_item:
self.origin_len[key].append(len(item[i]))
if self.pad_len[key] < len(item[i]):
# keep pad_len equal to the maximum feature length seen so far
self.pad_len[key] = len(item[i])
def padding(self):
"""
Only pads features that are one-dimensional arrays.
"""
for key in self.pad_item:
# only pad the features listed in pad_item
if key not in self.data:
raise KeyError('feature {} in pad_item is missing from the batch data'.format(key))
max_len = self.pad_len[key]
if key in self.pad_max_len:
max_len = min(self.pad_max_len[key], max_len)
for i in range(len(self.data[key])):
if len(self.data[key][i]) < max_len:
self.data[key][i] += [self.pad_item[key]] * \
(max_len - len(self.data[key][i]))
else:
# When truncating, drop the earliest points,
# because the data is a time series.
self.data[key][i] = self.data[key][i][-max_len:]
# Truncated sequences cannot be restored, but at least keep origin_len consistent.
self.origin_len[key][i] = max_len
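# Illustrative usage (a minimal sketch; the feature name 'loc' and the pad
# value are hypothetical, not taken from libcity itself):
# batch = Batch({'loc': 'int'}, pad_item={'loc': 0})
# batch.append([[1, 2, 3]])
# batch.append([[4, 5]])
# batch.padding() # data['loc'] is now [[1, 2, 3], [4, 5, 0]]
# batch.to_tensor(torch.device('cpu')) # data['loc'] -> LongTensor of shape (2, 3)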
def get_origin_len(self, key):
return self.origin_len[key]
def to_tensor(self, device):
"""
Convert self.data to tensors and move them onto the given device.
Args:
device(torch.device): target GPU/CPU device
"""
for key in self.data:
if self.feature_name[key] == 'int':
self.data[key] = torch.LongTensor(np.array(self.data[key])).to(device)
elif self.feature_name[key] == 'float':
self.data[key] = torch.FloatTensor(np.array(self.data[key])).to(device)
elif self.feature_name[key] == 'array of int':
for i in range(len(self.data[key])):
for j in range(len(self.data[key][i])):
try:
self.data[key][i][j] = torch.LongTensor(np.array(self.data[key][i][j])).to(device)
except TypeError as err:
print('Batch.to_tensor failed for feature {} on device {}: {}'.format(key, device, err))
raise
elif self.feature_name[key] == 'no_pad_int':
for i in range(len(self.data[key])):
self.data[key][i] = torch.LongTensor(np.array(self.data[key][i])).to(device)
elif self.feature_name[key] == 'no_pad_float':
for i in range(len(self.data[key])):
self.data[key][i] = torch.FloatTensor(np.array(self.data[key][i])).to(device)
elif self.feature_name[key] == 'no_tensor':
pass
else:
raise TypeError(
'Batch.to_tensor only supports int, float, array of int, '
'no_pad_int, no_pad_float and no_tensor, but got {}'.format(self.feature_name[key]))
def to_ndarray(self):
for key in self.data:
if self.feature_name[key] == 'int':
self.data[key] = np.array(self.data[key])
elif self.feature_name[key] == 'float':
self.data[key] = np.array(self.data[key])
else:
raise TypeError(
'Batch.to_ndarray only supports int and float, '
'but got {}'.format(self.feature_name[key])) |
sdk/cognitiveservices/azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/operations/__init__.py | rsdoherty/azure-sdk-for-python | 2,728 | 11113735 | <filename>sdk/cognitiveservices/azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/operations/__init__.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .image_moderation_operations import ImageModerationOperations
from .text_moderation_operations import TextModerationOperations
from .list_management_image_lists_operations import ListManagementImageListsOperations
from .list_management_term_lists_operations import ListManagementTermListsOperations
from .list_management_image_operations import ListManagementImageOperations
from .list_management_term_operations import ListManagementTermOperations
from .reviews_operations import ReviewsOperations
__all__ = [
'ImageModerationOperations',
'TextModerationOperations',
'ListManagementImageListsOperations',
'ListManagementTermListsOperations',
'ListManagementImageOperations',
'ListManagementTermOperations',
'ReviewsOperations',
]
|
c++/paddle_infer_demo/test_yolov3.py | windstamp/Paddle-Inference-Demo | 115 | 11113756 |
import numpy as np
import argparse
import time
import os
from paddle.inference import Config
from paddle.inference import create_predictor
def init_predictor(args):
config = Config()
if args.model_dir == "":
config.set_model(args.model_file, args.params_file)
else:
config.set_model(args.model_dir)
#config.disable_glog_info()
config.enable_use_gpu(1000, 3) # 1000 MB initial GPU memory pool, on GPU id 3
predictor = create_predictor(config)
return predictor
def run(args, predictor, data):
# copy data to input tensor
input_names = predictor.get_input_names()
for i, name in enumerate(input_names):
input_tensor = predictor.get_input_handle(name)
input_tensor.reshape(data[i].shape)
data[i] = data[i].copy()
input_tensor.copy_from_cpu(data[i])
# warm up
for i in range(10):
predictor.run()
# do the inference
repeat = 100
start = time.perf_counter() # time.clock() was removed in Python 3.8
for i in range(repeat):
for i, name in enumerate(input_names):
input_tensor = predictor.get_input_handle(name)
input_tensor.reshape(data[i].shape)
input_tensor.copy_from_cpu(data[i])
predictor.run()
output_names = predictor.get_output_names()
for i, name in enumerate(output_names):
output_tensor = predictor.get_output_handle(name)
output_data = output_tensor.copy_to_cpu()
end = time.perf_counter()
precision = "int8" if args.use_int8 else "float32"
latency = (end - start) * 1000 / repeat
print("latency:", latency, "ms")
results = []
# get out data from output tensor
output_names = predictor.get_output_names()
for i, name in enumerate(output_names):
output_tensor = predictor.get_output_handle(name)
output_data = output_tensor.copy_to_cpu()
results.append(output_data)
return results
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_file", type=str, default="", help="Model filename, Specify this when your model is a combined model.")
parser.add_argument("--params_file", type=str, default="", help="Parameter filename, Specify this when your model is a combined model.")
parser.add_argument("--model_dir", type=str, default="", help="Model dir, If you load a non-combined model, specify the directory of the model.")
parser.add_argument("--int8", dest='use_int8', action='store_true', help="Use int8.")
parser.add_argument("--float32", dest='use_int8', action='store_false', help="Use float32.")
parser.set_defaults(use_int8=False)
parser.add_argument("--min", type=int, default=3, help="min_subgraph_size for tensorrt")
return parser.parse_args()
def fake_input(shape):
fake_img = np.ones(shape).astype(np.float32)
return fake_img
if __name__ == '__main__':
args = parse_args()
pred = init_predictor(args)
input_shape = (1, 3, 608, 608)
fake_img = fake_input(input_shape)
im_size = np.array([[608, 608]]).astype('int32')
result = run(args, pred, [fake_img, im_size])
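# Illustrative invocations (a minimal sketch; the model paths are hypothetical):
# python test_yolov3.py --model_dir ./yolov3_infer
# python test_yolov3.py --model_file model.pdmodel --params_file model.pdiparams --int8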
|
package_control/downloaders/basic_auth_downloader.py | evandrocoan/package_control | 3,373 | 11113762 |
import base64
try:
# Python 3
from urllib.parse import urlparse
except (ImportError):
# Python 2
from urlparse import urlparse
class BasicAuthDownloader(object):
"""
A base for downloaders to add an HTTP basic auth header
"""
def build_auth_header(self, url):
"""
Constructs an HTTP basic auth header for a URL, if present in
settings
:param url:
A unicode string of the URL being downloaded
:return:
A dict with an HTTP header name as the key and the value as the
value. Both are unicode strings.
"""
auth_string = self.get_auth_string(url)
if not auth_string:
return {}
b64_auth = base64.b64encode(auth_string.encode('utf-8')).decode('utf-8')
return {"Authorization": "Basic %s" % b64_auth}
def get_auth_string(self, url):
"""
Constructs a string of username:password for use in HTTP basic auth
:param url:
A unicode string of the URL being downloaded
:return:
None, or a unicode string of the username:password for the URL
"""
username, password = self.get_username_password(url)
if username and password:
return "%s:%s" % (username, password)
return None
def get_username_password(self, url):
"""
Returns a tuple of (username, password) for use in HTTP basic auth
:param url:
A unicode string of the URL being downloaded
:return:
A 2-element tuple of either (None, None) or (username, password)
as unicode strings
"""
domain_name = urlparse(url).netloc
auth_settings = self.settings.get('http_basic_auth')
if auth_settings and isinstance(auth_settings, dict):
params = auth_settings.get(domain_name)
if params and isinstance(params, (list, tuple)) and len(params) == 2:
return (params[0], params[1])
return (None, None)
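# Illustrative shape of the 'http_basic_auth' setting consumed above (a minimal
# sketch; the domain and credentials are hypothetical):
# "http_basic_auth": {
#     "raw.example.com": ["username", "password"]
# }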
|
problems/first-greater-number/first-greater-number.py | vidyadeepa/the-coding-interview | 1,571 | 11113812 |
def first_greater(l, n):
return next((i for i in l if i > n), None)
print(first_greater([2, 10, 5, 6, 80], 6))
print(first_greater([2, 10, 5, 6, 80], 20))
print(first_greater([2, 10, 5, 6, 80], 100))
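# Expected output: 10 (the first element greater than 6), then 80, then None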
|
tools/autoconfig.py | joyzainzy/xiaomi_miot_raw | 1,294 | 11113842 |
import requests
url_all = 'http://miot-spec.org/miot-spec-v2/instances?status=all'
url_spec = 'http://miot-spec.org/miot-spec-v2/instance'
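# url_all returns the full list of known device instances; url_spec returns the
# spec of a single device when queried with its urn via the 'type' parameter.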
def deviceinfo(j):
print(f"设备描述:{j['description']}")
print("设备属性:")
for s in j['services']:
print(f"\nsiid {s['iid']}: {s['description']}\n")
for p in s.get('properties', []):
print(f" piid {p['iid']}: {p['description']}", end=' ')
if 'read' in p['access']:
print("可读取", end=' ')
if 'write' in p['access']:
print("可控制", end=' ')
print()
if 'format' in p:
print(f" 数据类型:{p['format']}")
if 'value-range' in p:
print(f" 取值范围:{p['value-range']}")
if 'value-list' in p:
print(f" 取值范围:")
for item in p['value-list']:
print(f" {item['value']}: {item['description']}")
for a in s.get('actions', []):
print(f" aiid {a['iid']}: {a['description']}", end=' ')
print()
print()
if __name__ == '__main__':
print("正在加载设备列表...")
dev_list = requests.get(url_all).json().get('instances')
print(f"加载成功,现已支持{len(dev_list)}个设备")
model_ = input("请输入设备model:")
result = []
for item in dev_list:
if model_ in item['model'] or model_ in item['type']:
result.append(item)
# print(result)
if result:
print("已发现以下设备")
print("--------------------------------------")
print("序号\t model \t urn")
for idx, item in enumerate(result):
print(f"{idx+1}\t{item['model']}\t{item['type']}")
if len(result) > 1:
inp = input("请确认哪个是你的设备,输入序号:")
urn = result[int(inp)-1]['type']
else:
urn = result[0]['type']
params = {'type': urn}
r = requests.get(url_spec, params=params).json()
# print(r)
deviceinfo(r)
else:
print("未找到相关设备")
input("按任意键退出...") |
tests/tools/test_flake8_works.py | asdfCYBER/nbQA | 457 | 11113909 | """Check :code:`flake8` works as intended."""
import os
from textwrap import dedent
from typing import TYPE_CHECKING
import pytest
from nbqa.__main__ import main
if TYPE_CHECKING:
from _pytest.capture import CaptureFixture
@pytest.mark.parametrize(
"path_0, path_1, path_2",
(
(
os.path.abspath(
os.path.join("tests", "data", "notebook_for_testing.ipynb")
),
os.path.abspath(
os.path.join("tests", "data", "notebook_for_testing_copy.ipynb")
),
os.path.abspath(
os.path.join("tests", "data", "notebook_starting_with_md.ipynb")
),
),
(
os.path.join("tests", "data", "notebook_for_testing.ipynb"),
os.path.join("tests", "data", "notebook_for_testing_copy.ipynb"),
os.path.join("tests", "data", "notebook_starting_with_md.ipynb"),
),
),
)
def test_flake8_works(
path_0: str, path_1: str, path_2: str, capsys: "CaptureFixture"
) -> None:
"""
Check flake8 works. Shouldn't alter the notebook content.
Parameters
----------
capsys
Pytest fixture to capture stdout and stderr.
"""
# check passing both absolute and relative paths
main(["flake8", path_0, path_1, path_2])
expected_path_0 = os.path.join("tests", "data", "notebook_for_testing.ipynb")
expected_path_1 = os.path.join("tests", "data", "notebook_for_testing_copy.ipynb")
expected_path_2 = os.path.join("tests", "data", "notebook_starting_with_md.ipynb")
out, err = capsys.readouterr()
expected_out = dedent(
f"""\
{expected_path_0}:cell_1:1:1: F401 'os' imported but unused
{expected_path_0}:cell_1:3:1: F401 'glob' imported but unused
{expected_path_0}:cell_1:5:1: F401 'nbqa' imported but unused
{expected_path_0}:cell_2:19:9: W291 trailing whitespace
{expected_path_0}:cell_4:1:1: E402 module level import not at top of file
{expected_path_0}:cell_4:1:1: F401 'random.randint' imported but unused
{expected_path_0}:cell_5:1:1: E402 module level import not at top of file
{expected_path_0}:cell_5:2:1: E402 module level import not at top of file
{expected_path_1}:cell_1:1:1: F401 'os' imported but unused
{expected_path_1}:cell_1:3:1: F401 'glob' imported but unused
{expected_path_1}:cell_1:5:1: F401 'nbqa' imported but unused
{expected_path_2}:cell_1:1:1: F401 'os' imported but unused
{expected_path_2}:cell_1:3:1: F401 'glob' imported but unused
{expected_path_2}:cell_1:5:1: F401 'nbqa' imported but unused
{expected_path_2}:cell_3:2:1: E302 expected 2 blank lines, found 0
"""
)
expected_err = ""
assert sorted(out.splitlines()) == sorted(expected_out.splitlines())
assert sorted(err.splitlines()) == sorted(expected_err.splitlines())
|
front/models.py | llazzaro/django-front | 135 | 11113951 |
from django.db import models
from django.core.cache import cache
from django.dispatch import receiver
from django.db.models.signals import post_save
import hashlib
import six
class Placeholder(models.Model):
key = models.CharField(max_length=40, primary_key=True, db_index=True)
value = models.TextField(blank=True)
def __unicode__(self):
return self.value
def cache_key(self):
return "front-edit-%s" % self.key
@classmethod
def key_for(cls, name, *bits):
return hashlib.new('sha1', six.text_type(name + ''.join([six.text_type(token) for token in bits])).encode('utf8')).hexdigest()
@classmethod
def copy_content(cls, name, source_bits, target_bits):
source_key = cls.key_for(name, *source_bits)
target_key = cls.key_for(name, *target_bits)
source = cls.objects.filter(key=source_key)
if source.exists():
source = source.get()
cls.objects.create(key=target_key, value=source.value)
class PlaceholderHistory(models.Model):
placeholder = models.ForeignKey(Placeholder, related_name='history', on_delete=models.CASCADE)
value = models.TextField(blank=True)
saved = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-saved', )
@property
def _as_json(self):
return {'value': self.value, 'saved': self.saved.strftime('%s')}
@receiver(post_save, sender=Placeholder)
def save_placeholder(sender, instance, created, raw, *args, **kwargs):
if not raw:
# If we have history entries, check whether the content has changed before saving history
if PlaceholderHistory.objects.filter(placeholder=instance).exists():
# Meta ordering is '-saved', so [0] is the most recent entry for this placeholder
ph = PlaceholderHistory.objects.filter(placeholder=instance)[0]
if ph.value != instance.value:
PlaceholderHistory.objects.create(placeholder=instance, value=instance.value)
else:
PlaceholderHistory.objects.create(placeholder=instance, value=instance.value)
@receiver(post_save, sender=PlaceholderHistory)
def save_history(sender, instance, created, raw, *args, **kwargs):
cache.delete(instance.placeholder.cache_key())
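# Illustrative usage (a minimal sketch; the placeholder name and bits are hypothetical):
# key = Placeholder.key_for('sidebar', 'en') # sha1 over the name plus stringified bits
# Placeholder.objects.update_or_create(key=key, defaults={'value': '<p>Hello</p>'})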
|
spockbot/mcp/mcpacket.py | SpockBotMC/SpockBot | 171 | 11113966 | try:
basestring
except NameError:
basestring = str
import copy
import logging
import zlib
from time import gmtime, strftime
from spockbot.mcp import datautils, proto
from spockbot.mcp.bbuff import BoundBuffer, BufferUnderflowException
from spockbot.mcp.extensions import hashed_extensions
from spockbot.mcp.proto import MC_VARINT
logger = logging.getLogger('spockbot')
class PacketDecodeFailure(Exception):
def __init__(self, packet, pbuff, underflow=False):
self.packet = packet
self.pbuff = pbuff
self.underflow = underflow
class Packet(object):
def __init__(self,
ident=[proto.HANDSHAKE_STATE, proto.CLIENT_TO_SERVER, 0x00],
data=None
):
if isinstance(ident, basestring):
ident = proto.packet_str2ident[ident]
self.__ident = list(ident)
# Quick hack to fake default ident
if len(self.__ident) == 2:
self.__ident.append(0x00)
self.ident = tuple(self.__ident)
self.str_ident = proto.packet_ident2str[self.ident]
self.data = data if data else {}
def clone(self):
return Packet(self.ident, copy.deepcopy(self.data))
def new_ident(self, ident):
self.__init__(ident, self.data)
def decode(self, bbuff, proto_comp_state):
self.data = {}
packet_length = datautils.unpack(MC_VARINT, bbuff)
packet_data = bbuff.recv(packet_length)
pbuff = BoundBuffer(packet_data)
if proto_comp_state == proto.PROTO_COMP_ON:
body_length = datautils.unpack(MC_VARINT, pbuff)
if body_length:
body_data = zlib.decompress(pbuff.flush(), zlib.MAX_WBITS)
pbuff.write(body_data)
pbuff.save()
try:
# Ident
self.__ident[2] = datautils.unpack(MC_VARINT, pbuff)
self.ident = tuple(self.__ident)
self.str_ident = proto.packet_ident2str[self.ident]
# Payload
for dtype, name in proto.hashed_structs[self.ident]:
self.data[name] = datautils.unpack(dtype, pbuff)
# Extension
if self.ident in hashed_extensions:
hashed_extensions[self.ident].decode_extra(self, pbuff)
if pbuff:
raise PacketDecodeFailure(self, pbuff)
except BufferUnderflowException:
raise PacketDecodeFailure(self, pbuff, True)
return self
def encode(self, proto_comp_state, proto_comp_threshold, comp_level=6):
# Ident
o = datautils.pack(MC_VARINT, self.ident[2])
# Payload
for dtype, name in proto.hashed_structs[self.ident]:
o += datautils.pack(dtype, self.data[name])
# Extension
if self.ident in hashed_extensions:
o += hashed_extensions[self.ident].encode_extra(self)
if proto_comp_state == proto.PROTO_COMP_ON:
uncompressed_len = len(o)
if uncompressed_len < proto_comp_threshold:
header = datautils.pack(MC_VARINT, uncompressed_len + 1)
header += datautils.pack(MC_VARINT, 0)
else:
o = zlib.compress(o, comp_level)
ulen_varint = datautils.pack(MC_VARINT, uncompressed_len)
header = datautils.pack(MC_VARINT,
len(o) + len(ulen_varint))
header += ulen_varint
return header + o
elif proto_comp_state == proto.PROTO_COMP_OFF:
return datautils.pack(MC_VARINT, len(o)) + o
else:
return None
def __repr__(self):
s = ('<<<', '>>>')[self.ident[1]]
f = "[%s] %s (0x%02X, 0x%02X): %-" + str(
max([len(i) for i in proto.hashed_names.values()]) + 1) + "s%s"
return f % (
strftime("%H:%M:%S", gmtime()), s, self.ident[0], self.ident[2],
proto.hashed_names[self.ident],
str(self.data)
)
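# Illustrative round trip (a minimal sketch; the payload depends on the packet's
# entry in proto.hashed_structs, so the data dict here is hypothetical):
# pkt = Packet(ident=[proto.HANDSHAKE_STATE, proto.CLIENT_TO_SERVER, 0x00], data={...})
# wire = pkt.encode(proto.PROTO_COMP_OFF, proto_comp_threshold=256)
# decoded = Packet().decode(BoundBuffer(wire), proto.PROTO_COMP_OFF)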
|
data_preparation/move_valid_files.py | Dieg0Alejandr0/EquiBind | 128 | 11113977 |
import os
from shutil import copyfile
from tqdm import tqdm
from commons.utils import read_strings_from_txt
data_path = '../data/PDBBind'
overwrite = False
names = sorted(os.listdir(data_path))
invalid_names = read_strings_from_txt('select_chains.log')
valid_names = list(set(names) - set(invalid_names))
if not os.path.exists('../data/PDBBind_processed'):
os.mkdir('../data/PDBBind_processed')
for i, name in tqdm(enumerate(valid_names)):
if not os.path.exists(f'../data/PDBBind_processed/{name}'):
os.mkdir(f'../data/PDBBind_processed/{name}')
rec_path = os.path.join(data_path, name, f'{name}_protein.pdb')
copyfile(os.path.join(data_path, name, f'{name}_protein_processed.pdb'), f'../data/PDBBind_processed/{name}/{name}_protein_processed.pdb')
copyfile(os.path.join(data_path, name, f'{name}_ligand.mol2'), f'../data/PDBBind_processed/{name}/{name}_ligand.mol2')
copyfile(os.path.join(data_path, name, f'{name}_ligand.sdf'),
f'../data/PDBBind_processed/{name}/{name}_ligand.sdf')
|
DQM/L1TMonitor/python/L1GtHwValidation_cff.py | ckamtsikis/cmssw | 852 | 11113978 | import FWCore.ParameterSet.Config as cms
from DQM.L1TMonitor.l1GtHwValidation_cfi import *
from DQM.L1TMonitor.l1Stage1GtHwValidation_cfi import *
|
evdev/__init__.py | alexbprofit/python-evdev | 231 | 11113992 | #--------------------------------------------------------------------------
# Gather everything into a single, convenient namespace.
#--------------------------------------------------------------------------
from evdev.device import DeviceInfo, InputDevice, AbsInfo, EvdevError
from evdev.events import InputEvent, KeyEvent, RelEvent, SynEvent, AbsEvent, event_factory
from evdev.uinput import UInput, UInputError
from evdev.util import list_devices, categorize, resolve_ecodes, resolve_ecodes_dict
from evdev import ecodes
from evdev import ff
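# Illustrative usage (a minimal sketch; the device node is hypothetical):
# dev = InputDevice('/dev/input/event0')
# for event in dev.read_loop():
#     print(categorize(event))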
|
caffe2/python/operator_test/rms_norm_op_test.py | Hacky-DH/pytorch | 60,067 | 11113997 |
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import unittest
class TestRMSNormOp(hu.HypothesisTestCase):
@given(
M=st.integers(0, 8),
N=st.integers(1, 16),
eps=st.floats(0, 1e-3),
dtype=st.sampled_from([np.float32, np.float64]),
**hu.gcs,
)
@settings(deadline=None)
def test_rms_norm(self, M, N, eps, dtype, gc, dc):
X = (np.random.randn(M, N) * 2.0 + 1.0).astype(dtype)
gamma = np.random.randn(N).astype(dtype)
beta = np.random.randn(N).astype(dtype)
op = core.CreateOperator(
"RMSNorm",
["X", "gamma", "beta"],
["Y", "rrms"],
eps=eps,
)
def rms_norm_ref(X, gamma, beta):
rrms = 1.0 / np.sqrt(np.mean(np.square(X), axis=1) + eps)
Y = X * np.expand_dims(rrms, axis=1) * gamma + beta
return Y, rrms
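# Reference math: rrms = 1 / sqrt(mean(X^2, axis=1) + eps) and
# Y = X * rrms * gamma + beta, i.e. RMS normalization without mean-centering.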
inputs = [X, gamma, beta]
self.assertReferenceChecks(gc, op, inputs, rms_norm_ref)
self.assertDeviceChecks(dc, op, inputs, [0, 1])
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
if __name__ == "__main__":
unittest.main()
|
examples/applications/restapi/handlers.py | electrumsv/electrumsv | 136 | 11114041 |
# TODO(REST-API-Refactoring) Notes follow.
# - All these functions should be moved out of the example and into the code base proper. We should
# make a very simple example that extends it as a daemon app.
# - Remove the variables like VERSION, NETWORK, ...; a good idea in theory, but they overcomplicate
# things in practice.
# - In `handler_tools.argparser` it should check the type of each variable as it extracts them
# either from the route or the body, convert them at the point of extraction, and raise a
# fault on the first conversion that fails.
# - The only time a wallet name should be passed in the route is at load time for the filename.
# Beyond that we should use its ephemeral id, and we should perhaps consider replacing that
# with a GUID. We should also consider dropping the `.sqlite` suffix from wallet name.
# - Add a `pay` API where the caller just provides a destination address and the wallet manages
# everything and just returns some result to indicate success.
import asyncio
import atexit
from functools import partial
import json
import os
from pathlib import Path
import shutil
import tempfile
from typing import Any, cast, List, Optional
from aiohttp import web
import aiorpcx
import bitcoinx
import requests
from electrumsv.app_state import app_state
from electrumsv.bitcoin import COINBASE_MATURITY, script_template_to_string
from electrumsv.constants import AccountCreationType, CredentialPolicyFlag, KeystoreTextType, \
RECEIVING_SUBPATH
from electrumsv.keystore import instantiate_keystore_from_text
from electrumsv.storage import WalletStorage
from electrumsv.transaction import Transaction
from electrumsv.logs import logs
from electrumsv.networks import BitcoinRegtest, Net
from electrumsv.restapi import Fault, good_response, fault_to_http_response
from electrumsv.startup import base_dir
from electrumsv.types import TransactionSize
from .errors import Errors
from .handler_utils import ExtendedHandlerUtils, VNAME, InsufficientCoinsError, \
WalletInstanceKind, WalletInstancePaths
from .txstatewebsocket import TxStateWebSocket
logger = logs.get_logger("app_state")
# Makes this code docker-friendly (can access a node on host with "host.docker.internal"
BITCOIN_NODE_HOST = os.environ.get("BITCOIN_NODE_HOST") or "127.0.0.1"
BITCOIN_NODE_PORT = os.environ.get("BITCOIN_NODE_PORT") or 18332
BITCOIN_NODE_RPCUSER = os.environ.get("BITCOIN_NODE_RPCUSER") or "rpcuser"
BITCOIN_NODE_RPCPASSWORD = os.environ.get("BITCOIN_NODE_RPCPASSWORD") or "rpcpassword"
BITCOIN_NODE_URI = f"http://{BITCOIN_NODE_RPCUSER}:{BITCOIN_NODE_RPCPASSWORD}" \
f"@{BITCOIN_NODE_HOST}:{BITCOIN_NODE_PORT}"
def node_rpc_call(method_name: str, *args: Any) -> Any:
result = None
try:
if not args:
params = []
else:
params = [*args]
payload = json.dumps({"jsonrpc": "2.0", "method": f"{method_name}", "params": params,
"id": 0})
result = requests.post(BITCOIN_NODE_URI, data=payload)
result.raise_for_status()
return result
except requests.exceptions.HTTPError as e:
if result is not None:
logger.error(result.json()['error']['message'])
raise e
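# Illustrative call (a minimal sketch; assumes a reachable regtest node at BITCOIN_NODE_URI):
# info = node_rpc_call("getblockchaininfo")
# print(info.json()["result"]["blocks"])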
# hardcoded
# - WIF private_key: <KEY>
# - Pubkey hash: <KEY>
REGTEST_FUNDS_PRIVATE_KEY = bitcoinx.PrivateKey(
bytes.fromhex('a2d9803c912ab380c1491d3bd1aaab34ca06742d7885a224ec8d386182d26ed2'),
coin=BitcoinRegtest)
REGTEST_FUNDS_PRIVATE_KEY_WIF = REGTEST_FUNDS_PRIVATE_KEY.to_WIF()
REGTEST_FUNDS_PUBLIC_KEY: bitcoinx.PublicKey = REGTEST_FUNDS_PRIVATE_KEY.public_key
REGTEST_P2PKH_ADDRESS: str = REGTEST_FUNDS_PUBLIC_KEY.to_address(coin=Net.COIN).to_string()
def regtest_get_mined_balance() -> int:
# Calculate matured balance
payload = json.dumps({"jsonrpc": "2.0", "method": "listunspent",
"params": [1, 1_000_000_000, [REGTEST_P2PKH_ADDRESS]], "id": 1})
result = requests.post(BITCOIN_NODE_URI, data=payload)
result.raise_for_status()
utxos = result.json()['result']
matured_balance = sum(
utxo['amount'] for utxo in utxos if utxo['confirmations'] > COINBASE_MATURITY)
logger.debug("matured coins in regtest slush fund=%s", matured_balance)
return matured_balance
def regtest_topup_account(receive_address: bitcoinx.P2PKH_Address, amount: int=25) \
-> Optional[str]:
matured_balance = regtest_get_mined_balance()
while matured_balance < amount:
nblocks = 1
if matured_balance == 0:
nblocks = 200
result = node_rpc_call("generatetoaddress", nblocks, REGTEST_P2PKH_ADDRESS)
if result.status_code == 200:
logger.debug(f"generated {nblocks}: {result.json()['result']}")
matured_balance = regtest_get_mined_balance()
# Note: for bare multi-sig support may need to craft rawtxs manually via bitcoind's
# 'signrawtransaction' jsonrpc method - AustEcon
payload = json.dumps({"jsonrpc": "2.0", "method": "sendtoaddress",
"params": [receive_address.to_string(), amount], "id": 0})
result = requests.post(BITCOIN_NODE_URI, data=payload)
if result.status_code != 200:
raise requests.exceptions.HTTPError(result.text)
txid = cast(str, result.json()['result'])
logger.info("topped up wallet with %s coins to receive address='%s'. txid=%s", amount,
receive_address.to_string(), txid)
return txid
def regtest_generate_nblocks(nblocks: int, address: str) -> List[str]:
payload1 = json.dumps(
{"jsonrpc": "2.0", "method": "generatetoaddress", "params": [nblocks, address],
"id": 0})
result = requests.post(BITCOIN_NODE_URI, data=payload1)
result.raise_for_status()
block_hashes = []
for block_hash in cast(List[str], result.json()['result']):
block_hashes.append(block_hash)
logger.debug("newly mined blockhash: %s", block_hash)
logger.debug("mined %s new blocks (funds to address=%s). use the "
"'regtest_topup_account' method to fund your account", nblocks, address)
return block_hashes
class ExtensionEndpoints(ExtendedHandlerUtils):
"""Extension endpoints for ElectrumSV REST API"""
routes = []
# PATHS
VERSION = "/v1"
NETWORK = "/{network}"
BASE = VERSION + NETWORK + "/dapp" # avoid conflicts with built-ins
WALLETS_TLD = BASE + "/wallets"
WALLETS_PARENT = WALLETS_TLD + "/{wallet_name}"
WALLETS_ACCOUNT = WALLETS_PARENT + "/{account_id}"
ACCOUNT_TXS = WALLETS_ACCOUNT + "/txs"
ACCOUNT_UTXOS = WALLETS_ACCOUNT + "/utxos"
def __init__(self):
super().__init__()
self.logger = logs.get_logger("restapi-dapp")
self.app_state = app_state # easier to monkeypatch for testing
self.add_routes()
self.temp_dir = tempfile.TemporaryDirectory()
def cleanup(self) -> None:
atexit.register(self.temp_dir.cleanup)
def add_routes(self):
self.routes = [
web.get(self.WALLETS_TLD, self.get_all_wallets),
web.post("/v1/{network}/dapp/wallets/load_instanced", self.load_instanced_wallet),
web.get(self.WALLETS_PARENT, self.get_parent_wallet),
web.post(self.WALLETS_PARENT + "/load_wallet", self.load_wallet),
web.get(self.WALLETS_ACCOUNT, self.get_account),
web.post("/v1/{network}/dapp/wallets/{wallet_id}/{account_id}/payment_request",
self.create_payment_request),
web.get(self.ACCOUNT_UTXOS + "/coin_state", self.get_coin_state),
web.get(self.ACCOUNT_UTXOS, self.get_utxos),
web.get(self.ACCOUNT_UTXOS + "/balance", self.get_balance),
web.delete(self.ACCOUNT_TXS, self.remove_txs),
web.get(self.ACCOUNT_TXS + "/history", self.get_transaction_history),
web.post(self.ACCOUNT_TXS + "/fetch", self.fetch_transaction),
web.post(self.ACCOUNT_TXS + "/create", self.create_tx),
web.post(self.ACCOUNT_TXS + "/create_and_broadcast", self.create_and_broadcast),
web.post(self.ACCOUNT_TXS + "/broadcast", self.broadcast),
web.post(self.ACCOUNT_TXS + "/split_utxos", self.split_utxos),
web.view(self.ACCOUNT_TXS + "/websocket/text-events", TxStateWebSocket),
]
if app_state.config.get('regtest'):
self.routes.extend([
web.post(self.WALLETS_ACCOUNT + "/topup_account", self.topup_account),
web.post(self.WALLETS_ACCOUNT + "/generate_blocks", self.generate_blocks),
web.post(self.WALLETS_PARENT + "/create_new_wallet", self.create_new_wallet),
])
# ----- Extends electrumsv/restapi_endpoints ----- #
async def get_all_wallets(self, request: web.Request) -> web.Response:
try:
all_parent_wallets = self._get_all_wallets(self.wallets_path)
response = all_parent_wallets
return good_response({"wallets": response})
except Fault as e:
return fault_to_http_response(e)
async def get_parent_wallet(self, request: web.Request) -> web.Response:
"""Overview of parent wallet and accounts"""
try:
vars = await self.argparser(request, required_vars=[VNAME.WALLET_NAME])
wallet_name = vars[VNAME.WALLET_NAME]
wallet = self._get_parent_wallet(wallet_name)
accounts = self._accounts_dto(wallet)
response = {"parent_wallet": wallet_name,
"accounts": accounts}
return good_response(response)
except Fault as e:
return fault_to_http_response(e)
async def load_instanced_wallet(self, request: web.Request) -> web.Response:
"""
This copies a pre-generated wallet file to a temporary location and loads it. It can only
be called once for each wallet instance kind and will error if the instance file name is
in use. We do not want duplicated wallets reusing keys, it is problems waiting to happen.
The reason we do this via ids, is that we do not want to allow users to load wallets from
arbitrary paths.
"""
try:
vars = await self.argparser(request, required_vars=[VNAME.PASSWORD,
VNAME.WALLET_INSTANCE_ID])
valid_instance_ids = set(item.value for item in WalletInstanceKind)
wallet_instance_id = cast(int, vars[VNAME.WALLET_INSTANCE_ID])
if wallet_instance_id not in valid_instance_ids:
raise Fault(message="Unknown wallet instance id")
relative_wallet_path = WalletInstancePaths[wallet_instance_id]
wallet_path = os.path.join(base_dir, relative_wallet_path)
if not os.path.exists(wallet_path):
raise Fault(message="Instanced wallet path invalid")
wallet_filename = os.path.basename(wallet_path)
instanced_wallet_path = os.path.join(self.temp_dir.name, wallet_filename)
if os.path.exists(instanced_wallet_path):
raise Fault(message="Instanced wallet in use")
shutil.copyfile(wallet_path, instanced_wallet_path)
wallet = await self._load_wallet(instanced_wallet_path, vars[VNAME.PASSWORD],
enforce_wallet_directory=False)
accounts = self._accounts_dto(wallet)
return good_response({
"wallet_id": wallet.get_id(),
"accounts": accounts
})
except Fault as e:
return fault_to_http_response(e)
async def load_wallet(self, request: web.Request) -> web.Response:
try:
vars = await self.argparser(request, required_vars=[VNAME.PASSWORD, VNAME.WALLET_NAME])
wallet_name = vars[VNAME.WALLET_NAME]
wallet = await self._load_wallet(wallet_name, vars[VNAME.PASSWORD])
accounts = self._accounts_dto(wallet)
return good_response({
"wallet_id": wallet.get_id(),
"parent_wallet": wallet_name,
"accounts": accounts
})
except Fault as e:
return fault_to_http_response(e)
async def create_new_wallet(self, request: web.Request) -> web.Response:
"""only for regtest for the moment..."""
try:
vars = await self.argparser(request, required_vars=[VNAME.PASSWORD, VNAME.WALLET_NAME],
check_wallet_availability=False)
create_filepath = str(Path(self.wallets_path).joinpath(vars[VNAME.WALLET_NAME]))
self.check_if_wallet_exists(create_filepath)
storage = WalletStorage.create(create_filepath, vars[VNAME.PASSWORD])
storage.close()
app_state.credentials.set_wallet_password(create_filepath, vars[VNAME.PASSWORD],
CredentialPolicyFlag.FLUSH_AFTER_WALLET_LOAD)
parent_wallet = self.app_state.daemon.load_wallet(create_filepath)
assert parent_wallet is not None
# create an account for the Wallet with the same password via an imported seed
text_type = KeystoreTextType.EXTENDED_PRIVATE_KEY
text_match = '<KEY>' \
'<KEY>'
keystore = instantiate_keystore_from_text(text_type, text_match, vars[VNAME.PASSWORD],
derivation_text=None, passphrase='')
parent_wallet.create_account_from_keystore(AccountCreationType.IMPORTED, keystore)
await self._load_wallet(vars[VNAME.WALLET_NAME], vars[VNAME.PASSWORD])
response = {"new_wallet": create_filepath}
return good_response(response)
except Fault as e:
return fault_to_http_response(e)
async def get_account(self, request: web.Request) -> web.Response:
"""Overview of a single 'account"""
try:
vars = await self.argparser(request, required_vars=[VNAME.WALLET_NAME,
VNAME.ACCOUNT_ID])
wallet_name = vars[VNAME.WALLET_NAME]
account_id = vars[VNAME.ACCOUNT_ID]
account = self._get_account(wallet_name, account_id)
response = self._account_dto(account)
return good_response(response)
except Fault as e:
return fault_to_http_response(e)
async def topup_account(self, request):
"""only for regtest"""
try:
vars = await self.argparser(request,
required_vars=[VNAME.WALLET_NAME, VNAME.ACCOUNT_ID])
wallet_name = vars[VNAME.WALLET_NAME]
account_id = vars[VNAME.ACCOUNT_ID]
amount = vars.get(VNAME.AMOUNT, 25)
account = self._get_account(wallet_name, account_id)
receive_key = account.get_fresh_keys(RECEIVING_SUBPATH, 1)[0]
receive_address = account.get_script_template_for_derivation(
account.get_default_script_type(),
receive_key.derivation_type, receive_key.derivation_data2)
txid = regtest_topup_account(receive_address, amount)
response = {"txid": txid}
return good_response(response)
except Fault as e:
return fault_to_http_response(e)
async def generate_blocks(self, request):
"""only for regtest"""
try:
vars = await self.argparser(request,
required_vars=[VNAME.WALLET_NAME, VNAME.ACCOUNT_ID])
nblocks = vars.get(VNAME.NBLOCKS, 1)
block_hashes = regtest_generate_nblocks(nblocks, REGTEST_P2PKH_ADDRESS)
response = {"block_hashes": block_hashes}
return good_response(response)
except Fault as e:
return fault_to_http_response(e)
async def create_payment_request(self, request: web.Request) -> web.Response:
try:
vars = await self.argparser(request, required_vars=[VNAME.WALLET_ID,
VNAME.ACCOUNT_ID, VNAME.MESSAGE ])
wallet_id = vars[VNAME.WALLET_ID]
account_id = vars[VNAME.ACCOUNT_ID]
message = vars[VNAME.MESSAGE]
if not len(message):
raise Fault(message="Empty message")
wallet = self._get_wallet_by_id(wallet_id)
account = self._get_account_from_wallet(wallet, account_id)
future, key_data = account.create_payment_request(message)
rows = await asyncio.wrap_future(future)
if len(rows) != 1:
raise Fault(message="Error creating the payment request")
script_type = account.get_default_script_type()
script_template = account.get_script_template_for_derivation(
script_type, key_data.derivation_type, key_data.derivation_data2)
if script_template is None:
raise Fault(message="Error creating the payment destination")
text = script_template_to_string(script_template)
return good_response({
"script_type": script_type.name,
"destination": text,
})
except Fault as e:
return fault_to_http_response(e)
async def get_coin_state(self, request: web.Request) -> web.Response:
"""get coin state (unconfirmed and confirmed coin count)"""
try:
vars = await self.argparser(request, required_vars=[VNAME.WALLET_NAME,
VNAME.ACCOUNT_ID])
wallet_name = vars[VNAME.WALLET_NAME]
account_id = vars[VNAME.ACCOUNT_ID]
account = self._get_account(wallet_name, account_id)
response = self._coin_state_dto(account)
return good_response(response)
except Fault as e:
return fault_to_http_response(e)
async def get_utxos(self, request: web.Request) -> web.Response:
try:
vars = await self.argparser(request, required_vars=[VNAME.WALLET_NAME,
VNAME.ACCOUNT_ID])
wallet_name = vars[VNAME.WALLET_NAME]
account_id = vars[VNAME.ACCOUNT_ID]
exclude_frozen = vars.get(VNAME.EXCLUDE_FROZEN, False)
confirmed_only = vars.get(VNAME.CONFIRMED_ONLY, False)
mature = vars.get(VNAME.MATURE, True)
account = self._get_account(wallet_name, account_id)
utxos = account.get_transaction_outputs_with_key_data(exclude_frozen=exclude_frozen,
confirmed_only=confirmed_only, mature=mature)
result = self._utxo_dto(utxos)
response = {"utxos": result}
return good_response(response)
except Fault as e:
return fault_to_http_response(e)
async def get_balance(self, request: web.Request) -> web.Response:
"""get confirmed, unconfirmed and coinbase balances"""
try:
vars = await self.argparser(request, required_vars=[VNAME.WALLET_NAME,
VNAME.ACCOUNT_ID])
wallet_name = vars[VNAME.WALLET_NAME]
account_id = vars[VNAME.ACCOUNT_ID]
account = self._get_account(wallet_name, account_id)
response = self._balance_dto(wallet=account)
return good_response(response)
except Fault as e:
return fault_to_http_response(e)
async def remove_txs(self, request: web.Request) -> web.Response:
# follows this spec https://opensource.zalando.com/restful-api-guidelines/#152
"""This might be used to clean up after creating many transactions that were never sent."""
try:
vars = await self.argparser(request, required_vars=[VNAME.WALLET_NAME,
VNAME.ACCOUNT_ID, VNAME.TXIDS])
wallet_name = vars[VNAME.WALLET_NAME]
account_id = vars[VNAME.ACCOUNT_ID]
txids = vars[VNAME.TXIDS]
account = self._get_account(wallet_name, account_id)
results = []
if txids:
for txid in txids:
try:
self.remove_transaction(bitcoinx.hex_str_to_hash(txid), account)
results.append({"id": txid, "result": 200})
except Fault as e:
if e.code == Errors.DISABLED_FEATURE_CODE:
results.append({"id": txid, "result": 400,
"description": Errors.DISABLED_FEATURE_MESSAGE})
if e.code == Errors.TRANSACTION_NOT_FOUND_CODE:
results.append({"id": txid, "result": 400,
"description": Errors.TRANSACTION_NOT_FOUND_MESSAGE})
return self.batch_response({"items": results})
except Fault as e:
return fault_to_http_response(e)
async def get_transaction_history(self, request: web.Request) -> web.Response:
"""get transactions - currently only used for debugging via 'postman'"""
try:
vars = await self.argparser(request, required_vars=[VNAME.WALLET_NAME,
VNAME.ACCOUNT_ID])
wallet_name = vars[VNAME.WALLET_NAME]
account_id = vars[VNAME.ACCOUNT_ID]
tx_flags = vars.get(VNAME.TX_FLAGS)
account = self._get_account(wallet_name, account_id)
response = self._history_dto(account, tx_flags)
return good_response({"history": response})
except Fault as e:
return fault_to_http_response(e)
async def fetch_transaction(self, request: web.Request) -> web.Response:
"""get transaction"""
try:
required_vars = [VNAME.WALLET_NAME, VNAME.ACCOUNT_ID, VNAME.TXID]
vars = await self.argparser(request, required_vars)
wallet_name = vars[VNAME.WALLET_NAME]
account_id = vars[VNAME.ACCOUNT_ID]
txid = vars[VNAME.TXID]
account = self._get_account(wallet_name, account_id)
response = self._fetch_transaction_dto(account, tx_id=txid)
return good_response(response)
except Fault as e:
return fault_to_http_response(e)
async def create_tx(self, request: web.Request) -> web.Response:
"""
General purpose transaction builder.
- Should handle any kind of output script.( see bitcoinx.address for
utilities for building p2pkh, multisig etc outputs as hex strings.)
"""
account = None
tx = None
try:
tx, account, password = await self._create_tx_helper(request)
response = {"txid": tx.txid(),
"rawtx": str(tx)}
return good_response(response)
except Fault as e:
if tx and tx.is_complete() and e.code != Errors.ALREADY_SENT_TRANSACTION_CODE:
self.cleanup_tx(tx, account)
return fault_to_http_response(e)
except Exception as e:
if tx and tx.is_complete():
self.cleanup_tx(tx, account)
return fault_to_http_response(
Fault(code=Errors.GENERIC_INTERNAL_SERVER_ERROR, message=str(e)))
async def create_and_broadcast(self, request):
account = None
tx = None
try:
tx, account, password = await self._create_tx_helper(request)
try:
result = await self._broadcast_transaction(str(tx), tx.hash(), account)
except aiorpcx.jsonrpc.RPCError as e:
raise Fault(Errors.AIORPCX_ERROR_CODE, e.message)
self.prev_transaction = result
response = {"txid": result}
self.logger.debug("successful broadcast for %s", result)
return good_response(response)
except Fault as e:
if tx and tx.is_complete() and e.code != Errors.ALREADY_SENT_TRANSACTION_CODE:
self.cleanup_tx(tx, account)
return fault_to_http_response(e)
except Exception as e:
self.logger.exception("unexpected error in create_and_broadcast handler")
if tx and tx.is_complete() and not (
isinstance(e, AssertionError) and str(e) == 'duplicate set not supported'):
self.cleanup_tx(tx, account)
return fault_to_http_response(
Fault(code=Errors.GENERIC_INTERNAL_SERVER_ERROR, message=str(e)))
async def broadcast(self, request: web.Request) -> web.Response:
"""Broadcast a rawtx (hex string) to the network. """
try:
required_vars = [VNAME.WALLET_NAME, VNAME.ACCOUNT_ID, VNAME.RAWTX]
vars = await self.argparser(request, required_vars=required_vars)
wallet_name = vars[VNAME.WALLET_NAME]
index = vars[VNAME.ACCOUNT_ID]
rawtx = vars[VNAME.RAWTX]
account = self._get_account(wallet_name, index)
tx = Transaction.from_hex(rawtx)
self.raise_for_duplicate_tx(tx)
try:
result = await self._broadcast_transaction(rawtx, tx.hash(), account)
except aiorpcx.jsonrpc.RPCError as e:
raise Fault(Errors.AIORPCX_ERROR_CODE, e.message)
self.prev_transaction = result
response = {"txid": result}
return good_response(response)
except Fault as e:
return fault_to_http_response(e)
async def split_utxos(self, request: web.Request) -> web.Response:
account = None
tx = None
try:
required_vars = [VNAME.WALLET_NAME, VNAME.ACCOUNT_ID, VNAME.SPLIT_COUNT, VNAME.PASSWORD]
vars = await self.argparser(request, required_vars=required_vars)
wallet_name = vars[VNAME.WALLET_NAME]
account_id = vars[VNAME.ACCOUNT_ID]
split_count = vars[VNAME.SPLIT_COUNT]
# optional
split_value = vars.get(VNAME.SPLIT_VALUE, 10000)
password = vars.get(VNAME.PASSWORD, None)
desired_utxo_count = vars.get(VNAME.DESIRED_UTXO_COUNT, 2000)
require_confirmed = vars.get(VNAME.REQUIRE_CONFIRMED, False)
account = self._get_account(wallet_name, account_id)
# Approximate size of a transaction with one P2PKH input and one P2PKH output.
base_fee = self.app_state.config.estimate_fee(TransactionSize(203, 0))
loop = asyncio.get_event_loop()
# run in thread - CPU intensive code
partial_coin_selection = partial(self.select_inputs_and_outputs,
self.app_state.config, account, base_fee,
split_count=split_count, desired_utxo_count=desired_utxo_count,
require_confirmed=require_confirmed, split_value=split_value)
split_result = await loop.run_in_executor(self.txb_executor, partial_coin_selection)
if isinstance(split_result, Fault):
return fault_to_http_response(split_result)
self.logger.debug("split result: %s", split_result)
utxos, outputs, attempted_split = split_result
if not attempted_split:
fault = Fault(Errors.SPLIT_FAILED_CODE, Errors.SPLIT_FAILED_MESSAGE)
return fault_to_http_response(fault)
tx, tx_context = account.make_unsigned_transaction(utxos, outputs)
future = account.sign_transaction(tx, password, tx_context)
if future is not None:
future.result()
self.raise_for_duplicate_tx(tx)
# broadcast
result = await self._broadcast_transaction(str(tx), tx.hash(), account)
self.prev_transaction = result
response = {"txid": result}
return good_response(response)
except Fault as e:
if tx and tx.is_complete() and e.code != Errors.ALREADY_SENT_TRANSACTION_CODE:
self.cleanup_tx(tx, account)
return fault_to_http_response(e)
except InsufficientCoinsError as e:
self.logger.debug(Errors.INSUFFICIENT_COINS_MESSAGE)
return fault_to_http_response(
Fault(Errors.INSUFFICIENT_COINS_CODE, Errors.INSUFFICIENT_COINS_MESSAGE))
except Exception as e:
if tx and tx.is_complete():
self.cleanup_tx(tx, account)
return fault_to_http_response(
Fault(code=Errors.GENERIC_INTERNAL_SERVER_ERROR, message=str(e)))
|
alipay/aop/api/response/AlipayOpenMiniTemplatemessageUsertemplateApplyResponse.py | snowxmas/alipay-sdk-python-all | 213 | 11114055 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenMiniTemplatemessageUsertemplateApplyResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenMiniTemplatemessageUsertemplateApplyResponse, self).__init__()
self._user_template_id = None
@property
def user_template_id(self):
return self._user_template_id
@user_template_id.setter
def user_template_id(self, value):
self._user_template_id = value
def parse_response_content(self, response_content):
response = super(AlipayOpenMiniTemplatemessageUsertemplateApplyResponse, self).parse_response_content(response_content)
if 'user_template_id' in response:
self.user_template_id = response['user_template_id']
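# Illustrative usage (a minimal sketch; the payload shape is an assumption about
# what the gateway returns, parsed by the AlipayResponse base class):
# resp = AlipayOpenMiniTemplatemessageUsertemplateApplyResponse()
# resp.parse_response_content('{"user_template_id": "some-template-id"}')
# print(resp.user_template_id)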
|
nitorch/transforms.py | ArneBinder/Pytorch-LRP | 117 | 11114068 |
import numpy as np
import numbers
import torch
from scipy.ndimage.interpolation import rotate
from scipy.ndimage.interpolation import zoom
def normalize_float(x, min=-1):
"""
Function that performs min-max normalization on a `numpy.ndarray`
matrix.
"""
if min == -1:
norm = (2 * (x - np.min(x)) / (np.max(x) - np.min(x))) - 1
elif min == 0:
if np.max(x) == 0 and np.min(x) == 0:
norm = x
else:
norm = (x - np.min(x)) / (np.max(x) - np.min(x))
return norm
def normalize_float_torch(x, min=-1):
'''
Function that performs min-max normalization on a Pytorch tensor
matrix.
'''
if min == -1:
norm = (2 * (x - torch.min(x)) / (torch.max(x) - torch.min(x))) - 1
elif min == 0:
if torch.max(x) == 0 and torch.min(x) == 0:
norm = x
else:
norm = (x - torch.min(x)) / (torch.max(x) - torch.min(x))
return norm
def normalization_factors(data, train_idx, shape, mode="slice"):
"""
Shape should be of length 3.
mode : either "slice" or "voxel" - defines the granularity of the
normalization. Voxelwise normalization does not work well with only
linear registered data.
"""
print("Computing the normalization factors of the training data..")
if mode == "slice":
axis = (0, 1, 2, 3)
elif mode == "voxel":
axis = 0
else:
raise NotImplementedError("Normalization mode unknown.")
samples = np.zeros(
[len(train_idx), 1, shape[0], shape[1], shape[2]], dtype=np.float32
)
for c, value in enumerate(train_idx):
samples[c] = data[value]["image"].numpy()
mean = np.mean(samples, axis=axis)
std = np.std(samples, axis=axis)
return np.squeeze(mean), np.squeeze(std)
class CenterCrop(object):
"""Crops the given 3D ndarray Image at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w, d), a cube crop (size, size, size) is
made.
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
# store as an array so the arithmetic in __call__ also works for the scalar case
self.size = np.asarray((int(size), int(size), int(size)))
else:
self.size = np.asarray(size)
assert len(self.size) == 3, "The `size` must be a tuple of length 3 but is \
length {}".format(len(self.size))
def __call__(self, img):
"""
Args:
3D ndarray Image : Image to be cropped.
Returns:
3D ndarray Image: Cropped image.
"""
# if the 4th dimension of the image is the batch then ignore that dim
if len(img.shape) == 4:
img_size = img.shape[1:]
elif len(img.shape) == 3:
img_size = img.shape
else:
raise ValueError("The size of the image can be either 3 dimension or 4\
dimension with one dimension as the batch size")
# crop only if the size of the image is bigger than the size to be cropped to.
if all(img_size >= self.size):
slice_start = (img_size - self.size)//2
slice_end = self.size + slice_start
cropped = img[slice_start[0]:slice_end[0],
slice_start[1]:slice_end[1],
slice_start[2]:slice_end[2]
]
if len(img.shape) == 4:
cropped = np.expand_dims(cropped, 0)
else:
cropped = img
return cropped
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
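# Illustrative usage (a minimal sketch; the input shape is hypothetical):
# crop = CenterCrop(160)
# out = crop(np.random.rand(181, 217, 181)) # -> ndarray of shape (160, 160, 160)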
class Normalize(object):
"""
Normalize tensor with first and second moments.
By default will only normalize on non-zero voxels. Set
masked = False if this is undesired.
"""
def __init__(self, mean, std=1, masked=True, eps=1e-10):
self.mean = mean
self.std = std
self.masked = masked
# set epsilon only if using std scaling
self.eps = eps if not np.all(std == 1) else 0
def __call__(self, image):
if self.masked:
image = self.zero_masked_transform(image)
else:
image = self.apply_transform(image)
return image
def denormalize(self, image):
image = image * (self.std + self.eps) + self.mean
return image
def apply_transform(self, image):
return (image - self.mean) / (self.std + self.eps)
def zero_masked_transform(self, image):
""" Only apply transform where input is not zero. """
img_mask = image == 0
# do transform
image = self.apply_transform(image)
image[img_mask] = 0.
return image
class IntensityRescale:
"""
Rescale image itensities between 0 and 1 for a single image.
Arguments:
masked: applies normalization only on non-zero voxels. Default
is True.
on_gpu: speed up computation by using GPU. Requires torch.Tensor
instead of np.array. Default is False.
"""
def __init__(self, masked=True, on_gpu=False):
self.masked = masked
self.on_gpu = on_gpu
def __call__(self, image):
if self.masked:
image = self.zero_masked_transform(image)
else:
image = self.apply_transform(image)
return image
def apply_transform(self, image):
if self.on_gpu:
return normalize_float_torch(image, min=0)
else:
return normalize_float(image, min=0)
def zero_masked_transform(self, image):
""" Only apply transform where input is not zero. """
img_mask = image == 0
# do transform
image = self.apply_transform(image)
image[img_mask] = 0.
return image
########################################################################
# Data augmentations
########################################################################
class ToTensor(object):
"""
Convert ndarrays to Tensors.
Expands channel axis
# numpy image: H x W x Z
# torch image: C x H x W x Z
"""
def __call__(self, image):
image = torch.from_numpy(image).unsqueeze(0)
image = image.float()
return image
class Flip:
"""
Flip the input along a given axis.
Arguments:
axis: axis to flip over. Default is 0
prob: probability to flip the image. Executes always when set to
1. Default is 0.5
"""
def __init__(self, axis=0, prob=0.5):
self.axis = axis
self.prob = prob
def __call__(self, image):
rand = np.random.uniform()
if rand <= self.prob:
augmented = np.flip(image, axis=self.axis).copy()
else:
augmented = image
return augmented
class SagittalFlip(Flip):
"""
Flip image along the sagittal axis (x-axis).
Expects input shape (X, Y, Z).
"""
def __init__(self, prob=0.5):
super().__init__(axis=0, prob=prob)
def __call__(self, image):
assert(len(image.shape) == 3)
return super().__call__(image)
class CoronalFlip(Flip):
"""
Flip image along the coronal axis (y-axis).
Expects input shape (X, Y, Z).
"""
def __init__(self, prob=0.5):
super().__init__(axis=1, prob=prob)
def __call__(self, image):
assert(len(image.shape) == 3)
return super().__call__(image)
class AxialFlip(Flip):
"""
Flip image along the axial axis (z-axis).
Expects input shape (X, Y, Z).
"""
def __init__(self, prob=0.5):
super().__init__(axis=2, prob=prob)
def __call__(self, image):
assert(len(image.shape) == 3)
return super().__call__(image)
class Rotate:
"""
Rotate the input along a given axis.
Arguments:
axis: axis to rotate. Default is 0
deg: min and max rotation angles in degrees. Randomly rotates
within that range. Can be scalar, list or tuple. In case of
scalar it rotates between -abs(deg) and abs(deg). Default is
(-3, 3).
"""
def __init__(self, axis=0, deg=(-3, 3)):
if axis == 0:
self.axes = (1, 0)
elif axis == 1:
self.axes = (2, 1)
elif axis == 2:
self.axes = (0, 2)
if isinstance(deg, tuple) or isinstance(deg, list):
assert(len(deg) == 2)
self.min_rot = np.min(deg)
self.max_rot = np.max(deg)
else:
self.min_rot = -int(abs(deg))
self.max_rot = int(abs(deg))
def __call__(self, image):
rand = np.random.randint(self.min_rot, self.max_rot + 1)
augmented = rotate(
image,
angle=rand,
axes=self.axes,
reshape=False
).copy()
return augmented
class SagittalRotate(Rotate):
"""
Rotate image's sagittal axis (x-axis).
Expects input shape (X, Y, Z).
"""
def __init__(self, deg=(-3, 3)):
super().__init__(axis=0, deg=deg)
class CoronalRotate(Rotate):
"""
Rotate image's coronal axis (y-axis).
Expects input shape (X, Y, Z).
"""
def __init__(self, deg=(-3, 3)):
super().__init__(axis=1, deg=deg)
class AxialRotate(Rotate):
"""
Rotate image's axial axis (z-axis).
Expects input shape (X, Y, Z).
"""
def __init__(self, deg=(-3, 3)):
super().__init__(axis=2, deg=deg)
class Translate:
"""
Translate the input along a given axis.
Arguments:
axis: axis to rotate. Default is 0
dist: min and max translation distance in pixels. Randomly
translates within that range. Can be scalar, list or tuple.
In case of scalar it translates between -abs(dist) and
abs(dist). Default is (-3, 3).
"""
def __init__(self, axis=0, dist=(-3, 3)):
self.axis = axis
if isinstance(dist, tuple) or isinstance(dist, list):
assert(len(dist) == 2)
self.min_trans = np.min(dist)
self.max_trans = np.max(dist)
else:
self.min_trans = -int(abs(dist))
self.max_trans = int(abs(dist))
def __call__(self, image):
rand = np.random.randint(self.min_trans, self.max_trans + 1)
augmented = np.zeros_like(image)
if self.axis == 0:
if rand < 0:
augmented[-rand:, :] = image[:rand, :]
elif rand > 0:
augmented[:-rand, :] = image[rand:, :]
else:
augmented = image
elif self.axis == 1:
if rand < 0:
augmented[:,-rand:, :] = image[:,:rand, :]
elif rand > 0:
augmented[:,:-rand, :] = image[:,rand:, :]
else:
augmented = image
elif self.axis == 2:
if rand < 0:
augmented[:,:,-rand:] = image[:,:,:rand]
elif rand > 0:
augmented[:,:,:-rand] = image[:,:,rand:]
else:
augmented = image
return augmented
class SagittalTranslate(Translate):
"""
Translate image along the sagittal axis (x-axis).
Expects input shape (X, Y, Z).
"""
def __init__(self, dist=(-3, 3)):
super().__init__(axis=0, dist=dist)
class CoronalTranslate(Translate):
"""
Translate image along the coronal axis (y-axis).
Expects input shape (X, Y, Z).
"""
def __init__(self, dist=(-3, 3)):
super().__init__(axis=1, dist=dist)
class AxialTranslate(Translate):
"""
Translate image along the axial axis (z-axis).
Expects input shape (X, Y, Z).
"""
def __init__(self, dist=(-3, 3)):
super().__init__(axis=2, dist=dist)
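# Illustrative pipeline (a minimal sketch; applying the transforms by simple
# iteration is an assumption, and the shapes are hypothetical):
# pipeline = [SagittalFlip(prob=0.5), CoronalRotate(deg=(-3, 3)), IntensityRescale(), ToTensor()]
# x = np.random.rand(96, 96, 96).astype(np.float32)
# for t in pipeline:
#     x = t(x) # final x: torch.FloatTensor of shape (1, 96, 96, 96)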
|
demo/demo/app/models.py | HiveTechies/django-star-ratings | 296 | 11114076 | from __future__ import unicode_literals
from django.db import models
class Foo(models.Model):
bar = models.CharField(max_length=100)
|
homeassistant/components/workday/__init__.py | domwillcode/home-assistant | 30,023 | 11114093 | <filename>homeassistant/components/workday/__init__.py
"""Sensor to indicate whether the current day is a workday."""
|
ui/UpgradeDownloader.py | s1kx/BitcoinArmory | 410 | 11114096 | <gh_stars>100-1000
################################################################################
# #
# Copyright (C) 2011-2015, Armory Technologies, Inc. #
# Distributed under the GNU Affero General Public License (AGPL v3) #
# See LICENSE or http://www.gnu.org/licenses/agpl.html #
# #
################################################################################
from PyQt4.Qt import *
from PyQt4.QtGui import *
from PyQt4.QtNetwork import *
from qtdefines import *
from armoryengine.parseAnnounce import *
class UpgradeDownloader:
def __init__(self, parent, main):
self.finishedCB = lambda : None
self.startedCB = lambda : None
self.url = None
self.filesha = None
self.downloadFile = None
self.progressBar = None
self.frame = None
self.parent = parent
self.main = main
self.packageName = ''
self.networkAccess = QNetworkAccessManager()
      self.downloadLinkFile = None
def setFile(self, url, filehash):
self.url = url
self.filesha = filehash
if self.downloadButton:
if url and not self.downloadFile:
self.downloadButton.setEnabled(True)
else:
self.downloadButton.setEnabled(False)
def useDownloadLinkFileAndSignature(self, linkfile):
self.downloadLinkFile = linkfile
def setFinishedCallback(self, callback):
self.finishedCB = callback
def setStartedCallback(self, callback):
self.startedCB = callback
def setPackageName(self, pkgName):
self.packageName = pkgName
def createDownloaderWidget(self, parent):
if self.frame:
raise RuntimeError("already created a downloader widget")
self.frame = QWidget(parent)
bottomRowLayout = QHBoxLayout(self.frame)
self.progressBar = QProgressBar(self.frame)
bottomRowLayout.addWidget(self.progressBar, +1)
self.downloadButton = QPushButton(tr("Download"), self.frame)
self.frame.connect(self.downloadButton, SIGNAL('clicked()'), \
self.startOrStopDownload)
bottomRowLayout.addWidget(self.downloadButton)
return self.frame
def startOrStopDownload(self):
if self.downloadFile:
o = self.downloadFile
self.downloadFile = None
o.close()
else:
self.startDownload()
def startDownload(self):
req = QNetworkRequest(QUrl.fromEncoded(self.url))
self.receivedData = ""
self.downloadFile = self.networkAccess.get(req)
QObject.connect(self.downloadFile, SIGNAL('readyRead()'), self.readMoreDownloadData)
QObject.connect(self.downloadFile, SIGNAL('finished()'), self.downloadFinished)
if not self.downloadButton is None:
self.downloadButton.setText(tr("Cancel"))
self.progressTimer()
self.startedCB()
#############################################################################
def downloadFinished(self):
if not self.downloadButton is None:
self.downloadButton.setText(tr("Download"))
# We will ask the user if they want us to unpack it for them
# Only if linux, only if satoshi, only if not offline-pkg-signed
linuxUnpackFile = None
# downloadFile will be removed on cancel
if not self.downloadFile is None:
status = self.downloadFile.attribute(QNetworkRequest.HttpStatusCodeAttribute).toInt()[0]
if len(self.receivedData)==0:
if status == 404:
status = tr("File not found")
QMessageBox.warning(self.frame, tr("Download failed"), \
tr("There was a failure downloading this file: {}").format(str(status)))
else:
res = binary_to_hex(sha256(self.receivedData))
LOGINFO("Downloaded package has hash " + res)
if res != self.filesha:
QMessageBox.warning(self.frame, tr("Verification failed"), tr("""
The download completed but its cryptographic signature is invalid.
Please try the download again. If you get another error, please
report the problem to <EMAIL>.
<br><br>
The downloaded data has been discarded. """))
else:
defaultFN = os.path.basename(self.url)
if self.downloadLinkFile:
defaultFN += ".signed"
dest = self.main.getFileSave(tr("Save File"),
[tr('Installers (*.exe *.app *.deb *.tar.gz)')],
defaultFilename=defaultFN)
if len(dest)!=0:
df = open(dest, "wb")
if self.downloadLinkFile:
df.write("START_OF_SIGNATURE_SECTION")
df.write(self.downloadLinkFile)
df.write("END_OF_SIGNATURE_SECTION")
df.write(self.receivedData)
df.close()
if self.downloadLinkFile:
QMessageBox.warning(self.frame, tr("Download complete"), tr("""
The package has the
signature from <font color="%s"><b>Armory Technologies,
Inc.</b></font> bundled with it, so it can be verified
by an offline computer before installation. To use this
feature, the offline system must be running Armory
0.91-beta or higher. Go to
<i>"Help"</i>\xe2\x86\x92<i>"Verify Signed Package"</i>
and load the <i>*.signed</i> file. The file was saved
to:
<br><br>
%s
<br><br>
<b>There is no special procedure to update a previous
installation.</b> The installer will update existing
versions without touching your wallets or settings.""") % \
(htmlColor("TextGreen"), dest), \
QMessageBox.Ok)
else:
if OS_LINUX and \
self.packageName=='Satoshi' and \
dest.endswith('tar.gz'):
linuxUnpackFile = dest
else:
QMessageBox.warning(self.frame, tr("Download complete"), tr("""
The file downloaded successfully, and carries a valid
signature from <font color="%s"><b>Armory Technologies,
Inc.</b></font> You can now use it to install the
software. The file was saved to:
<br><br> %s <br><br>
<b>There is no special procedure to update a previous
installation.</b> The installer will update existing
versions without touching your wallets or settings.""") % \
(htmlColor("TextGreen"), dest), QMessageBox.Ok)
if linuxUnpackFile is not None:
reply = QMessageBox.warning(self.frame, tr('Unpack Download'), tr("""
You just downloaded the Bitcoin Core software for Linux.
Would you like Armory to extract it for you and adjust your
settings to use it automatically?
<br><br>
If you modified your settings to run Bitcoin Core manually,
click "No" then extract the downloaded file and manually start
bitcoin-qt or bitcoind in from the extracted "bin/%d"
directory.""") % (64 if SystemSpecs.IsX64 else 32), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
finalDir = self.main.unpackLinuxTarGz(dest, changeSettings=True)
if finalDir is None:
QMessageBox.critical(self.frame, tr('Error Unpacking'), tr("""
There was an error unpacking the Bitcoin Core file. To use
it, you need to go to where the file was saved, right-click
on it and select "Extract Here", then adjust your settings
(<i>"File"</i>\xe2\x86\x92<i>"Settings"</i> from the main
window) to point "Bitcoin Install Dir" to the extracted
directory.
<br><br>
You saved the installer to:
<br><br>
%s""") % dest, QMessageBox.Ok)
else:
QMessageBox.warning(self.frame, tr('Finished!'), tr("""
The operation was successful. Restart Armory to use the
newly-downloaded Bitcoin Core software"""), QMessageBox.Ok)
self.receivedData = None
self.downloadFile = None
self.downloadButton.setEnabled(True)
self.progressBar.setFormat("")
if self.finishedCB:
self.finishedCB()
def readMoreDownloadData(self):
if not self.receivedData is None:
self.receivedData = self.receivedData + self.downloadFile.readAll()
def progressTimer(self):
if not self.downloadFile:
self.progressBar.reset()
self.progressBar.setRange(0, 100)
self.progressBar.setValue(0)
return
size = self.downloadFile.header(QNetworkRequest.ContentLengthHeader).toInt()[0]
self.progressBar.setRange(0, size)
self.progressBar.setValue(len(self.receivedData))
totalStr = bytesToHumanSize(size)
sofarStr = bytesToHumanSize(len(self.receivedData))
s = tr("{0} / {1} downloaded").format(sofarStr, totalStr)
self.progressBar.setFormat(s)
QTimer.singleShot(250, self.progressTimer)
class UpgradeDownloaderDialog(ArmoryDialog):
# parent: QWidget
# showPackage: automatically select this package name, if available, for the current OS
# downloadText: the text *WITH SIGNATURE* of the downloaded text data
# changeLog: the text of the downloaded changelogs
def __init__(self, parent, main, showPackage, downloadText, changeLog):
super(UpgradeDownloaderDialog, self).__init__(parent, main)
self.downloader = UpgradeDownloader(parent, main)
self.bitsColor = htmlColor('Foreground')
def enableOrDisable(e):
self.os.setEnabled(e)
self.osver.setEnabled(e)
self.osarch.setEnabled(e)
self.packages.setEnabled(e)
self.closeButton.setEnabled(e)
self.downloader.setFinishedCallback(lambda : enableOrDisable(True))
def onStart():
enableOrDisable(False)
         downloadText = None
if self.saveAsOfflinePackage.isChecked():
downloadText = self.downloadText
self.downloader.useDownloadLinkFileAndSignature(downloadText)
self.downloader.setStartedCallback(onStart)
self.downloadText = downloadText
self.nestedDownloadMap = downloadLinkParser(filetext=downloadText).downloadMap
self.changelog = changelogParser().parseChangelogText(changeLog)
self.localizedData = { \
"Ubuntu" : tr("Ubuntu/Debian"), \
"Windows" : tr("Windows"), \
"MacOSX" : tr("MacOSX"), \
"32" : tr("32-bit"), \
"64" : tr("64-bit"), \
"Satoshi" : tr("Bitcoin Core"), \
"ArmoryTesting" : tr("Armory Testing (unstable)"), \
"ArmoryOffline" : tr("Offline Armory Wallet") \
}
oslabel = QLabel(tr("OS:"), self)
self.os = QComboBox(self)
self.osver = QComboBox(self)
self.osarch = QComboBox(self)
packages = QTreeWidget(self)
self.packages = packages
packages.setRootIsDecorated(False)
packages.sortByColumn(0, Qt.AscendingOrder)
packages.setSortingEnabled(True)
headerItem = QTreeWidgetItem()
headerItem.setText(0,tr("Package"))
headerItem.setText(1,tr("Version"))
packages.setHeaderItem(headerItem)
packages.setMaximumHeight(int(7*tightSizeStr(packages, "Abcdefg")[1]))
packages.header().setResizeMode(0, QHeaderView.Stretch)
packages.header().setResizeMode(1, QHeaderView.Stretch)
self.connect(self.os, SIGNAL("activated(int)"), self.cascadeOsVer)
self.connect(self.osver, SIGNAL("activated(int)"), self.cascadeOsArch)
self.connect(self.osarch, SIGNAL("activated(int)"), self.displayPackages)
self.connect(packages, \
SIGNAL('currentItemChanged(QTreeWidgetItem*,QTreeWidgetItem*)'), \
self.useSelectedPackage)
self.changelogView = QTextBrowser(self)
self.changelogView.setOpenExternalLinks(True)
self.saveAsOfflinePackage = QCheckBox(tr("Save with offline-verifiable signature"))
self.closeButton = QPushButton(tr("Close"), self)
self.connect(self.closeButton, SIGNAL('clicked()'), self.accept)
self.btnDLInfo = QLabelButton('Download Info')
self.btnDLInfo.setVisible(False)
self.connect(self.btnDLInfo, SIGNAL('clicked()'), self.popupPackageInfo)
self.lblSelectedSimple = QRichLabel(tr('No download selected'),
doWrap=False, hAlign=Qt.AlignHCenter)
self.lblSelectedSimpleMore = QRichLabel('', doWrap=False)
self.lblSelectedComplex = QRichLabel(tr('No download selected'))
self.lblCurrentVersion = QRichLabel('', hAlign=Qt.AlignHCenter)
# At the moment, not sure we actually need this label
self.lblSelectedComplex.setVisible(False)
self.btnShowComplex = QLabelButton(tr('Show all downloads for all OS'))
self.connect(self.btnShowComplex, SIGNAL('clicked()'), self.showComplex)
frmDisp = makeHorizFrame(['Stretch', self.lblSelectedSimpleMore, 'Stretch'])
frmBtnShowComplex = makeHorizFrame(['Stretch', self.btnShowComplex])
layoutSimple = QVBoxLayout()
layoutSimple.addWidget(self.lblSelectedSimple)
layoutSimple.addWidget(frmDisp)
layoutSimple.addWidget(self.lblCurrentVersion)
layoutSimple.addWidget(frmBtnShowComplex)
frmTopSimple = QFrame()
frmTopSimple.setLayout(layoutSimple)
layoutComplex = QGridLayout()
layoutComplex.addWidget(oslabel, 0,0)
layoutComplex.addWidget(self.os, 0,1)
layoutComplex.addWidget(self.osver, 0,2)
layoutComplex.addWidget(self.osarch,0,3)
layoutComplex.addWidget(packages, 1,0, 1,4)
layoutComplex.addWidget(self.lblSelectedComplex, 2,0, 1,4)
layoutComplex.setColumnStretch(0,0)
layoutComplex.setColumnStretch(1,2)
layoutComplex.setColumnStretch(2,2)
layoutComplex.setColumnStretch(3,1)
frmTopComplex = QFrame()
frmTopComplex.setLayout(layoutComplex)
frmTopComplex.setFrameStyle(STYLE_SUNKEN)
frmTopSimple.setFrameStyle(STYLE_SUNKEN)
self.stackedDisplay = QStackedWidget()
self.stackedDisplay.addWidget(frmTopSimple)
self.stackedDisplay.addWidget(frmTopComplex)
layout = QGridLayout()
layout.addWidget(self.stackedDisplay, 0,0, 1,3)
layout.addWidget(self.changelogView, 1,0, 1,3)
layout.addWidget(self.saveAsOfflinePackage, 2,0)
layout.addWidget(self.btnDLInfo, 2,2)
layout.addWidget(self.downloader.createDownloaderWidget(self), \
3,0, 1,3)
layout.addWidget(self.closeButton, 4,2)
layout.setRowStretch(0, 1)
layout.setColumnStretch(1, 1)
self.cascadeOs()
self.selectMyOs()
# Above we had to select *something*, we should check that the
# architecture actually matches our system. If not, warn
trueBits = '64' if SystemSpecs.IsX64 else '32'
selectBits = self.itemData(self.osarch)[:2]
if showPackage and not trueBits==selectBits:
QMessageBox.warning(self, tr("Wrong Architecture"), tr("""
You appear to be on a %s-bit architecture, but the only
available download is for %s-bit systems. It is unlikely
that this download will work on this operating system.
<br><br>
Please make sure that the correct operating system is
selected before attempting to download and install any
packages.""") % (trueBits, selectBits), QMessageBox.Ok)
self.bitsColor = htmlColor('TextRed')
      expectVer = None
      if showPackage == 'Armory':
         expectVer = self.main.armoryVersions[1]
      elif showPackage == 'Satoshi':
         expectVer = self.main.satoshiVersions[1]
if showPackage:
for n in range(0, packages.topLevelItemCount()):
row = packages.topLevelItem(n)
if str(row.data(0, 32).toString()).startswith(showPackage):
packages.setCurrentItem(row)
if not expectVer or str(row.data(1, 32).toString())==expectVer:
break
self.useSelectedPackage(limit=True)
else:
QMessageBox.warning(self, tr("Not Found"), tr(
"Armory could not determine an appropriate download for "
"your operating system. You will have to manually select "
"the correct download on the next window."), QMessageBox.Ok)
self.stackedDisplay.setCurrentIndex(1)
else:
self.stackedDisplay.setCurrentIndex(1)
self.setLayout(layout)
self.setMinimumWidth(600)
self.setWindowTitle(tr('Secure Downloader'))
def showSimple(self):
self.stackedDisplay.setCurrentIndex(0)
def showComplex(self):
self.stackedDisplay.setCurrentIndex(1)
def findCmbData(self, cmb, findStr, last=False, nonenotfound=False):
"""
So I ran into some issues with finding python strings in comboboxes
full of QStrings. I confirmed that
self.os.itemText(i)==tr("Ubuntu/Debian")
but not
self.os.findData(tr("Ubuntu/Debian"))
I'm probably being stupid, I thought I saw this work before...
      Returns zero on failure, so the first item in the list is selected
      when the string is not found.
"""
for i in range(cmb.count()):
if cmb.itemText(i)==findStr:
return i
if nonenotfound:
return None
return cmb.count()-1 if last else 0
def selectMyOs(self):
osVar = OS_VARIANT
if isinstance(osVar, (list,tuple)):
osVar = osVar[0]
osIndex = 0
if OS_WINDOWS:
osIndex = self.findCmbData(self.os, tr("Windows"))
elif OS_LINUX:
if osVar.lower() in ('debian', 'linuxmint', 'ubuntu'):
osIndex = self.findCmbData(self.os, tr('Ubuntu/Debian'))
else:
osIndex = self.findCmbData(self.os, tr('Linux'))
elif OS_MACOSX:
osIndex = self.findCmbData(self.os, tr('MacOSX'))
self.os.setCurrentIndex(osIndex)
self.cascadeOsVer() # signals don't go through for some reason
osverIndex = 0
if OS_WINDOWS:
win_ver = platform.win32_ver()[0]
osverIndex = self.findCmbData(self.osver, win_ver, True)
elif OS_LINUX:
osverIndex = self.findCmbData(self.osver, OS_VARIANT[1], True)
elif OS_MACOSX:
mac_ver = platform.mac_ver()[0]
osverIndex = self.findCmbData(self.osver, mac_ver, nonenotfound=True)
if osverIndex is None:
mac_ver = mac_ver[:mac_ver.rfind(".")]
osverIndex = self.findCmbData(self.osver, mac_ver, True)
self.osver.setCurrentIndex(osverIndex)
self.cascadeOsArch()
archIndex = 0
if platform.machine() in ("x86_64", "AMD64"):
archIndex = self.findCmbData(self.osarch, tr('64-bit'))
else:
archIndex = self.findCmbData(self.osarch, tr('32-bit'))
self.osarch.setCurrentIndex(archIndex)
self.cascadeOsArch()
def useSelectedPackage(self, *args, **kwargs):
limit = kwargs.get("limit")
if self.packages.currentItem() is None:
self.changelogView.setHtml("<html>" + tr("""
There is no version information to be shown here.""") +"</html>")
self.downloader.setFile(None, None)
else:
packagename = str(self.packages.currentItem().data(0, 32).toString())
packagever = str(self.packages.currentItem().data(1, 32).toString())
packageurl = str(self.packages.currentItem().data(2, 32).toString())
packagehash = str(self.packages.currentItem().data(3, 32).toString())
self.downloader.setFile(packageurl, packagehash)
self.selectedDLInfo = [packagename,packagever,packageurl,packagehash]
self.btnDLInfo.setVisible(True)
self.downloader.setPackageName(packagename)
# Figure out where to bound the changelog information
startIndex = -1
if self.changelog is not None:
for i,triplet in enumerate(self.changelog):
if triplet[0]==packagever:
startIndex = i
break
stopIndex = len(self.changelog)
if limit and len(self.main.armoryVersions[0])>0:
for i,triplet in enumerate(self.changelog):
currVer = getVersionInt(readVersionString(self.main.armoryVersions[0]))
thisVer = getVersionInt(readVersionString(triplet[0]))
if thisVer <= currVer:
stopIndex = i + 1
break
if startIndex > -1:
logHtml = "<html><body>"
if startIndex >= stopIndex:
logHtml = tr("Release notes are not available for this package")
else:
for i in range(startIndex, stopIndex):
block = self.changelog[i]
logHtml += "<h2>" + tr("Version {0}").format(block[0]) + "</h2>\n"
logHtml += "<em>" + tr("Released on {0}").format(block[1]) + "</em>\n"
features = block[2]
logHtml += "<ul>"
for f in features:
logHtml += "<li>" + tr("<b>{0}</b>: {1}").format(f[0], f[1]) + "</li>\n"
logHtml += "</ul>\n\n"
else:
if packagename == "Satoshi":
logHtml = tr(
"No version information is available here for any of the "
"core Bitcoin software downloads. You can find the "
"information at: "
"<a href='https://bitcoin.org/en/version-history'>https://bitcoin.org/en/version-history</a>")
else:
logHtml = tr("Release notes are not available for this package")
#logHtml += tr("""
#<br><br>
#-----
#<br><br>
#<u>Package</u>: <b>%s version %s</b><br>
#<u>Download URL</u>: <b>%s</b><br>
#<u>Verified sha256sum</u>: <b>%s</b>""") % \
#(packagename, packagever, packageurl, packagehash)
self.changelogView.setHtml(logHtml)
self.updateLabels(packagename, packagever,
self.itemData(self.os),
self.itemData(self.osver),
self.itemData(self.osarch))
def popupPackageInfo(self):
pkgname,pkgver,pkgurl,pkghash = self.selectedDLInfo
pkgname= tr(pkgname)
pkgver = tr(pkgver)
osname = tr(self.itemData(self.os))
osver = tr(self.itemData(self.osver))
osarch = self.itemData(self.osarch)
inst = os.path.basename(pkgurl)
QMessageBox.information(self, tr('Package Information'), tr("""
Download information for <b>%(pkgname)s version %(pkgver)s:</b>
<br>
<ul>
<li><u><b>Operating System</b></u>:</li>
<ul>
<li>%(osname)s %(osver)s %(osarch)s-bit</li>
</ul>
<li><u><b>Installer Filename</b></u>:</li>
<ul>
<li>%(inst)s</li>
</ul>
<li><u><b>Download URL</b></u>:</li>
<ul>
<li>%(pkgurl)s</li>
</ul>
<li><u><b>Verified sha256sum</b></u>:</li>
<ul>
<li>%(pkghash)s</li>
</ul>
</ul>""") % locals(), QMessageBox.Ok)
def cascade(self, combobox, valuesfrom, nextToCascade):
combobox.blockSignals(True)
current = combobox.currentText()
combobox.clear()
for v in valuesfrom:
combobox.addItem(self.localized(v), QVariant(v))
at = combobox.findText(current)
if at != -1:
combobox.setCurrentIndex(at)
nextToCascade()
combobox.blockSignals(False)
# pass the combobox (self.os, osver, ...) and the part of nestedDownloadMap
# to look into
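   # Hypothetical shape of self.nestedDownloadMap (illustration only; the
   # real structure comes from downloadLinkParser):
   #   { 'Armory': { '0.91.1': { 'Ubuntu': { '12.04': { '64': (url, sha256hex) }}}}}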
def cascadeOs(self):
allOSes = set()
for pack in self.nestedDownloadMap.itervalues():
for packver in pack.itervalues():
for os in packver.iterkeys():
allOSes.add(os)
self.cascade(self.os, allOSes, self.cascadeOsVer)
def cascadeOsVer(self):
chosenos = str(self.os.itemData(self.os.currentIndex()).toString())
if len(chosenos)==0:
return
allVers = set()
for pack in self.nestedDownloadMap.itervalues():
for packver in pack.itervalues():
if chosenos in packver:
for osver in packver[chosenos].iterkeys():
allVers.add(osver)
      # sort the OS sub-versions; sorted() returns a list
      allVers = sorted(allVers)
self.cascade(self.osver, allVers, self.cascadeOsArch)
def cascadeOsArch(self):
chosenos = str(self.os.itemData(self.os.currentIndex()).toString())
chosenosver = str(self.osver.itemData(self.osver.currentIndex()).toString())
if len(chosenosver)==0:
return
allArchs = set()
for pack in self.nestedDownloadMap.itervalues():
for packver in pack.itervalues():
if chosenos in packver and chosenosver in packver[chosenos]:
for osarch in packver[chosenos][chosenosver].iterkeys():
allArchs.add(osarch)
self.cascade(self.osarch, allArchs, self.displayPackages)
def displayPackages(self):
packages = self.packages
packages.clear()
chosenos = str(self.os.itemData(self.os.currentIndex()).toString())
chosenosver = str(self.osver.itemData(self.osver.currentIndex()).toString())
chosenosarch = str(self.osarch.itemData(self.osarch.currentIndex()).toString())
if len(chosenosarch)==0:
return
for packname,pack in self.nestedDownloadMap.iteritems():
for packvername,packver in pack.iteritems():
if chosenos in packver \
and chosenosver in packver[chosenos] \
and chosenosarch in packver[chosenos][chosenosver]:
row = QTreeWidgetItem()
row.setText(0, self.localized(packname))
row.setData(0, 32, packname) # not localized
row.setText(1, self.localized(packvername))
row.setData(1, 32, packvername)
row.setData(2, 32, packver[chosenos][chosenosver][chosenosarch][0])
row.setData(3, 32, packver[chosenos][chosenosver][chosenosarch][1])
packages.addTopLevelItem(row)
self.updateLabels(packname, packvername,
self.itemData(self.os),
self.itemData(self.osver),
self.itemData(self.osarch))
def updateLabels(self, pkgName, pkgVer, osName, osVer, osArch):
if not pkgName:
self.lblSelectedComplex.setText(tr("""No package currently selected"""))
self.lblSelectedSimple.setText(tr("""No package currently selected"""))
self.lblSelectedSimpleMore.setText("")
else:
self.lblSelectedComplex.setText(tr("""
<font size=4><b>Selected Package:</b> {} {} for {} {} {}</font>"""). \
format(tr(pkgName), tr(pkgVer), tr(osName), tr(osVer), tr(osArch)))
self.lblSelectedSimple.setText(tr(""" <font size=4><b>Securely
download latest version of <u>%s</u></b></font>""") % pkgName)
self.lblCurrentVersion.setText('')
currVerStr = ''
if pkgName=='Satoshi':
if self.main.satoshiVersions[0]:
self.lblCurrentVersion.setText(tr("""
You are currently using Bitcoin Core version %s""") % \
self.main.satoshiVersions[0])
elif pkgName.startswith('Armory'):
if self.main.armoryVersions[0]:
self.lblCurrentVersion.setText(tr("""
You are currently using Armory version %s""") % \
self.main.armoryVersions[0])
self.lblSelectedSimpleMore.setText(tr("""
<b>Software Download:</b> %s version %s<br>
<b>Operating System:</b> %s %s <br>
<b>System Architecture:</b> <font color="%s">%s</font> """) % \
(tr(pkgName), tr(pkgVer), tr(osName), tr(osVer), self.bitsColor, tr(osArch)))
# get the untranslated name from the combobox specified
def itemData(self, combobox):
return str(combobox.itemData(combobox.currentIndex()).toString())
def localized(self, v):
if v in self.localizedData:
return str(self.localizedData[v])
else:
return str(v)
# kate: indent-width 3; replace-tabs on;
|
fastmri_recon/data/scripts/multicoil_nc_tf_records_generation.py | samiulshuvo/fastmri-reproducible-benchmark | 105 | 11114108 | from pathlib import Path
import tensorflow as tf
from tfkbnufft.kbnufft import KbNufftModule
from tfkbnufft import kbnufft_forward, kbnufft_adjoint
from tfkbnufft.mri.dcomp_calc import calculate_density_compensator
from tqdm import tqdm
from fastmri_recon.config import FASTMRI_DATA_DIR
from fastmri_recon.data.utils.crop import adjust_image_size
from fastmri_recon.data.utils.fourier import tf_ortho_ifft2d
from fastmri_recon.data.utils.non_cartesian import get_radial_trajectory, get_debugging_cartesian_trajectory, get_spiral_trajectory
from fastmri_recon.data.utils.multicoil.smap_extract import extract_smaps, non_cartesian_extract_smaps
from fastmri_recon.data.utils.h5 import from_multicoil_train_file_to_image_and_kspace_and_contrast
from fastmri_recon.data.utils.tfrecords import encode_ncmc_example
from fastmri_recon.models.utils.fourier import tf_unmasked_adj_op, nufft, FFTBase
def generate_multicoil_nc_tf_records(
acq_type='radial',
af=4,
mode='train',
brain=False,
):
if brain:
path = Path(FASTMRI_DATA_DIR) / f'brain_multicoil_{mode}'
else:
path = Path(FASTMRI_DATA_DIR) / f'multicoil_{mode}'
filenames = sorted(list(path.glob('*.h5')))
scale_factor = 1e6
image_size = (640, 400)
nufft_ob = KbNufftModule(
im_size=image_size,
grid_size=None,
norm='ortho',
)
class PreProcModel(tf.keras.models.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
interpob = nufft_ob._extract_nufft_interpob()
self.nufftob_back = kbnufft_adjoint(interpob, multiprocessing=False)
self.nufftob_forw = kbnufft_forward(interpob, multiprocessing=False)
if acq_type == 'radial':
self.traj = get_radial_trajectory(image_size, af=af)
elif acq_type == 'cartesian':
self.traj = get_debugging_cartesian_trajectory()
elif acq_type == 'spiral':
self.traj = get_spiral_trajectory(image_size, af=af)
else:
raise NotImplementedError(f'{acq_type} dataset not implemented yet.')
self.dcomp = calculate_density_compensator(
interpob,
self.nufftob_forw,
self.nufftob_back,
self.traj[0],
)
if brain:
self.fft = FFTBase(False, multicoil=True, use_smaps=False)
def call(self, inputs):
images = inputs['image']
kspaces = inputs['kspace']
if brain:
complex_images = self.fft.adj_op([kspaces[..., None], None])[..., 0]
complex_images_padded = adjust_image_size(
complex_images,
image_size,
multicoil=True,
)
kspaces = self.fft.op([complex_images_padded[..., None], None])[..., 0]
traj = tf.repeat(self.traj, tf.shape(images)[0], axis=0)
orig_image_channels = tf_ortho_ifft2d(kspaces)
nc_kspace = nufft(nufft_ob, orig_image_channels, traj, image_size, multiprocessing=False)
nc_kspace_scaled = nc_kspace * scale_factor
images_scaled = images * scale_factor
images_channeled = images_scaled[..., None]
nc_kspaces_channeled = nc_kspace_scaled[..., None]
orig_shape = tf.ones([tf.shape(kspaces)[0]], dtype=tf.int32) * tf.shape(kspaces)[-1]
dcomp = tf.ones([tf.shape(kspaces)[0], tf.shape(self.dcomp)[0]], dtype=self.dcomp.dtype) * self.dcomp[None, :]
extra_args = (orig_shape, dcomp)
smaps = non_cartesian_extract_smaps(nc_kspace, traj, dcomp, self.nufftob_back, orig_shape)
model_inputs = (nc_kspaces_channeled, traj, smaps, extra_args)
if brain:
output_shape = tf.shape(images)[1:][None, :]
output_shape = tf.tile(output_shape, [tf.shape(images)[0], 1])
model_inputs += (output_shape,)
return model_inputs, images_channeled
extension = f'_nc_{acq_type}'
if af != 4:
extension += f'_af{af}'
extension += '.tfrecords'
selection = [
{'inner_slices': None, 'rand': False}, # slice selection
{'rand': False, 'keep_dim': False}, # coil selection
]
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
preproc_model = PreProcModel()
for filename in tqdm(filenames):
directory = filename.parent
filename_tfrecord = directory / (filename.stem + extension)
if filename_tfrecord.exists():
continue
image, kspace, _ = from_multicoil_train_file_to_image_and_kspace_and_contrast(
filename,
selection=selection,
)
data = tf.data.Dataset.zip({
'image': tf.data.Dataset.from_tensor_slices(image),
'kspace': tf.data.Dataset.from_tensor_slices(kspace),
})
data = data.batch(len(image))
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
data = data.with_options(options)
model_inputs, model_outputs = preproc_model.predict(data)
with tf.io.TFRecordWriter(str(filename_tfrecord)) as writer:
example = encode_ncmc_example(model_inputs, [model_outputs])
writer.write(example)
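# Illustrative invocation (assumes FASTMRI_DATA_DIR points at a fastMRI
# layout containing a 'multicoil_val' folder; argument values are examples):
# generate_multicoil_nc_tf_records(acq_type='radial', af=4, mode='val')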
|
toy/mdn.py | mehrdad-shokri/WorldModels-1 | 149 | 11114130 | import matplotlib.pyplot as plt
plt.switch_backend('agg')
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training, datasets, iterators, report
from chainer.training import extensions
import numpy as np
class MDN(chainer.Chain):
def __init__(self, hidden_dim, output_dim, k):
self.output_dim = output_dim
self.hidden_dim = hidden_dim
self.k = k
super(MDN, self).__init__(
input_layer=L.Linear(None, hidden_dim),
coef_layer=L.Linear(hidden_dim, k * output_dim),
mu_layer=L.Linear(hidden_dim, k * output_dim),
ln_var_layer=L.Linear(hidden_dim, k * output_dim),
)
def __call__(self, input):
coef, mu, ln_var = self.fprop(input)
        def sample(row_num):
            # Pick a mixture component according to the cumulative
            # coefficients, then draw from that component's Gaussian.
            cum_prob = 0
            r = np.random.uniform()
            index = None
            for i, probability in enumerate(coef[row_num]):
                cum_prob += sum(probability)
                if r <= cum_prob.data:
                    index = i
                    break
            if index is None:
                # guard against floating-point round-off leaving index unset
                index = self.k - 1
            return F.gaussian(mu[row_num][index], ln_var[row_num][index])
output = F.expand_dims(sample(0), 0)
for row_num in range(1, input.shape[0]):
this_output = F.expand_dims(sample(row_num), 0)
output = F.concat((output, this_output), axis=0)
return output
def fprop(self, input):
k = self.k
output_dim = self.output_dim
h = self.input_layer(input)
coef = F.softmax(self.coef_layer(h))
mu = self.mu_layer(h)
ln_var = self.ln_var_layer(h)
mu = F.reshape(mu, (-1, k, output_dim))
coef = F.reshape(coef, (-1, k, output_dim))
ln_var = F.reshape(ln_var, (-1, k, output_dim))
return coef, mu, ln_var
def get_loss_func(self):
def lf(input, output, epsilon=1e-8):
output_dim = self.output_dim
coef, mu, ln_var = self.fprop(input)
output = F.reshape(output, (-1, 1, output_dim))
mu, output = F.broadcast(mu, output)
var = F.exp(ln_var)
density = F.sum(
coef *
(1 / (np.sqrt(2 * np.pi) * F.sqrt(var))) *
F.exp(-0.5 * F.square(output - mu) / var)
, axis=1)
            nll = -F.sum(F.log(density + epsilon))
report({'loss': nll}, self)
return nll
return lf
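# For reference, the loss above is the mixture negative log-likelihood
#   NLL = -sum_n log( sum_k pi_k * N(y_n | mu_k, sigma_k^2) )
# evaluated per output dimension before summing.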
class Linear(chainer.Chain):
def __init__(self, hidden_dim, output_dim):
self.output_dim = output_dim
super(Linear, self).__init__(
input_layer=L.Linear(None, hidden_dim),
output_layer=L.Linear(hidden_dim, output_dim),
)
def __call__(self, input):
return self.fprop(input)
def fprop(self, input):
h = self.input_layer(input)
return self.output_layer(h)
def get_loss_func(self):
def lf(input, output):
pred = self.fprop(input)
loss = F.mean_squared_error(output.reshape(-1, 1), pred)
report({'loss': loss}, self)
return loss
return lf
def main():
model = MDN(256, 1, 5)
# model = Linear(256, 1)
points = 500
y = np.random.rand(points).astype(np.float32)
x = np.sin(2 * np.pi * y) + 0.2 * np.random.rand(points) * (np.cos(2 * np.pi * y) + 2)
x = x.astype(np.float32)
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
dataset = datasets.tuple_dataset.TupleDataset(x.reshape(-1, 1), y)
train_iter = iterators.SerialIterator(dataset, batch_size=100)
updater = training.StandardUpdater(train_iter, optimizer, loss_func=model.get_loss_func())
trainer = training.Trainer(updater, (2000, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(['epoch', 'main/loss']))
trainer.run()
plt.ylim(-0.1, 1.1)
plt.plot(x, y, "b.")
plt.savefig("result/mdn-data_only.png")
plt.clf()
x_test = np.linspace(min(x), max(x), points).astype(np.float32)
y_pred = model(x_test.reshape(-1, 1)).data
plt.ylim(-0.1, 1.1)
plt.plot(x, y, "b.")
plt.plot(x_test, y_pred, "r.")
plt.savefig("result/mdn-with_preds.png")
if __name__ == '__main__':
main()
|
toad/preprocessing/partition.py | Padfoot-ted/toad | 325 | 11114170 | import numpy as np
import pandas as pd
class Partition:
def partition(self, data):
"""partition data
Args:
data (DataFrame): dataframe
Returns:
iterator -> ndarray[bool]: mask of partition data
iterator -> str: suffix string of current partition
"""
yield np.ones(len(data)).astype(bool), ''
class TimePartition(Partition):
"""partition data by time delta
Args:
base (str): column name of base time
filter (str): column name of target time to be compared
        times (list): list of time deltas
Example:
>>> TimePartition('apply_time', 'query_time', ['30d', '90d', 'all'])
"""
def __init__(self, base, filter, times):
self.base = base
self.filter = filter
self.times = times
def partition(self, data):
base = pd.to_datetime(data[self.base])
filter = pd.to_datetime(data[self.filter])
for t in self.times:
if t != 'all':
delta = pd.Timedelta(t)
mask = filter > (base - delta)
else:
mask = np.ones(len(filter)).astype(bool)
yield mask, '_' + t
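# Illustrative sketch (hypothetical frame, mirroring the docstring example):
# >>> df = pd.DataFrame({
# ...     'apply_time': pd.to_datetime(['2020-03-01', '2020-03-01']),
# ...     'query_time': pd.to_datetime(['2020-02-25', '2019-12-01']),
# ... })
# >>> for mask, suffix in TimePartition('apply_time', 'query_time',
# ...                                   ['30d', 'all']).partition(df):
# ...     print(suffix, mask.tolist())
# _30d [True, False]
# _all [True, True]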
class ValuePartition(Partition):
"""partition data by column values
Args:
column (str): column name which will be used as partition
Example:
>>> ValuePartition('status')
"""
def __init__(self, column):
self.column = column
def partition(self, data):
data = data[self.column]
unique = data.unique()
for u in unique:
if pd.isna(u):
mask = data.isna()
else:
mask = (data == u)
yield mask, '_' + str(u) |
convokit/model/corpusComponent.py | CornellNLP/Cornell-Conversational-Analysis-Toolkit | 371 | 11114227 | <filename>convokit/model/corpusComponent.py
from .convoKitMeta import ConvoKitMeta
from convokit.util import warn, deprecation
from typing import List, Optional
class CorpusComponent:
def __init__(self, obj_type: str, owner=None, id=None, vectors: List[str]=None, meta=None):
self.obj_type = obj_type # utterance, speaker, conversation
self._owner = owner
if meta is None:
meta = dict()
self.meta = self.init_meta(meta)
self.id = id
self.vectors = vectors if vectors is not None else []
def get_owner(self):
return self._owner
def set_owner(self, owner):
self._owner = owner
if owner is not None:
self.meta = self.init_meta(self.meta)
owner = property(get_owner, set_owner)
def init_meta(self, meta):
if self._owner is None:
return meta
else:
ck_meta = ConvoKitMeta(self.owner.meta_index, self.obj_type)
for key, value in meta.items():
ck_meta[key] = value
return ck_meta
def get_id(self):
return self._id
def set_id(self, value):
if not isinstance(value, str) and value is not None:
self._id = str(value)
warn("{} id must be a string. ID input has been casted to a string.".format(self.obj_type))
else:
self._id = value
id = property(get_id, set_id)
# def __eq__(self, other):
# if type(self) != type(other): return False
# # do not compare 'utterances' and 'conversations' in Speaker.__dict__. recursion loop will occur.
# self_keys = set(self.__dict__).difference(['_owner', 'meta', 'utterances', 'conversations'])
# other_keys = set(other.__dict__).difference(['_owner', 'meta', 'utterances', 'conversations'])
# return self_keys == other_keys and all([self.__dict__[k] == other.__dict__[k] for k in self_keys])
def retrieve_meta(self, key: str):
"""
Retrieves a value stored under the key of the metadata of corpus object
:param key: name of metadata attribute
:return: value
"""
return self.meta.get(key, None)
def add_meta(self, key: str, value) -> None:
"""
Adds a key-value pair to the metadata of the corpus object
:param key: name of metadata attribute
:param value: value of metadata attribute
:return: None
"""
self.meta[key] = value
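    # Illustrative sketch (hypothetical component and key):
    # >>> utt.add_meta('toxicity', 0.12)
    # >>> utt.retrieve_meta('toxicity')
    # 0.12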
def get_info(self, key):
"""
Gets attribute <key> of the corpus object. Returns None if the corpus object does not have this attribute.
:param key: name of attribute
:return: attribute <key>
"""
deprecation("get_info()", "retrieve_meta()")
return self.meta.get(key, None)
def set_info(self, key, value):
"""
Sets attribute <key> of the corpus object to <value>.
:param key: name of attribute
:param value: value to set
:return: None
"""
deprecation("set_info()", "add_meta()")
self.meta[key] = value
def get_vector(self, vector_name: str, as_dataframe: bool = False, columns: Optional[List[str]] = None):
"""
Get the vector stored as `vector_name` for this object.
:param vector_name: name of vector
:param as_dataframe: whether to return the vector as a dataframe (True) or in its raw array form (False). False
by default.
:param columns: optional list of named columns of the vector to include. All columns returned otherwise. This
parameter is only used if as_dataframe is set to True
:return: a numpy / scipy array
"""
if vector_name not in self.vectors:
raise ValueError("This {} has no vector stored as '{}'.".format(self.obj_type, vector_name))
return self.owner.get_vector_matrix(vector_name).get_vectors(ids=[self.id], as_dataframe=as_dataframe,
columns=columns)
def add_vector(self, vector_name: str):
"""
Logs in the Corpus component object's internal vectors list that the component object has a vector row
associated with it in the vector matrix named `vector_name`.
Transformers that add vectors to the Corpus should use this to update the relevant component objects during
the transform() step.
:param vector_name: name of vector matrix
:return: None
"""
if vector_name not in self.vectors:
self.vectors.append(vector_name)
def has_vector(self, vector_name: str):
return vector_name in self.vectors
def delete_vector(self, vector_name: str):
"""
Delete a vector associated with this Corpus component object.
:param vector_name:
:return: None
"""
self.vectors.remove(vector_name)
def __str__(self):
return "{}(id: {}, vectors: {}, meta: {})".format(self.obj_type.capitalize(), self.id, self.vectors, self.meta)
def __hash__(self):
return hash(self.obj_type + str(self.id))
def __repr__(self):
copy = self.__dict__.copy()
deleted_keys = ['utterances', 'conversations', 'user', '_root', '_utterance_ids', '_speaker_ids']
for k in deleted_keys:
if k in copy:
del copy[k]
to_delete = [k for k in copy if k.startswith('_')]
to_add = {k[1:]: copy[k] for k in copy if k.startswith('_')}
for k in to_delete:
del copy[k]
copy.update(to_add)
try:
return self.obj_type.capitalize() + "(" + str(copy) + ")"
except AttributeError: # for backwards compatibility when corpus objects are saved as binary data, e.g. wikiconv
return "(" + str(copy) + ")" |
samsungctl/upnp/UPNP_Device/instance_singleton.py | p3g4asus/samsungctl | 135 | 11114265 | # -*- coding: utf-8 -*-
class InstanceSingleton(type):
_objects = {}
def __call__(cls, id, *args):
if id not in InstanceSingleton._objects:
InstanceSingleton._objects[id] = (
super(InstanceSingleton, cls).__call__(id, *args)
)
return InstanceSingleton._objects[id]
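# Illustrative sketch (Python 3 metaclass syntax; 'Device' is hypothetical):
# class Device(metaclass=InstanceSingleton):
#     def __init__(self, id):
#         self.id = id
#
# assert Device('tv') is Device('tv')      # same id -> same instance
# assert Device('tv') is not Device('ac')  # different id -> new instance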
|
openrec/tf1/legacy/modules/__init__.py | pbaiz/openrec | 399 | 11114291 | from openrec.tf1.legacy.modules.module import Module
|
labs/notebooks/reinforcement_learning/RL.py | mtreviso/lxmls-toolkit | 183 | 11114368 | <reponame>mtreviso/lxmls-toolkit
from IPython import embed
# Load Part-of-Speech data
from lxmls.readers.pos_corpus import PostagCorpusData
data = PostagCorpusData()
print(data.input_size)
print(data.output_size)
print(data.maxL)
import numpy as np
import time
batch_size = 1
# Get batch iterators for train and test
train_batches = data.batches('train', batch_size=batch_size)
dev_set = data.batches('dev', batch_size=batch_size)
test_set = data.batches('test', batch_size=batch_size)
# Alternative native CuDNN native implementation of RNNs
#from lxmls.deep_learning.pytorch_models.rnn import FastPytorchRNN
# model = FastPytorchRNN(
# input_size=data.input_size,
# embedding_size=50,
# hidden_size=100,
# output_size=data.output_size,
# learning_rate=0.05
# )
#
#
# num_epochs = 15
# # Epoch loop
# start = time.time()
# for epoch in range(num_epochs):
#
# # Batch loop
# for batch in train_batches:
# model.update(input=batch['input'], output=batch['output'])
#
# # Evaluation dev
# is_hit = []
# for batch in dev_set:
# is_hit.extend(model.predict(input=batch['input']) == batch['output'])
# accuracy = 100 * np.mean(is_hit)
#
# # Inform user
# print("Epoch %d: dev accuracy %2.2f %%" % (epoch + 1, accuracy))
# print("Training took %2.2f seconds per epoch" % ((time.time() - start) / num_epochs))
# # Evaluation test
# is_hit = []
# for batch in test_set:
# is_hit.extend(model.predict(input=batch['input']) == batch['output'])
# accuracy = 100 * np.mean(is_hit)
#
# # Inform user
# print("Test accuracy %2.2f %%" % accuracy)
# Alternative native CuDNN native implementation of RNNs
from lxmls.deep_learning.pytorch_models.rnn import PolicyRNN
model = PolicyRNN(
input_size=data.input_size,
embedding_size=50,
hidden_size=100,
output_size=data.output_size,
learning_rate=0.05,
gamma=0.8,
RL=True,
maxL=data.maxL
)
num_epochs = 15
import numpy as np
import time
batch_size = 1
# Get batch iterators for train and test
train_batches = data.sample('train', batch_size=batch_size)
dev_set = data.sample('dev', batch_size=batch_size)
test_set = data.sample('test', batch_size=batch_size)
print("RL")
# Epoch loop
start = time.time()
for epoch in range(num_epochs):
# Batch loop
for batch in train_batches:
#TODO: Use this here to create an RL inside model.update()
#samples, log_probs = model._sample(input=batch['input']) # sample actions and its neg log probs
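        # A hedged REINFORCE sketch of what such an update could contain
        # (helpers and shapes here are assumptions, not confirmed by this script):
        #rewards = (samples == batch['output']).astype(float)
        #loss = -(log_probs * (rewards - rewards.mean())).sum()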
model.update(input=batch['input'], output=batch['output'])
# Evaluation dev
is_hit = []
for batch in dev_set:
        # predict_loss appears to return per-token hit indicators,
        # which are averaged into an accuracy below
        loss = model.predict_loss(batch['input'], batch['output'])
        is_hit.extend(loss)
accuracy = 100 * np.mean(is_hit)
# Inform user
print("Epoch %d: dev accuracy %2.2f %%" % (epoch + 1, accuracy))
print("Training took %2.2f seconds per epoch" % ((time.time() - start) / num_epochs))
# Evaluation test
is_hit = []
for batch in test_set:
is_hit.extend(model.predict_loss(batch['input'],batch['output']))
accuracy = 100 * np.mean(is_hit)
# Inform user
print("Test accuracy %2.2f %%" % accuracy)
|
test/vectors_example.py | shardros/autopep8 | 3,459 | 11114381 | <gh_stars>1000+
# vectors - a simple vector, complex, quaternion, and 4d matrix math module
'''
http://www.halley.cc/code/python/vectors.py
A simple vector, complex, quaternion, and 4d matrix math module.
ABSTRACT
This module gives a simple way of doing lightweight 2D, 3D or other
vector math tasks without the heavy clutter of doing tuple/list/vector
conversions yourself, or the burden of installing some high-performance
native math routine module.
Included are:
V(...) - mathematical real vector
C(...) - mathematical complex number (same as python "complex" type)
Q(...) - hypercomplex number, also known as a quaternion
M(...) - a fixed 4x4 matrix commonly used in graphics
SYNOPSIS
>>> import vectors ; from vectors import *
Vectors:
>>> v = V(1,2,3) ; w = V(4,5,6)
>>> v+w, (v+w == w+v), v*2
    (V(5.0, 7.0, 9.0), True, V(2.0, 4.0, 6.0))
>>> v.dot(w), v.cross(w), v.normalize().magnitude()
    (32.0, V(-3.0, 6.0, -3.0), 1.0)
Quaternions:
    >>> q = Q.rotate('X', vectors.radians(30))
    >>> q
    Q(0.2588190451, 0.0, 0.0, 0.9659258263)
>>> q*q*q*q*q*q == Q.rotate('X', math.pi) == Q(1,0,0,0)
True
AUTHOR
<NAME> (<EMAIL>) 12 February 2005
REFERENCES
Many libraries implement a host of 4x4 matrix math and vector
routines, usually related to the historical SGI GL implementation.
A common problem is whether matrix element formulas are transposed.
One useful FAQ on matrix math can be found at this address:
http://www.j3d.org/matrix_faq/matrfaq_latest.html
'''
__all__ = [ 'V', 'C', 'Q', 'M',
'zero', 'equal',
'radians', 'degrees',
'angle', 'track',
'distance', 'nearest', 'farthest' ]
#----------------------------------------------------------------------------
import math
import random
EPSILON = 0.00000001
__deg2rad = math.pi / 180.0
__rad2deg = 180.0 / math.pi
def zero(a): return abs(a) < EPSILON
def equal(a, b): return abs(a - b) < EPSILON
def degrees(rad): return rad * __rad2deg
def radians(deg): return deg * __deg2rad
def sign(a):
if a < 0: return -1
if a > 0: return +1
return 0
def isseq(x):
return isinstance(x, (list, tuple))
def collapse(*args):
it = []
for i in range(len(args)):
if isinstance(args[i], V):
it.extend(args[i]._v)
else:
it.extend(args[i])
return it
#----------------------------------------------------------------------------
class V:
'''A mathematical vector of arbitrary number of scalar number elements.
'''
O = None
X = None
Y = None
Z = None
__slots__ = [ '_v', '_l' ]
@classmethod
def __constants__(cls):
        if V.O is not None: return
V.O = V() ; V.O._v = (0.,0.,0.) ; V.O._l = 0.
V.X = V() ; V.X._v = (1.,0.,0.) ; V.X._l = 1.
V.Y = V() ; V.Y._v = (0.,1.,0.) ; V.Y._l = 1.
V.Z = V() ; V.Z._v = (0.,0.,1.) ; V.Z._l = 1.
def __init__(self, *args):
l = len(args)
if not l:
self._v = [0.,0.,0.]
self._l = 0.
return
if l > 1:
self._v = map(float, args)
self._l = None
return
arg = args[0]
if isinstance(arg, (list, tuple)):
self._v = map(float, arg)
self._l = None
elif isinstance(arg, V):
self._v = list(arg._v[:])
self._l = arg._l
else:
arg = float(arg)
self._v = [ arg ]
self._l = arg
def __len__(self):
'''The len of a vector is the dimensionality.'''
return len(self._v)
def __list__(self):
'''Accessing the list() will return all elements as a list.'''
if isinstance(self._v, tuple): return list(self._v[:])
return self._v[:]
list = __list__
def __getitem__(self, key):
'''Vector elements can be accessed directly.'''
return self._v[key]
def __setitem__(self, key, value):
'''Vector elements can be accessed directly.'''
self._v[key] = value
def __str__(self): return self.__repr__()
def __repr__(self):
return self.__class__.__name__ + repr(tuple(self._v))
def __eq__(self, other):
'''Vectors can be checked for equality.
Uses epsilon floating comparison; tiny differences are still equal.
'''
for i in range(len(self._v)):
if not equal(self._v[i], other._v[i]):
return False
return True
def __cmp__(self, other):
'''Vectors can be compared, returning -1,0,+1.
Elements are compared in order, using epsilon floating comparison.
'''
for i in range(len(self._v)):
if not equal(self._v[i], other._v[i]):
if self._v[i] > other._v[i]: return 1
return -1
return 0
def __pos__(self): return V(self)
def __neg__(self):
'''The inverse of a vector is a negative in all elements.'''
v = V(self)
for i in range(len(v._v)):
v[i] = -v[i]
v._l = self._l
return v
def __nonzero__(self):
'''A vector is nonzero if any of its elements are nonzero.'''
for i in range(len(self._v)):
if self._v[i]: return True
return False
def zero(self):
'''A vector is zero if none of its elements are nonzero.'''
return not self.__nonzero__()
@classmethod
def random(cls, order=3):
'''Returns a unit vector in a random direction.'''
# distribution is not without bias, need to use polar coords?
v = V(range(order))
v._l = None
short = True
while short:
for i in range(order):
v._v[i] = 2.0*random.random() - 1.0
if not zero(v._v[i]): short = False
return v.normalize()
# Vector or scalar addition.
def __add__(self, other): return self.__class__(self).__iadd__(other)
def __radd__(self, other): return self.__class__(self).__iadd__(other)
def __iadd__(self, other):
'''Vectors can be added to each other, or a scalar added to them.'''
if isinstance(other, V):
if len(other._v) != len(self._v):
raise ValueError, 'mismatched dimensions'
for i in range(len(self._v)):
self._v[i] += other._v[i]
else:
for i in range(len(self._v)):
self._v[i] += other
self._l = None
return self
# Vector or scalar subtraction.
def __sub__(self, other): return self.__class__(self).__isub__(other)
def __rsub__(self, other): return (-self.__class__(self)).__iadd__(other)
def __isub__(self, other):
'''Vectors can be subtracted, or a scalar subtracted from them.'''
if isinstance(other, V):
if len(other._v) != len(self._v):
raise ValueError, 'mismatched dimensions'
for i in range(len(self._v)):
self._v[i] -= other._v[i]
else:
for i in range(len(self._v)):
self._v[i] -= other
self._l = None
return self
# Cross product or magnification. See dot() for dot product.
def __mul__(self, other):
if isinstance(other, M): return other.__rmul__(self)
return self.__class__(self).__imul__(other)
def __rmul__(self, other):
# The __rmul__ is called in scalar * vector case; it's commutative.
return self.__class__(self).__imul__(other)
def __imul__(self, other):
'''Vectors can be multipled by a scalar. Two 3d vectors can cross.'''
if isinstance(other, V):
self._v = self.cross(other)._v
else:
for i in range(len(self._v)):
self._v[i] *= other
self._l = None
return self
def __div__(self, other): return self.__class__(self).__idiv__(other)
def __rdiv__(self, other):
raise TypeError, 'cannot divide scalar by non-scalar value'
def __idiv__(self, other):
'''Vectors can be divided by scalars; each element is divided.'''
other = 1.0 / other
for i in range(len(self._v)):
self._v[i] *= other
self._l = None
return self
def cross(self, other):
'''Find the vector cross product between two 3d vectors.'''
if len(self._v) != 3 or len(other._v) != 3:
raise ValueError, 'cross multiplication only for 3d vectors'
p, q = self._v, other._v
r = [ p[1] * q[2] - p[2] * q[1],
p[2] * q[0] - p[0] * q[2],
p[0] * q[1] - p[1] * q[0] ]
return V(r)
def dot(self, other):
'''Find the scalar dot product between this vector and another.'''
s = 0
for i in range(len(self._v)):
s += self._v[i] * other._v[i]
return s
def __mag(self):
if self._l is not None:
return self._l
m = 0
for i in range(len(self._v)):
m += self._v[i] * self._v[i]
self._l = math.sqrt(m)
return self._l
def magnitude(self, value=None):
'''Find the magnitude (spatial length) of this vector.
With a value, return a vector with same direction but of given length.
'''
mag = self.__mag()
if value is None: return mag
if zero(mag):
raise ValueError, 'Zero-magnitude vector cannot be scaled.'
v = self.__class__(self)
v.__imul__(value / mag)
v._l = value
return v
def dsquared(self, other):
m = 0
for i in range(len(self._v)):
d = self._v[i] - other._v[i]
m += d * d
return m
def distance(self, other):
'''Compare this vector with another, for distance.'''
return math.sqrt(self.dsquared(other))
def normalize(self):
'''Return a vector with the same direction but of unit length.'''
return self.magnitude(1.0)
def order(self, order):
'''Remove elements from the end, or extend with new elements.'''
order = int(order)
if order < 1:
raise ValueError, 'cannot reduce a vector to zero elements'
v = V(self)
while order < len(v._v):
v._v.pop()
while order > len(v._v):
v._v.append(1.0)
v._l = None
return v
#----------------------------------------------------------------------------
class C (V):
# python has a built-in complex() type which is pretty good,
# but we provide this class for completeness and consistency
O = None
j = None
__slots__ = [ '_v', '_l' ]
@classmethod
def __constants__(cls):
        if C.O is not None: return
C.O = C(0+0j) ; C.O._v = tuple(C.O._v)
C.j = C(0+1j) ; C.j._v = tuple(C.j._v)
def __init__(self, *args):
if not args:
args = (0, 0)
a = args[0]
if isinstance(a, complex):
a = (a.real, a.imag)
if isinstance(a, V):
if len(a) != 2: raise TypeError, 'C() takes exactly 2 elements'
self._v = list(a._v[:])
elif isseq(a):
if len(a) != 2: raise TypeError, 'C() takes exactly 2 elements'
self._v = map(float, a)
else:
if len(args) != 2: raise TypeError, 'C() takes exactly 2 elements'
self._v = map(float, args)
#def __repr__(self):
# return 'C(%s+%sj)' % (repr(self._v[0]), repr(self._v[1]))
# addition and subtraction of C() work the same as V()
def dot(self): raise AttributeError, "C instance has no attribute 'dot'"
def __imul__(self, other):
if isinstance(other, C):
sx,sj = self._v
ox,oj = other._v
self._v = [ sx*ox - sj*oj, sx*oj + ox*sj ]
else:
V.__imul__(self, other)
return self
def conjugate(self):
twin = C(self)
twin._v[0] = -twin._v[0]
return twin
#----------------------------------------------------------------------------
class Q (V):
I = None
__slots__ = [ '_v', '_l' ]
@classmethod
def __constants__(cls):
        if Q.I is not None: return
Q.I = Q() ; Q.I._v = tuple(Q.I._v)
def __init__(self, *args):
# x, y, z, w
if not args:
args = (0, 0, 0, 1)
a = args[0]
if isinstance(a, V):
if len(a) != 4: raise TypeError, 'Q() takes exactly 4 elements'
self._v = list(a._v[:])
elif isseq(a):
if len(a) != 4: raise TypeError, 'Q() takes exactly 4 elements'
self._v = map(float, a)
else:
if len(args) != 4: raise TypeError, 'Q() takes exactly 4 elements'
self._v = map(float, args)
self._l = None
# addition and subtraction of Q() work the same as V()
    def dot(self, *args): raise AttributeError, "Q instance has no attribute 'dot'"
#TODO: extra methods to convert euler vectors and quaternions
def conjugate(self):
'''The conjugate of a quaternion has its X, Y and Z negated.'''
twin = Q(self)
for i in range(3):
twin._v[i] = -twin._v[i]
twin._l = twin._l
return twin
def inverse(self):
'''The quaternion inverse is the conjugate with reciprocal W.'''
twin = self.conjugate()
if twin._v[3] != 1.0:
twin._v[3] = 1.0 / twin._v[3]
twin._l = None
return twin
@classmethod
def rotate(cls, axis, theta):
'''Prepare a quaternion that represents a rotation on a given axis.'''
if isinstance(axis, str):
if axis in ('x','X'): axis = V.X
elif axis in ('y','Y'): axis = V.Y
elif axis in ('z','Z'): axis = V.Z
axis = axis.normalize()
s = math.sin(theta / 2.)
c = math.cos(theta / 2.)
return Q( axis._v[0] * s, axis._v[1] * s, axis._v[2] * s, c )
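    # Illustrative check: two quarter-turns about Z compose into a half-turn.
    # >>> Q.rotate('Z', radians(90)) * Q.rotate('Z', radians(90)) == Q.rotate('Z', radians(180))
    # True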
def __imul__(self, other):
if isinstance(other, Q):
sx,sy,sz,sw = self._v
ox,oy,oz,ow = other._v
self._v = [ sw*ox + sx*ow + sy*oz - sz*oy,
sw*oy + sy*ow + sz*ox - sx*oz,
sw*oz + sz*ow + sx*oy - sy*ox,
sw*ow - sx*ox - sy*oy - sz*oz ]
else:
V.__imul__(self, other)
return self
#----------------------------------------------------------------------------
class M (V):
I = None
Z = None
__slots__ = [ '_v' ]
@classmethod
def __constants__(cls):
        if M.I is not None: return
M.I = M() ; M.I._v = tuple(M.I._v)
M.Z = M() ; M.Z._v = (0,)*16
def __init__(self, *args):
'''Constructs a new 4x4 matrix.
If no arguments are given, an identity matrix is constructed.
Any combination of V vectors, tuples, lists or scalars may be given,
but taken together in order, they must have 16 number values total.
'''
# no args gives identity matrix
# 16 scalars collapsed from any combination of lists, tuples, vectors
if not args:
args = (1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1)
if len(args) == 4: args = collapse(*args)
a = args[0]
if isinstance(a, V):
if len(a) != 16: raise TypeError, 'M() takes exactly 16 elements'
self._v = list(a._v[:])
elif isseq(a):
if len(a) != 16: raise TypeError, 'M() takes exactly 16 elements'
self._v = map(float, a)
else:
if len(args) != 16: raise TypeError, 'M() takes exactly 16 elements'
self._v = map(float, args)
@classmethod
def rotate(cls, axis, theta=0.0):
if isinstance(axis, str):
if axis in ('x','X'):
c = math.cos(theta) ; s = math.sin(theta)
return cls( [ 1, 0, 0, 0,
0, c, -s, 0,
0, s, c, 0,
0, 0, 0, 1 ] )
if axis in ('y','Y'):
c = math.cos(theta) ; s = math.sin(theta)
return cls( [ c, 0, s, 0,
0, 1, 0, 0,
-s, 0, c, 0,
0, 0, 0, 1 ] )
if axis in ('z','Z'):
c = math.cos(theta) ; s = math.sin(theta)
return cls( [ c, -s, 0, 0,
s, c, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1 ] )
if isinstance(axis, V):
axis = Q.rotate(axis, theta)
if isinstance(axis, Q):
return cls.twist(axis)
raise ValueError, 'unknown rotation axis'
@classmethod
def twist(cls, torsion):
# quaternion to matrix
torsion = torsion.normalize()
(X,Y,Z,W) = torsion._v
xx = X * X ; xy = X * Y ; xz = X * Z ; xw = X * W
yy = Y * Y ; yz = Y * Z ; yw = Y * W ; zz = Z * Z ; zw = Z * W
a = 1 - 2*(yy + zz) ; b = 2*(xy - zw) ; c = 2*(xz + yw)
e = 2*(xy + zw) ; f = 1 - 2*(xx + zz) ; g = 2*(yz - xw)
i = 2*(xz - yw) ; j = 2*(yz + xw) ; k = 1 - 2*(xx + yy)
return cls( [ a, b, c, 0,
e, f, g, 0,
i, j, k, 0,
0, 0, 0, 1 ] )
@classmethod
def scale(cls, factor):
m = cls()
if isinstance(factor, (V, list, tuple)):
for i in (0,1,2):
m[(i,i)] = factor[i]
else:
for i in (0,1,2):
m[(i,i)] = factor
return m
@classmethod
def translate(cls, offset):
m = cls()
for i in (0,1,2):
m[(3,i)] = offset[i]
return m
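    # Illustrative check: the offset lands in the bottom row and round-trips
    # through translation() below.
    # >>> M.translate(V(1, 2, 3)).translation() == V(1, 2, 3)
    # True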
    @classmethod
    def reflect(cls, normal, dist=0.0):
        # Reflect across the plane with unit normal `normal` at signed
        # distance `dist` from the origin. Assumes the intended bottom
        # (translation) row is 2*dist*n, per the row-vector convention
        # used by translate() above.
        (x, y, z) = normal.normalize()._v[:3]
        (n2x, n2y, n2z) = (-2*x, -2*y, -2*z)
        d = 2.0 * dist
        return cls( [ 1+n2x*x, n2x*y, n2x*z, 0,
                      n2y*x, 1+n2y*y, n2y*z, 0,
                      n2z*x, n2z*y, 1+n2z*z, 0,
                      d*x, d*y, d*z, 1 ] )
    @classmethod
    def shear(cls, amount):
        # Assumption: `amount` is a sequence of six shear factors
        # (yx, zx, xy, zy, xz, yz), filling the matrix sketched here:
        # | 1. yx zx 0. |
        # | xy 1. zy 0. |
        # | xz yz 1. 0. |
        # | 0. 0. 0. 1. |
        (yx, zx, xy, zy, xz, yz) = amount
        return cls( [ 1, yx, zx, 0,
                      xy, 1, zy, 0,
                      xz, yz, 1, 0,
                      0, 0, 0, 1 ] )
    @classmethod
    def frustrum(cls, l, r, b, t, n, f):
        rl = 1/(r-l) ; tb = 1/(t-b) ; fn = 1/(f-n)
        return cls( [ 2*n*rl, 0, (r+l)*rl, 0,
                      0, 2*n*tb, (t+b)*tb, 0,
                      0, 0, -(f+n)*fn, -2*f*n*fn,
                      0, 0, -1, 0 ] )
@classmethod
def perspective(cls, yfov, aspect, n, f):
t = math.tan(yfov/2)*n
b = -t
r = aspect * t
l = -r
return cls.frustrum(l, r, b, t, n, f)
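    # Illustrative usage (values are examples): a 60-degree vertical FOV
    # projection for a 4:3 viewport with near/far planes at 0.1/100.
    # proj = M.perspective(radians(60.0), 4.0/3.0, 0.1, 100.0)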
def __str__(self):
'''Returns a multiple-line string representation of the matrix.'''
# prettier on multiple lines
n = self.__class__.__name__
ns = ' '*len(n)
t = n+'('+', '.join([ repr(self._v[i]) for i in 0,1,2,3 ])+',\n'
t += ns+' '+', '.join([ repr(self._v[i]) for i in 4,5,6,7 ])+',\n'
t += ns+' '+', '.join([ repr(self._v[i]) for i in 8,9,10,11 ])+',\n'
t += ns+' '+', '.join([ repr(self._v[i]) for i in 12,13,14,15 ])+')'
return t
def __getitem__(self, rc):
'''Returns a single element of the matrix.
May index 0-15, or with tuples of (row,column) 0-3 each.
Indexing goes across first, so m[3] is m[0,3] and m[7] is m[1,3].
'''
if not isinstance(rc, tuple): return V.__getitem__(self, rc)
return self._v[rc[0]*4+rc[1]]
def __setitem__(self, rc, value):
'''Injects a single element into the matrix.
May index 0-15, or with tuples of (row,column) 0-3 each.
Indexing goes across first, so m[3] is m[0,3] and m[7] is m[1,3].
'''
        if not isinstance(rc, tuple): return V.__setitem__(self, rc, value)
self._v[rc[0]*4+rc[1]] = float(value)
def dot(self): raise AttributeError, "M instance has no attribute 'dot'"
def magnitude(self): raise AttributeError, "M instance has no attribute 'magnitude'"
def row(self, r, v=None):
'''Returns or replaces a vector representing a row of the matrix.
Rows are counted 0-3. If given, new vector must be four numbers.
'''
if r < 0 or r > 3: raise IndexError, 'row index out of range'
if v is None: return V(self._v[r*4:(r+1)*4])
e = v
if isinstance(v, V): e = v._v
if len(e) != 4: raise ValueError, 'new row must include 4 values'
self._v[r*4:(r+1)*4] = e
return v
def col(self, c, v=None):
'''Returns or replaces a vector representing a column of the matrix.
Columns are counted 0-3. If given, new vector must be four numbers.
'''
if c < 0 or c > 3: raise IndexError, 'column index out of range'
if v is None: return V([ self._v[c+4*i] for i in range(4) ])
e = v
if isinstance(v, V): e = v._v
        if len(e) != 4: raise ValueError, 'new column must include 4 values'
for i in range(4): self._v[c+4*i] = e[i]
return v
def translation(self):
'''Extracts the translation component from this matrix.'''
(a,b,c,d,
e,f,g,h,
i,j,k,l,
m,n,o,p) = self._v
return V(m,n,o)
def rotation(self):
'''Extracts Euler angles of rotation from this matrix.
This attempts to find alternate rotations in case of gimbal lock,
but all of the usual problems with Euler angles apply here.
All Euler angles are in radians.
'''
(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) = self._v
        rotY = math.asin(c)
C = math.cos(rotY)
if (abs(C) > 0.005):
trX = k/C ; trY = -g/C ; rotX = math.atan2(trY, trX)
trX = a/C ; trY = -b/C ; rotZ = math.atan2(trY, trX)
else:
rotX = 0
trX = f ; trY = e ; rotZ = math.atan2(trY, trX)
return V(rotX,rotY,rotZ)
def scaling(self):
'''Extracts the scaling component from this matrix.'''
(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) = self._v
return V(a,f,k)
def determinant(self):
(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) = self._v
# determinants of 2x2 submatrices
kplo = k*p-l*o ; jpln = j*p-l*n ; jokn = j*o-k*n
iplm = i*p-l*m ; iokm = i*o-k*m ; injm = i*n-j*m
# determinants of 3x3 submatrices
d00 = (f*kplo - g*jpln + h*jokn)
d01 = (e*kplo - g*iplm + h*iokm)
d02 = (e*jpln - f*iplm + h*injm)
d03 = (e*jokn - f*iokm + g*injm)
        # determinant of the 4x4
dr = a*d00 - b*d01 + c*d02 - d*d03
return dr
def inverse(self):
(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) = self._v
# determinants of 2x2 submatrices
kplo = k*p-l*o ; jpln = j*p-l*n ; jokn = j*o-k*n
iplm = i*p-l*m ; iokm = i*o-k*m ; injm = i*n-j*m
gpho = g*p-h*o ; ifhn = f*p-h*n ; fogn = f*o-g*n
ephm = e*p-h*m ; eogm = e*o-g*m ; enfm = e*n-f*m
glhk = g*l-h*k ; flhj = f*l-h*j ; fkgj = f*k-g*j
elhi = e*l-h*i ; ekgi = e*k-g*i ; ejfi = e*j-f*i
# determinants of 3x3 submatrices
d00 = (f*kplo - g*jpln + h*jokn)
d01 = (e*kplo - g*iplm + h*iokm)
d02 = (e*jpln - f*iplm + h*injm)
d03 = (e*jokn - f*iokm + g*injm)
d10 = (b*kplo - c*jpln + d*jokn)
d11 = (a*kplo - c*iplm + d*iokm)
d12 = (a*jpln - b*iplm + d*injm)
d13 = (a*jokn - b*iokm + c*injm)
d20 = (b*gpho - c*ifhn + d*fogn)
d21 = (a*gpho - c*ephm + d*eogm)
d22 = (a*ifhn - b*ephm + d*enfm)
d23 = (a*fogn - b*eogm + c*enfm)
d30 = (b*glhk - c*flhj + d*fkgj)
d31 = (a*glhk - c*elhi + d*ekgi)
d32 = (a*flhj - b*elhi + d*ejfi)
d33 = (a*fkgj - b*ekgi + c*ejfi)
# reciprocal of the determinant of the 4x4
dr = 1.0 / (a*d00 - b*d01 + c*d02 - d*d03)
# inverse
return self.__class__( [ d00*dr, -d10*dr, d20*dr, -d30*dr,
-d01*dr, d11*dr, -d21*dr, d31*dr,
d02*dr, -d12*dr, d22*dr, -d32*dr,
-d03*dr, d13*dr, -d23*dr, d33*dr ] )
    def transpose(self):
        return self.__class__( [ self._v[i] for i in [ 0, 4,  8, 12,
                                                       1, 5,  9, 13,
                                                       2, 6, 10, 14,
                                                       3, 7, 11, 15 ] ] )
def __mul__(self, other):
# called in case of m *m, m * v, m * s
# support 3d m * v by extending to 4d v
if isinstance(other, V):
if len(other._v) == 3:
other = V(other._v[0], other._v[1], other._v[2], 1)
if len(other._v) == 4:
a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p = self._v
X,Y,Z,W = other._v
return V( a*X + b*Y + c*Z + d*W,
e*X + f*Y + g*Z + h*W,
i*X + j*Y + k*Z + l*W,
m*X + n*Y + o*Z + p*W )
return self.__class__(self).__imul__(other)
def __rmul__(self, other):
# called in case of s * m or v * m
# support 3d v * m by extending to 4d v
if isinstance(other, V):
if len(other._v) == 3:
other = V(other._v[0], other._v[1], other._v[2], 1)
if len(other._v) == 4:
A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P = self._v
x,y,z,w = other._v
return V( x*A + y*E + z*I + w*M,
x*B + y*F + z*J + w*N,
x*C + y*G + z*K + w*O,
x*D + y*H + z*L + w*P )
return self.__class__(self).__imul__(other)
def __imul__(self, other):
# can m *= s
# can't m *= v since answer is vector
if not isinstance(other, V):
other = float(other)
self._v = [ self._v[i]*other for i in range(len(self._v)) ]
elif len(other._v) == 16:
s0,s1,s2,s3,s4,s5,s6,s7,s8,s9,sA,sB,sC,sD,sE,sF = self._v
o0,o1,o2,o3,o4,o5,o6,o7,o8,o9,oA,oB,oC,oD,oE,oF = other._v
self._v = [ o0*s0 + o1*s4 + o2*s8 + o3*sC, #
o0*s1 + o1*s5 + o2*s9 + o3*sD,
o0*s2 + o1*s6 + o2*sA + o3*sE,
o0*s3 + o1*s7 + o2*sB + o3*sF,
o4*s0 + o5*s4 + o6*s8 + o7*sC, #
o4*s1 + o5*s5 + o6*s9 + o7*sD,
o4*s2 + o5*s6 + o6*sA + o7*sE,
o4*s3 + o5*s7 + o6*sB + o7*sF,
o8*s0 + o9*s4 + oA*s8 + oB*sC, #
o8*s1 + o9*s5 + oA*s9 + oB*sD,
o8*s2 + o9*s6 + oA*sA + oB*sE,
o8*s3 + o9*s7 + oA*sB + oB*sF,
oC*s0 + oD*s4 + oE*s8 + oF*sC, #
oC*s1 + oD*s5 + oE*s9 + oF*sD,
oC*s2 + oD*s6 + oE*sA + oF*sE,
oC*s3 + oD*s7 + oE*sB + oF*sF ]
else:
raise ValueError, 'multiply by 4d matrix or 4d vector or scalar'
return self
#----------------------------------------------------------------------------
V.__constants__()
C.__constants__()
Q.__constants__()
M.__constants__()
def angle(a, b):
'''Find the angle (scalar value in radians) between two 3d vectors.'''
a = a.normalize()
b = b.normalize()
if a == b: return 0.0
return math.acos(a.dot(b))
def track(v):
'''Find the track (direction in positive radians) of a 2d vector.
    E.g., track(V(1,0)) == 0 radians; track(V(0,1)) == math.pi/2 radians;
    track(V(-1,0)) == math.pi radians; and track(V(0,-1)) == math.pi*3/2.
'''
t = math.atan2(v[1], v[0])
if t < 0:
return t + 2.*math.pi
return t
def quangle(a, b):
'''Find a quaternion that rotates one 3d vector to parallel another.'''
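    # Shortest-arc construction: q = (a x b, |a||b| + a.b); once normalized,
    # this quaternion rotates a onto b (the scalar term encodes the half angle).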
x = a.cross(b)
w = a.magnitude() * b.magnitude() + a.dot(b)
return Q(x[0], x[1], x[2], w)
def dsquared(one, two):
'''Find the square of the distance between two points.'''
# working with squared distances is common to avoid slow sqrt() calls
m = 0
for i in range(len(one._v)):
d = one._v[i] - two._v[i]
m += d * d
return m
def distance(one, two):
'''Find the distance between two points.
Equivalent to (one-two).magnitude().
'''
return math.sqrt(dsquared(one, two))
def nearest(point, neighbors):
'''Find the nearest neighbor point to a given point.'''
best = None
for other in neighbors:
d = dsquared(point, other)
if best is None or d < best[1]:
best = (other, d)
return best[0]
def farthest(point, neighbors):
'''Find the farthest neighbor point to a given point.'''
best = None
for other in neighbors:
d = dsquared(point, other)
if best is None or d > best[1]:
best = (other, d)
return best[0]
#----------------------------------------------------------------------------
def __test__():
from testing import __ok__, __report__
print 'Testing basic math...'
__ok__(equal(1.0, 1.0), True)
__ok__(equal(1.0, 1.01), False)
__ok__(equal(1.0, 1.0001), False)
__ok__(equal(1.0, 0.9999), False)
__ok__(equal(1.0, 1.0000001), False)
__ok__(equal(1.0, 0.9999999), False)
__ok__(equal(1.0, 1.0000000001), True)
__ok__(equal(1.0, 0.9999999999), True)
__ok__(equal(degrees(0), 0.0))
__ok__(equal(degrees(math.pi/2), 90.0))
__ok__(equal(degrees(math.pi), 180.0))
__ok__(equal(radians(0.0), 0.0))
__ok__(equal(radians(90.0), math.pi/2))
__ok__(equal(radians(180.0), math.pi))
print 'Testing V vector class...'
# structural construction
__ok__(V.O is not None, True)
__ok__(V.O._v is not None, True)
__ok__(V.O._v, (0., 0., 0.)) ; __ok__(V.O._l, 0.)
__ok__(V.X._v, (1., 0., 0.)) ; __ok__(V.X._l, 1.)
__ok__(V.Y._v, (0., 1., 0.)) ; __ok__(V.Y._l, 1.)
__ok__(V.Z._v, (0., 0., 1.)) ; __ok__(V.Z._l, 1.)
a = V(3., 2., 1.) ; __ok__(a._v, [3., 2., 1.])
a = V((1., 2., 3.)) ; __ok__(a._v, [1., 2., 3.])
a = V([1., 1., 1.]) ; __ok__(a._v, [1., 1., 1.])
a = V(0.) ; __ok__(a._v, [0.]) ; __ok__(a._l, 0.)
a = V(3.) ; __ok__(a._v, [3.]) ; __ok__(a._l, 3.)
# constants and direct comparisons
__ok__(V.O, V(0.,0.,0.))
__ok__(V.X, V(1.,0.,0.))
__ok__(V.Y, V(0.,1.,0.))
__ok__(V.Z, V(0.,0.,1.))
# formatting and elements
__ok__(repr(V.X), 'V(1.0, 0.0, 0.0)')
__ok__(V.X[0], 1.)
__ok__(V.X[1], 0.)
__ok__(V.X[2], 0.)
# simple addition
__ok__(V.X + V.Y, V(1.,1.,0.))
__ok__(V.Y + V.Z, V(0.,1.,1.))
__ok__(V.X + V.Z, V(1.,0.,1.))
# didn't overwrite our constants, did we?
__ok__(V.X, V(1.,0.,0.))
__ok__(V.Y, V(0.,1.,0.))
__ok__(V.Z, V(0.,0.,1.))
a = V(3.,2.,1.)
b = a.normalize()
__ok__(a != b)
__ok__(a == V(3.,2.,1.))
__ok__(b.magnitude(), 1)
b = a.magnitude(5)
__ok__(a == V(3.,2.,1.))
__ok__(b.magnitude(), 5)
__ok__(equal(b.dsquared(V.O), 25))
a = V(3.,2.,1.).normalize()
__ok__(equal(a[0], 0.80178372573727319))
b = V(1.,3.,2.).normalize()
__ok__(equal(b[2], 0.53452248382484879))
d = a.dot(b)
__ok__(equal(d, 0.785714285714), True)
__ok__(V(2., 2., 1.) * 3, V(6, 6, 3))
__ok__(3 * V(2., 2., 1.), V(6, 6, 3))
__ok__(V(2., 2., 1.) / 2, V(1, 1, 0.5))
v = V(1,2,3)
w = V(4,5,6)
__ok__(v.cross(w), V(-3,6,-3))
__ok__(v.cross(w), v*w)
__ok__(v*w, -(w*v))
__ok__(v.dot(w), 32)
__ok__(v.dot(w), w.dot(v))
__ok__(zero(angle(V(1,1,1), V(2,2,2))), True)
__ok__(equal(90.0, degrees(angle(V(1,0,0), V(0,1,0)))), True)
__ok__(equal(180.0, degrees(angle(V(1,0,0), V(-1,0,0)))), True)
__ok__(equal( 0.0, degrees(track(V( 1, 0)))), True)
__ok__(equal( 90.0, degrees(track(V( 0, 1)))), True)
__ok__(equal(180.0, degrees(track(V(-1, 0)))), True)
__ok__(equal(270.0, degrees(track(V( 0,-1)))), True)
__ok__(equal( 45.0, degrees(track(V( 1, 1)))), True)
__ok__(equal(135.0, degrees(track(V(-1, 1)))), True)
__ok__(equal(225.0, degrees(track(V(-1,-1)))), True)
__ok__(equal(315.0, degrees(track(V( 1,-1)))), True)
print 'Testing C complex number class...'
__ok__(C(1,2) is not None, True)
__ok__(C(1,2)[0], 1.0)
__ok__(C(1+2j)[0], 1.0)
__ok__(C((1,2))[1], 2.0)
__ok__(C(V([1,2]))[1], 2.0)
__ok__(C(3+2j) * C(1+4j), C(-5+14j))
try:
__ok__(C(1,2,3) is not None, True)
except TypeError: # takes exactly 2 elements
__ok__(True, True)
try:
__ok__(C([1,2,3]) is not None, True)
except TypeError: # takes exactly 2 elements
__ok__(True, True)
print 'Testing Q quaternion class...'
__ok__(Q(1,2,3,4) is not None, True)
__ok__(Q(1,2,3,4)[1], 2.0)
__ok__(Q((1,2,3,4))[2], 3.0)
__ok__(Q(V(1,2,3,4))[3], 4.0)
__ok__(Q(), Q(0,0,0,1))
__ok__(Q(1,2,3,4).conjugate(), Q(-1,-2,-3,4))
print 'Testing M matrix class...'
m = M()
__ok__(V(1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1), m)
__ok__(m.row(0), V(1,0,0,0))
__ok__(m.row(2), V(0,0,1,0))
__ok__(m.col(1), V(0,1,0,0))
__ok__(m.col(3), V(0,0,0,1))
__ok__(m[5], 1.0)
__ok__(m[1,1], 1.0)
__ok__(m[6], 0.0)
__ok__(m[1,2], 0.0)
__ok__(m * V(1,2,3,4), V(1,2,3,4))
__ok__(V(1,2,3,4) * m, V(1,2,3,4))
mm = m * m
__ok__(mm.__class__, M)
__ok__(mm, M.I)
mm = m * 2
__ok__(mm.__class__, M)
__ok__(mm, 2.0 * m)
__ok__(mm[3,3], 2)
__ok__(mm[3,2], 0)
__ok__(M.rotate('X',radians(90)),
M.twist(Q.rotate('X',radians(90))))
__ok__(M.twist(Q(0,0,0,1)), M.I)
__ok__(M.twist(Q(.5,0,0,1)),
M.twist(Q(.5,0,0,1).normalize()))
__ok__(V.O * M.translate(V(1,2,3)),
V(1,2,3,1))
__ok__((V.X+V.Y+V.Z) * M.translate(V(1,2,3)),
V(2,3,4,1))
# need some tests on m.determinant()
m = M()
m = m.translate(V(1,2,3))
__ok__(m.inverse(), M().translate(-V(1,2,3)))
m = m.rotate('Y', radians(30))
__ok__(m * m.inverse(), M.I)
__report__()
def __time__():
from testing import __time__
__time__("(V.X+V.Y).magnitude() memo",
"import vectors; x=(vectors.V.X+vectors.V.Y)",
"x._l = x._l ; x.magnitude()")
__time__("(V.X+V.Y).magnitude() unmemo",
"import vectors; x=(vectors.V.X+vectors.V.Y)",
"x._l = None ; x.magnitude()")
import psyco
psyco.full()
__time__("(V.X+V.Y).magnitude() memo [psyco]",
"import vectors; x=(vectors.V.X+vectors.V.Y)",
"x._l = x._l ; x.magnitude()")
__time__("(V.X+V.Y).magnitude() unmemo [psyco]",
"import vectors; x=(vectors.V.X+vectors.V.Y)",
"x._l = None ; x.magnitude()")
if __name__ == '__main__':
import sys
if 'test' in sys.argv:
__test__()
elif 'time' in sys.argv:
__time__()
else:
raise Exception, \
'This module is not a stand-alone script. Import it in a program.'
|
ikbtbasics/ik_classes.py | uw-biorobotics/IKBT | 129 | 11114426 | #!/usr/bin/python
#
# Inverse Kinematics Classes
#
# Copyright 2017 University of Washington
# Developed by <NAME> and <NAME>
# BioRobotics Lab, University of Washington
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sympy as sp
import shutil as sh
import os as os
#import numpy as np
import ikbtbasics.pykinsym as pks
import re
from ikbtbasics.solution_graph_v2 import *
import ikbtbasics.matching as mtch
import sys as sys
import b3 as b3 # behavior trees
import pickle
from ikbtfunctions.helperfunctions import *
import ikbtfunctions.graph2latex as gl
#from kin_cl import *
import ikbtbasics.kin_cl as kc
# generic variables for any manipulator
((th_1, th_2, th_3, th_4, th_5, th_6)) = sp.symbols(('th_1', 'th_2', 'th_3', 'th_4', 'th_5', 'th_6'))
((d_1, d_2, d_3, d_4, d_5, d_6)) = sp.symbols(('d_1', 'd_2', 'd_3', 'd_4', 'd_5', 'd_6'))
((h,l_0, l_1, l_2, l_3, l_4, l_5, l_6)) = sp.symbols(('h','l_0', 'l_1', 'l_2', 'l_3', 'l_4', 'l_5', 'l_6'))
((a_2, a_3)) = sp.symbols(('a_2', 'a_3'))
sp.var('l_5 l_6')
sp.var('th_12, th_23, th_34, th_45, th_56')
sp.var('th_123,th_234,th_345,th_456')
sp.var('c_12 s_12 c_23 s_23 c_34 s_34 c_45 s_45 c_56 s_56 c_13 s_13')
sp.var('x') # super generic place holder
soa_vars = [th_12, th_23, th_34, th_45, th_56] # a list of the sum-of-angles variables
soa_expansions= {}
soa_expansions[th_12] = th_1 + th_2
soa_expansions[th_23] = th_2 + th_3
soa_expansions[th_34] = th_3 + th_4
soa_expansions[th_45] = th_4 + th_5
soa_expansions[th_56] = th_5 + th_6
soa_expansions[th_123] = th_1 + th_2 + th_3
soa_expansions[th_234] = th_2 + th_3 + th_4
soa_expansions[th_345] = th_3 + th_4 + th_5
soa_expansions[th_456] = th_4 + th_5 + th_6
pprotocol = 2
#
# retrieve forward kinematics from a pickle file if it exists.
# if it doesn't, compute the FK and store it in a pickle file.
def kinematics_pickle(rname, dh, constants, pvals, vv, unks, test):
#
# Check for a pickle file of combined pre-computed Mech and Robot objects
#
# TODO: refactor code to get rid of unused "test" argument
pickle_dir = 'fk_eqns/'
if not os.path.isdir(pickle_dir): # if this doesn't exist, create it.
print('Creating a new pickle directory: ./'+pickle_dir)
os.mkdir(pickle_dir)
name = pickle_dir + rname + '_pickle.p'
print('kinematics pickle: trying to open ', name,' in ', os.getcwd())
if(os.path.isfile(name)):
with open(name, 'rb') as pick:
            print('\tTrying to read pre-computed forward kinematics from '+name)
[m, R, unknowns] = pickle.load(pick)
print('Successfully read pre-computed forward kinematics')
print('pickle contained ', len(unknowns), ' unknowns')
else:
#print 'WRONG - quitting, error: ',sys.exc_info()[0]
#sys.exit
# set up mechanism object instance
m = kc.mechanism(dh, constants, vv)
m.pvals = pvals # store numerical values of parameters
print('Did not find VALID stored pickle file: ', name)
print("Starting Forward Kinematics")
m.forward_kinematics()
print("Completed Forward Kinematics")
print('Starting Sum of Angles scan (slow!)')
# set up Robot Object instance
R = Robot(m, rname) # set up IK structs etc
R.scan_for_equations(unks) # generate equation lists
# below is commented out for testing and devel of sum_of_angles_transform
R.sum_of_angles_transform(unks) # find sum of angles
R.generate_solution_nodes(unks) # generate solution nodes
print(' Storing kinematics pickle for '+rname + '('+name+')')
with open(name,'wb') as pf:
pickle.dump( [m, R, unks], pf, protocol=pprotocol)
unknowns = unks # be sure to return updated unknown list (including SOAs)
return [m,R,unknowns]
def check_the_pickle(dh1, dh2): # check that two mechanisms have identical DH params
flag = False
if (dh1.shape[0] != dh2.shape[0]):
print(' Wrong number of rows!')
flag = True
else:
for r in range(0,dh1.shape[0]):
for c in [0,1,2,3]:
if(dh1[r,c] != dh2[r,c]):
flag = True
if(flag):
print('''\n\n -----------------------------------------------------
DH parameters Differ
Pickle file is out of date.
please remove it and start again
-----------------------------------------------------
''')
quit()
## retrieve thxy from thx, thy
def find_xy(thx, thy):
# lookup table for thxy
print('test: find_xy:',thx, thy)
thxy_lookup = {
th_1:[th_12, th_123],
th_2:[th_12, th_23, th_123, th_234],
th_3:[th_23, th_34, th_123, th_234, th_345],
th_4:[th_34, th_45, th_234, th_345, th_456],
th_5:[th_45, th_56, th_345, th_456],
th_6:[th_56, th_456]
}
# add 3-way SOA's to thxy_lookup
thxy_lookup[th_12] = [th_123]
thxy_lookup[th_23] = [th_123,th_234]
thxy_lookup[th_34] = [th_234,th_345]
thxy_lookup[th_45] = [th_345,th_456]
thxy_lookup[th_56] = [th_456]
# one symbol in common is the th_xy we're looking for
thx_s = set(thxy_lookup[thx])
thy_s = set(thxy_lookup[thy])
thxy_s = thx_s.intersection(thy_s)
thxy = thxy_s.pop()
return thxy
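# Illustrative call (hypothetical): find_xy(th_2, th_3) returns th_23, the
# single sum-of-angles symbol shared by both lookup lists.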
#def find_sum(thx,thy):
# new approach for same problem as find_xy()
# Class to contain all robot info
class Robot:
def __init__(self, Mech=None, name="*Unnamed*"):
self.name = name
# the following data pertain to the solution tree for this Robot
self.solveN = 0 # index of current solution in solving sequence
self.soltag = '' # suffix tag for current solution level leafs
self.params = [] # constant dh params such as l_4 etc.
self.solution_nodes = [] # first one is the root, by solve order
self.variables_symbols = []
#
# "notations" means specifically labeled solution variables such as
# th_1s2 (theta-1, solution 2)
self.notation_graph = set() #solution nodes notation graph
self.notation_collections = [] #solution notations divided into subgroups
self.min_index = 0
self.max_index = 0
# mequation_list: all the 4x4 Matrix FK equations
self.mequation_list = []
# kequation_aux_list: sum of angle eqns such as eg th_23 = th_2+th_3
self.kequation_aux_list = []
# a matrix equation in which to embed important kequations equations
# this is done for compatibility with the id/solvers
# self.SOA_eqns = kc.matrix_equation()
# To add a kequation, use R.kequation_aux_list.append(neweqn)
if(Mech != None): # in testing situations we only need a "Robot" to keep track of solutions above
self.Mech = Mech
self.min_index = 0 # start DOF of the current chain
# min_index starts at 0 for ALL manips.
# max index == index of highest unsolved link variable
# define indices for DH table
d = 2 # joint offset DH param index
th = 3 # joint angle DH param index
self.max_index = -99
assert (self.Mech.DH[0,d]!=0 or self.Mech.DH[0,th] != 0), "You do not have a variable in first DH row!"
for i in [5,4,3,2,1]:
if(self.Mech.DH[i,d]!=0 or self.Mech.DH[i,th] != 0):
self.max_index = i # end DOF of the current chain
break
assert (self.max_index > 0), "Couldn't find mechanism index"
#
# build up the equations to solve:
self.mequation_list = Mech.get_mequation_set() # all the Matrix FK equations
print('ik_classes: length Robot.mequation_list: ', len(self.mequation_list))
def generate_solution_nodes(self, unknowns):
'''generate solution nodes'''
for unk in unknowns:
if unk.solvemethod != '': # this means the unk was not used at all in solution
# typically SOA unknowns like th_23
self.solution_nodes.append(Node(unk))
self.variables_symbols.append(unk.symbol)
print(self.solution_nodes)
print(self.variables_symbols)
# get lists of unsolved equations having 1 and 2 unks
# class Robot:
def scan_for_equations(self,variables):
        self.l1 = []   # equations with one unknown (if any)
self.l2 = [] # equations with two unknowns
self.l3p = [] # 3 OR MORE unknowns
sp.var('x') #this will be used to generate 'algebraic zero'
#elist = self.mequation_list.append(self.kequation_aux_list)
elist = self.mequation_list
#print '------------------------- elist----'
#print elist
#print ('-------- (aux eqns):')
#print (self.kequation_aux_list)
#print ('--------')
assert (len(elist) > 0), ' not enough equations '
#i=0
#for e in self.kequation_aux_list:
#elist[0][3][i] = e # HACK: put aux eqns into row 4 Meqn[0]
#print 'scan_for_equns: putting ', e, 'into eqn'
#i+=1
for eqn in elist:
lhs = eqn.Td #4x4 matrix
rhs = eqn.Ts #4x4 matrix
for i in [0,1,2,3]:
for j in range(0,4):
lh1x1 = lhs[i,j]
rh1x1 = rhs[i,j]
n = count_unknowns(variables, lh1x1) + count_unknowns(variables, rh1x1)
e1 = kc.kequation(lh1x1, rh1x1)
                    if(n==1):
                        if e1 not in self.l1:
                            self.l1.append(e1)   # only append if not already there
                    if(n==2):
                        if e1 not in self.l2:
                            self.l2.append(e1)   # only append if not already there
if(n > 2):
if e1 not in self.l3p:
self.l3p.append(e1) # only append if not already there
#Process the SOA equations
for e in self.kequation_aux_list:
lhs = e.LHS
rhs = e.RHS
n = count_unknowns(variables, lhs) + count_unknowns(variables, rhs)
if(n==1):
self.l1.append(kc.kequation(lhs, rhs)) # change from 0, rhs-lhs !! ************
if(n==2):
self.l2.append(kc.kequation(lhs, rhs))
self.l1 = erank(self.l1) # sort the equations (in place) so solvers get preferred eqns first
self.l2 = erank(self.l2)
self.l3p = erank(self.l3p)
return [self.l1, self.l2, self.l3p]
#end of scan_for_eqns
#
# Testing use only:
# Get equation lists from just a matrix equation
# (this is used when generating tests NOT from DH params)
#
def scan_Mequation(self,Meqn,variables):
self.l1 = []
self.l2 = []
for eqn in Meqn.get_kequation_list():
lh1x1 = eqn.LHS #4x4 matrix
rh1x1 = eqn.RHS #4x4 matrix
n = count_unknowns(variables, lh1x1) + count_unknowns(variables, rh1x1)
#e1 = kequation(lh1x1, rh1x1) # change from 0,rh1x1-lh1x1 **********
e1 = eqn
            if(n==1):
                if e1 not in self.l1:
                    self.l1.append(e1)   # only append if not already there
            if(n==2):
                if e1 not in self.l2:
                    self.l2.append(e1)   # only append if not already there
self.l1 = erank(self.l1) # sort the equations (in place) so solvers get preferred eqns first
self.l2 = erank(self.l2)
return [self.l1, self.l2]
#
# identify sum of angles terms and transform them to new variable
#
def sum_of_angles_transform(self,variables):
print('Starting sum-of-angles scan. Please be patient')
unkn_sums_sym = set() #keep track of joint variable symbols
#k = equation number
#i = row, j=col
nits = len(self.mequation_list) * 3 * 4 # total number of equations
        barlen = nits // 2
it_number = 0
for k in range(0,len(self.mequation_list)): # contains duplicates
Meq = self.mequation_list[k] # get next matrix equation
for i in [0,1,2]: # only first three rows are interesting
for j in [0,1,2,3]: # but check all 4 columns
it_number += 1
#print ' .. '
prog_bar(it_number, nits, barlen, 'Sum of Angles')
#print 'kij: ', k,i,j
#print 'Sum of Angles: eqn,row,col: ', k,i,j
# simplify with lasting effect (note: try sp.trigsimp() for faster????)
Meq.Ts[i,j] = sp.simplify(Meq.Ts[i,j]) # simplify should catch c1s2+s1c2 etc. (RHS)
Meq.Td[i,j] = sp.simplify(Meq.Td[i,j]) # simplify should catch c1s2+s1c2 etc. (LHS)
lhs = Meq.Td[i,j]
rhs = Meq.Ts[i,j]
# simplify LHS
lhs, newj, newe = sum_of_angles_sub(self, lhs, variables)
if newj:
variables.append(newj)
if newe:
self.kequation_aux_list.append(newe)
# simplify RHS
rhs, newj, newe= sum_of_angles_sub(self, rhs, variables)
if newj:
variables.append(newj)
if newe:
self.kequation_aux_list.append(newe)
Meq.Td[i,j] = lhs
Meq.Ts[i,j] = rhs
prog_bar(-1,100,100, '') # clear the progress bar
#x = raw_input('<enter> to cont...')
print('Completed sum-of-angles scan.')
##################
#
# substitute th_23 for th_2+th_3 etc.
# (april: separate out for easier testing)
def sum_of_angles_sub(R, expr, variables):
aw = sp.Wild('aw')
bw = sp.Wild('bw')
cw = sp.Wild('cw')
newjoint = None
tmpeqn = None
found2 = found3 = False
matches = expr.find(sp.sin(aw+bw+cw)) | expr.find(sp.cos(aw+bw+cw))
#print '- - - - -'
#print expr
#print matches
for m in matches: # analyze each match
d = m.match(sp.cos(aw + bw + cw))
d1 = m.match(sp.sin(aw + bw + cw))
if d != None and d1 != None:
d.update(d1)
if d == None and d1 != None:
d = d1
        # To find sum-of-angles arguments, collect the non-zero matches
        # among aw ... cw into varlist (nzer counts the zero matches)
        nzer = 0
varlist = []
for k1 in d.keys():
if d[k1] == 0:
nzer += 1
else:
varlist.append(d[k1])
#print 'varlist: ', varlist
if len(varlist) == 2:
found2 = True
if len(varlist) == 3:
found3 = True
newjoint = None
tmpeqn = None
if(found2 or found3): # we've got a SOA!
# generate index of the SOA variable
nil = [] #new index list = 'nil'
for v in varlist: # build the new subscript
nil.append( str(get_variable_index(variables, v)) )
nil.sort() # get consistent order of indices
ni = ''
for c in nil: # make into a string
ni += c # build up subscript e.g. 234
#print 'New index: '+ni
vexists = False
# has this SOA been found before? Did we already make it?
for v in variables:
if v.n == int(ni): # i.e. if th_23 is aready defined
th_subval = v
vexists = True
newjoint = None
tmpeqn = None
        th_new = sp.var('th_'+ni)  # sp.var returns the existing symbol if one is already defined
th_subval = th_new
if not vexists:
print(": found new 'joint' (sumofangle) variable: ", th_new)
# try moving soa equation to Tm.auxeqns
#unkn_sums_sym.add(th_new) #add into the joint variable set
newjoint = kc.unknown(th_new)
newjoint.n = int(ni) # generate e.g. 234 = 10*2 + 34
newjoint.solved = False # just to be clear for count_unknowns
variables.append(newjoint) #add it to unknowns list
tmpeqn = kc.kequation(th_new, d[aw] + d[bw] + d[cw])
print('sum_of_angles_sub: created new equation:', tmpeqn)
#
# Add the def of this SOA to list: eg th23 = th2+th3
# BUT it needs to be embedded into a 4x4 mequation so
# that solvers can scan it properly
R.kequation_aux_list.append(tmpeqn)
# substitute new variable into the kinematic equations
# Problem Dec'21:
# If there is a three-way sub, prefer it to a two-way sub. e.g:
# (a+b+c) -> (abc) instead of (a+bc)(!)
#
#self.mequation_list[k].Td[i,j] = Meq.Td[i,j].subs(d[aw] + d[bw] + d[cw], th_subval)
#self.mequation_list[k].Ts[i,j] = Meq.Ts[i,j].subs(d[aw] + d[bw] + d[cw], th_subval)
expr = expr.subs(d[aw] + d[bw] + d[cw], th_subval)
#print 'sum of angles (ik_classes): NEW Eqns (k,i,j)',k,i,j
#print self.mequation_list[k].Td[i,j]
#print ' = '
#print self.mequation_list[k].Ts[i,j]
#print '========'
if tmpeqn is not None:
print('sum_of_angles_sub: Ive found a new SOA equation, ', tmpeqn)
return (expr, newjoint, tmpeqn)
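
# Illustrative effect of sum_of_angles_sub (a hypothetical example): given
# expr = sp.sin(th_2 + th_3), it returns roughly
# (sp.sin(th_23), <new kc.unknown for th_23>, kc.kequation(th_23, th_2 + th_3)).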
def get_variable_index(vars, symb):
    found = False
    for v in vars:
        if v.n == 0:
            print('get_variable_index()/ik_classes: at least one index is not initialized for joint variables (or is 0!)')
            quit()
#print 'get_variable_index: v[i], symb, v[i].n ',str(v.symbol),str(symb), v.n
if v.symbol == symb:
found = True
return v.n
assert found, 'Error: trying to get index of an unknown joint variable' + str(symb)
#if found == False:
#print 'Error: trying to get index of an unknown joint variable'
#print 'symbol: ', symb
#print vars
#quit()
return False
# class kequation() now moved to kin_cl.py
# class unknown(object) now moved to kin_cl.py
# matrix_equation class moved to kin_cl.py
# #
# Print text-based solution graph
#
def output_solution_graph(R):
print('========== Solution output ================')
print(' ' + R.name)
for node in R.solution_nodes:
if node.solveorder != -1: #node is solved
print('\n\n', node.solveorder, node.symbol, ' by method: ', node.solvemethod, ', ', node.nsolutions, ' solution(s)')
print(node.solution_with_notations)
# print all edges in graph
print('========== Solution Graph (Edges) output ================')
for edge in R.notation_graph:
print(edge)
print('========== End Solution output ================')
def erank(list_L):    # rearrange list of eqns by length
    # the sort below is ascending by operation count, so the shortest
    # equations come first and the system will prefer to solve shorter
    # equations (i.e. prefer shorter solutions where two exist)
    # since the sorting is from lower to higher
    # it should not be reversed when putting into the list - D.Z.
sorted_ls = []
list_d = {}
for e in list_L:
count = int(sp.count_ops(e.RHS)) + int(sp.count_ops(e.LHS))
if count not in list_d.keys():
list_d[count] = []
list_d[count].append(e)
keys = list_d.keys()
keys = sorted(keys, reverse= False)
for key in keys:
sorted_ls.extend(list_d[key])
return sorted_ls
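
# Illustrative ordering (assuming sp.count_ops as the size measure): with
# e1 = kc.kequation(x, a+b+c) and e2 = kc.kequation(x, a),
# erank([e1, e2]) returns [e2, e1], so solvers see the shorter equation first.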
############# main test the library #########################
#
if __name__ == "__main__": # tester code for the classes in this file
# testing for these classes and methods now in tests/leavestest.py
# TBD properly integrate with unittest module
pass
|
packages/opal-common/opal_common/fetcher/engine/fetch_worker.py | permitio/opal | 106 | 11114428 | import asyncio
from typing import Coroutine
from ..events import FetchEvent
from ..fetcher_register import FetcherRegister
from ..logger import get_logger
from .base_fetching_engine import BaseFetchingEngine
logger = get_logger("fetch_worker")
async def fetch_worker(queue: asyncio.Queue, engine):
"""The worker task performing items added to the Engine's Queue.
Args:
queue (asyncio.Queue): The Queue
engine (BaseFetchingEngine): The engine itself
"""
engine: BaseFetchingEngine
register: FetcherRegister = engine.register
while True:
# types
event: FetchEvent
callback: Coroutine
        # get an event from the queue
event, callback = await queue.get()
# take care of it
try:
# get fetcher for the event
fetcher = register.get_fetcher_for_event(event)
# fetch
async with fetcher:
res = await fetcher.fetch()
data = await fetcher.process(res)
# callback to event owner
try:
await callback(data)
except Exception as err:
logger.exception(f"Fetcher callback - {callback} failed")
await engine._on_failure(err, event)
except Exception as err:
logger.exception("Failed to process fetch event")
await engine._on_failure(err, event)
finally:
# Notify the queue that the "work item" has been processed.
queue.task_done()
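

# A minimal usage sketch (illustrative only; the engine wiring here is an
# assumption, not part of this module). Several workers can share one queue:
#
#   async def start_workers(engine, queue: asyncio.Queue, n_workers: int = 2):
#       return [
#           asyncio.create_task(fetch_worker(queue, engine))
#           for _ in range(n_workers)
#       ]
#
# Each (event, callback) pair put on the queue is fetched, processed and
# acknowledged exactly once via queue.task_done().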
|
testrunner/app_level_tests.py | nanjekyejoannah/pypy | 381 | 11114430 | <filename>testrunner/app_level_tests.py
#!/usr/bin/env python
"""
This is what the buildbot runs to execute the app-level tests
on top of pypy-c.
"""
import sys, os
import subprocess
rootdir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
os.environ['PYTHONPATH'] = rootdir
os.environ['PYTEST_PLUGINS'] = ''
popen = subprocess.Popen(
[sys.executable, "testrunner/runner.py",
"--logfile=pytest-A.log",
"--config=pypy/pytest-A.cfg",
"--config=pypy/pytest-A.py",
"--config=~/machine-A_cfg.py",
"--root=pypy", "--timeout=3600",
] + sys.argv[1:],
cwd=rootdir)
try:
ret = popen.wait()
except KeyboardInterrupt:
popen.kill()
print "\ninterrupted"
ret = 1
sys.exit(ret)
|
static.py | wschae/wschae.github.io | 191 | 11114436 | #!/usr/bin/env python
"""static - A stupidly simple WSGI way to serve static (or mixed) content.
(See the docstrings of the various functions and classes.)
Copyright (C) 2006-2009 <NAME> - http://lukearno.com/
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to:
The Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
Luke Arno can be found at http://lukearno.com/
"""
import mimetypes
import rfc822
import time
import string
import sys
from os import path, stat, getcwd
from wsgiref import util
from wsgiref.headers import Headers
from wsgiref.simple_server import make_server
from optparse import OptionParser
try: from pkg_resources import resource_filename, Requirement
except: pass
try: import kid
except: pass
class MagicError(Exception): pass
class StatusApp:
"""Used by WSGI apps to return some HTTP status."""
def __init__(self, status, message=None):
self.status = status
if message is None:
self.message = status
else:
self.message = message
def __call__(self, environ, start_response, headers=[]):
if self.message:
Headers(headers).add_header('Content-type', 'text/plain')
start_response(self.status, headers)
if environ['REQUEST_METHOD'] == 'HEAD':
return [""]
else:
return [self.message]
class Cling(object):
"""A stupidly simple way to serve static content via WSGI.
Serve the file of the same path as PATH_INFO in self.datadir.
Look up the Content-type in self.content_types by extension
or use 'text/plain' if the extension is not found.
Serve up the contents of the file or delegate to self.not_found.
"""
block_size = 16 * 4096
index_file = 'index.html'
not_found = StatusApp('404 Not Found')
not_modified = StatusApp('304 Not Modified', "")
moved_permanently = StatusApp('301 Moved Permanently')
method_not_allowed = StatusApp('405 Method Not Allowed')
def __init__(self, root, **kw):
"""Just set the root and any other attribs passes via **kw."""
self.root = root
for k, v in kw.iteritems():
setattr(self, k, v)
def __call__(self, environ, start_response):
"""Respond to a request when called in the usual WSGI way."""
if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
headers = [('Allow', 'GET, HEAD')]
return self.method_not_allowed(environ, start_response, headers)
path_info = environ.get('PATH_INFO', '')
full_path = self._full_path(path_info)
if not self._is_under_root(full_path):
return self.not_found(environ, start_response)
if path.isdir(full_path):
            if full_path[-1] != '/' or full_path == self.root:
location = util.request_uri(environ, include_query=False) + '/'
if environ.get('QUERY_STRING'):
location += '?' + environ.get('QUERY_STRING')
headers = [('Location', location)]
return self.moved_permanently(environ, start_response, headers)
else:
full_path = self._full_path(path_info + self.index_file)
content_type = self._guess_type(full_path)
try:
etag, last_modified = self._conditions(full_path, environ)
headers = [('Date', rfc822.formatdate(time.time())),
('Last-Modified', last_modified),
('ETag', etag)]
if_modified = environ.get('HTTP_IF_MODIFIED_SINCE')
if if_modified and (rfc822.parsedate(if_modified)
>= rfc822.parsedate(last_modified)):
return self.not_modified(environ, start_response, headers)
if_none = environ.get('HTTP_IF_NONE_MATCH')
if if_none and (if_none == '*' or etag in if_none):
return self.not_modified(environ, start_response, headers)
file_like = self._file_like(full_path)
headers.append(('Content-Type', content_type))
start_response("200 OK", headers)
if environ['REQUEST_METHOD'] == 'GET':
return self._body(full_path, environ, file_like)
else:
return ['']
except (IOError, OSError), e:
print e
return self.not_found(environ, start_response)
def _full_path(self, path_info):
"""Return the full path from which to read."""
return self.root + path_info
def _is_under_root(self, full_path):
"""Guard against arbitrary file retrieval."""
if (path.abspath(full_path) + path.sep)\
.startswith(path.abspath(self.root) + path.sep):
return True
else:
return False
def _guess_type(self, full_path):
"""Guess the mime type using the mimetypes module."""
return mimetypes.guess_type(full_path)[0] or 'text/plain'
def _conditions(self, full_path, environ):
"""Return a tuple of etag, last_modified by mtime from stat."""
mtime = stat(full_path).st_mtime
return str(mtime), rfc822.formatdate(mtime)
def _file_like(self, full_path):
"""Return the appropriate file object."""
return open(full_path, 'rb')
def _body(self, full_path, environ, file_like):
"""Return an iterator over the body of the response."""
way_to_send = environ.get('wsgi.file_wrapper', iter_and_close)
return way_to_send(file_like, self.block_size)
def iter_and_close(file_like, block_size):
"""Yield file contents by block then close the file."""
while 1:
try:
block = file_like.read(block_size)
if block: yield block
else: raise StopIteration
except StopIteration, si:
file_like.close()
return
def cling_wrap(package_name, dir_name, **kw):
"""Return a Cling that serves from the given package and dir_name.
This uses pkg_resources.resource_filename which is not the
recommended way, since it extracts the files.
I think this works fine unless you have some _very_ serious
requirements for static content, in which case you probably
shouldn't be serving it through a WSGI app, IMHO. YMMV.
"""
resource = Requirement.parse(package_name)
return Cling(resource_filename(resource, dir_name), **kw)
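
# Hypothetical usage sketch (paths and package names below are illustrative):
#
#   app = Cling('/var/www/static')           # serve a directory
#   app = cling_wrap('mypackage', 'static')  # serve packaged data files
#
# Either object is a WSGI callable and can be mounted accordingly.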
def command():
parser = OptionParser(usage="%prog DIR [HOST][:][PORT]",
version="static 0.3.6")
options, args = parser.parse_args()
if len(args) in (1, 2):
if len(args) == 2:
parts = args[1].split(":")
if len(parts) == 1:
host = parts[0]
port = None
elif len(parts) == 2:
host, port = parts
else:
sys.exit("Invalid host:port specification.")
elif len(args) == 1:
host, port = None, None
if not host:
host = '0.0.0.0'
if not port:
port = 8888
try:
port = int(port)
except:
sys.exit("Invalid host:port specification.")
app = Cling(args[0])
try:
make_server(host, port, app).serve_forever()
except KeyboardInterrupt, ki:
print "Cio, baby!"
except:
sys.exit("Problem initializing server.")
else:
parser.print_help(sys.stderr)
sys.exit(1)
def test():
from wsgiref.validate import validator
app = Cling(getcwd())
try:
print "Serving " + getcwd() + " to http://localhost:8888"
make_server('0.0.0.0', 8888, validator(app)).serve_forever()
except KeyboardInterrupt, ki:
print ""
print "Ciao, baby!"
if __name__ == '__main__':
test()
|
designate-8.0.0/designate/storage/impl_sqlalchemy/migrate_repo/versions/083_change_managed_column_types.py | scottwedge/OpenStack-Stein | 145 | 11114511 | <reponame>scottwedge/OpenStack-Stein
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Switch managed_* column types from Unicode to String
Bug #276448
"""
from oslo_log import log as logging
from sqlalchemy.schema import MetaData, Table
from sqlalchemy import String
LOG = logging.getLogger()
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
records = Table('records', meta, autoload=True)
records.columns.managed_extra.alter(type=String(100))
records.columns.managed_plugin_type.alter(type=String(50))
records.columns.managed_plugin_name.alter(type=String(50))
records.columns.managed_resource_type.alter(type=String(50))
records.columns.managed_resource_region.alter(type=String(100))
records.columns.managed_tenant_id.alter(type=String(36))
|
fasterai/sparse/sparsify_callback.py | nathanhubens/fasterai | 191 | 11114515 | <filename>fasterai/sparse/sparsify_callback.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_sparsify_callback.ipynb (unless otherwise specified).
__all__ = ['SparsifyCallback']
# Cell
from fastai.vision.all import *
from fastai.callback.all import *
from .sparsifier import *
from .criteria import *
import torch
import torch.nn as nn
import torch.nn.functional as F
# Cell
class SparsifyCallback(Callback):
def __init__(self, end_sparsity, granularity, method, criteria, sched_func, start_sparsity=0, start_epoch=0, end_epoch=None, lth=False, rewind_epoch=0, reset_end=False):
store_attr()
self.current_sparsity, self.previous_sparsity = 0, 0
assert self.start_epoch>=self.rewind_epoch, 'You must rewind to an epoch before the start of the pruning process'
def before_fit(self):
print(f'Pruning of {self.granularity} until a sparsity of {self.end_sparsity}%')
self.end_epoch = self.n_epoch if self.end_epoch is None else self.end_epoch
assert self.end_epoch <= self.n_epoch, 'Your end_epoch must be smaller than total number of epoch'
self.sparsifier = Sparsifier(self.learn.model, self.granularity, self.method, self.criteria)
self.n_batches = math.floor(len(self.learn.dls.dataset)/self.learn.dls.bs)
self.total_iters = self.end_epoch * self.n_batches
self.start_iter = self.start_epoch * self.n_batches
def before_epoch(self):
if self.epoch == self.rewind_epoch:
print(f'Saving Weights at epoch {self.epoch}')
self.sparsifier._save_weights()
def before_batch(self):
if self.epoch>=self.start_epoch:
if self.epoch < self.end_epoch: self._set_sparsity()
self.sparsifier.prune_model(self.current_sparsity)
if self.lth and self.current_sparsity!=self.previous_sparsity: # If sparsity has changed, the network has been pruned
print(f'Resetting Weights to their epoch {self.rewind_epoch} values')
self.sparsifier._reset_weights()
self.previous_sparsity = self.current_sparsity
def before_step(self):
if self.epoch>=self.start_epoch:
self.sparsifier._mask_grad()
def after_epoch(self):
print(f'Sparsity at the end of epoch {self.epoch}: {self.current_sparsity:.2f}%')
def after_fit(self):
print(f'Final Sparsity: {self.current_sparsity:.2f}')
if self.reset_end:
self.sparsifier._reset_weights()
self.sparsifier._clean_buffers() # Remove buffers at the end of training
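
    # Note on sched_func: any callable with the signature f(start, end, pos)
    # works in _set_sparsity below, where pos runs 0 -> 1 over the pruning
    # window. A minimal linear ramp (an illustrative assumption; fastai also
    # ships ready-made annealing schedules):
    #
    #   def sched_linear(start, end, pos): return start + pos * (end - start)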
def _set_sparsity(self):
self.current_sparsity = self.sched_func(start=self.start_sparsity, end=self.end_sparsity, pos=(self.train_iter-self.start_iter)/(self.total_iters-self.start_iter)) |
PythonToolbox/quantconnect/Result.py | BlackBoxAM/Lean | 6,580 | 11114537 | <reponame>BlackBoxAM/Lean<filename>PythonToolbox/quantconnect/Result.py
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from math import isnan
from datetime import datetime
class Result:
'''Result represents the live or backtest result of a successfully executed algorithm'''
def __init__(self, json):
'''Creates a new instance of Result'''
tag = 'result'
# LiveResults special case:
self.LiveMode = 'LiveResults' in json
if self.LiveMode:
tag += 's'
json = json.pop('LiveResults', json)
result = json.pop(tag, json)
self.Statistics = Information(result.pop('Statistics', {}))
self.AlphaRuntimeStatistics = Information(result.pop('AlphaRuntimeStatistics', {}))
self.RuntimeStatistics = Information(result.pop('RuntimeStatistics', {}))
self.ClosedTrades = self.__create_closed_trades_table(result)
self.Charts = self.__create_charts_table(result)
self.ProfitLoss = self.__create_profit_loss_table(result)
self.Orders = self.__create_order_table(result)
self.RollingWindow = self.__create_rolling_window_table(result)
self.Information = Information(json)
def __create_order_table(self, json):
'''Creates a dataframe with the orders information'''
orders = json.pop('Orders', None)
if orders is None: return None
# In Live results, orders is a list, so convert to dict keyed by Id.
if isinstance(orders, list):
orders = {x['Id']: x for x in orders}
def __status_int_to_str(value):
if value is None: return None
values = [ 'New', 'Submitted', 'PartiallyFilled', 'Filled', 'Canceled', 'None', 'Invalid', 'CancelPending' ]
return str(values) if value >= len(values) else values[value]
def __security_type_int_to_str(value):
if value is None: return None
values = [ 'Base', 'Equity', 'Option', 'Commodity', 'Forex', 'Future', 'Cfd', 'Crypto' ]
return str(values) if value >= len(values) else values[value]
def __type_int_to_str(value):
if value is None: return None
values = [ 'Market', 'Limit', 'StopMarket', 'StopLimit', 'MarketOnOpen', 'MarketOnClose', 'OptionExercise' ]
return str(values) if value >= len(values) else values[value]
columns = [
'Id', 'Time', 'SecurityType', 'Symbol', 'PriceCurrency',
'Quantity', 'Direction', 'Price', 'Type', 'Status', 'Tag',
'LastFillTime', 'LastUpdateTime', 'CanceledTime' ]
if self.LiveMode:
columns += ['DeployId']
drop_columns = [
'BrokerId', 'ContingentId', 'CreatedTime', 'IsMarketable', 'Value',
'AbsoluteQuantity', 'OrderSubmissionData', 'Properties', 'TimeInForce']
df = pd.DataFrame([v for k, v in orders.items()], columns = columns + drop_columns)
df = df.set_index('Id').drop(drop_columns, axis=1)
df['Time'] = df['Time'].apply(self.__str_to_datetime)
df['CanceledTime'] = df['CanceledTime'].apply(self.__str_to_datetime)
df['LastFillTime'] = df['LastFillTime'].apply(self.__str_to_datetime)
df['LastUpdateTime'] = df['LastUpdateTime'].apply(self.__str_to_datetime)
df['Symbol'] = df['Symbol'].apply(lambda x: x['ID'])
df['Type'] = df['Type'].apply(__type_int_to_str)
df['Direction'] = df['Direction'].apply(self.__direction_int_to_str)
df['Status'] = df['Status'].apply(__status_int_to_str)
df['SecurityType'] = df['SecurityType'].apply(__security_type_int_to_str)
return df.dropna(how='all', axis=1)
def __create_profit_loss_table(self, json):
'''Creates a dataframe with the algorithm P&L'''
profitLoss = json.pop('ProfitLoss', None)
if profitLoss is None: return None
df = pd.DataFrame({'profit_loss' : profitLoss})
df.index.name = 'time'
df.index = df.index.map(self.__str_to_datetime)
return df
def __create_closed_trades_table(self, json):
'''Creates a dataframe with the closed trades information'''
total = json.get('TotalPerformance', None)
if total is None: return None
trades = total.get('ClosedTrades', None)
if trades is None: return None
df = pd.DataFrame(trades, columns = [
'Symbol', 'Quantity', 'Direction', 'EntryTime', 'EntryPrice',
'ExitPrice', 'ExitTime', 'Duration', 'EndTradeDrawdown',
'MAE', 'MFE', 'ProfitLoss', 'TotalFees'
])
df['Symbol'] = df['Symbol'].apply(lambda x: x['ID'])
df['Direction'] = df['Direction'].apply(self.__direction_int_to_str)
df['EntryTime'] = df['EntryTime'].apply(self.__str_to_datetime)
df['ExitTime'] = df['ExitTime'].apply(self.__str_to_datetime)
df['Duration'] = df['ExitTime'] - df['EntryTime']
return df.set_index('EntryTime')
def __create_charts_table(self, json):
'''Creates a dataframe with the charts information.
By converting the json into a dataframe, it makes data visualization easier'''
charts = json.pop('Charts', None)
if charts is None: return None
df_charts = dict()
for name, chart in charts.items():
# Skip Meta data
if name == 'Meta': continue
columns = list()
for column, series in chart['Series'].items():
df = pd.DataFrame(series['Values'])
df['x'] = pd.to_datetime(df['x'], unit='s')
df = df.rename(index=str, columns={"x": "time", "y": column})
columns.append(df.set_index('time'))
if len(columns) > 1:
df = pd.concat(columns, axis = 1, sort = True)
df = df.fillna(method = 'ffill')
df = df.fillna(method = 'bfill')
df_charts[name] = df
return df_charts
def __create_rolling_window_table(self, json):
'''Creates a dataframe with the rolling statistics information.
By converting the json into a dataframe, it makes data visualization easier'''
rollingWindow = json.pop('RollingWindow', None)
if rollingWindow is None: return None
series = dict()
if 'TotalPerformance' in json:
window = json['TotalPerformance']
if window is None: window = dict()
stats = window.get('PortfolioStatistics', dict())
stats.update(window.get('TradeStatistics', dict()))
series = {'TotalPerformance': pd.Series(stats)}
for row, window in rollingWindow.items():
stats = window.get('PortfolioStatistics', dict())
stats.update(window.get('TradeStatistics', dict()))
series.update({row: pd.Series(stats)})
return pd.DataFrame(series).transpose()
def __direction_int_to_str(self, value):
if value is None: return None
return [ 'Buy', 'Sell', 'Hold' ][value]
def __str_to_datetime(self, value):
if value is None: return None
if isinstance(value, float) and isnan(value): return None
fmt = '%Y-%m-%dT%H:%M:%SZ' if len(value) == 20 else '%Y-%m-%dT%H:%M:%S.%fZ'
return datetime.strptime(value, fmt)
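
# Illustrative inputs for __str_to_datetime (hypothetical values):
#   '2021-01-01T00:00:00Z'        -> length 20, parsed without fractional seconds
#   '2021-01-01T00:00:00.123456Z' -> parsed with the '%f' fractional format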
class Information(dict):
def __init__(self, d):
d = d if d is not None else {}
super().__init__(d)
self.__repr = ''
for k, b in d.items():
a = k.replace(' ','').replace('-','')
if isinstance(b, (list, tuple)):
setattr(self, a, [Information(x) if isinstance(x, dict) else x for x in b])
elif isinstance(b, dict):
x = Information(b)
setattr(self, a, x)
s = '\n'.join([f' {l}' for l in repr(x).splitlines()])
self.__repr += f'{a}:\n{s}\n'
else:
setattr(self, a, b)
self.__repr += f'{a}: {b}\n'
def __repr__(self):
return self.__repr |
myia/operations/prim_shape.py | strint/myia | 222 | 11114543 | <reponame>strint/myia
"""Definitions for the primitive `shape`."""
from .. import xtype
from ..lib import (
TYPE,
VALUE,
AbstractArray,
AbstractScalar,
AbstractTuple,
bprop_to_grad_transform,
force_pending,
standard_prim,
)
from ..operations import zeros_like
from . import primitives as P
def pyimpl_shape(array):
"""Implement `shape`."""
return array.shape
@standard_prim(P.shape)
async def infer_shape(self, engine, a: AbstractArray):
"""Infer the return type of primitive `shape`."""
shp = await force_pending(a.xshape())
values = [
AbstractScalar({VALUE: entry, TYPE: xtype.UInt[64]}) for entry in shp
]
return AbstractTuple(values)
@bprop_to_grad_transform(P.shape)
def bprop_shape(arr, out, dout):
"""Backpropagator for primitive `shape`."""
return (zeros_like(arr),)
__operation_defaults__ = {
"name": "shape",
"registered_name": "shape",
"mapping": P.shape,
"python_implementation": pyimpl_shape,
}
__primitive_defaults__ = {
"name": "shape",
"registered_name": "shape",
"type": "backend",
"python_implementation": pyimpl_shape,
"inferrer_constructor": infer_shape,
"grad_transform": bprop_shape,
}
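
# Illustrative call (assumption: a NumPy-style array with a .shape attribute):
#   import numpy as np
#   pyimpl_shape(np.zeros((2, 3)))   # -> (2, 3)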
|
koku/masu/test/external/downloader/azure/test_azure_services.py | rubik-ai/koku | 157 | 11114559 | <reponame>rubik-ai/koku
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the AzureService object."""
from datetime import datetime
from unittest.mock import Mock
from unittest.mock import patch
from unittest.mock import PropertyMock
from adal.adal_error import AdalError
from azure.common import AzureException
from azure.core.exceptions import HttpResponseError
from azure.storage.blob import BlobClient
from azure.storage.blob import BlobServiceClient
from azure.storage.blob import ContainerClient
from dateutil.relativedelta import relativedelta
from faker import Faker
from masu.external.downloader.azure.azure_service import AzureCostReportNotFound
from masu.external.downloader.azure.azure_service import AzureService
from masu.external.downloader.azure.azure_service import AzureServiceError
from masu.test import MasuTestCase
from providers.azure.client import AzureClientFactory
FAKE = Faker()
def throw_azure_exception(scope):
"""Raises azure exception."""
raise AzureException()
def throw_azure_http_error(scope):
"""Raises azure http error."""
raise HttpResponseError()
def throw_azure_http_error_403(scope):
"""Raises azure http error."""
error = HttpResponseError()
error.status_code = 403
raise error
class AzureServiceTest(MasuTestCase):
"""Test Cases for the AzureService object."""
def setUp(self):
"""Set up each test."""
super().setUp()
self.subscription_id = FAKE.uuid4()
self.tenant_id = FAKE.uuid4()
self.client_id = FAKE.uuid4()
self.client_secret = FAKE.word()
self.resource_group_name = FAKE.word()
self.storage_account_name = FAKE.word()
self.container_name = FAKE.word()
self.current_date_time = datetime.today()
self.export_directory = FAKE.word()
def get_mock_client(self, blob_list=[], cost_exports=[]):
"""Generate an AzureService instance with mocked AzureClientFactory.
Args:
blob_list (list<Mock>) A list of Mock objects.
The blob_list Mock objects must have these attributes:
- name
cost_exports (list<Mock>) A list of Mock objects.
The cost_exports Mock objects must have these
attributes:
- name
- delivery_info.destination.container
- delivery_info.destination.root_folder_path
- delivery_info.destination.resource_id
Returns:
(AzureService) An instance of AzureService with mocked AzureClientFactory
"""
fake_data = FAKE.binary(length=1024 * 64)
client = None
with patch(
"masu.external.downloader.azure.azure_service.AzureClientFactory", spec=AzureClientFactory
) as mock_factory:
mock_factory.return_value = Mock( # AzureClientFactory()
spec=AzureClientFactory,
cloud_storage_account=Mock(
return_value=Mock( # .cloud_storage_account()
spec=BlobServiceClient,
get_blob_client=Mock(
return_value=Mock( # .get_blob_client()
spec=BlobClient,
# .download_blob().readall()
download_blob=Mock(return_value=Mock(readall=Mock(return_value=fake_data))),
)
),
get_container_client=Mock(
# .get_container_client().list_blobs()
return_value=Mock(spec=ContainerClient, list_blobs=Mock(return_value=blob_list))
),
)
),
# .cost_management_client.exports.list().value
cost_management_client=Mock(exports=Mock(list=Mock(return_value=Mock(value=cost_exports)))),
# .subscription_id
subscription_id=self.subscription_id,
)
client = AzureService(
self.tenant_id,
self.client_id,
self.client_secret,
self.resource_group_name,
self.storage_account_name,
self.subscription_id,
)
return client
def test_initializer(self):
"""Test the AzureService initializer."""
svc = self.get_mock_client()
self.assertIsInstance(svc, AzureService)
@patch("masu.external.downloader.azure.azure_service.AzureClientFactory")
def test_init_no_subscription_id(self, mock_factory):
"""Test that exception is raized with no subscription id provided."""
class MockAzureFactory:
subscription_id = None
factory = MockAzureFactory()
mock_factory.return_value = factory
with self.assertRaises(AzureServiceError):
AzureService(
self.tenant_id, self.client_id, self.client_secret, self.resource_group_name, self.storage_account_name
)
def test_get_cost_export_for_key(self):
"""Test that a cost export is retrieved by a key."""
today = self.current_date_time
yesterday = today - relativedelta(days=1)
test_matrix = [
{"key": "{}_{}_day_{}".format(self.container_name, "blob", today.day), "expected_date": today.date()},
{
"key": "{}_{}_day_{}".format(self.container_name, "blob", yesterday.day),
"expected_date": yesterday.date(),
},
]
for test in test_matrix:
key = test.get("key")
expected_modified_date = test.get("expected_date")
mock_blob = Mock(last_modified=Mock(date=Mock(return_value=expected_modified_date)))
name_attr = PropertyMock(return_value=key)
type(mock_blob).name = name_attr # kludge to set name attribute on Mock
svc = self.get_mock_client(blob_list=[mock_blob])
cost_export = svc.get_cost_export_for_key(key, self.container_name)
self.assertIsNotNone(cost_export)
self.assertEquals(cost_export.name, key)
self.assertEquals(cost_export.last_modified.date(), expected_modified_date)
def test_get_cost_export_for_missing_key(self):
"""Test that a cost export is not retrieved by an incorrect key."""
key = "{}_{}_wrong".format(self.container_name, "blob")
mock_blob = Mock()
name_attr = PropertyMock(return_value=FAKE.word())
type(mock_blob).name = name_attr # kludge to set name attribute on Mock
svc = self.get_mock_client(blob_list=[mock_blob])
with self.assertRaises(AzureCostReportNotFound):
svc.get_cost_export_for_key(key, self.container_name)
def test_get_latest_cost_export_for_path(self):
"""Test that the latest cost export is returned for a given path."""
report_path = "{}_{}".format(self.container_name, "blob")
mock_blob = Mock(last_modified=Mock(date=Mock(return_value=self.current_date_time.date())))
name_attr = PropertyMock(return_value=report_path)
type(mock_blob).name = name_attr # kludge to set name attribute on Mock
svc = self.get_mock_client(blob_list=[mock_blob])
cost_export = svc.get_latest_cost_export_for_path(report_path, self.container_name)
        self.assertEqual(cost_export.last_modified.date(), self.current_date_time.date())
def test_get_latest_cost_export_for_path_missing(self):
"""Test that the no cost export is returned for a missing path."""
report_path = FAKE.word()
svc = self.get_mock_client()
with self.assertRaises(AzureCostReportNotFound):
svc.get_latest_cost_export_for_path(report_path, self.container_name)
def test_describe_cost_management_exports(self):
"""Test that cost management exports are returned for the account."""
resource_id = (
f"/subscriptions/{self.subscription_id}/resourceGroups/"
f"{self.resource_group_name}/providers/Microsoft.Storage/"
f"storageAccounts/{self.storage_account_name}"
)
mock_export = Mock(
delivery_info=Mock(
destination=Mock(
container=self.container_name, root_folder_path=self.export_directory, resource_id=resource_id
)
)
)
name_attr = PropertyMock(return_value=f"{self.container_name}_blob")
type(mock_export).name = name_attr # kludge to set name attribute on Mock
svc = self.get_mock_client(cost_exports=[mock_export])
exports = svc.describe_cost_management_exports()
        self.assertEqual(len(exports), 1)
for export in exports:
self.assertEquals(export.get("container"), self.container_name)
self.assertEquals(export.get("directory"), self.export_directory)
self.assertIn("{}_{}".format(self.container_name, "blob"), export.get("name"))
def test_get_latest_cost_export_http_error(self):
"""Test that the latest cost export catches the error for Azure HttpError."""
report_path = "{}_{}".format(self.container_name, "blob")
mock_blob = Mock(last_modified=Mock(date=Mock(return_value=self.current_date_time.date())))
name_attr = PropertyMock(return_value=report_path)
type(mock_blob).name = name_attr # kludge to set name attribute on Mock
svc = self.get_mock_client(blob_list=[mock_blob])
svc._cloud_storage_account.get_container_client.side_effect = throw_azure_http_error
with self.assertRaises(AzureCostReportNotFound):
svc.get_latest_cost_export_for_path(report_path, self.container_name)
def test_get_latest_cost_export_http_error_403(self):
"""Test that the latest cost export catches the error for Azure HttpError 403."""
report_path = "{}_{}".format(self.container_name, "blob")
mock_blob = Mock(last_modified=Mock(date=Mock(return_value=self.current_date_time.date())))
name_attr = PropertyMock(return_value=report_path)
type(mock_blob).name = name_attr # kludge to set name attribute on Mock
svc = self.get_mock_client(blob_list=[mock_blob])
svc._cloud_storage_account.get_container_client.side_effect = throw_azure_http_error_403
with self.assertRaises(AzureCostReportNotFound):
svc.get_latest_cost_export_for_path(report_path, self.container_name)
def test_get_latest_cost_export_no_container(self):
"""Test that the latest cost export catches the error for no container."""
report_path = "blob"
container_name = None
mock_blob = Mock(last_modified=Mock(date=Mock(return_value=self.current_date_time.date())))
name_attr = PropertyMock(return_value=report_path)
type(mock_blob).name = name_attr # kludge to set name attribute on Mock
svc = self.get_mock_client(blob_list=[mock_blob])
with self.assertRaises(AzureCostReportNotFound):
svc.get_latest_cost_export_for_path(report_path, container_name)
def test_describe_cost_management_exports_wrong_account(self):
"""Test that cost management exports are not returned from incorrect account."""
resource_id = (
f"/subscriptions/{FAKE.uuid4()}/resourceGroups/"
f"{self.resource_group_name}/providers/Microsoft.Storage/"
f"storageAccounts/{self.storage_account_name}"
)
mock_export = Mock(
delivery_info=Mock(
destination=Mock(
container=self.container_name, root_folder_path=self.export_directory, resource_id=resource_id
)
)
)
name_attr = PropertyMock(return_value=f"{self.container_name}_blob")
type(mock_export).name = name_attr # kludge to set name attribute on Mock
svc = self.get_mock_client(cost_exports=[mock_export])
exports = svc.describe_cost_management_exports()
        self.assertEqual(exports, [])
def test_describe_cost_management_exports_no_auth(self):
"""Test that cost management exports are not returned from incorrect account."""
svc = self.get_mock_client(cost_exports=[Mock()])
svc._factory.cost_management_client.exports.list.side_effect = throw_azure_exception
with self.assertRaises(AzureCostReportNotFound):
svc.describe_cost_management_exports()
def test_download_cost_export(self):
"""Test that cost management exports are downloaded."""
key = "{}_{}_day_{}".format(self.container_name, "blob", self.current_date_time.day)
mock_blob = Mock()
name_attr = PropertyMock(return_value=key)
type(mock_blob).name = name_attr # kludge to set name attribute on Mock
client = self.get_mock_client(blob_list=[mock_blob])
file_path = client.download_cost_export(key, self.container_name)
self.assertTrue(file_path.endswith(".csv"))
@patch("masu.external.downloader.azure.azure_service.AzureClientFactory", spec=AzureClientFactory)
def test_get_cost_export_for_key_exception(self, mock_factory):
"""Test that function handles a raised exception."""
mock_factory.return_value = Mock(
spec=AzureClientFactory,
cloud_storage_account=Mock(
return_value=Mock(
spec=BlobServiceClient,
get_container_client=Mock(
return_value=Mock(spec=ContainerClient, list_blobs=Mock(side_effect=AdalError("test error")))
),
)
),
)
with self.assertRaises(AzureServiceError):
service = AzureService(
self.tenant_id, self.client_id, self.client_secret, self.resource_group_name, self.storage_account_name
)
service.get_cost_export_for_key(key=FAKE.word(), container_name=FAKE.word())
@patch("masu.external.downloader.azure.azure_service.AzureClientFactory", spec=AzureClientFactory)
def test_download_cost_report_exception(self, mock_factory):
"""Test that function handles a raised exception."""
key = FAKE.word()
mock_blob = Mock(last_modified=Mock(date=Mock(return_value=datetime.now())))
name_attr = PropertyMock(return_value=key)
type(mock_blob).name = name_attr # kludge to set name attribute on Mock
mock_factory.return_value = Mock(
spec=AzureClientFactory,
cloud_storage_account=Mock(
return_value=Mock(
spec=BlobServiceClient,
get_blob_client=Mock(side_effect=AdalError("test error")),
get_container_client=Mock(
return_value=Mock(spec=ContainerClient, list_blobs=Mock(return_value=[mock_blob]))
),
)
),
)
with self.assertRaises(AzureServiceError):
service = AzureService(
self.tenant_id, self.client_id, self.client_secret, self.resource_group_name, self.storage_account_name
)
service.download_cost_export(key=key, container_name=FAKE.word())
@patch("masu.external.downloader.azure.azure_service.AzureClientFactory", spec=AzureClientFactory)
def test_get_latest_cost_export_for_path_exception(self, mock_factory):
"""Test that function handles a raised exception."""
mock_factory.return_value = Mock(
spec=AzureClientFactory,
cloud_storage_account=Mock(
return_value=Mock(
spec=BlobServiceClient,
get_container_client=Mock(
return_value=Mock(spec=ContainerClient, list_blobs=Mock(side_effect=AdalError("test error")))
),
)
),
)
with self.assertRaises(AzureServiceError):
service = AzureService(
self.tenant_id, self.client_id, self.client_secret, self.resource_group_name, self.storage_account_name
)
service.get_latest_cost_export_for_path(report_path=FAKE.word(), container_name=FAKE.word())
|
examples/language-model/make_data.py | greedyuser/kur | 867 | 11114616 | """
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from vocab import *
import json
import os
if not os.path.exists('./data/'):
os.mkdir('./data/')
def one_hot(v, ndim):
v_one_hot = np.zeros(
(len(v), ndim,)
)
for i in range(len(v)):
v_one_hot[i][v[i]] = 1.0
return v_one_hot
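# A quick sanity check of one_hot (illustrative values, not part of the
# original pipeline): each row has exactly one 1.0 at the given index.
_demo = one_hot([0, 2], 4)
assert _demo.shape == (2, 4)
assert _demo[0][0] == 1.0 and _demo[1][2] == 1.0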
x = []
y = []
all_chars = []
for book in [
'pride_and_prejudice.txt',
'shakespeare.txt'
]:
with open('books/%s' % book, 'r') as infile:
chars = [
c for c in ' '.join(infile.read().lower().split())
if c in set(vocab)
]
all_chars += [' ']
all_chars += chars
all_chars = list(' '.join(''.join(all_chars).split()))
num_chars = len(all_chars)
with open('cleaned.txt', 'w') as outfile:
outfile.write(''.join(all_chars))
x, y = [], []
data_portions = [
('train', 0.8),
('validate', 0.05),
('test', 0.05),
('evaluate', 0.05),
]
dev = False
if dev:
# shrink data to make things go faster
for i in range(len(data_portions)):
data_portions[i] = (
data_portions[i][0],
data_portions[i][1] * 0.1
)
max_i = sum([
int(round(len(all_chars) * fraction))
for name, fraction in data_portions
]) - seq_len
for i in range(max_i):
in_char_seq = all_chars[i: i + seq_len]
# one hot representation
sample_x = np.zeros((len(in_char_seq), n_vocab,))
for j, c in enumerate(in_char_seq):
sample_x[j][char_to_int[c]] = 1
x.append(sample_x)
sample_y = np.zeros(n_vocab)
sample_y[char_to_int[all_chars[i + seq_len]]] = 1
y.append(sample_y)
x, y = np.array(x).astype('int32'), np.array(y).astype('int32')
start_i = 0
for name, fraction in data_portions:
end_i = start_i + int(round(len(x) * fraction))
print(start_i, end_i)
x0 = x[start_i: end_i]
y0 = y[start_i: end_i]
print('dims:')
print(x0.shape)
print(y0.shape)
start_i = end_i
with open('data/%s.jsonl' % name, 'w') as outfile:
for sample_x, sample_y in zip(x0, y0):
outfile.write(json.dumps({
'in_seq': sample_x.tolist(),
'out_char': sample_y.tolist()
}))
outfile.write('\n')
del x0, y0
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/testing/loader.py | jeikabu/lumberyard | 1,738 | 11114623 | <filename>dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/testing/loader.py
import numba.unittest_support as unittest
from unittest import loader, case
from os.path import isdir, isfile, join, dirname, basename
class TestLoader(loader.TestLoader):
def __init__(self, topleveldir=None):
super(TestLoader, self).__init__()
self._top_level_dir = topleveldir or dirname(dirname(dirname(__file__)))
def _find_tests(self, start_dir, pattern, namespace=False):
# Upstream doesn't look for 'load_tests' in start_dir.
if isdir(start_dir) and not namespace and isfile(join(start_dir, '__init__.py')):
name = self._get_name_from_path(start_dir)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package)
if load_tests is not None:
try:
yield load_tests(self, tests, pattern)
except Exception as e:
yield loader._make_failed_load_tests(package.__name__, e, self.suiteClass)
else:
for t in super(TestLoader, self)._find_tests(start_dir, pattern):
yield t
|
tests/nnapi/specs/V1_2/sin_1D_float_nnfw.mod.py | periannath/ONE | 255 | 11114640 | # model
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{4}") #A vector of inputs
i2 = Output("op2", "TENSOR_FLOAT32", "{4}") #A vector of outputs
model = model.Operation("SIN", i1).To(i2)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[2.0, 90.0, 1.0, 0.012]}
output0 = {i2: # output 0
[0.909297427, 0.893996664, 0.841470985, 0.011999712]}
# Instantiate an example
Example((input0, output0))
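# Cross-check of the expected outputs (illustrative, outside the spec DSL):
# >>> import numpy as np
# >>> np.sin([2.0, 90.0, 1.0, 0.012])
# array([0.90929743, 0.89399666, 0.84147098, 0.01199971])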
|
Algo and DSA/LeetCode-Solutions-master/Python/insufficient-nodes-in-root-to-leaf-paths.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 11114661 | <gh_stars>1000+
# Time: O(n)
# Space: O(h)
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def sufficientSubset(self, root, limit):
"""
:type root: TreeNode
:type limit: int
:rtype: TreeNode
"""
if not root:
return None
if not root.left and not root.right:
return None if root.val < limit else root
root.left = self.sufficientSubset(root.left, limit-root.val)
root.right = self.sufficientSubset(root.right, limit-root.val)
if not root.left and not root.right:
return None
return root
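# A minimal sketch of the pruning behaviour (hypothetical tree, not part of
# the original solution): with limit=10, the 5->4 leaf path sums to 9 and is
# pruned, while the 5->8 path sums to 13 and survives.
if __name__ == "__main__":
    root = TreeNode(5)
    root.left, root.right = TreeNode(4), TreeNode(8)
    pruned = Solution().sufficientSubset(root, 10)
    assert pruned.left is None and pruned.right.val == 8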
|
visualization/evanescent/createsample.py | mika314/simuleios | 197 | 11114687 | #-------------createsample.py--------------------------------------------------#
#
# create sample.py
#
# Purpose: To create sample blender data for testing voxel stuff with.
#
#------------------------------------------------------------------------------#
#import bpy
import numpy as np
import struct
# Files and data and such
vfile = open("sample.raw",'wb')
n = 64
vdata = [[[0 for k in range(3*n)] for j in range(3*n)] for i in range(3*n)]
# creating initial gradient for viewing. 0 to 255 at x,y,z = box_length
for q in range(1,4):
for i in range(64):
for j in range(64):
for k in range(64):
vdata[i*q][j*q][k*q] = (q-1) * 0.33 * 255
#vdata[i*q][j*q][k*q] = (0.11*(q-1)*(i/64) + 0.11*(q-1)*(j/64) + 0.11*(q-1)*(k/64))*255
# function to write data to .raw file for blender
# note: the density must be an integer between 0 and 255
def voxel_gen(vdata, vfile, ii):
for q in range(1,4):
for i in range(ii):
for j in range(ii):
for k in range(ii):
vfile.write(struct.pack('B',abs(int(vdata[i*q][j*q][k*q]))))
vfile.flush()
vfile.close()
voxel_gen(vdata, vfile, 64)
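# A quick read-back check (an assumption, not part of the original script):
# voxel_gen packs 3 * 64**3 unsigned bytes, which numpy can reload directly.
check = np.fromfile("sample.raw", dtype=np.uint8)
assert check.size == 3 * 64 ** 3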
|
transistor/browsers/splash_browser.py | awesome-archive/transistor | 232 | 11114781 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
transistor.browsers.splash_browser
~~~~~~~~~~~~
This module implements the SplashBrowser class.
Splash browser is a subclass of mechanicalsoup.StatefulBrowser which
adds a few new methods and overrides most existing methods to make it work
with Splash and/or Splash + Crawlera.
It is important that the lua script be formatted with below:
If using Splash + Crawlera:
local entries = splash:history()
local last_response = entries[#entries].response
return {
url = splash:url(),
headers = last_response.headers,
http_status = last_response.status,
cookies = splash:get_cookies(),
html = splash:html(),
har=splash:har(),
png=splash:png()
}
If only using Splash:
return {
url = splash:url(),
cookies = splash:get_cookies(),
html = splash:html(),
har=splash:har(),
png=splash:png()
}
Except, it is not mandatory to return the har, cookies, or png.
:copyright: Copyright (C) 2018 by BOM Quote Limited
:license: The MIT License, see LICENSE for more details.
~~~~~~~~~~~~
"""
import bs4
import sys
import random
import gevent
from requests import Response
from requests.exceptions import Timeout
from mechanicalsoup.stateful_browser import _BrowserState, StatefulBrowser
from mechanicalsoup.utils import LinkNotFoundError
from mechanicalsoup.form import Form
from transistor.utility.utils import obsolete_setter
from transistor.browsers.mixin import SplashBrowserMixin
class SplashBrowser(StatefulBrowser, SplashBrowserMixin):
"""
Make a few changes and overrides to enable a mechanicalsoup StatefulBrowser to
work with Splash alone or Splash + Crawlera.
    Note: `select_form`, `submit` and `submit_selected` need to be refactored
    to work with Splash; they are broken/not useful at the moment.
"""
retry = 0
def __init__(self, *args, **kwargs):
self._set_raw_content(content=b'')
self._set_status(status='')
self.__state = _BrowserState()
self._test_true = False
self.timeout_exception = False
self.flags = kwargs.pop('flags', None)
self.priority = kwargs.pop('priority', 0)
if kwargs.pop('meta', None):
self._meta = dict(kwargs.pop('meta'))
else:
self._meta = None
self.callback = None
self.errback = None
super().__init__(*args, **kwargs)
@property
def meta(self):
if self._meta is None:
self._meta = {}
return self._meta
def _get_raw_content(self):
return self._content
def _set_raw_content(self, content):
if content is None:
self._content = b''
elif not isinstance(content, bytes):
raise TypeError(
"Response content must be bytes.")
else:
self._content = content
raw_content = property(_get_raw_content, obsolete_setter(_set_raw_content, 'raw_content'))
def _get_status(self):
return self._status
def _set_status(self, status):
if status is None:
self._status = ''
else:
self._status = status
# this is the status code received from Splash service (NOT THE WEBSITE ENDPOINT)
status = property(_get_status, obsolete_setter(_set_status, 'status'))
def get_current_form(self):
"""Get the currently selected form as a :class:`Form` object.
See :func:`select_form`.
"""
return self.__state.form
def get_current_page(self):
"""Get the current page as a soup object."""
return self.__state.page
def get_current_url(self):
"""Get the current url as a soup object. """
return self.__state.url
def get_current_request(self):
"""Get the last sent python-requests <PreparedRequest[POST]> object."""
return self.__state.request
def _update_state(self, response):
"""Dry up the setters from http post and get methods."""
self._set_raw_content(response.content)
self._set_status(response.status_code)
self._add_soup(response, self.soup_config)
self.__state = _BrowserState(page=response.soup,
url=response.url,
request=response.request)
def open(self, url, *args, **kwargs):
"""Open the URL and store the Browser's state in this object.
All arguments are forwarded to :func:`SplashCrawleraBrowser.stateful_post`.
:return: Forwarded from :func:`Browser.stateful_post`.
"""
if self.get_verbose() == 1:
sys.stdout.write('.')
sys.stdout.flush()
elif self.get_verbose() >= 2:
print(url)
resp = self.stateful_post(url, *args, **kwargs)
return resp
def open_fake_page(self, page_text, status_code=None, url=None, soup_config=None):
"""Mock version of :func:`open`.
Behave as if opening a page whose text is ``page_text``, but do not
perform any network access. If ``url`` is set, pretend it is the page's
URL. Useful mainly for testing.
"""
soup_config = soup_config or self.soup_config
self._test_true = True
self._set_raw_content(page_text.encode())
self._set_status(status_code)
self.__state = _BrowserState(
page=bs4.BeautifulSoup(page_text, **soup_config),
url=url)
def refresh(self):
"""Reload the current page with the same request as originally done.
Any change (`select_form`, or any value filled-in in the form) made to
the current page before refresh is discarded.
:raise ValueError: Raised if no refreshable page is loaded, e.g., when
using the shallow ``Browser`` wrapper functions.
:return: Response of the request."""
old_request = self.__state.request
if old_request is None:
raise ValueError('The current page is not refreshable. Either no '
'page is opened or low-level browser methods '
'were used to do so.')
resp = self.session.send(old_request)
self._update_state(resp)
return resp
def select_form(self, selector="form", nr=0):
"""Select a form in the current page.
:param selector: CSS selector or a bs4.element.Tag object to identify
the form to select.
If not specified, ``selector`` defaults to "form", which is
useful if, e.g., there is only one form on the page.
For ``selector`` syntax, see the `.select() method in BeautifulSoup
<https://www.crummy.com/software/BeautifulSoup/bs4/doc/#css-selectors>`__.
:param nr: A zero-based index specifying which form among those that
match ``selector`` will be selected. Useful when one or more forms
have the same attributes as the form you want to select, and its
position on the page is the only way to uniquely identify it.
Default is the first matching form (``nr=0``).
:return: The selected form as a soup object. It can also be
retrieved later with :func:`get_current_form`.
"""
if isinstance(selector, bs4.element.Tag):
if selector.name != "form":
raise LinkNotFoundError
self.__state.form = Form(selector)
else:
# nr is a 0-based index for consistency with mechanize
found_forms = self.get_current_page().select(selector,
limit=nr + 1)
if len(found_forms) != nr + 1:
                # self.__debug would be name-mangled to a nonexistent
                # attribute on this subclass, so use the parent accessor
                if self.get_debug():
print('select_form failed for', selector)
self.launch_browser()
raise LinkNotFoundError()
self.__state.form = Form(found_forms[-1])
return self.get_current_form()
def submit(self, form, url=None, **kwargs):
"""
Prepares and sends a form request.
NOTE: To submit a form with a :class:`StatefulBrowser` instance, it is
recommended to use :func:`StatefulBrowser.submit_selected` instead of
this method so that the browser state is correctly updated.
:param form: The filled-out form.
:param url: URL of the page the form is on. If the form action is a
relative path, then this must be specified.
:param \*\*kwargs: Arguments forwarded to `requests.Session.request
<http://docs.python-requests.org/en/master/api/#requests.Session.request>`__.
:return: `requests.Response
<http://docs.python-requests.org/en/master/api/#requests.Response>`__
object with a *soup*-attribute added by :func:`add_soup`.
"""
if isinstance(form, Form):
form = form.form
response = self._request(form, url, **kwargs)
self._add_soup(response, self.soup_config)
return response
def submit_selected(self, btnName=None, *args, **kwargs):
"""Submit the form that was selected with :func:`select_form`.
:return: Forwarded from :func:`Browser.submit`.
If there are multiple submit input/button elements, passes ``btnName``
to :func:`Form.choose_submit` on the current form to choose between
them. All other arguments are forwarded to :func:`Browser.submit`.
"""
self.get_current_form().choose_submit(btnName)
referer = self.get_current_url()
if referer:
if 'headers' in kwargs:
kwargs['headers']['Referer'] = referer
else:
kwargs['headers'] = {'Referer': referer}
resp = self.submit(self.__state.form, url=self.__state.url, **kwargs)
# updates the state
self._update_state(resp)
return resp
@staticmethod
def __looks_like_html(blob):
"""Guesses entity type when Content-Type header is missing.
Since Content-Type is not strictly required, some servers leave it out.
"""
text = blob.lstrip().lower()
return text.startswith('<html') or text.startswith('<!doctype')
def _add_soup(self, response, soup_config):
"""Attaches a soup object to a requests response."""
if self.resp_headers:
if ("text/html" in self.resp_content_type_header or
SplashBrowser.__looks_like_html(self.html)):
response.soup = bs4.BeautifulSoup(self.html, **soup_config)
elif SplashBrowser.__looks_like_html(self.html):
response.soup = bs4.BeautifulSoup(self.html, **soup_config)
else:
response.soup = None
return response
def post(self, *args, **kwargs):
"""Straightforward wrapper around `requests.Session.post
<http://docs.python-requests.org/en/master/api/#requests.Session.post>`__.
:return: `requests.Response
<http://docs.python-requests.org/en/master/api/#requests.Response>`__
object with a *soup*-attribute added by :func:`_add_soup`.
"""
try:
response = self.session.post(*args, **kwargs)
self._update_state(response)
return response
except Timeout:
self.timeout_exception = True
            print('Timeout exception.')
resp = Response()
resp.status_code = 408
self._update_state(resp)
return resp
def stateful_post(self, url, *args, **kwargs):
"""Post to the URL and store the Browser's state, as received from
the response object, in this object.
All arguments are forwarded to :func:`SplashCrawleraBrowser.post`.
:param: url: this will always be the Splash /execute endpoint
:param: kwargs: callback: a user defined callable to call
after the response is returned.
:param: kwargs: errback: a user defined callable to call
after the response is returned.
:return: Forwarded from :func:`SplashCrawleraBrowser.post`.
"""
callback = kwargs.pop('callback', None)
errback = kwargs.pop('errback', None)
if callback is not None and not callable(callback):
raise TypeError(
f'callback must be a callable, got {type(callback).__name__}')
if errback is not None and not callable(errback):
raise TypeError(
f'errback must be a callable, got {type(errback).__name__}')
self.callback = callback
self.errback = errback
if self.get_verbose() == 1:
sys.stdout.write('.')
sys.stdout.flush()
elif self.get_verbose() >= 2:
print(url)
resp = self.post(url, *args, **kwargs)
response_callback = self._response_callback(resp)
if self.callback:
return response_callback, self.callback()
return response_callback
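    # A hedged usage sketch (the endpoint, lua payload, and callback are
    # illustrative assumptions, not part of this module):
    #   browser.stateful_post(
    #       "http://localhost:8050/execute",
    #       json={"lua_source": script, "url": target_url},
    #       callback=lambda: print("done"))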
def _response_callback(self, resp):
"""
Callback for after response received. If status code is not 200 then
recursively retry. Retry up to five times.
        Flesh this out and do different things based on status code. It
        would probably be better to get away from searching strings.
:returns response object
"""
def recurse(response):
"""Recursively call"""
return self._response_callback(response)
print(f'self.ucontent[0:1000] -> {self.ucontent[0:1000]}')
# print(f'info -> {self.resp_content["info"]}')
# print(f'error -> {self.resp_content["error"]}')
if resp.status_code == 200:
return resp
if self.retry < 5:
# check for http503 in content which has a few different meanings
# see note 'HANDLING BANS' about 503 bans when handling your own session:
# https://support.scrapinghub.com/support/solutions/articles/22000188402-using-crawlera-sessions-to-make-multiple-requests-from-the-same-ip
# crawlera sessions operate with (at least) 12 second between requests..
# ..so we wait a random time from 12 - 20 seconds, hope to improve yield
# check for http503 in content: slavebanned, serverbusy, or noslaves
if b'http503' in self.raw_content or '503' in str(self.status):
self.retry += 1
print(f'resp_code -> 503')
                print(f'Retrying attempt {self.retry}.')
gevent.sleep(random.randint(12, 20))
response = self.refresh()
return recurse(response)
# check for http504 in content which means some sort of timeout
if b'http504' in self.raw_content or '504' in str(self.status):
self.retry += 1
print(f'resp_code -> 504')
print(self.resp_headers)
                print(f'Retrying attempt {self.retry}.')
gevent.sleep(random.randint(12, 20))
response = self.refresh()
return recurse(response)
print(f'Retried {self.retry} times and all were unsuccessful.')
return resp |
chips/compiler/instruction_utils.py | dillonhuff/Chips-2.0 | 221 | 11114827 | __author__ = "<NAME>"
__copyright__ = "Copyright (C) 2012, <NAME>"
__version__ = "0.1"
from register_map import *
def push(trace, instructions, reg):
"""Push the content of *reg(* onto the stack"""
instructions.append({"trace": trace, "op": "push", "reg": reg})
return instructions
def pop(trace, instructions, reg):
"""Pop one item off the stack into reg"""
instructions.append({"trace": trace, "op": "pop", "reg": reg})
return instructions
def store_object(trace, instructions, n, offset, local, leave_on_stack=False):
"""
    Store an item into the specified location.

    If n == 1 the item is taken from the result register. If n == 2 the
    item is taken from the result and result_hi registers. If n > 2 the
    item is taken from the stack. If local is true, the location is assumed
    to be relative to the frame register. If offset is not specified, the
    address is assumed to already be in the address register. If
    leave_on_stack is true the item is copied to the specified location but
    left on the stack; if the item was not on the stack in the first place,
    the value is left in the result registers either way.
"""
if offset is not None:
if local:
instructions.append({
"trace": trace,
"op": "addl",
"z": address,
"a": frame,
"literal": offset})
else:
instructions.append({
"trace": trace,
"op": "literal",
"z": address,
"literal": offset})
if n == 1:
instructions.append({
"trace": trace,
"op": "store",
"b": result,
"a": address})
elif n == 2:
instructions.append({
"trace": trace,
"op": "store",
"b": result,
"a": address})
instructions.append({
"trace": trace,
"op": "addl",
"z": address,
"a": address,
"literal": 1})
instructions.append({
"trace": trace,
"op": "store",
"b": result_hi,
"a": address})
else:
instructions.append({
"trace": trace,
"op": "addl",
"z": address,
"a": address,
"literal": n - 1})
if leave_on_stack:
instructions.append({
"trace": trace,
"op": "addl",
"z": tos_copy,
"a": tos,
"literal": 0})
for i in range(n):
instructions.append({
"trace": trace,
"op": "addl",
"z": tos_copy,
"a": tos_copy,
"literal": -1})
instructions.append({
"trace": trace,
"op": "load",
"z": result,
"a": tos_copy})
instructions.append({
"trace": trace,
"op": "store",
"b": result,
"a": address})
if i < n - 1:
instructions.append({
"trace": trace,
"op": "addl",
"z": address,
"a": address,
"literal": -1})
else:
for i in range(n):
pop(trace, instructions, result)
instructions.append({
"trace": trace,
"op": "store",
"b": result,
"a": address})
if i < n - 1:
instructions.append({
"trace": trace,
"op": "addl",
"z": address,
"a": address,
"literal": -1})
return instructions
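# A minimal sketch of the emitted sequence (hypothetical trace and offset,
# not from the original compiler):
# >>> [i["op"] for i in store_object("t", [], 1, 2, True)]
# ['addl', 'store']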
def load_object(trace, instructions, n, offset, local):
"""Load an item from the specified location
    If n == 1 the item is placed in the result register.
    If n == 2 the item is placed in the result and result_hi registers.
    If n > 2 the item is pushed onto the stack.

    If local is true, the location is assumed to be relative to the frame
    register. If offset is not specified, the address is assumed to be in
    the result register.
"""
if offset is not None:
if local:
instructions.append({
"trace": trace,
"op": "addl",
"z": address,
"a": frame,
"literal": offset})
else:
instructions.append({
"trace": trace,
"op": "literal",
"z": address,
"literal": offset})
else:
instructions.append({
"trace": trace,
"op": "addl",
"z": address,
"a": result,
"literal": 0})
if n == 1:
instructions.append({
"trace": trace,
"op": "load",
"z": result,
"a": address})
elif n == 2:
instructions.append({
"trace": trace,
"op": "load",
"z": result,
"a": address})
instructions.append({
"trace": trace,
"op": "addl",
"z": address,
"a": address,
"literal": 1})
instructions.append({
"trace": trace,
"op": "load",
"z": result_hi,
"a": address})
else:
for i in range(n):
instructions.append({
"trace": trace,
"op": "load",
"z": result,
"a": address})
if i < n - 1:
instructions.append({
"trace": trace,
"op": "addl",
"z": address,
"a": address,
"literal": 1})
push(trace, instructions, result)
return instructions
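# A minimal sketch of the emitted sequence (hypothetical trace and offset,
# not from the original compiler):
# >>> [i["op"] for i in load_object("t", [], 1, 2, True)]
# ['addl', 'load']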
def call(trace, instructions, label):
instructions.append({
"trace": trace,
"op": "addl",
"z": return_frame,
"a": frame,
"literal": 0})
instructions.append({
"trace": trace,
"op": "addl",
"z": frame,
"a": tos,
"literal": 0})
instructions.append({
"trace": trace,
"op": "call",
"z": return_address,
"label": label})
return instructions
def _return(trace, instructions):
instructions.append({
"trace": trace,
"op": "addl",
"z": tos,
"a": frame,
"literal": 0})
instructions.append({
"trace": trace,
"op": "addl",
"z": frame,
"a": return_frame,
"literal": 0})
instructions.append({
"trace": trace,
"op": "return",
"a": return_address})
return instructions
|
vnpy/gateway/bitmex/__init__.py | funrunskypalace/vnpy | 19,529 | 11114832 | from .bitmex_gateway import BitmexGateway
|
音频处理/音频尾部处理.py | liusongtao99/tools_python | 130 | 11114876 | #!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: <EMAIL>
@site: http://www.xingag.top
@software: PyCharm
@file: 尾部处理.py
@time: 2021-01-02 09:59
@description:TODO
"""
from pydub import AudioSegment
from pydub.playback import play
video_path = './raw.mp4'
def speed_change(sound, speed=1.0):
"""
    Change the playback speed of an audio segment.
    Reference: https://www.thinbug.com/q/51434897
:param sound:
:param speed:
:return:
"""
sound_with_altered_frame_rate = sound._spawn(sound.raw_data, overrides={
"frame_rate": int(sound.frame_rate * speed)
})
return sound_with_altered_frame_rate.set_frame_rate(sound.frame_rate)
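# A quick sanity check of speed_change (illustrative, not part of the
# original workflow): slowing a 1 s clip to 0.5x roughly doubles its length.
_demo_clip = AudioSegment.silent(duration=1000)
assert abs(len(speed_change(_demo_clip, 0.5)) - 2000) <= 1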
# Note: loading from a video file does not require specifying a format
audio_segment = AudioSegment.from_file(video_path)
# Slice out the tail segment (70 s to 73 s)
audio_end = audio_segment[70 * 1000:70 * 1000 + 3000]
# Slow it down; tune the factor to the source video's speed
audio_end2 = speed_change(audio_end, 0.55)
# Concatenate the two audio segments
audio_result = audio_end + audio_end2
# Fade out the tail; fade_out returns a new segment, so reassign the result
audio_result = audio_result.fade_out(1000)
# Play it back live, handy for debugging
# play(audio_result)
# Export the result as a WAV audio file
audio_result.export("result.wav", format='wav')
|
tests/test_sequence_tagger.py | ParikhKadam/flair | 7,539 | 11114878 | <filename>tests/test_sequence_tagger.py
import pytest
from torch.optim import SGD
from torch.optim.adam import Adam
import flair.datasets
from flair.data import MultiCorpus, Sentence
from flair.embeddings import FlairEmbeddings, WordEmbeddings
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer
turian_embeddings = WordEmbeddings("turian")
flair_embeddings = FlairEmbeddings("news-forward-fast")
@pytest.mark.integration
def test_load_use_tagger():
loaded_model: SequenceTagger = SequenceTagger.load("ner-fast")
sentence = Sentence("I love Berlin")
sentence_empty = Sentence(" ")
loaded_model.predict(sentence)
loaded_model.predict([sentence, sentence_empty])
loaded_model.predict([sentence_empty])
# check that "Berlin" is predicted as LOC-Span
assert len(sentence.get_spans("ner")) == 1
assert sentence.get_spans("ner")[0].text == "Berlin"
assert sentence.get_spans("ner")[0].tag == "LOC"
# check that "Berlin" is predicted as S-LOC-Token when force_token_predictions=True
sentence = Sentence("I love Berlin")
loaded_model.predict(sentence, force_token_predictions=True)
assert sentence.get_token(3).text == "Berlin"
assert sentence.get_token(3).tag == "S-LOC"
del loaded_model
loaded_model: SequenceTagger = SequenceTagger.load("pos")
loaded_model.predict(sentence)
loaded_model.predict([sentence, sentence_empty])
loaded_model.predict([sentence_empty])
del loaded_model
@pytest.mark.integration
def test_load_use_tagger_keep_embedding():
loaded_model: SequenceTagger = SequenceTagger.load("ner-fast")
sentence = Sentence("I love Berlin")
loaded_model.predict(sentence)
for token in sentence:
assert len(token.embedding.cpu().numpy()) == 0
loaded_model.predict(sentence, embedding_storage_mode="cpu")
for token in sentence:
assert len(token.embedding.cpu().numpy()) > 0
del loaded_model
@pytest.mark.integration
def test_all_tag_proba_embedding():
loaded_model: SequenceTagger = SequenceTagger.load("ner-fast")
sentence = Sentence("I love Berlin")
loaded_model.predict(sentence, return_probabilities_for_all_classes=True)
for token in sentence:
assert len(token.get_tags_proba_dist(loaded_model.tag_type)) == len(loaded_model.label_dictionary)
score_sum = 0
for label in token.get_tags_proba_dist(loaded_model.tag_type):
assert label.data_point == token
score_sum += label.score
assert abs(score_sum - 1.0) < 1.0e-5
@pytest.mark.integration
def test_train_load_use_tagger(results_base_path, tasks_base_path):
corpus = flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 3: "ner"})
tag_dictionary = corpus.make_label_dictionary("ner")
tagger: SequenceTagger = SequenceTagger(
hidden_size=64,
embeddings=turian_embeddings,
tag_dictionary=tag_dictionary,
tag_type="ner",
use_crf=False,
)
# initialize trainer
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
trainer.train(
results_base_path,
learning_rate=0.1,
mini_batch_size=2,
max_epochs=2,
shuffle=False,
)
del trainer, tagger, tag_dictionary, corpus
loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / "final-model.pt")
sentence = Sentence("I love Berlin")
sentence_empty = Sentence(" ")
loaded_model.predict(sentence)
loaded_model.predict([sentence, sentence_empty])
loaded_model.predict([sentence_empty])
# clean up results directory
del loaded_model
@pytest.mark.integration
def test_train_load_use_tagger_empty_tags(results_base_path, tasks_base_path):
corpus = flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 2: "ner"})
tag_dictionary = corpus.make_label_dictionary("ner")
tagger: SequenceTagger = SequenceTagger(
hidden_size=64,
embeddings=turian_embeddings,
tag_dictionary=tag_dictionary,
tag_type="ner",
use_crf=False,
)
# initialize trainer
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
trainer.train(
results_base_path,
learning_rate=0.1,
mini_batch_size=2,
max_epochs=2,
shuffle=False,
)
del trainer, tagger, tag_dictionary, corpus
loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / "final-model.pt")
sentence = Sentence("I love Berlin")
sentence_empty = Sentence(" ")
loaded_model.predict(sentence)
loaded_model.predict([sentence, sentence_empty])
loaded_model.predict([sentence_empty])
del loaded_model
@pytest.mark.integration
def test_train_load_use_tagger_disjunct_tags(results_base_path, tasks_base_path):
corpus = flair.datasets.ColumnCorpus(
data_folder=tasks_base_path / "fashion_disjunct",
column_format={0: "text", 3: "ner"},
)
tag_dictionary = corpus.make_label_dictionary("ner")
tagger: SequenceTagger = SequenceTagger(
hidden_size=64,
embeddings=turian_embeddings,
tag_dictionary=tag_dictionary,
tag_type="ner",
use_crf=False,
allow_unk_predictions=True,
)
# initialize trainer
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
trainer.train(
results_base_path,
learning_rate=0.1,
mini_batch_size=2,
max_epochs=2,
shuffle=False,
)
@pytest.mark.integration
def test_train_load_use_tagger_large(results_base_path, tasks_base_path):
corpus = flair.datasets.UD_ENGLISH().downsample(0.05)
tag_dictionary = corpus.make_label_dictionary("pos")
tagger: SequenceTagger = SequenceTagger(
hidden_size=64,
embeddings=turian_embeddings,
tag_dictionary=tag_dictionary,
tag_type="pos",
use_crf=False,
)
# initialize trainer
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
trainer.train(
results_base_path,
learning_rate=0.1,
mini_batch_size=32,
max_epochs=2,
shuffle=False,
)
del trainer, tagger, tag_dictionary, corpus
loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / "final-model.pt")
sentence = Sentence("I love Berlin")
sentence_empty = Sentence(" ")
loaded_model.predict(sentence)
loaded_model.predict([sentence, sentence_empty])
loaded_model.predict([sentence_empty])
del loaded_model
@pytest.mark.integration
def test_train_load_use_tagger_flair_embeddings(results_base_path, tasks_base_path):
corpus = flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 3: "ner"})
tag_dictionary = corpus.make_label_dictionary("ner")
tagger: SequenceTagger = SequenceTagger(
hidden_size=64,
embeddings=flair_embeddings,
tag_dictionary=tag_dictionary,
tag_type="ner",
use_crf=False,
)
# initialize trainer
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
trainer.train(
results_base_path,
learning_rate=0.1,
mini_batch_size=2,
max_epochs=2,
shuffle=False,
)
del trainer, tagger, tag_dictionary, corpus
loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / "final-model.pt")
sentence = Sentence("I love Berlin")
sentence_empty = Sentence(" ")
loaded_model.predict(sentence)
loaded_model.predict([sentence, sentence_empty])
loaded_model.predict([sentence_empty])
del loaded_model
@pytest.mark.integration
def test_train_load_use_tagger_adam(results_base_path, tasks_base_path):
corpus = flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 3: "ner"})
tag_dictionary = corpus.make_label_dictionary("ner")
tagger: SequenceTagger = SequenceTagger(
hidden_size=64,
embeddings=turian_embeddings,
tag_dictionary=tag_dictionary,
tag_type="ner",
use_crf=False,
)
# initialize trainer
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
trainer.train(
results_base_path,
learning_rate=0.1,
mini_batch_size=2,
max_epochs=2,
shuffle=False,
optimizer=Adam,
)
del trainer, tagger, tag_dictionary, corpus
loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / "final-model.pt")
sentence = Sentence("I love Berlin")
sentence_empty = Sentence(" ")
loaded_model.predict(sentence)
loaded_model.predict([sentence, sentence_empty])
loaded_model.predict([sentence_empty])
del loaded_model
@pytest.mark.integration
def test_train_load_use_tagger_multicorpus(results_base_path, tasks_base_path):
corpus_1 = flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 3: "ner"})
corpus_2 = flair.datasets.NER_GERMAN_GERMEVAL(base_path=tasks_base_path).downsample(0.1)
corpus = MultiCorpus([corpus_1, corpus_2])
tag_dictionary = corpus.make_label_dictionary("ner")
tagger: SequenceTagger = SequenceTagger(
hidden_size=64,
embeddings=turian_embeddings,
tag_dictionary=tag_dictionary,
tag_type="ner",
use_crf=False,
allow_unk_predictions=True,
)
# initialize trainer
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
trainer.train(
results_base_path,
learning_rate=0.1,
mini_batch_size=2,
max_epochs=2,
shuffle=False,
)
del trainer, tagger, corpus
loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / "final-model.pt")
sentence = Sentence("I love Berlin")
sentence_empty = Sentence(" ")
loaded_model.predict(sentence)
loaded_model.predict([sentence, sentence_empty])
loaded_model.predict([sentence_empty])
del loaded_model
@pytest.mark.integration
def test_train_resume_tagger(results_base_path, tasks_base_path):
corpus_1 = flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 3: "ner"})
corpus_2 = flair.datasets.NER_GERMAN_GERMEVAL(base_path=tasks_base_path).downsample(0.1)
corpus = MultiCorpus([corpus_1, corpus_2])
tag_dictionary = corpus.make_label_dictionary("ner")
model: SequenceTagger = SequenceTagger(
hidden_size=64,
embeddings=turian_embeddings,
tag_dictionary=tag_dictionary,
tag_type="ner",
use_crf=False,
)
# train model for 2 epochs
trainer = ModelTrainer(model, corpus)
trainer.train(results_base_path, max_epochs=2, shuffle=False, checkpoint=True)
del model
# load the checkpoint model and train until epoch 4
checkpoint_model = SequenceTagger.load(results_base_path / "checkpoint.pt")
trainer.resume(model=checkpoint_model, max_epochs=4)
# clean up results directory
del trainer
@pytest.mark.integration
def test_find_learning_rate(results_base_path, tasks_base_path):
corpus = flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 3: "ner"})
tag_dictionary = corpus.make_label_dictionary("ner")
tagger: SequenceTagger = SequenceTagger(
hidden_size=64,
embeddings=turian_embeddings,
tag_dictionary=tag_dictionary,
tag_type="ner",
use_crf=False,
)
# initialize trainer
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
trainer.find_learning_rate(results_base_path, optimizer=SGD, iterations=5)
del trainer, tagger, tag_dictionary, corpus
|
transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/idx_selector.py | wwhio/awesome-DeepLearning | 1,150 | 11114880 | <filename>transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/prune/idx_selector.py
"""Define some functions to sort substructures of parameter by importance.
"""
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
from ..core import GraphWrapper
from ..common import get_logger
from ..core import Registry
__all__ = ["IDX_SELECTOR"]
IDX_SELECTOR = Registry('idx_selector')
@IDX_SELECTOR.register
def default_idx_selector(group, scores, ratios):
"""Get the pruned indices by scores of master tensor.
This function return a list of parameters' pruned indices on given axis.
Each element of list is a tuple with format (name, axis, indices)
in which 'name' is parameter's name and 'axis' is the axis pruning on and
`indices` is indices to be pruned.
Args:
group(Group): A group of pruning operations.
scores(dict): The key is name of tensor, the value is a dict with axis as key and scores as value.
ratios(dict): The pruned ratio of each tensor. The key is name of tensor and the value is the pruned ratio.
Returns:
list: pruned indices with format (name, axis, pruned_indices).
"""
# sort channels by the master convolution's score
name = group.master["name"]
axis = group.master["axis"]
score = scores[name][axis]
    # get the max "groups" attribute among the convolutions
max_groups = 1
for pruning_details in group.all_pruning_details():
groups = pruning_details.op.attr("groups")
if groups is not None and groups > max_groups:
max_groups = groups
if max_groups > 1:
score = score.reshape([max_groups, -1])
group_size = score.shape[1]
# get score for each group of channels
score = np.mean(score, axis=1)
sorted_idx = score.argsort()
ratio = ratios[name]
pruned_num = int(round(len(sorted_idx) * ratio))
pruned_idx = sorted_idx[:pruned_num]
# convert indices of channel groups to indices of channels.
if max_groups > 1:
correct_idx = []
for idx in pruned_idx:
for offset in range(group_size):
correct_idx.append(idx * group_size + offset)
pruned_idx = correct_idx[:]
ret = []
for _pruning_details in group.all_pruning_details():
ret.append((_pruning_details.name, _pruning_details.axis, pruned_idx,
_pruning_details.transform))
return ret
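# A hedged illustration of the selection step above (synthetic scores,
# independent of the Group machinery): argsort ascending, then take the
# first round(len * ratio) indices as the channels to prune.
# >>> np.array([0.9, 0.1, 0.4, 0.05]).argsort()[:2].tolist()
# [3, 1]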
@IDX_SELECTOR.register
def optimal_threshold(group, scores, ratios):
"""Get the pruned indices by scores of master tensor.
This function return a list of parameters' pruned indices on given axis.
Each element of list is a tuple with format (name, axis, indices)
in which 'name' is parameter's name and 'axis' is the axis pruning on and
`indices` is indices to be pruned.
Args:
group(Group): A group of pruning operations.
scores(dict): The key is name of tensor, the value is a dict with axis as key and scores as value.
ratios(dict): The pruned ratio of each tensor. The key is name of tensor and the value is the pruned ratio.
Returns:
list: pruned indices with format (name, axis, pruned_indices).
"""
# sort channels by the master tensor
name = group.master["name"]
axis = group.master["axis"]
score = scores[name][axis]
ratio = ratios[name]
score[score < 1e-18] = 1e-18
score_sorted = np.sort(score)
score_square = score_sorted**2
total_sum = score_square.sum()
acc_sum = 0
for i in range(score_square.size):
acc_sum += score_square[i]
if acc_sum / total_sum > ratio:
break
th = (score_sorted[i - 1] + score_sorted[i]) / 2 if i > 0 else 0
pruned_idx = np.squeeze(np.argwhere(score < th))
idxs = []
for _pruning_details in group.all_pruning_details():
idxs.append((_pruning_details.name, _pruning_details.axis, pruned_idx,
_pruning_details.transform))
return idxs
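# A hedged worked example of the threshold rule (synthetic scores, not from
# the PaddleSlim docs): with scores [0.1, 0.2, 0.4, 0.8] the squared sums
# accumulate to [0.012, 0.059, 0.247, 1.0] of the total, so a ratio of 0.5
# is first exceeded at 0.8 and th = (0.4 + 0.8) / 2 = 0.6; every channel
# scoring below 0.6 would be pruned.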
|
service/argo/rest_api.py | simpsonw/atmosphere | 197 | 11114891 | <reponame>simpsonw/atmosphere
"""
Client to access Argo REST API
"""
import json
import requests
from threepio import celery_logger as logger
from service.argo.exception import ResponseNotJSON
try:
from json import JSONDecodeError
except ImportError:
# python2 does not has JSONDecodeError
JSONDecodeError = ValueError
class ArgoAPIClient:
"""
REST API Client for Argo.
A thin layer of abstraction over Argo REST API endpoints
"""
def __init__(self, api_host, port, k8s_token, wf_namespace, verify=True):
"""
init the API client with all necessary credentials
Args:
api_host (str): hostname of where Argo server is hosted
port (int): port of Argo server
k8s_token (str): k8s token to authenticate with Argo server
wf_namespace (str): k8s namespace used for the workflow
verify (bool): verify SSL/TLS cert or not
"""
self._host = api_host
self._port = port
self._base_url = "https://{}:{}".format(self._host, self._port)
self._token = k8s_token
self._namespace = wf_namespace
self._verify = verify
def get_workflow(self, wf_name, fields=""):
"""
Endpoint for fetching a workflow
Args:
wf_name (str): name of the workflow
fields (str): fields to be included in the response
Returns:
dict: response text as JSON object
"""
api_url = "/api/v1/workflows/{}/{}"
api_url = api_url.format(self._namespace, wf_name)
if fields:
api_url = "{}?fields={}".format(api_url, fields)
json_resp = self._req("get", api_url)
return json_resp
def list_workflow(self):
"""
Endpoint for fetching a list of workflows
Returns:
dict: response text as JSON object
"""
api_url = "/api/v1/workflows/" + self._namespace
json_resp = self._req("get", api_url)
return json_resp
def run_workflow(self, wf_json):
"""
Endpoint for running a workflow
Args:
wf_json (dict): workflow definition as JSON object
Returns:
dict: response text as JSON object
"""
api_url = "/api/v1/workflows/" + self._namespace
json_data = {}
json_data["namespace"] = self._namespace
json_data["serverDryRun"] = False
json_data["workflow"] = wf_json
json_resp = self._req("post", api_url, json_data=json_data)
return json_resp
def get_log_for_pod_in_workflow(
self, wf_name, pod_name, container_name="main"
):
"""
Get the logs of a pod in a workflow
Args:
wf_name (str): name of the workflow
pod_name (str): name of the pod
container_name (str, optional): name of the container. Defaults to "main".
Returns:
list: a list of lines of logs
"""
api_url = "/api/v1/workflows/{}/{}/{}/log?logOptions.timestamps=true&logOptions.container={}"
api_url = api_url.format(
self._namespace, wf_name, pod_name, container_name
)
resp = self._req("get", api_url, json_resp=False)
logs = []
# each line is a json obj
for line in resp.split("\n"):
try:
line = line.strip()
if not line:
continue
log_json = json.loads(line)
if "result" not in log_json or "content" not in log_json[
"result"]:
continue
logs.append(log_json["result"]["content"])
except Exception:
continue
return logs
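    # Shape of one streamed log line that the loop above parses (an
    # illustrative assumption about Argo's log stream, not an official
    # example):
    #   {"result": {"content": "2021-01-01T00:00:00.000Z step finished"}}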
def get_workflow_template(self, wf_temp_name):
"""
fetch a workflow template by its name
Args:
wf_temp_name (str): name of the workflow template
Returns:
dict: response text as JSON object
"""
api_url = "/api/v1/workflows-templates/{}/{}"
api_url = api_url.format(self._namespace, wf_temp_name)
json_resp = self._req("get", api_url)
return json_resp
def list_workflow_templates(self):
"""
fetch a list of workflow templates
Returns:
dict: response text as JSON object
"""
api_url = "/api/v1/workflows-templates/{}"
api_url = api_url.format(self._namespace)
json_resp = self._req("get", api_url)
return json_resp
def create_workflow_template(self, wf_temp_def_json):
"""
create workflow template
Args:
            wf_temp_def_json (dict): definition of the workflow template
Returns:
dict: response text as JSON object
"""
api_url = "/api/v1/workflow-templates/" + self._namespace
json_data = {}
json_data["namespace"] = self._namespace
json_data["template"] = wf_temp_def_json
json_resp = self._req("post", api_url, json_data=json_data)
return json_resp
def update_workflow_template(self, wf_temp_name, wf_temp_def_json):
"""
update workflow template with the given name
Args:
            wf_temp_def_json (dict): definition of the workflow template
Returns:
dict: response text as JSON object
"""
api_url = "/api/v1/workflow-templates/{}/{}".format(
self._namespace, wf_temp_name
)
json_data = {}
json_data["namespace"] = self._namespace
json_data["template"] = wf_temp_def_json
json_resp = self._req("put", api_url, json_data=json_data)
return json_resp
def submit_workflow_template(self, wf_temp_name, wf_param=[]):
"""
submit a workflow template for execution with parameters.
this will create a workflow.
Args:
wf_temp_name (str): name of the workflow template
wf_param ([str]): list of parameters, in the form of ["NAME1=VAL1", "NAME2=VAL2"]
Returns:
dict: response text as JSON object
"""
api_url = "/api/v1/workflows/{}/submit".format(self._namespace)
json_data = {}
json_data["namespace"] = self._namespace
json_data["resourceKind"] = "WorkflowTemplate"
json_data["resourceName"] = wf_temp_name
json_data["submitOptions"] = {}
json_data["submitOptions"]["parameters"] = wf_param
json_resp = self._req("post", api_url, json_data=json_data)
return json_resp
def delete_workflow_template(self, wf_temp_name):
"""
delete a workflow templates with given name
Args:
wf_temp_name (str): name of the workflow template
Returns:
dict: response text as JSON object
"""
api_url = "/api/v1/workflow-templates/{}/{}"
api_url = api_url.format(self._namespace, wf_temp_name)
json_resp = self._req("delete", api_url)
return json_resp
def _req(
self, method, url, json_data={}, additional_headers={}, json_resp=True
):
"""
send a request with given method to the given url
Args:
method (str): HTTP method
url (str): api url to send the request to
            json_data (dict, optional): JSON payload. Defaults to {}.
            additional_headers (dict, optional): additional headers. Defaults to {}.
json_resp (bool, optional): if response is json. Defaults to True.
Raises:
ResponseNotJSON: raised when the response is not JSON
            HTTPError: request failed
Returns:
dict: response text as JSON object
"""
try:
headers = {}
headers["Host"] = self.host
headers["Accept"] = "application/json;q=0.9,*/*;q=0.8"
headers["Content-Type"] = "application/json"
if self._token:
headers["Authorization"] = "Bearer " + self._token
if additional_headers:
headers.update(additional_headers)
full_url = self.base_url + url
requests_func = _http_method(method)
if json_data:
resp = requests_func(
full_url,
headers=headers,
json=json_data,
verify=self.verify
)
else:
resp = requests_func(
full_url, headers=headers, verify=self.verify
)
resp.raise_for_status()
if json_resp:
return json.loads(resp.text)
return resp.text
except JSONDecodeError as exc:
msg = "ARGO - REST API, {}, {}".format(type(exc), resp.text)
logger.exception(msg)
raise ResponseNotJSON("ARGO, Fail to parse response body as JSON")
except requests.exceptions.HTTPError as exc:
msg = "ARGO - REST API, {}, {}".format(type(exc), resp.text)
logger.exception(msg)
raise exc
@property
def host(self):
"""
hostname of the Argo API Server.
e.g. localhost
Returns:
str: hostname of the Argo API Server
"""
return self._host
@property
def base_url(self):
"""
base url for the Argo API Server.
e.g. http://localhost:1234
Returns:
str: base url for the Argo API Server
"""
return self._base_url
@property
def namespace(self):
"""
k8s namespace used for the workflow
Returns:
str: k8s namespace
"""
return self._namespace
@property
def verify(self):
"""
whether to verify SSL/TLS cert of api host or not
Returns:
bool: whether to verify SSL/TLS cert of api host or not
"""
return self._verify
def _http_method(method_str):
"""
Return function for given HTTP Method from requests library
Args:
method_str (str): HTTP method, "get", "post", etc.
Returns:
function: requests.get, requests.post, etc. None if no match
"""
if method_str == "get":
return requests.get
if method_str == "post":
return requests.post
if method_str == "delete":
return requests.delete
if method_str == "put":
return requests.put
if method_str == "options":
return requests.options
return None
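# A common alternative (a sketch, not part of the original module) is a
# dispatch table, which drops the if-chain and fails loudly on unknown
# methods:
#   _METHODS = {"get": requests.get, "post": requests.post,
#               "delete": requests.delete, "put": requests.put,
#               "options": requests.options}
#   requests_func = _METHODS[method_str]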
|
ochre/select_vudnc_files.py | KBNLresearch/ochre | 113 | 11114898 | #!/usr/bin/env python
import click
import os
import json
from nlppln.utils import cwl_file
@click.command()
@click.argument('dir_in', type=click.Path(exists=True))
def command(dir_in):
files_out = []
newspapers = ['ad1951', 'nrc1950', 't1950', 'tr1950', 'vk1951']
for np in newspapers:
path = os.path.join(dir_in, np)
for f in os.listdir(path):
fi = os.path.join(path, f)
if fi.endswith('.folia.xml'):
files_out.append(cwl_file(fi))
stdout_text = click.get_text_stream('stdout')
stdout_text.write(json.dumps({'out_files': files_out}))
if __name__ == '__main__':
command()
|
setup.py | DuncanBetts/flask-redis | 409 | 11114906 | import codecs
import os
import re
from setuptools import find_packages, setup
NAME = "flask-redis"
KEYWORDS = ["flask", "redis"]
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Flask",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
]
PROJECT_URLS = {
"Bug Tracker": "https://github.com/underyx/flask-redis/issues",
"Source Code": "https://github.com/underyx/flask-redis",
}
INSTALL_REQUIRES = ["Flask>=0.8", "redis>=2.7.6"]
EXTRAS_REQUIRE = {"tests": ["coverage", "pytest", "pytest-mock"]}
EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["tests"] + ["pre-commit"]
def read(*parts):
"""
Build an absolute path from *parts* and return the contents of the resulting file.
Assumes UTF-8 encoding.
"""
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read("flask_redis", "__init__.py")
def find_meta(meta):
"""Extract __*meta*__ from META_FILE."""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
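# Illustrative sketch: find_meta("version") matches a dunder assignment near
# the top of flask_redis/__init__.py, e.g. a line like
#
#     __version__ = "0.4.0"      # hypothetical value
#
# and returns the quoted string ("0.4.0").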
setup(
name=find_meta("title"),
description=find_meta("description"),
version=find_meta("version"),
url=find_meta("url"),
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
download_url=find_meta("url") + "releases",
keywords=KEYWORDS,
long_description=(
read("README.md")
+ "\n\n"
+ re.sub("^#", "##", read("CHANGELOG.md"))
+ "\n\n"
+ re.sub("^#", "##", read("AUTHORS.md"))
),
long_description_content_type="text/markdown",
packages=find_packages(),
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
include_package_data=True,
)
|
test/package/test_trace_dep/__init__.py | Hacky-DH/pytorch | 60,067 | 11114935 | import torch
import yaml
class SumMod(torch.nn.Module):
def forward(self, inp):
return torch.sum(inp)
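# Minimal usage sketch (assuming a working torch install):
#
#     mod = SumMod()
#     mod(torch.ones(3))          # -> tensor(3.)
#
# The otherwise-unused `yaml` import above appears deliberate: this package
# exists so that torch.package tests can check traced-dependency pickup.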
|
src/genie/libs/parser/ios/show_vlan.py | nujo/genieparser | 204 | 11114948 | """
IOS Parsers
"""
# genieparser
from genie.libs.parser.iosxe.show_vlan import ShowVlan as ShowVlan_iosxe, \
ShowVlanMtu as ShowVlanMtu_iosxe,\
ShowVlanAccessMap as ShowVlanAccessMap_iosxe,\
ShowVlanRemoteSpan as ShowVlanRemoteSpan_iosxe,\
ShowVlanFilter as ShowVlanFilter_iosxe
class ShowVlan(ShowVlan_iosxe):
pass
class ShowVlanMtu(ShowVlanMtu_iosxe):
pass
class ShowVlanAccessMap(ShowVlanAccessMap_iosxe):
pass
class ShowVlanRemoteSpan(ShowVlanRemoteSpan_iosxe):
pass
class ShowVlanFilter(ShowVlanFilter_iosxe):
pass |
gengine/maintenance/scripts/generate_revision.py | greck2908/gamification-engine | 347 | 11114972 | # -*- coding: utf-8 -*-
import sys
import os
import pyramid_dogpile_cache
from pyramid.config import Configurator
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from sqlalchemy import engine_from_config
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> <message> [var=value]\n'
'(example: "%s production.ini new_table_xy_created")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 3:
usage(argv)
config_uri = argv[1]
message = argv[2]
options = parse_vars(argv[3:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
durl = os.environ.get("DATABASE_URL") # heroku
if durl:
settings['sqlalchemy.url'] = durl
murl = os.environ.get("MEMCACHED_URL")
if murl:
settings['urlcache_url'] = murl
revision(settings, message, options)
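# Example invocation (hypothetical paths/URLs), mirroring usage() above; the
# DATABASE_URL and MEMCACHED_URL environment variables override the .ini file:
#
#     DATABASE_URL=postgresql://user:pw@localhost/gengine \
#         python generate_revision.py production.ini "new_table_xy_created"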
def revision(settings, message, options):
engine = engine_from_config(settings, 'sqlalchemy.')
config = Configurator(settings=settings)
pyramid_dogpile_cache.includeme(config)
from gengine.metadata import (
init_session,
init_declarative_base,
init_db
)
init_session()
init_declarative_base()
init_db(engine)
from gengine.app.cache import init_caches
init_caches()
from gengine.metadata import (
Base,
)
if options.get("reset_db", False):
Base.metadata.drop_all(engine)
engine.execute("DROP SCHEMA IF EXISTS public CASCADE")
engine.execute("CREATE SCHEMA IF NOT EXISTS public")
from alembic.config import Config
from alembic import command
alembic_cfg = Config(attributes={
'engine': engine,
'schema': 'public'
})
script_location = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
'app/alembic'
)
alembic_cfg.set_main_option("script_location", script_location)
command.revision(alembic_cfg, message, autogenerate=True)
engine.dispose()
if __name__ == '__main__':
main() |
mayan/apps/tags/__init__.py | eshbeata/open-paperless | 2,743 | 11115003 | from __future__ import unicode_literals
default_app_config = 'tags.apps.TagsApp'
|
InnerEye/Common/resource_monitor.py | JacopoTeneggi/InnerEye-DeepLearning | 402 | 11115019 | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from __future__ import annotations
import logging
import time
from dataclasses import dataclass
from multiprocessing import Process
from pathlib import Path
from typing import Dict, List, Tuple
import GPUtil
import pandas as pd
import psutil
import tensorboardX
import torch
from GPUtil import GPU
from InnerEye.ML.utils.ml_util import is_gpu_available
COL_GPU = "gpu"
COL_METRIC = "metric"
COL_VALUE = "value"
def memory_in_gb(bytes: int) -> float:
"""
Converts a memory amount in bytes to gigabytes.
:param bytes:
:return:
"""
gb = 2 ** 30
return bytes / gb
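# e.g. memory_in_gb(2 ** 30) == 1.0 and memory_in_gb(512 * 2 ** 20) == 0.5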
@dataclass
class GpuUtilization:
# The numeric ID of the GPU
id: int
# GPU load, as a number between 0 and 1
load: float
# Memory utilization, as a number between 0 and 1
mem_util: float
# Allocated memory by pytorch
mem_allocated_gb: float
# Reserved memory by pytorch
mem_reserved_gb: float
# Number of observations that are stored in the present object
count: int
def __add__(self, other: GpuUtilization) -> GpuUtilization:
return GpuUtilization(
id=self.id,
load=self.load + other.load,
mem_util=self.mem_util + other.mem_util,
mem_allocated_gb=self.mem_allocated_gb + other.mem_allocated_gb,
mem_reserved_gb=self.mem_reserved_gb + other.mem_reserved_gb,
count=self.count + other.count
)
def max(self, other: GpuUtilization) -> GpuUtilization:
"""
Computes the metric-wise maximum of the two GpuUtilization objects.
:param other:
:return:
"""
return GpuUtilization(
# Effectively ignore ID. We could enforce consistent IDs, but then we could not compute overall max.
id=self.id,
load=max(self.load, other.load),
mem_util=max(self.mem_util, other.mem_util),
mem_allocated_gb=max(self.mem_allocated_gb, other.mem_allocated_gb),
mem_reserved_gb=max(self.mem_reserved_gb, other.mem_reserved_gb),
# Max does not make sense for the count field, hence just add up to see how many items we have done max for
count=self.count + other.count
)
def average(self) -> GpuUtilization:
"""
Returns a GPU utilization object that contains all metrics of the present object, divided by the number
of observations.
:return:
"""
return GpuUtilization(
id=self.id,
load=self.load / self.count,
mem_util=self.mem_util / self.count,
mem_allocated_gb=self.mem_allocated_gb / self.count,
mem_reserved_gb=self.mem_reserved_gb / self.count,
count=1
)
@property
def name(self) -> str:
"""
Gets a string name for the GPU that the present object describes, e.g. "GPU1" for the GPU with id == 1.
"""
return f"GPU{self.id}"
def enumerate(self, prefix: str = "") -> List[Tuple[str, float]]:
"""
Lists all metrics stored in the present object, as (metric_name, value) pairs suitable for logging in
Tensorboard.
:param prefix: If provided, this string is used as an additional prefix for the metric name itself. If prefix
is "max", the metric would look like "maxLoad_Percent"
:return: A list of (name, value) tuples.
"""
return [
(f'{prefix}MemUtil_Percent', round(self.mem_util * 100, 2)),
(f'{prefix}Load_Percent', round(self.load * 100, 2)),
(f'{prefix}MemReserved_GB', round(self.mem_reserved_gb, 4)),
(f'{prefix}MemAllocated_GB', round(self.mem_allocated_gb, 4))
]
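# Illustrative output of enumerate(prefix="Max") for a utilization of 50%
# memory, 25% load, 1.5 GB reserved and 1.0 GB allocated:
#
#     [('MaxMemUtil_Percent', 50.0), ('MaxLoad_Percent', 25.0),
#      ('MaxMemReserved_GB', 1.5), ('MaxMemAllocated_GB', 1.0)]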
@staticmethod
def from_gpu(gpu: GPU) -> GpuUtilization:
"""
Creates a GpuUtilization object from data coming from the gputil library.
:param gpu: GPU diagnostic data from gputil.
:return: A GpuUtilization snapshot of that GPU, with count == 1.
"""
return GpuUtilization(
id=gpu.id,
load=gpu.load,
mem_util=gpu.memoryUtil,
mem_allocated_gb=memory_in_gb(torch.cuda.memory_allocated(int(gpu.id))),
mem_reserved_gb=memory_in_gb(torch.cuda.memory_reserved(int(gpu.id))),
count=1
)
RESOURCE_MONITOR_AGGREGATE_METRICS = "aggregate_resource_usage.csv"
class ResourceMonitor(Process):
"""
Monitor and log GPU and CPU stats in TensorBoard in a separate process.
"""
def __init__(self,
interval_seconds: int,
tensorboard_folder: Path,
csv_results_folder: Path):
"""
Creates a process that will monitor CPU and GPU utilization.
:param interval_seconds: The interval in seconds at which usage statistics should be written.
:param tensorboard_folder: The path in which to create a tensorboard logfile.
:param csv_results_folder: The path in which the CSV file with aggregate metrics will be created.
When running in AzureML, this should NOT reside inside the /logs folder.
"""
super().__init__(name="Resource Monitor", daemon=True)
self._interval_seconds = interval_seconds
self.tensorboard_folder = tensorboard_folder
self.gpu_aggregates: Dict[int, GpuUtilization] = dict()
self.gpu_max: Dict[int, GpuUtilization] = dict()
self.writer = tensorboardX.SummaryWriter(str(self.tensorboard_folder))
self.step = 0
self.aggregate_metrics: List[str] = []
self.aggregate_metrics_file = csv_results_folder / RESOURCE_MONITOR_AGGREGATE_METRICS
def log_to_tensorboard(self, label: str, value: float) -> None:
"""
Write a scalar metric value to Tensorboard, marked with the present step.
:param label: The name of the metric.
:param value: The value.
"""
self.writer.add_scalar(label, value, global_step=self.step)
def update_metrics(self, gpus: List[GPU]) -> None:
"""
Updates the stored GPU utilization metrics with the current status coming from gputil, and logs
them to Tensorboard.
:param gpus: The current utilization information, read from gputil, for all available GPUs.
"""
for gpu in gpus:
gpu_util = GpuUtilization.from_gpu(gpu)
for (metric_name, value) in gpu_util.enumerate():
self.log_to_tensorboard(f"{gpu_util.name}/{metric_name}", value)
id = gpu_util.id
# Update the total utilization
if id in self.gpu_aggregates:
self.gpu_aggregates[id] = self.gpu_aggregates[id] + gpu_util
else:
self.gpu_aggregates[id] = gpu_util
# Update the maximum utilization
if id in self.gpu_max:
self.gpu_max[id] = self.gpu_max[id].max(gpu_util)
else:
self.gpu_max[id] = gpu_util
def run(self) -> None:
if self._interval_seconds <= 0:
logging.warning("Resource monitoring requires an interval that is larger than 0 seconds, but "
f"got: {self._interval_seconds}. Exiting.")
self.kill()
return
logging.info(f"Process '{self.name}' started with pid: {self.pid}")
gpu_available = is_gpu_available()
while True:
if gpu_available:
self.update_metrics(GPUtil.getGPUs())
# log the CPU utilization
self.log_to_tensorboard('CPU/Load_Percent', psutil.cpu_percent(interval=None))
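# Index 2 of the psutil.virtual_memory() tuple is the 'percent' field.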
self.log_to_tensorboard('CPU/MemUtil_Percent', psutil.virtual_memory()[2])
self.step += 1
self.store_to_file()
# pause the thread for the requested delay
time.sleep(self._interval_seconds)
def store_to_file(self) -> None:
"""
Writes the current aggregate metrics (average and maximum) to a file inside the csv_results_folder.
"""
aggregate_metrics: List[str] = [f"{COL_GPU},{COL_METRIC},{COL_VALUE}"]
for util in self.gpu_aggregates.values():
for (metric, value) in util.average().enumerate():
aggregate_metrics.append(f"{util.name},{metric},{value}")
for util in self.gpu_max.values():
for (metric, value) in util.enumerate(prefix="Max"):
aggregate_metrics.append(f"{util.name},{metric},{value}")
self.aggregate_metrics_file.write_text("\n".join(aggregate_metrics))
def read_aggregate_metrics(self) -> Dict[str, Dict[str, float]]:
"""
Reads the file containing aggregate metrics, and returns them parsed
as nested dictionaries mapping from GPU name to metric name to value.
"""
if not self.aggregate_metrics_file.is_file():
return dict()
df = pd.read_csv(self.aggregate_metrics_file)
pivot = df.pivot(index=COL_GPU, columns=COL_METRIC, values=COL_VALUE)
result = {index: series.to_dict() for index, series in pivot.iterrows()}
return result
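# Illustrative usage sketch (paths and metric values are hypothetical):
#
#     monitor = ResourceMonitor(interval_seconds=5,
#                               tensorboard_folder=Path("logs/tensorboard"),
#                               csv_results_folder=Path("outputs"))
#     monitor.start()   # daemon process; samples until the parent exits
#     ...
#     print(monitor.read_aggregate_metrics())
#     # -> {'GPU0': {'Load_Percent': 73.1, 'MaxLoad_Percent': 98.4, ...}, ...}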
|
src/python/k4a/tests/test_functional_api_azurekinect.py | seanyen/Azure-Kinect-Sensor-SDK | 1,120 | 11115085 | '''
test_functional_api_azurekinect.py
Functional tests for the Device, Capture, Image, Calibration, and
Transformation classes for the Azure Kinect device.
Copyright (C) Microsoft Corporation. All rights reserved.
'''
import unittest
import copy
from time import sleep
import numpy as np
import k4a
import test_config
def k4a_device_set_and_get_color_control(
device:k4a.Device,
color_control_command:k4a.EColorControlCommand):
saved_value = 0
# Get the step size.
supports_auto = device.color_ctrl_cap.__dict__[color_control_command].supports_auto
min_value = device.color_ctrl_cap.__dict__[color_control_command].min_value
max_value = device.color_ctrl_cap.__dict__[color_control_command].max_value
step_value = device.color_ctrl_cap.__dict__[color_control_command].step_value
default_value = device.color_ctrl_cap.__dict__[color_control_command].default_value
mode = k4a.EColorControlMode.MANUAL
# Read the original value.
(saved_value, mode) = device.get_color_control(color_control_command)
# Write a new value.
new_value = 0
if (saved_value + step_value <= max_value):
new_value = saved_value + step_value
else:
new_value = saved_value - step_value
status1 = device.set_color_control(
color_control_command,
mode,
new_value)
# Read back the value to check that it was written.
(new_value_readback, mode) = device.get_color_control(color_control_command)
# Write the original saved value.
status2 = device.set_color_control(
color_control_command,
mode,
saved_value)
# Read back the value to check that it was written.
(saved_value_readback, mode) = device.get_color_control(color_control_command)
return (status1, status2, saved_value, saved_value_readback, new_value, new_value_readback)
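# Illustrative use of the helper above inside a test (the command is just an
# example value):
#
#     (s1, s2, saved, saved_rb, new, new_rb) = \
#         k4a_device_set_and_get_color_control(
#             device, k4a.EColorControlCommand.BRIGHTNESS)
#     assert k4a.K4A_SUCCEEDED(s1) and k4a.K4A_SUCCEEDED(s2)
#     assert saved == saved_rb and new == new_rb and saved != new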
class Test_Functional_API_Device_AzureKinect(unittest.TestCase):
'''Test Device class for Azure Kinect device.
'''
@classmethod
def setUpClass(cls):
cls.device = k4a.Device.open()
assert(cls.device is not None)
cls.lock = test_config.glb_lock
@classmethod
def tearDownClass(cls):
# Stop the cameras and imus before closing device.
cls.device.stop_cameras()
cls.device.stop_imu()
cls.device.close()
del cls.device
def test_functional_fast_api_open_twice_expected_fail(self):
device2 = k4a.Device.open()
self.assertIsNone(device2)
device2 = k4a.Device.open(1000000)
self.assertIsNone(device2)
def test_functional_fast_api_device_shallow_copy(self):
device2 = copy.copy(self.device)
self.assertIsNone(device2)
def test_functional_fast_api_device_deep_copy(self):
device2 = copy.deepcopy(self.device)
self.assertIsNone(device2)
# Helper method for test_set_serial_number().
@staticmethod
def set_serial_number(device:k4a.Device, serial_number:str):
device.serial_number = serial_number
def test_functional_fast_api_set_serial_number(self):
self.assertRaises(AttributeError,
Test_Functional_API_Device_AzureKinect.set_serial_number,
self.device, "not settable")
def test_functional_fast_api_get_capture(self):
# Start the cameras.
status = self.device.start_cameras(
k4a.DEVICE_CONFIG_BGRA32_2160P_WFOV_UNBINNED_FPS15)
self.assertEqual(status, k4a.EStatus.SUCCEEDED)
# Get a capture, waiting indefinitely.
capture = self.device.get_capture(-1)
self.assertIsNotNone(capture)
# Stop the cameras.
self.device.stop_cameras()
def test_functional_fast_api_get_imu_sample(self):
# Start the cameras.
status = self.device.start_cameras(
k4a.DEVICE_CONFIG_BGRA32_2160P_WFOV_UNBINNED_FPS15)
self.assertEqual(status, k4a.EStatus.SUCCEEDED)
# Start the imu.
status = self.device.start_imu()
self.assertEqual(status, k4a.EStatus.SUCCEEDED)
# Get an imu sample, waiting indefinitely.
imu_sample = self.device.get_imu_sample(-1)
self.assertIsNotNone(imu_sample)
# Stop the cameras and imu.
self.device.stop_cameras()
self.device.stop_imu()
def test_functional_fast_api_get_serial_number(self):
serial_number = self.device.serial_number
self.assertIsInstance(serial_number, str)
self.assertNotEqual(len(serial_number), 0)
def test_functional_fast_api_get_hardware_version(self):
hardware_version = self.device.hardware_version
self.assertIsInstance(hardware_version, k4a.HardwareVersion)
self.assertNotEqual(str(hardware_version), 0)
def test_functional_fast_api_get_color_ctrl_cap(self):
color_ctrl_cap = self.device.color_ctrl_cap
self.assertNotEqual(str(color_ctrl_cap), 0)
def test_functional_fast_api_get_sync_out_connected(self):
sync_out_connected = self.device.sync_out_connected
self.assertIsInstance(sync_out_connected, bool)
def test_functional_fast_api_get_sync_in_connected(self):
sync_in_connected = self.device.sync_in_connected
self.assertIsInstance(sync_in_connected, bool)
def test_functional_fast_api_start_stop_cameras(self):
status = self.device.start_cameras(k4a.DEVICE_CONFIG_BGRA32_2160P_WFOV_UNBINNED_FPS15)
self.assertEqual(status, k4a.EStatus.SUCCEEDED)
self.device.stop_cameras()
def test_functional_fast_api_start_stop_imu(self):
status = self.device.start_cameras(k4a.DEVICE_CONFIG_BGRA32_2160P_WFOV_UNBINNED_FPS15)
self.assertEqual(status, k4a.EStatus.SUCCEEDED)
status = self.device.start_imu()
self.assertEqual(status, k4a.EStatus.SUCCEEDED)
self.device.stop_imu()
self.device.stop_cameras()
def test_functional_fast_api_get_color_control(self):
color_control_commands = [
k4a.EColorControlCommand.BACKLIGHT_COMPENSATION,
k4a.EColorControlCommand.BRIGHTNESS,
k4a.EColorControlCommand.CONTRAST,
k4a.EColorControlCommand.EXPOSURE_TIME_ABSOLUTE,
k4a.EColorControlCommand.GAIN,
k4a.EColorControlCommand.POWERLINE_FREQUENCY,
k4a.EColorControlCommand.SATURATION,
k4a.EColorControlCommand.SHARPNESS,
k4a.EColorControlCommand.WHITEBALANCE
]
for command in color_control_commands:
with self.subTest(command = command):
(value, mode) = self.device.get_color_control(command)
self.assertIsNotNone(value)
self.assertIsNotNone(mode)
def test_functional_fast_api_set_color_control(self):
color_control_commands = [
k4a.EColorControlCommand.BACKLIGHT_COMPENSATION,
k4a.EColorControlCommand.BRIGHTNESS,
k4a.EColorControlCommand.CONTRAST,
#k4a.EColorControlCommand.EXPOSURE_TIME_ABSOLUTE,
k4a.EColorControlCommand.GAIN,
k4a.EColorControlCommand.POWERLINE_FREQUENCY,
k4a.EColorControlCommand.SATURATION,
k4a.EColorControlCommand.SHARPNESS,
#k4a.EColorControlCommand.WHITEBALANCE
]
for command in color_control_commands:
with self.subTest(command = command):
(status1, status2, saved_value, saved_value_readback,
new_value, new_value_readback) = \
k4a_device_set_and_get_color_control(self.device, command)
self.assertTrue(k4a.K4A_SUCCEEDED(status1))
self.assertTrue(k4a.K4A_SUCCEEDED(status2))
self.assertEqual(saved_value, saved_value_readback)
self.assertEqual(new_value, new_value_readback)
self.assertNotEqual(saved_value, new_value)
def test_functional_fast_api_get_raw_calibration(self):
raw_calibration = self.device.get_raw_calibration()
self.assertIsNotNone(raw_calibration)
def test_functional_fast_api_get_calibration(self):
depth_modes = [
k4a.EDepthMode.NFOV_2X2BINNED,
k4a.EDepthMode.NFOV_UNBINNED,
k4a.EDepthMode.WFOV_2X2BINNED,
k4a.EDepthMode.WFOV_UNBINNED,
k4a.EDepthMode.PASSIVE_IR,
]
color_resolutions = [
k4a.EColorResolution.RES_3072P,
k4a.EColorResolution.RES_2160P,
k4a.EColorResolution.RES_1536P,
k4a.EColorResolution.RES_1440P,
k4a.EColorResolution.RES_1080P,
k4a.EColorResolution.RES_720P,
]
for depth_mode in depth_modes:
for color_resolution in color_resolutions:
with self.subTest(
depth_mode = depth_mode,
color_resolution = color_resolution):
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
self.assertIsNotNone(calibration)
self.assertIsInstance(calibration, k4a.Calibration)
class Test_Functional_API_Capture_AzureKinect(unittest.TestCase):
'''Test Capture class for Azure Kinect device.
'''
@classmethod
def setUpClass(cls):
cls.device = k4a.Device.open()
assert(cls.device is not None)
cls.lock = test_config.glb_lock
@classmethod
def tearDownClass(cls):
# Stop the cameras and imus before closing device.
cls.device.stop_cameras()
cls.device.stop_imu()
cls.device.close()
del cls.device
def setUp(self):
status = self.device.start_cameras(
k4a.DEVICE_CONFIG_BGRA32_2160P_WFOV_UNBINNED_FPS15)
self.assertEqual(status, k4a.EStatus.SUCCEEDED)
self.capture = self.device.get_capture(-1)
self.assertIsNotNone(self.capture)
def tearDown(self):
self.device.stop_cameras()
self.device.stop_imu()
del self.capture
def check_copy(self, image1:k4a.Image, image2:k4a.Image):
# Check that the images are not the same instance.
self.assertIsNot(image1, image2)
# Check that the image contents are equal.
if (image1.data.ndim == 3):
self.assertEqual(image1.data[0, 0, 0], image2.data[0, 0, 0])
self.assertEqual(image1.data[100, 100, 1], image2.data[100, 100, 1])
self.assertEqual(image1.data[100, 50, 2], image2.data[100, 50, 2])
elif (image1.data.ndim == 2):
self.assertEqual(image1.data[0, 0], image2.data[0, 0])
self.assertEqual(image1.data[100, 100], image2.data[100, 100])
self.assertEqual(image1.data[100, 50], image2.data[100, 50])
self.assertEqual(image1.image_format, image2.image_format)
self.assertEqual(image1.size_bytes, image2.size_bytes)
self.assertEqual(image1.width_pixels, image2.width_pixels)
self.assertEqual(image1.height_pixels, image2.height_pixels)
self.assertEqual(image1.stride_bytes, image2.stride_bytes)
self.assertEqual(image1.device_timestamp_usec, image2.device_timestamp_usec)
self.assertEqual(image1.system_timestamp_nsec, image2.system_timestamp_nsec)
self.assertEqual(image1.exposure_usec, image2.exposure_usec)
self.assertEqual(image1.white_balance, image2.white_balance)
self.assertEqual(image1.iso_speed, image2.iso_speed)
def test_functional_fast_api_capture_shallow_copy(self):
capture2 = copy.copy(self.capture)
# Check that the copy of the capture is not the same as the original.
self.assertIsNotNone(capture2)
self.assertIsNot(capture2, self.capture)
# Check that the images are not the same as in the original.
self.check_copy(capture2.color, self.capture.color)
self.check_copy(capture2.depth, self.capture.depth)
self.check_copy(capture2.ir, self.capture.ir)
self.assertAlmostEqual(capture2.temperature, self.capture.temperature, 4)
# Check that the image handles are the same.
self.assertIs(capture2.color._image_handle, self.capture.color._image_handle)
self.assertIs(capture2.depth._image_handle, self.capture.depth._image_handle)
self.assertIs(capture2.ir._image_handle, self.capture.ir._image_handle)
# Check that modifying one also modifies the other.
self.capture.temperature = self.capture.temperature + 1
self.assertEqual(capture2.temperature, self.capture.temperature)
self.capture.color.white_balance = self.capture.color.white_balance + 1
self.assertEqual(capture2.color.white_balance, self.capture.color.white_balance)
self.capture.color.data[0, 0, 0] = self.capture.color.data[0, 0, 0] + 1
self.assertEqual(capture2.color.data[0, 0, 0], self.capture.color.data[0, 0, 0])
# Check that the copy of capture is still valid even if the original
# capture is deleted. This is because the original capture's reference
# count is increased when the copy is made.
del capture2
self.assertIsNotNone(self.capture)
self.assertIsNotNone(self.capture.color)
self.assertIsNotNone(self.capture.depth)
self.assertIsNotNone(self.capture.ir)
self.assertIsNotNone(self.capture.temperature)
def test_functional_fast_api_capture_deep_copy(self):
capture2 = copy.deepcopy(self.capture)
# Check that the copy of the capture is not the same as the original.
self.assertIsNotNone(capture2)
self.assertIsNot(capture2, self.capture)
# Check that the images are not the same as in the original.
self.check_copy(capture2.color, self.capture.color)
self.check_copy(capture2.depth, self.capture.depth)
self.check_copy(capture2.ir, self.capture.ir)
self.assertAlmostEqual(capture2.temperature, self.capture.temperature, 4)
# Check that the image handles are not the same.
self.assertIsNot(capture2.color._image_handle, self.capture.color._image_handle)
self.assertIsNot(capture2.depth._image_handle, self.capture.depth._image_handle)
self.assertIsNot(capture2.ir._image_handle, self.capture.ir._image_handle)
# Check that modifying one does not modify the other.
self.capture.temperature = self.capture.temperature + 1
self.assertNotAlmostEqual(capture2.temperature, self.capture.temperature, 4)
self.capture.color.white_balance = self.capture.color.white_balance + 1
self.assertNotAlmostEqual(capture2.color.white_balance, self.capture.color.white_balance, 4)
self.capture.color.data[0, 0, 0] = self.capture.color.data[0, 0, 0] + 1
self.assertNotEqual(capture2.color.data[0, 0, 0], self.capture.color.data[0, 0, 0])
# Check that the copy of capture is still valid even if the original
# capture is deleted. This is because the original capture's reference
# count is increased when the copy is made.
del capture2
self.assertIsNotNone(self.capture)
self.assertIsNotNone(self.capture.color)
self.assertIsNotNone(self.capture.depth)
self.assertIsNotNone(self.capture.ir)
self.assertIsNotNone(self.capture.temperature)
def test_functional_fast_api_get_color(self):
color = self.capture.color
self.assertIsNotNone(color)
self.assertEqual(color.width_pixels, 3840)
self.assertEqual(color.height_pixels, 2160)
self.assertEqual(color.image_format, k4a.EImageFormat.COLOR_BGRA32)
def test_functional_fast_api_get_depth(self):
depth = self.capture.depth
self.assertIsNotNone(depth)
self.assertEqual(depth.width_pixels, 1024)
self.assertEqual(depth.height_pixels, 1024)
self.assertEqual(depth.image_format, k4a.EImageFormat.DEPTH16)
def test_functional_fast_api_get_ir(self):
ir = self.capture.ir
self.assertIsNotNone(ir)
self.assertEqual(ir.width_pixels, 1024)
self.assertEqual(ir.height_pixels, 1024)
self.assertEqual(ir.image_format, k4a.EImageFormat.IR16)
def test_functional_fast_api_set_color(self):
color1 = self.capture.color
self.assertIsNotNone(color1)
self.assertEqual(color1.width_pixels, 3840)
self.assertEqual(color1.height_pixels, 2160)
self.assertEqual(color1.image_format, k4a.EImageFormat.COLOR_BGRA32)
color2 = copy.deepcopy(color1)
self.capture.color = color2
color3 = self.capture.color
self.assertIsNotNone(color3)
self.assertEqual(color3.width_pixels, 3840)
self.assertEqual(color3.height_pixels, 2160)
self.assertEqual(color3.image_format, k4a.EImageFormat.COLOR_BGRA32)
self.assertIsNot(color3, color1)
self.assertIsNot(color2, color1)
self.assertIs(color3, color2)
def test_functional_fast_api_set_depth(self):
depth1 = self.capture.depth
self.assertIsNotNone(depth1)
self.assertEqual(depth1.width_pixels, 1024)
self.assertEqual(depth1.height_pixels, 1024)
self.assertEqual(depth1.image_format, k4a.EImageFormat.DEPTH16)
depth2 = copy.deepcopy(depth1)
self.capture.depth = depth2
depth3 = self.capture.depth
self.assertIsNotNone(depth3)
self.assertEqual(depth3.width_pixels, 1024)
self.assertEqual(depth3.height_pixels, 1024)
self.assertEqual(depth3.image_format, k4a.EImageFormat.DEPTH16)
self.assertIsNot(depth3, depth1)
self.assertIsNot(depth2, depth1)
self.assertIs(depth3, depth2)
def test_functional_fast_api_set_ir(self):
ir1 = self.capture.ir
self.assertIsNotNone(ir1)
self.assertEqual(ir1.width_pixels, 1024)
self.assertEqual(ir1.height_pixels, 1024)
self.assertEqual(ir1.image_format, k4a.EImageFormat.IR16)
ir2 = copy.deepcopy(ir1)
self.capture.ir = ir2
ir3 = self.capture.ir
self.assertIsNotNone(ir3)
self.assertEqual(ir3.width_pixels, 1024)
self.assertEqual(ir3.height_pixels, 1024)
self.assertEqual(ir3.image_format, k4a.EImageFormat.IR16)
self.assertIsNot(ir3, ir1)
self.assertIsNot(ir2, ir1)
self.assertIs(ir3, ir2)
class Test_Functional_API_Image_AzureKinect(unittest.TestCase):
'''Test Image class for Azure Kinect device.
'''
@classmethod
def setUpClass(cls):
cls.device = k4a.Device.open()
assert(cls.device is not None)
cls.lock = test_config.glb_lock
@classmethod
def tearDownClass(cls):
# Stop the cameras and imus before closing device.
cls.device.stop_cameras()
cls.device.stop_imu()
cls.device.close()
del cls.device
def setUp(self):
status = self.device.start_cameras(
k4a.DEVICE_CONFIG_BGRA32_2160P_WFOV_UNBINNED_FPS15)
self.assertEqual(status, k4a.EStatus.SUCCEEDED)
self.capture = self.device.get_capture(-1)
self.assertIsNotNone(self.capture)
self.color = self.capture.color
self.depth = self.capture.depth
self.ir = self.capture.ir
def tearDown(self):
self.device.stop_cameras()
self.device.stop_imu()
del self.ir
del self.depth
del self.color
del self.capture
def check_copy(self, image1:k4a.Image, image2:k4a.Image):
# Check that the images are not the same instance.
self.assertIsNot(image1, image2)
# Check that the image contents are equal.
if (image1.data.ndim == 3):
self.assertEqual(image1.data[0, 0, 0], image2.data[0, 0, 0])
self.assertEqual(image1.data[100, 100, 1], image2.data[100, 100, 1])
self.assertEqual(image1.data[100, 50, 2], image2.data[100, 50, 2])
elif (image1.data.ndim == 2):
self.assertEqual(image1.data[0, 0], image2.data[0, 0])
self.assertEqual(image1.data[100, 100], image2.data[100, 100])
self.assertEqual(image1.data[100, 50], image2.data[100, 50])
self.assertEqual(image1.image_format, image2.image_format)
self.assertEqual(image1.size_bytes, image2.size_bytes)
self.assertEqual(image1.width_pixels, image2.width_pixels)
self.assertEqual(image1.height_pixels, image2.height_pixels)
self.assertEqual(image1.stride_bytes, image2.stride_bytes)
self.assertEqual(image1.device_timestamp_usec, image2.device_timestamp_usec)
self.assertEqual(image1.system_timestamp_nsec, image2.system_timestamp_nsec)
self.assertEqual(image1.exposure_usec, image2.exposure_usec)
self.assertEqual(image1.white_balance, image2.white_balance)
self.assertEqual(image1.iso_speed, image2.iso_speed)
def test_functional_fast_api_image_shallow_copy(self):
color2 = copy.copy(self.color)
depth2 = copy.copy(self.depth)
ir2 = copy.copy(self.ir)
# Check that the images are not the same as in the original.
self.check_copy(self.color, color2)
self.check_copy(self.depth, depth2)
self.check_copy(self.ir, ir2)
# Check that the image handles are the same.
self.assertIs(color2._image_handle, self.color._image_handle)
self.assertIs(depth2._image_handle, self.depth._image_handle)
self.assertIs(ir2._image_handle, self.ir._image_handle)
# Check that modifying one also modifies the other.
self.color.white_balance = self.color.white_balance + 1
self.assertEqual(color2.white_balance, self.color.white_balance)
self.color.data[0, 0, 0] = self.color.data[0, 0, 0] + 1
self.assertEqual(color2.data[0, 0, 0], self.color.data[0, 0, 0])
# Check that the copy of capture is still valid even if the original
# capture is deleted. This is because the original capture's reference
# count is increased when the copy is made.
del color2
del depth2
del ir2
self.assertIsNotNone(self.color)
self.assertIsNotNone(self.depth)
self.assertIsNotNone(self.ir)
def test_functional_fast_api_image_deep_copy(self):
color2 = copy.deepcopy(self.color)
depth2 = copy.deepcopy(self.depth)
ir2 = copy.deepcopy(self.ir)
# Check that the images are not the same as in the original.
self.check_copy(self.color, color2)
self.check_copy(self.depth, depth2)
self.check_copy(self.ir, ir2)
# Check that the image handles are not the same.
self.assertIsNot(color2._image_handle, self.color._image_handle)
self.assertIsNot(depth2._image_handle, self.depth._image_handle)
self.assertIsNot(ir2._image_handle, self.ir._image_handle)
# Check that modifying one does not modify the other.
self.color.white_balance = self.color.white_balance + 1
self.assertNotEqual(color2.white_balance, self.color.white_balance)
self.color.data[0, 0, 0] = self.color.data[0, 0, 0] + 1
self.assertNotEqual(color2.data[0, 0, 0], self.color.data[0, 0, 0])
# Check that the copy of capture is still valid even if the original
# capture is deleted. This is because the original capture's reference
# count is increased when the copy is made.
del color2
del depth2
del ir2
self.assertIsNotNone(self.color)
self.assertIsNotNone(self.depth)
self.assertIsNotNone(self.ir)
def test_functional_fast_api_get_data(self):
data = self.color.data
self.assertIsNotNone(data)
self.assertIsInstance(data, np.ndarray)
def test_functional_fast_api_get_image_format(self):
image_format = self.color.image_format
self.assertIsNotNone(image_format)
self.assertIsInstance(image_format, k4a.EImageFormat)
def test_functional_fast_api_get_size_bytes(self):
size_bytes = self.color.size_bytes
self.assertIsNotNone(size_bytes)
self.assertIsInstance(size_bytes, int)
self.assertNotEqual(size_bytes, 0)
def test_functional_fast_api_get_width_pixels(self):
width_pixels = self.color.width_pixels
self.assertIsNotNone(width_pixels)
self.assertIsInstance(width_pixels, int)
self.assertNotEqual(width_pixels, 0)
def test_functional_fast_api_get_height_pixels(self):
height_pixels = self.color.height_pixels
self.assertIsNotNone(height_pixels)
self.assertIsInstance(height_pixels, int)
self.assertNotEqual(height_pixels, 0)
def test_functional_fast_api_get_stride_bytes(self):
stride_bytes = self.color.stride_bytes
self.assertIsNotNone(stride_bytes)
self.assertIsInstance(stride_bytes, int)
self.assertNotEqual(stride_bytes, 0)
def test_functional_fast_api_get_device_timestamp_usec(self):
device_timestamp_usec = self.color.device_timestamp_usec
self.assertIsNotNone(device_timestamp_usec)
self.assertIsInstance(device_timestamp_usec, int)
def test_functional_fast_api_get_system_timestamp_nsec(self):
system_timestamp_nsec = self.color.system_timestamp_nsec
self.assertIsNotNone(system_timestamp_nsec)
self.assertIsInstance(system_timestamp_nsec, int)
def test_functional_fast_api_get_exposure_usec(self):
exposure_usec = self.color.exposure_usec
self.assertIsNotNone(exposure_usec)
self.assertIsInstance(exposure_usec, int)
def test_functional_fast_api_get_white_balance(self):
white_balance = self.color.white_balance
self.assertIsNotNone(white_balance)
self.assertIsInstance(white_balance, int)
def test_functional_fast_api_get_iso_speed(self):
iso_speed = self.color.iso_speed
self.assertIsNotNone(iso_speed)
self.assertIsInstance(iso_speed, int)
def test_functional_fast_api_set_device_timestamp_usec(self):
self.color.device_timestamp_usec = 10
device_timestamp_usec = self.color.device_timestamp_usec
self.assertIsNotNone(device_timestamp_usec)
self.assertIsInstance(device_timestamp_usec, int)
self.assertEqual(device_timestamp_usec, 10)
def test_functional_fast_api_set_system_timestamp_nsec(self):
self.color.system_timestamp_nsec = 10
system_timestamp_nsec = self.color.system_timestamp_nsec
self.assertIsNotNone(system_timestamp_nsec)
self.assertIsInstance(system_timestamp_nsec, int)
self.assertEqual(system_timestamp_nsec, 10)
def test_functional_fast_api_set_exposure_usec(self):
self.color.exposure_usec = 10
exposure_usec = self.color.exposure_usec
self.assertIsNotNone(exposure_usec)
self.assertIsInstance(exposure_usec, int)
self.assertEqual(exposure_usec, 10)
def test_functional_fast_api_set_white_balance(self):
self.color.white_balance = 1000
white_balance = self.color.white_balance
self.assertIsNotNone(white_balance)
self.assertIsInstance(white_balance, int)
self.assertEqual(white_balance, 1000)
def test_functional_fast_api_set_iso_speed(self):
self.color.iso_speed = 100
iso_speed = self.color.iso_speed
self.assertIsNotNone(iso_speed)
self.assertIsInstance(iso_speed, int)
self.assertEqual(iso_speed, 100)
class Test_Functional_API_Calibration_AzureKinect(unittest.TestCase):
'''Test Calibration class for Azure Kinect device.
'''
@classmethod
def setUpClass(cls):
cls.device = k4a.Device.open()
assert(cls.device is not None)
cls.lock = test_config.glb_lock
@classmethod
def tearDownClass(cls):
# Stop the cameras and imus before closing device.
cls.device.stop_cameras()
cls.device.stop_imu()
cls.device.close()
del cls.device
def test_functional_fast_api_get_calibration_from_device(self):
depth_modes = [
k4a.EDepthMode.NFOV_2X2BINNED,
k4a.EDepthMode.NFOV_UNBINNED,
k4a.EDepthMode.WFOV_2X2BINNED,
k4a.EDepthMode.WFOV_UNBINNED,
k4a.EDepthMode.PASSIVE_IR,
]
color_resolutions = [
k4a.EColorResolution.RES_3072P,
k4a.EColorResolution.RES_2160P,
k4a.EColorResolution.RES_1536P,
k4a.EColorResolution.RES_1440P,
k4a.EColorResolution.RES_1080P,
k4a.EColorResolution.RES_720P,
]
for depth_mode in depth_modes:
for color_resolution in color_resolutions:
with self.subTest(
depth_mode = depth_mode,
color_resolution = color_resolution):
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
self.assertIsNotNone(calibration)
self.assertIsInstance(calibration, k4a.Calibration)
def test_functional_fast_api_get_calibration_from_raw(self):
raw_calibration = self.device.get_raw_calibration()
self.assertIsNotNone(raw_calibration)
depth_modes = [
k4a.EDepthMode.NFOV_2X2BINNED,
k4a.EDepthMode.NFOV_UNBINNED,
k4a.EDepthMode.WFOV_2X2BINNED,
k4a.EDepthMode.WFOV_UNBINNED,
k4a.EDepthMode.PASSIVE_IR,
]
color_resolutions = [
k4a.EColorResolution.RES_3072P,
k4a.EColorResolution.RES_2160P,
k4a.EColorResolution.RES_1536P,
k4a.EColorResolution.RES_1440P,
k4a.EColorResolution.RES_1080P,
k4a.EColorResolution.RES_720P,
]
for depth_mode in depth_modes:
for color_resolution in color_resolutions:
with self.subTest(
depth_mode = depth_mode,
color_resolution = color_resolution):
calibration = k4a.Calibration.create_from_raw(
raw_calibration,
depth_mode,
color_resolution)
self.assertIsNotNone(calibration)
self.assertIsInstance(calibration, k4a.Calibration)
class Test_Functional_API_Transformation_AzureKinect(unittest.TestCase):
'''Test Transformation class for Azure Kinect device.
'''
depth_modes = [
k4a.EDepthMode.NFOV_2X2BINNED,
k4a.EDepthMode.NFOV_UNBINNED,
k4a.EDepthMode.WFOV_2X2BINNED,
k4a.EDepthMode.WFOV_UNBINNED,
k4a.EDepthMode.PASSIVE_IR,
]
color_resolutions = [
k4a.EColorResolution.RES_3072P,
k4a.EColorResolution.RES_2160P,
k4a.EColorResolution.RES_1536P,
k4a.EColorResolution.RES_1440P,
k4a.EColorResolution.RES_1080P,
k4a.EColorResolution.RES_720P,
]
calibration_types = [
k4a.ECalibrationType.COLOR,
k4a.ECalibrationType.DEPTH
]
@classmethod
def setUpClass(cls):
cls.device = k4a.Device.open()
assert(cls.device is not None)
cls.lock = test_config.glb_lock
cls.calibration = cls.device.get_calibration(
k4a.EDepthMode.WFOV_UNBINNED,
k4a.EColorResolution.RES_2160P)
@classmethod
def tearDownClass(cls):
# Stop the cameras and imus before closing device.
cls.device.stop_cameras()
cls.device.stop_imu()
cls.device.close()
del cls.device
del cls.calibration
def test_functional_fast_api_point_3d_to_point_3d(self):
depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
color_resolution = k4a.EColorResolution.RES_720P
source_camera = k4a.ECalibrationType.COLOR
target_camera = k4a.ECalibrationType.DEPTH
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(x, y, z) = transformation.point_3d_to_point_3d(
(300.0, 300.0, 500.0),
source_camera,
target_camera)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
self.assertIsNotNone(z)
def test_functional_fast_api_pixel_2d_to_point_3d(self):
depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
color_resolution = k4a.EColorResolution.RES_720P
source_camera = k4a.ECalibrationType.COLOR
target_camera = k4a.ECalibrationType.DEPTH
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(x, y, z) = transformation.pixel_2d_to_point_3d(
(300.0, 300.0),
500.0,
source_camera,
target_camera)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
self.assertIsNotNone(z)
def test_functional_fast_api_point_3d_to_pixel_2d(self):
depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
color_resolution = k4a.EColorResolution.RES_720P
source_camera = k4a.ECalibrationType.COLOR
target_camera = k4a.ECalibrationType.DEPTH
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(x, y) = transformation.point_3d_to_pixel_2d(
(300.0, 300.0, 500.0),
source_camera,
target_camera)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
def test_functional_fast_api_pixel_2d_to_pixel_2d(self):
depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
color_resolution = k4a.EColorResolution.RES_720P
source_camera = k4a.ECalibrationType.COLOR
target_camera = k4a.ECalibrationType.DEPTH
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(x, y) = transformation.pixel_2d_to_pixel_2d(
(300.0, 300.0),
500.0,
source_camera,
target_camera)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
def test_functional_fast_api_color_2d_to_depth_2d(self):
depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
color_resolution = k4a.EColorResolution.RES_720P
source_camera = k4a.ECalibrationType.COLOR
target_camera = k4a.ECalibrationType.DEPTH
# Get a depth image.
device_config = k4a.DeviceConfiguration(
color_format = k4a.EImageFormat.COLOR_BGRA32,
color_resolution = color_resolution,
depth_mode = depth_mode,
camera_fps = k4a.EFramesPerSecond.FPS_15,
synchronized_images_only = True,
depth_delay_off_color_usec = 0,
wired_sync_mode = k4a.EWiredSyncMode.STANDALONE,
subordinate_delay_off_master_usec = 0,
disable_streaming_indicator = False
)
self.device.start_cameras(device_config)
capture = self.device.get_capture(-1)
self.device.stop_cameras()
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(x, y) = transformation.color_2d_to_depth_2d(
(capture.color.height_pixels/4, capture.color.width_pixels/4),
capture.depth)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
def test_functional_fast_api_depth_image_to_color_camera(self):
depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
color_resolution = k4a.EColorResolution.RES_720P
# Get a depth image.
device_config = k4a.DeviceConfiguration(
color_format = k4a.EImageFormat.COLOR_BGRA32,
color_resolution = color_resolution,
depth_mode = depth_mode,
camera_fps = k4a.EFramesPerSecond.FPS_15,
synchronized_images_only = True,
depth_delay_off_color_usec = 0,
wired_sync_mode = k4a.EWiredSyncMode.STANDALONE,
subordinate_delay_off_master_usec = 0,
disable_streaming_indicator = False
)
self.device.start_cameras(device_config)
capture = self.device.get_capture(-1)
depth = capture.depth
self.device.stop_cameras()
del capture
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
transformed_depth = transformation.depth_image_to_color_camera(depth)
self.assertIsNotNone(transformed_depth)
def test_functional_fast_api_depth_image_to_color_camera_custom(self):
depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
color_resolution = k4a.EColorResolution.RES_720P
# Get a depth image.
device_config = k4a.DeviceConfiguration(
color_format = k4a.EImageFormat.COLOR_BGRA32,
color_resolution = color_resolution,
depth_mode = depth_mode,
camera_fps = k4a.EFramesPerSecond.FPS_15,
synchronized_images_only = True,
depth_delay_off_color_usec = 0,
wired_sync_mode = k4a.EWiredSyncMode.STANDALONE,
subordinate_delay_off_master_usec = 0,
disable_streaming_indicator = False
)
self.device.start_cameras(device_config)
capture = self.device.get_capture(-1)
depth = capture.depth
self.device.stop_cameras()
del capture
# Create a custom image.
custom = k4a.Image.create(
k4a.EImageFormat.CUSTOM16,
depth.width_pixels,
depth.height_pixels,
depth.width_pixels * 2)
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(transformed_depth, transformed_custom) = \
transformation.depth_image_to_color_camera_custom(
depth,
custom,
k4a.ETransformInterpolationType.LINEAR,
0)
self.assertIsNotNone(transformed_depth)
self.assertIsNotNone(transformed_custom)
def test_functional_fast_api_color_image_to_depth_camera(self):
depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
color_resolution = k4a.EColorResolution.RES_720P
# Get a depth and color image.
device_config = k4a.DeviceConfiguration(
color_format = k4a.EImageFormat.COLOR_BGRA32,
color_resolution = color_resolution,
depth_mode = depth_mode,
camera_fps = k4a.EFramesPerSecond.FPS_15,
synchronized_images_only = True,
depth_delay_off_color_usec = 0,
wired_sync_mode = k4a.EWiredSyncMode.STANDALONE,
subordinate_delay_off_master_usec = 0,
disable_streaming_indicator = False
)
self.device.start_cameras(device_config)
capture = self.device.get_capture(-1)
depth = capture.depth
color = capture.color
self.device.stop_cameras()
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
transformed_color = transformation.color_image_to_depth_camera(
depth,
color)
self.assertIsNotNone(transformed_color)
def test_functional_fast_api_depth_image_to_point_cloud(self):
depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
color_resolution = k4a.EColorResolution.RES_720P
# Get a depth image.
device_config = k4a.DeviceConfiguration(
color_format = k4a.EImageFormat.COLOR_BGRA32,
color_resolution = color_resolution,
depth_mode = depth_mode,
camera_fps = k4a.EFramesPerSecond.FPS_15,
synchronized_images_only = True,
depth_delay_off_color_usec = 0,
wired_sync_mode = k4a.EWiredSyncMode.STANDALONE,
subordinate_delay_off_master_usec = 0,
disable_streaming_indicator = False
)
self.device.start_cameras(device_config)
capture = self.device.get_capture(-1)
depth = capture.depth
self.device.stop_cameras()
del capture
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
point_cloud = transformation.depth_image_to_point_cloud(
depth,
k4a.ECalibrationType.DEPTH)
self.assertIsNotNone(point_cloud)
#
# The following tests may take a long time.
# It is not recommended to run them frequently.
#
def test_functional_api_point_3d_to_point_3d(self):
for depth_mode in self.depth_modes:
for color_resolution in self.color_resolutions:
for source_camera in self.calibration_types:
for target_camera in self.calibration_types:
with self.subTest(depth_mode = depth_mode,
color_resolution = color_resolution,
source_camera = source_camera,
target_camera = target_camera):
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(x, y, z) = transformation.point_3d_to_point_3d(
(300.0, 300.0, 500.0),
source_camera,
target_camera)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
self.assertIsNotNone(z)
def test_functional_api_pixel_2d_to_point_3d(self):
for depth_mode in self.depth_modes:
for color_resolution in self.color_resolutions:
for source_camera in self.calibration_types:
for target_camera in self.calibration_types:
with self.subTest(depth_mode = depth_mode,
color_resolution = color_resolution,
source_camera = source_camera,
target_camera = target_camera):
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(x, y, z) = transformation.pixel_2d_to_point_3d(
(300.0, 300.0),
500.0,
source_camera,
target_camera)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
self.assertIsNotNone(z)
def test_functional_api_point_3d_to_pixel_2d(self):
for depth_mode in self.depth_modes:
for color_resolution in self.color_resolutions:
for source_camera in self.calibration_types:
for target_camera in self.calibration_types:
with self.subTest(depth_mode = depth_mode,
color_resolution = color_resolution,
source_camera = source_camera,
target_camera = target_camera):
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(x, y) = transformation.point_3d_to_pixel_2d(
(300.0, 300.0, 500.0),
source_camera,
target_camera)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
def test_functional_api_pixel_2d_to_pixel_2d(self):
for depth_mode in self.depth_modes:
for color_resolution in self.color_resolutions:
for source_camera in self.calibration_types:
for target_camera in self.calibration_types:
with self.subTest(depth_mode = depth_mode,
color_resolution = color_resolution,
source_camera = source_camera,
target_camera = target_camera):
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(x, y) = transformation.pixel_2d_to_pixel_2d(
(300.0, 300.0),
500.0,
source_camera,
target_camera)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
def test_functional_api_color_2d_to_depth_2d(self):
for depth_mode in self.depth_modes[:4]:
for color_resolution in self.color_resolutions:
for source_camera in self.calibration_types:
for target_camera in self.calibration_types:
with self.subTest(depth_mode = depth_mode,
color_resolution = color_resolution,
source_camera = source_camera,
target_camera = target_camera):
# Get a depth image.
device_config = k4a.DeviceConfiguration(
color_format = k4a.EImageFormat.COLOR_BGRA32,
color_resolution = color_resolution,
depth_mode = depth_mode,
camera_fps = k4a.EFramesPerSecond.FPS_15,
synchronized_images_only = True,
depth_delay_off_color_usec = 0,
wired_sync_mode = k4a.EWiredSyncMode.STANDALONE,
subordinate_delay_off_master_usec = 0,
disable_streaming_indicator = False
)
self.device.start_cameras(device_config)
capture = self.device.get_capture(-1)
self.device.stop_cameras()
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(x, y) = transformation.color_2d_to_depth_2d(
(capture.color.height_pixels/4, capture.color.width_pixels/4),
capture.depth)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
def test_functional_api_depth_image_to_color_camera(self):
for depth_mode in self.depth_modes[:4]:
for color_resolution in self.color_resolutions:
with self.subTest(depth_mode = depth_mode,
color_resolution = color_resolution):
# Get a depth image.
device_config = k4a.DeviceConfiguration(
color_format = k4a.EImageFormat.COLOR_BGRA32,
color_resolution = color_resolution,
depth_mode = depth_mode,
camera_fps = k4a.EFramesPerSecond.FPS_15,
synchronized_images_only = True,
depth_delay_off_color_usec = 0,
wired_sync_mode = k4a.EWiredSyncMode.STANDALONE,
subordinate_delay_off_master_usec = 0,
disable_streaming_indicator = False
)
self.device.start_cameras(device_config)
capture = self.device.get_capture(-1)
depth = capture.depth
self.device.stop_cameras()
del capture
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
transformed_depth = transformation.depth_image_to_color_camera(depth)
self.assertIsNotNone(transformed_depth)
def test_functional_api_depth_image_to_color_camera_custom(self):
for depth_mode in self.depth_modes[:4]:
for color_resolution in self.color_resolutions:
with self.subTest(depth_mode = depth_mode,
color_resolution = color_resolution):
# Get a depth image.
device_config = k4a.DeviceConfiguration(
color_format = k4a.EImageFormat.COLOR_BGRA32,
color_resolution = color_resolution,
depth_mode = depth_mode,
camera_fps = k4a.EFramesPerSecond.FPS_15,
synchronized_images_only = True,
depth_delay_off_color_usec = 0,
wired_sync_mode = k4a.EWiredSyncMode.STANDALONE,
subordinate_delay_off_master_usec = 0,
disable_streaming_indicator = False
)
self.device.start_cameras(device_config)
capture = self.device.get_capture(-1)
depth = capture.depth
self.device.stop_cameras()
del capture
# Create a custom image.
custom = k4a.Image.create(
k4a.EImageFormat.CUSTOM16,
depth.width_pixels,
depth.height_pixels,
depth.width_pixels * 2)
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
(transformed_depth, transformed_custom) = \
transformation.depth_image_to_color_camera_custom(
depth,
custom,
k4a.ETransformInterpolationType.LINEAR,
0)
self.assertIsNotNone(transformed_depth)
self.assertIsNotNone(transformed_custom)
def test_functional_api_color_image_to_depth_camera(self):
for depth_mode in self.depth_modes[:4]:
for color_resolution in self.color_resolutions:
with self.subTest(depth_mode = depth_mode,
color_resolution = color_resolution):
# Get a depth and color image.
device_config = k4a.DeviceConfiguration(
color_format = k4a.EImageFormat.COLOR_BGRA32,
color_resolution = color_resolution,
depth_mode = depth_mode,
camera_fps = k4a.EFramesPerSecond.FPS_15,
synchronized_images_only = True,
depth_delay_off_color_usec = 0,
wired_sync_mode = k4a.EWiredSyncMode.STANDALONE,
subordinate_delay_off_master_usec = 0,
disable_streaming_indicator = False
)
self.device.start_cameras(device_config)
capture = self.device.get_capture(-1)
depth = capture.depth
color = capture.color
self.device.stop_cameras()
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
transformed_color = transformation.color_image_to_depth_camera(
depth,
color)
self.assertIsNotNone(transformed_color)
def test_functional_api_depth_image_to_point_cloud(self):
for depth_mode in self.depth_modes[:4]:
for color_resolution in self.color_resolutions:
with self.subTest(depth_mode = depth_mode,
color_resolution = color_resolution):
# Get a depth image.
device_config = k4a.DeviceConfiguration(
color_format = k4a.EImageFormat.COLOR_BGRA32,
color_resolution = color_resolution,
depth_mode = depth_mode,
camera_fps = k4a.EFramesPerSecond.FPS_15,
synchronized_images_only = True,
depth_delay_off_color_usec = 0,
wired_sync_mode = k4a.EWiredSyncMode.STANDALONE,
subordinate_delay_off_master_usec = 0,
disable_streaming_indicator = False
)
self.device.start_cameras(device_config)
capture = self.device.get_capture(-1)
depth = capture.depth
self.device.stop_cameras()
del capture
# Get calibration.
calibration = self.device.get_calibration(
depth_mode,
color_resolution)
# Create transformation.
transformation = k4a.Transformation(calibration)
# Apply transformation.
point_cloud = transformation.depth_image_to_point_cloud(
depth,
k4a.ECalibrationType.DEPTH)
self.assertIsNotNone(point_cloud)
if __name__ == '__main__':
unittest.main() |
parse_text_http_headers.py | DazEB2/SimplePyScripts | 117 | 11115110 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import re
from typing import Dict
HTTP_HEADER_PATTERN = re.compile(r'([\w-]+): (.*)', flags=re.IGNORECASE)
def parse(text: str) -> Dict[str, str]:
return dict(HTTP_HEADER_PATTERN.findall(text))
if __name__ == '__main__':
text_http_headers = """
POST /index.php?do=search HTTP/1.1
Host: online.anidub.com
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
Accept-Language: ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3
Accept-Encoding: gzip, deflate, br
Content-Type: application/x-www-form-urlencoded
Content-Length: 112
Origin: https://online.anidub.com
Connection: keep-alive
Referer: https://online.anidub.com/index.php?do=search
Upgrade-Insecure-Requests: 1
TE: Trailers
Pragma: no-cache
Cache-Control: no-cache
"""
headers = parse(text_http_headers)
print(headers)
assert headers == {
'Host': 'online.anidub.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': '112',
'Origin': 'https://online.anidub.com',
'Connection': 'keep-alive',
'Referer': 'https://online.anidub.com/index.php?do=search',
'Upgrade-Insecure-Requests': '1',
'TE': 'Trailers',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache'
}
|
tests/cpydiff/modules_array_subscrstep.py | sebastien-riou/micropython | 13,648 | 11115164 | <reponame>sebastien-riou/micropython<gh_stars>1000+
"""
categories: Modules,array
description: Subscript with step != 1 is not yet implemented
cause: Unknown
workaround: Unknown
"""
import array
a = array.array("b", (1, 2, 3))
print(a[3:2:2])
|
tests/algorithms/test_rvea.py | jarreguit/pymoo | 762 | 11115172 | <filename>tests/algorithms/test_rvea.py<gh_stars>100-1000
import numpy as np
from pymoo.algorithms.moo.rvea import APDSurvival, RVEA
from pymoo.factory import DTLZ2
from pymoo.core.population import Population
from tests.util import path_to_test_resource
def test_survival():
problem = DTLZ2(n_obj=3)
for k in range(1, 11):
print("TEST RVEA GEN", k)
ref_dirs = np.loadtxt(path_to_test_resource('rvea', f"ref_dirs_{k}.txt"))
F = np.loadtxt(path_to_test_resource('rvea', f"F_{k}.txt"))
pop = Population.new(F=F)
algorithm = RVEA(ref_dirs)
algorithm.setup(problem, termination=('n_gen', 500))
algorithm.n_gen = k
algorithm.pop = pop
survival = APDSurvival(ref_dirs)
survivors = survival.do(problem, algorithm.pop, n_survive=len(pop), algorithm=algorithm, return_indices=True)
apd = pop[survivors].get("apd")
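        # regression check: the computed APD values must match the precomputed reference data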
correct_apd = np.loadtxt(path_to_test_resource('rvea', f"apd_{k}.txt"))
np.testing.assert_allclose(apd, correct_apd)
|
adaptdl/adaptdl/conftest.py | jessezbj/adaptdl | 294 | 11115228 | # Copyright 2020 Petuum, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import multiprocessing as mp
import os
import signal
import tempfile
import portpicker
def elastic_multiprocessing(func):
"""
Decorator which runs a function inside a temporary local environment
which mimics a real AdaptDLJob. Runs replicas of the decorated function
in their own processes, and sets up the shared environment, including
environment variables and shared directories. The decorated function is
always started with a single replica, but can optionally return an integer
number of replicas to trigger a restart using that many replicas.
```python
@elastic_multiprocessing
def test_my_stuff():
from adaptdl.env import num_replicas, num_restarts
if num_restarts() == 0:
            print(num_replicas()) # Outputs '1'.
return 5 # Restart using 5 replicas.
if num_restarts() == 1:
            print(num_replicas()) # Outputs '5'.
return 0 # No more restarts, this line can be omitted.
```
.. warning::
The replica processes are forked from the current main process. This
means that mutations to global variables in the main process prior to
calling the decorated function may be observed by the child processes!
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
num_restarts = 0
num_replicas = 1
with tempfile.TemporaryDirectory() as tmpdir:
while num_replicas:
assert isinstance(num_replicas, int)
master_port = portpicker.pick_unused_port()
queue = mp.Queue() # For passing return values back.
def run(rank): # Invoked in each child process.
os.environ["ADAPTDL_CHECKPOINT_PATH"] = str(tmpdir)
os.environ["ADAPTDL_JOB_ID"] = "tmpjob"
os.environ["ADAPTDL_MASTER_PORT"] = str(master_port)
os.environ["ADAPTDL_REPLICA_RANK"] = str(rank)
os.environ["ADAPTDL_NUM_REPLICAS"] = str(num_replicas)
os.environ["ADAPTDL_NUM_NODES"] = "1"
os.environ["ADAPTDL_NUM_RESTARTS"] = str(num_restarts)
ret = None
try:
ret = func(*args, **kwargs)
finally:
queue.put((rank, ret))
# Start each replica in a separate child process.
procs = [mp.Process(target=run, args=(rank,))
for rank in range(num_replicas)]
for proc in procs:
proc.start()
try: # Wait for results from child processes.
for i in range(num_replicas):
rank, ret = queue.get()
procs[rank].join()
assert procs[rank].exitcode == 0
if i == 0: # All return values should be the same.
num_replicas = ret
assert num_replicas == ret
finally:
# Clean up any remaining child processes.
for proc in procs:
if proc.is_alive():
os.kill(proc.pid, signal.SIGKILL)
proc.join()
# Clean up the queue.
queue.close()
num_restarts += 1
return wrapper
|
esphome/components/am43/cover/__init__.py | OttoWinter/esphomeyaml | 249 | 11115229 | <reponame>OttoWinter/esphomeyaml<gh_stars>100-1000
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import cover, ble_client
from esphome.const import CONF_ID, CONF_PIN
CODEOWNERS = ["@buxtronix"]
DEPENDENCIES = ["ble_client"]
AUTO_LOAD = ["am43", "sensor"]
CONF_INVERT_POSITION = "invert_position"
am43_ns = cg.esphome_ns.namespace("am43")
Am43Component = am43_ns.class_(
"Am43Component", cover.Cover, ble_client.BLEClientNode, cg.Component
)
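# CONF_PIN defaults to 8888 below, which is the commonly documented AM43 factory PIN.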
CONFIG_SCHEMA = (
cover.COVER_SCHEMA.extend(
{
cv.GenerateID(): cv.declare_id(Am43Component),
cv.Optional(CONF_PIN, default=8888): cv.int_range(min=0, max=0xFFFF),
cv.Optional(CONF_INVERT_POSITION, default=False): cv.boolean,
}
)
.extend(ble_client.BLE_CLIENT_SCHEMA)
.extend(cv.COMPONENT_SCHEMA)
)
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
cg.add(var.set_pin(config[CONF_PIN]))
cg.add(var.set_invert_position(config[CONF_INVERT_POSITION]))
yield cg.register_component(var, config)
yield cover.register_cover(var, config)
yield ble_client.register_ble_node(var, config)
|
allennlp/modules/transformer/layer_norm.py | MSLars/allennlp | 11,433 | 11115245 | import torch
from allennlp.modules.transformer.transformer_module import TransformerModule
class LayerNorm(torch.nn.LayerNorm, TransformerModule):
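    # Older TF-style checkpoints (e.g. the original BERT release) name LayerNorm's
    # affine parameters "gamma"/"beta"; torch.nn.LayerNorm calls them "weight"/"bias".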
_pretrained_mapping = {"gamma": "weight", "beta": "bias"}
|
libcity/data/dataset/eta_encoder/abstract_eta_encoder.py | moghadas76/test_bigcity | 221 | 11115258 | from logging import getLogger
class AbstractETAEncoder(object):
"""ETA Encoder
    ETA Encoder is used to encode the spatiotemporal information in a trajectory.
    We abstract the encoding operation from the Dataset Module so that developers
    can achieve more flexible and diverse trajectory representation extraction. Note
    that the representation extraction involved here is fixed and not learnable.
    Any learnable representation extraction, e.g. embedding, should be implemented in
    the Model Module.
Attributes:
config (libcity.ConfigParser): The configuration of the encoder.
        pad_item (dict): The key is a feature's name and the value should be the corresponding
            padding value. If a feature does not need to be padded, don't insert it into
            this dict. In other words, libcity.dataset.Batch will pad all features in pad_item.keys().
        feature_dict (dict): The key is a feature's name and the value should be the data type of
            the corresponding feature. When libcity.dataset.Batch converts the encoded trajectory tuple
            to a tensor, it will refer to this attribute to know the feature name and data type corresponding
            to each element in the tuple.
        data_feature (dict): The data_feature contains the statistics of the encoded dataset, which are
            used to init the model. For example, if the model uses torch.nn.Embedding to embed location id and time id,
            the data_feature should contain loc_size and time_size to tell the model how to init the embedding layer.
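    Example:
        A concrete encoder (hypothetical feature names and values) might set::
            self.feature_dict = {'current_tim': 'array of int', 'uid': 'int'}
            self.pad_item = {'current_tim': 0}
            self.data_feature = {'time_size': 86400}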
"""
def __init__(self, config):
"""Init Encoder with its config
Args:
config (libcity.ConfigParser): Dict-like Object. Can access any config by config[key].
"""
self.config = config
self._logger = getLogger()
self.pad_item = {}
self.feature_dict = {}
self.data_feature = {}
self.cache_file_name = ''
def encode(self, uid, trajectories, dyna_feature_column):
"""Encode trajectories of user uid.
Args:
uid (int): The uid of user. If there is no need to encode uid, just keep it.
            trajectories (list of trajectory): The trajectories of the user. Each trajectory is
                a sequence of spatiotemporal points. Each spatiotemporal point is represented by
                a list. Thus, a trajectory is represented by a list of lists. For example:
trajectory1 = [
[dyna_id, type, time, entity_id, traj_id, coordinates/location, properties],
[dyna_id, type, time, entity_id, traj_id, coordinates/location, properties],
.....
]
Every spatiotemporal tuple contains all useful information in a record of the Raw
Data (refer to corresponding .dyna file for details). In addition, the trajectories
are represented as:
[
[ # trajectory1
[dyna_id, type, time, entity_id, traj_id, coordinates/location, properties],
[dyna_id, type, time, entity_id, traj_id, coordinates/location, properties],
...
],
trajectory2,
...
]
dyna_feature_column (dict): The key is a feature's name and the value should be corresponding
column id in .dyna file.
Returns:
list: The return value of this function is the list of encoded trajectories.
Same as the input format, each encoded trajectory should be a tuple, which contains
all features extracted from the input trajectory. The encoded trajectory will
subsequently be converted to a torch.tensor and then directly input to the model.
(see more in libcity.Batch)
Take the DeeptteEncoder as an example.
encoded_trajectory = [current_longi, current_lati, current_tim, current_dis, current_state,
uid, weekid, timeid, dist, time]
Please make sure the order of the features in the list is consistent with the order
of the features in self.feature_dict.
"""
def gen_data_feature(self):
"""After encode all trajectories, this method will be called to tell encoder that you can generate the
data_feature and pad_item
"""
|
Chapter04/DeepLearningDemo.py | Tanishadel/Mastering-Machine-Learning-for-Penetration-Testing | 241 | 11115327 | from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.utils import np_utils
from keras.datasets import mnist
import numpy
seed = 7
numpy.random.seed(seed)
(X_train, y_train), (X_test, y_test) = mnist.load_data()
num_pixels = X_train.shape[1] * X_train.shape[2]
# flatten the 28x28 images into 784-length vectors for the Dense input layer
X_train = X_train.reshape(X_train.shape[0], num_pixels).astype('float32')
X_test = X_test.reshape(X_test.shape[0], num_pixels).astype('float32')
X_train = X_train / 255
X_test = X_test / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
model = Sequential()
model.add(Dense(num_pixels, input_dim=num_pixels,
activation='relu'))
model.add(Dense(num_classes,activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
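# A minimal train-and-evaluate sketch; the epochs/batch_size values below are
# assumed for illustration rather than taken from the excerpt above.
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          epochs=10, batch_size=200, verbose=2)
scores = model.evaluate(X_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" % (100 - scores[1] * 100))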
|
pysb/examples/tutorial_c.py | FFroehlich/pysb | 105 | 11115331 | from pysb import *
Model()
Monomer('Raf', ['s', 'k'], {'s': ['u', 'p']})
Monomer('MEK', ['s218', 's222', 'k'], {'s218': ['u', 'p'], 's222': ['u', 'p']})
Parameter('kf', 1e-5)
Parameter('Raf_0', 7e4)
Parameter('MEK_0', 3e6)
|
cycles/utils/setMat_crackedCeramic.py | HTDerekLiu/BlenderToolbox | 208 | 11115335 | # Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from include import *
def setMat_crackedCeramic(mesh, meshColor, crackScale, crackDisp):
mat = bpy.data.materials.new('MeshMaterial')
mesh.data.materials.append(mat)
mesh.active_material = mat
mat.use_nodes = True
tree = mat.node_tree
# init color node
C = initColorNode(tree, meshColor)
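    # mix a diffuse and a glossy BSDF to approximate a ceramic surface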
GLO = tree.nodes.new('ShaderNodeBsdfGlossy')
GLO.inputs['Roughness'].default_value = 0.316
DIF = tree.nodes.new('ShaderNodeBsdfDiffuse')
tree.links.new(C.outputs['Color'], DIF.inputs['Color'])
MIXS = tree.nodes.new('ShaderNodeMixShader')
MIXS.inputs['Fac'].default_value = 0.327
tree.links.new(DIF.outputs['BSDF'], MIXS.inputs[1])
tree.links.new(GLO.outputs['BSDF'], MIXS.inputs[2])
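    # a Voronoi texture drives displacement to carve crack-like cell boundaries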
VOR = tree.nodes.new('ShaderNodeTexVoronoi')
VOR.inputs['Scale'].default_value = crackScale
VOR.location.x -= 200
VOR.location.y -= 200
DISP = tree.nodes.new('ShaderNodeDisplacement')
DISP.inputs[1].default_value = 0.0
DISP.inputs[2].default_value = crackDisp
DISP.location.x -= 400
DISP.location.y -= 200
tree.links.new(VOR.outputs['Fac'], DISP.inputs['Height'])
tree.links.new(MIXS.outputs[0], tree.nodes['Material Output'].inputs['Surface'])
tree.links.new(DISP.outputs[0], tree.nodes['Material Output'].inputs['Displacement'])
|
deepFEPE/models/DeepFNetSampleLoss.py | KuangHaofei/pytorch-deepFEPE | 112 | 11115364 | """ DeepF for sample loss
Kept but not tested (you-yi on 07/13/2020)
Authors: <NAME>, <NAME>
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
from torch.nn.functional import grid_sample
import numpy as np
import cv2
import dsac_tools.utils_F as utils_F # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
# import utils_F.compute_epi_residual as compute_epi_residual
# import utils_F.compute_epi_residual_non_rob as compute_epi_residual_non_rob
from models.GoodCorresNet import GoodCorresNet
# from models.ConfNet import VggDepthEstimator
# from models.ConfNet import VggDepthEstimatorOLD as VggDepthEstimator
# from models.ImageFeatNet import Conv
# from models.ImageFeatNet import VggDepthEstimatorSeperate as VggDepthEstimator
from models.ErrorEstimators import *
from batch_svd import batch_svd # https://github.com/KinglittleQ/torch-batch-svd.git
class NormalizeAndExpand(nn.Module):
def __init__(self, is_cuda=True, is_test=False):
super(NormalizeAndExpand, self).__init__()
self.ones_b = Variable(torch.ones((1, 1, 1)), volatile=is_test)
self.T_b = Variable(torch.zeros(1, 3, 3), volatile=is_test)
if is_cuda:
self.ones_b = self.ones_b.cuda()
self.T_b = self.T_b.cuda()
def normalize(self, pts):
T = self.T_b.expand(pts.size(0), 3, 3).clone()
ones = self.ones_b.expand(pts.size(0), pts.size(1), 1)
pts = torch.cat((pts, ones), 2)
c = torch.mean(pts,1)
newpts_ = (pts - c.unsqueeze(1)) # First center to zero mean
meandist = newpts_[:,:,:2].pow(2).sum(2).sqrt().mean(1)
scale = 1.0/meandist
T[:,0,0] = scale
T[:,1,1] = scale
T[:,2,2] = 1
T[:,0,2] = -c[:,0]*scale
T[:,1,2] = -c[:,1]*scale
pts_out = torch.bmm(T, pts.permute(0,2,1))
return pts_out, T
def forward(self, pts):
pts1, T1 = self.normalize(pts[:,:,:2])
pts2, T2 = self.normalize(pts[:,:,2:])
return pts1, pts2, T1, T2
class NormalizeAndExpand_K(nn.Module):
def __init__(self, is_cuda=True, is_test=False):
super(NormalizeAndExpand_K, self).__init__()
self.ones_b = Variable(torch.ones((1, 1, 1)), volatile=is_test)
self.T_b = Variable(torch.zeros(1, 3, 3), volatile=is_test)
if is_cuda:
self.ones_b = self.ones_b.cuda()
self.T_b = self.T_b.cuda()
def normalize(self, pts, K_invs):
T = K_invs
ones = self.ones_b.expand(pts.size(0), pts.size(1), 1)
pts = torch.cat((pts, ones), 2)
pts_out = torch.bmm(T, pts.permute(0,2,1))
return pts_out, T
def forward(self, pts, K_invs):
pts1, T1 = self.normalize(pts[:,:,:2], K_invs)
pts2, T2 = self.normalize(pts[:,:,2:], K_invs)
return pts1, pts2, T1, T2
class NormalizeAndExpand_HW(nn.Module):
# so that the coordintes are normalized to [-1, 1] for both H and W
def __init__(self, image_size, is_cuda=True, is_test=False):
super(NormalizeAndExpand_HW, self).__init__()
self.ones_b = Variable(torch.ones((1, 1, 1)), volatile=is_test)
# self.T_b = Variable(torch.zeros(1, 3, 3), volatile=is_test)
H, W = image_size[0], image_size[1]
self.T = torch.tensor([[2./W, 0., -1.], [0., 2./H, -1.], [0., 0., 1.]]).float().unsqueeze(0)
if is_cuda:
self.ones_b = self.ones_b.cuda()
# self.T_b = self.T_b.cuda()
self.T = self.T.cuda()
def normalize(self, pts):
ones = self.ones_b.expand(pts.size(0), pts.size(1), 1)
pts = torch.cat((pts, ones), 2)
pts_out = self.T @ pts.permute(0,2,1)
return pts_out, self.T
def forward(self, pts):
pts1, T1 = self.normalize(pts[:,:,:2])
pts2, T2 = self.normalize(pts[:,:,2:])
return pts1, pts2, T1, T2
class Fit(nn.Module):
def __init__(self, is_cuda=True, is_test=False, if_cpu_svd=False, normalize_SVD=True, if_sample_loss=False):
super(Fit, self).__init__()
# self.svd = bsvd(is_cuda, is_test)
self.ones_b = Variable(torch.ones((1, 1, 1)).float())
self.zero_b = Variable(torch.zeros((1, 1, 1)).float())
self.T_b = torch.zeros(1, 3, 3).float()
self.mask = Variable(torch.ones(3))
self.mask[-1] = 0
self.normalize_SVD = normalize_SVD
self.if_cpu_svd = if_cpu_svd
if self.if_cpu_svd:
self.mask_cpu = self.mask.clone()
self.if_sample_loss = if_sample_loss
if is_cuda:
self.ones_b = self.ones_b.cuda()
self.zero_b = self.zero_b.cuda()
self.T_b = self.T_b.cuda()
self.mask = self.mask.cuda()
self.is_cuda = is_cuda
# self.bsvd = bsvd_torch()
def normalize(self, pts, weights):
device = pts.device
T = Variable(self.T_b.to(device).expand(pts.size(0), 3, 3)).clone()
ones = self.ones_b.to(device).expand(pts.size(0), pts.size(1), 1)
denom = weights.sum(1)
#
# c = torch.mean(pts,1)
# newpts_ = (pts - c.unsqueeze(1))
# meandist = newpts_[:,:,:2].pow(2).sum(2).sqrt().mean(1)
c = torch.sum(pts*weights,1)/denom
# print(c.size(), pts.size())
newpts_ = (pts - c.unsqueeze(1))
meandist = ((weights*(newpts_[:,:,:2].pow(2).sum(2).sqrt().unsqueeze(2))).sum(1)/denom).squeeze(1)
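        # Hartley-style normalization: scale so the mean distance from the centroid is sqrt(2) (~1.4142)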
scale = 1.4142/meandist
T[:,0,0] = scale
T[:,1,1] = scale
T[:,2,2] = 1
T[:,0,2] = -c[:,0]*scale
T[:,1,2] = -c[:,1]*scale
# pts_ = torch.cat((pts, ones), 2)
# print(pts.device, weights.device, T.device, self.T_b.device)
pts_out = torch.bmm(T, pts.permute(0,2,1))
return pts_out, T
# def weighted_svd(self, pts1, pts2, weights):
# weights = weights.squeeze(1).unsqueeze(2)
# pts1n, T1 = self.normalize(pts1, weights)
# pts2n, T2 = self.normalize(pts2, weights)
# p = torch.cat((pts1n[:,0].unsqueeze(1)*pts2n,
# pts1n[:,1].unsqueeze(1)*pts2n,
# pts2n), 1).permute(0,2,1)
# X = p*weights
# out_b = []
# for b in range(X.size(0)):
# _, _, V = torch.svd(X[b])
# F = V[:,-1].view(3,3)
# U, S, V = torch.svd(F)
# F_ = U.mm((S*self.mask).diag()).mm(V.t())
# out_b.append(F_.unsqueeze(0))
# out = torch.cat(out_b, 0)
# out = T1.permute(0,2,1).bmm(out).bmm(T2)
# return out
def weighted_svd(self, pts1, pts2, weights, if_print=False):
device = weights.device
weights = weights.squeeze(1).unsqueeze(2)
ones = torch.ones_like(weights)
if self.is_cuda:
ones = ones.cuda()
pts1n, T1 = self.normalize(pts1, ones)
pts2n, T2 = self.normalize(pts2, ones)
# pts1n, T1 = self.normalize(pts1, weights)
# pts2n, T2 = self.normalize(pts2, weights)
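        # build the 8-point measurement matrix: each row is
        # [x2*x1, x2*y1, x2, y2*x1, y2*y1, y2, x1, y1, 1], so that row . vec(F) = x2^T F x1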
p = torch.cat((pts2n[:,0].unsqueeze(1)*pts1n,
pts2n[:,1].unsqueeze(1)*pts1n,
pts1n), 1).permute(0,2,1)
# # if self.normalize_SVD:
# # p = torch.nn.functional.normalize(p, dim=2)
# X = p*torch.sqrt(weights)
if self.normalize_SVD:
p = torch.nn.functional.normalize(p, dim=2)
X = p*weights
out_b = []
F_vecs_list = []
if self.if_cpu_svd:
for b in range(X.size(0)):
_, _, V = torch.svd(X[b].cpu())
F = V[:,-1].view(3,3)
F_vecs_list.append(V[:,-1]/(V[:,-1].norm()))
U, S, V = torch.svd(F)
F_ = U.mm((S*self.mask.cpu()).diag()).mm(V.t())
out_b.append(F_.unsqueeze(0))
out = torch.cat(out_b, 0).cuda()
F_vecs= torch.stack(F_vecs_list).cuda()
else:
for b in range(X.size(0)):
_, _, V = torch.svd(X[b])
F = V[:,-1].view(3,3)
F_vecs_list.append(V[:,-1]/(V[:,-1].norm()))
U, S, V = torch.svd(F)
F_ = U.mm((S*self.mask.to(device)).diag()).mm(V.t())
out_b.append(F_.unsqueeze(0))
out = torch.cat(out_b, 0)
F_vecs = torch.stack(F_vecs_list)
if if_print:
print(F_vecs.size(), p.size(), weights.size())
print('----F_vecs')
print(F_vecs[0].detach().cpu().numpy())
print('----p')
print(p[0].detach().cpu().numpy())
print('----weights')
print(weights[:2].squeeze().detach().cpu().numpy(), torch.sum(weights[:2], dim=1).squeeze().detach().cpu().numpy())
residual = (X @ F_vecs.unsqueeze(-1)).squeeze(-1) # [B, N, 1]
# residual_nonWeighted = (p @ F_vecs.unsqueeze(-1)).squeeze(-1) # [B, N, 1]
# print(residual.size())
# print(residual.norm(p=2, dim=1).size())
out = T2.permute(0,2,1).bmm(out).bmm(T1)
return out, residual.squeeze(-1)
def weighted_svd_batch(self, pts1, pts2, weights, if_print=False):
device = weights.device
weights = weights.squeeze(1).unsqueeze(2)
ones = torch.ones_like(weights)
if self.is_cuda:
ones = ones.cuda()
pts1n, T1 = self.normalize(pts1, ones)
pts2n, T2 = self.normalize(pts2, ones)
# pts1n, T1 = self.normalize(pts1, weights)
# pts2n, T2 = self.normalize(pts2, weights)
p = torch.cat((pts2n[:,0].unsqueeze(1)*pts1n,
pts2n[:,1].unsqueeze(1)*pts1n,
pts1n), 1).permute(0,2,1)
# # if self.normalize_SVD:
# # p = torch.nn.functional.normalize(p, dim=2)
# X = p*torch.sqrt(weights)
if self.normalize_SVD:
p = torch.nn.functional.normalize(p, dim=2)
X = p*weights
Us, Ss, Vs = batch_svd(X)
Fs = Vs[:, :, -1].view(-1, 3, 3)
F_vecs = torch.nn.functional.normalize(Vs[:, :, -1], p=2, dim=1)
Us, Ss, Vs = batch_svd(Fs)
out = Us @ torch.diag_embed(Ss*self.mask.unsqueeze(0)) @ Vs.transpose(1, 2)
# out_b = []
# F_vecs_list = []
# if self.if_cpu_svd:
# for b in range(X.size(0)):
# _, _, V = torch.svd(X[b].cpu())
# F = V[:,-1].view(3,3)
# F_vecs_list.append(V[:,-1]/(V[:,-1].norm()))
# U, S, V = torch.svd(F)
# F_ = U.mm((S*self.mask.cpu()).diag()).mm(V.t())
# out_b.append(F_.unsqueeze(0))
# out = torch.cat(out_b, 0).cuda()
# F_vecs= torch.stack(F_vecs_list).cuda()
# else:
# for b in range(X.size(0)):
# _, _, V = torch.svd(X[b])
# F = V[:,-1].view(3,3)
# F_vecs_list.append(V[:,-1]/(V[:,-1].norm()))
# U, S, V = torch.svd(F)
# F_ = U.mm((S*self.mask.to(device)).diag()).mm(V.t())
# out_b.append(F_.unsqueeze(0))
# out = torch.cat(out_b, 0)
# F_vecs = torch.stack(F_vecs_list)
# if if_print:
# print(F_vecs.size(), p.size(), weights.size())
# print('----F_vecs')
# print(F_vecs[0].detach().cpu().numpy())
# print('----p')
# print(p[0].detach().cpu().numpy())
# print('----weights')
# print(weights[:2].squeeze().detach().cpu().numpy(), torch.sum(weights[:2], dim=1).squeeze().detach().cpu().numpy())
residual = (X @ F_vecs.unsqueeze(-1)).squeeze(-1) # [B, N, 1]
# residual_nonWeighted = (p @ F_vecs.unsqueeze(-1)).squeeze(-1) # [B, N, 1]
# print(residual.size())
# print(residual.norm(p=2, dim=1).size())
out = T2.permute(0,2,1).bmm(out).bmm(T1)
return out, residual.squeeze(-1)
def get_unique(self, xs, topk, matches_good_unique_nums, pts1, pts2): # [B, N]
xs_topk_list = []
topK_indices_list = []
pts1_list = []
pts2_list = []
for x, matches_good_unique_num, pt1, pt2 in zip(xs, matches_good_unique_nums, pts1, pts2):
# x_unique = torch.unique(x) # no gradients!!!
x_unique = x[:, :matches_good_unique_num]
# print(x_unique_topK)
x_unique_topK, topK_indices = torch.topk(x_unique, topk, dim=1)
xs_topk_list.append(x_unique_topK)
topK_indices_list.append(topK_indices.squeeze())
pt1_topK, pt2_topK = pt1[topK_indices.squeeze(), :], pt2[topK_indices.squeeze(), :]
pts1_list.append(pt1_topK)
pts2_list.append(pt2_topK)
return torch.stack(xs_topk_list), torch.stack(topK_indices_list), torch.stack(pts1_list), torch.stack(pts2_list)
def forward(self, pts1, pts2, weights, if_print=False, matches_good_unique_nums=None):
out, residual = self.weighted_svd(pts1, pts2, weights, if_print=if_print)
out_dict = {'out': out, 'residual': residual}
# if not(self.if_sample_loss):
# return out, residual, None, None
topK = 20
selects_each_sample = 100
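        # draw selects_each_sample random topK-sized correspondence subsets
        # (weighted by the predicted scores) and fit an F to each subset below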
# print(weights.size()) # [B, 1, N]
weights_topK, indices_topK, pts1_topK, pts2_topK = self.get_unique(weights, topK, matches_good_unique_nums, pts1, pts2)
# print(indices_topK, indices_topK.size())
# print(indices_topK.size()) # [8, 10]
weights_mask = torch.zeros(weights.size(0), weights.size(2), device=weights.device).float() # [B, topK]
# print(indices_topK.size(), torch.max(indices_topK), weights_mask.size())
weights_mask = weights_mask.scatter_(1, indices_topK, 1.)
# print(torch.sum(weights_mask, dim=1))
# print(pts1.size(), weights.size(), indices_topK.size()) # torch.Size([8, 1000, 3]) torch.Size([8, 1, 1000]) torch.Size([8, 100])
pts1_topK = torch.gather(pts1, 1, indices_topK.unsqueeze(-1).expand(-1, -1, 3))
pts2_topK = torch.gather(pts2, 1, indices_topK.unsqueeze(-1).expand(-1, -1, 3))
weights_topK = torch.gather(weights, 2, indices_topK.unsqueeze(1))
# a = torch.index_select(pts1, 1, indices_topK.unsqueeze(-1))
# mask_select = weights_mask.byte().unsqueeze(-1)
# a = torch.masked_select(pts1, mask_select)
# out_topK, residual_topK = self.weighted_svd(pts1_topK, pts2_topK, weights_topK, if_print=if_print)
out_topK, residual_topK = self.weighted_svd_batch(pts1_topK, pts2_topK, weights_topK, if_print=if_print)
out_dict.update({'out_topK': out_topK, 'residual_topK': residual_topK})
# out, residual = self.weighted_svd(pts1, pts2, weights * weights_mask.unsqueeze(1), if_print=if_print)
out_sample_selected_list = []
weights_sample_selected_accu_list = []
for batch_idx, (matches_good_unique_num, weights_sample) in enumerate(zip(matches_good_unique_nums.cpu().numpy(), weights.detach().cpu().numpy())):
selected_corres_idx_per_sample_list = []
p = weights_sample.flatten()[:matches_good_unique_num]
p = p / np.sum(p)
for select_idx in range(selects_each_sample):
selected_corres_idx = np.random.choice(matches_good_unique_num, topK, p=p)
# selected_corres_idx = np.random.choice(matches_good_unique_num, topK)
selected_corres_idx_per_sample_list.append(selected_corres_idx)
selected_corres_idx_per_sample = np.stack(selected_corres_idx_per_sample_list) # [selects_each_sample, topK]
pts1_sample = pts1[batch_idx:batch_idx+1].expand(selects_each_sample, -1, -1)
pts1_sample_selected = torch.gather(pts1_sample, 1, torch.from_numpy(selected_corres_idx_per_sample).unsqueeze(-1).expand(-1, -1, 3).cuda()) # [selects_each_sample, topK, 3]
pts2_sample = pts2[batch_idx:batch_idx+1].expand(selects_each_sample, -1, -1)
pts2_sample_selected = torch.gather(pts2_sample, 1, torch.from_numpy(selected_corres_idx_per_sample).unsqueeze(-1).expand(-1, -1, 3).cuda()) # [selects_each_sample, topK, 3]
weights_sample = weights[batch_idx:batch_idx+1].expand(selects_each_sample, -1, -1)
weights_sample_selected = torch.gather(weights_sample, 2, torch.from_numpy(selected_corres_idx_per_sample).unsqueeze(1).cuda()) # [selects_each_sample, 1, topK]
weights_sample_selected_normalized = torch.nn.functional.normalize(weights_sample_selected, p=1, dim=2) # [selects_each_sample, 1, topK]
weights_sample_selected_accu = torch.prod(weights_sample_selected * 1000., dim=2) # [selects_each_sample, 1]
weights_sample_selected_accu = weights_sample_selected_accu / (torch.sum(weights_sample_selected_accu)+1e-10)
# print(weights_sample_selected_accu, torch.sum(weights_sample_selected_accu))
weights_sample_selected_accu_list.append(weights_sample_selected_accu)
# out_sample_selected, _ = self.weighted_svd(pts1_sample_selected, pts2_sample_selected, weights_sample_selected_normalized, if_print=False) # [selects_each_sample, 3, 3]
out_sample_selected, _ = self.weighted_svd_batch(pts1_sample_selected, pts2_sample_selected, weights_sample_selected, if_print=False) # [selects_each_sample, 3, 3]
out_sample_selected_list.append(out_sample_selected)
out_sample_selected_batch = torch.stack(out_sample_selected_list) # [B, selects_each_sample, 3, 3]
weights_sample_selected_accu_batch = torch.stack(weights_sample_selected_accu_list) # [B, selects_each_sample, 1]
# return out_topK, residual_topK, out_sample_selected_batch, weights_sample_selected_accu_batch
out_dict.update({'out_sample_selected_batch': out_sample_selected_batch, 'weights_sample_selected_accu_batch': weights_sample_selected_accu_batch})
return out_dict
class Norm8PointNet(nn.Module):
def __init__(self, depth, image_size, if_quality, if_goodCorresArch=False, if_tri_depth=False, if_sample_loss=False, if_learn_offsets=False, if_des=False, des_size=None, quality_size=0, is_cuda=True, is_test=False, if_cpu_svd=False, **params):
super(Norm8PointNet, self).__init__()
        print('====Loading Norm8PointNet@DeepFNetSampleLoss.py')
if not if_quality:
quality_size = 0
self.if_quality = if_quality
if if_quality:
print('----Quality!!!!!!@Norm8PointNet')
if if_learn_offsets:
print('----if_learn_offsets!!!!!!@Norm8PointNet')
print('----CPU svd@Norm8PointNet!!!!!!' if if_cpu_svd else '----GPU svd@Norm8PointNet!!!!!!')
self.if_des = if_des
self.if_goodCorresArch = if_goodCorresArch
self.if_learn_offsets = if_learn_offsets
self.image_size = image_size # list of [H, W, 3]
self.if_tri_depth = if_tri_depth
self.depth_size = 1 if self.if_tri_depth else 0
if if_tri_depth:
print('----Tri depth!!!!!!@Norm8PointNet')
self.if_sample_loss = if_sample_loss
if if_sample_loss:
print('----if_sample_loss!!!!!!@Norm8PointNet')
if if_des:
# self.input_weights = ErrorEstimatorDes(4+quality_size, des_size)
# self.update_weights = ErrorEstimatorDes(6+quality_size, des_size)
# self.input_weights = ErrorEstimatorFeatFusion(4+quality_size, des_size)
# self.update_weights = ErrorEstimatorFeatFusion(6+quality_size+1, des_size) # +1 for the added in residual
# if if_learn_offsets:
# self.update_offsets = ErrorEstimatorFeatFusion(6+quality_size+1, des_size, output_size=4) # +1 for the added in residual
self.input_weights = ErrorEstimator(4+quality_size+des_size)
self.update_weights = ErrorEstimator(6+quality_size+1+des_size) # +1 for the added in residual
# self.input_weights = ErrorEstimatorFeatFusion2Head(4+quality_size, des_size)
# self.update_weights = ErrorEstimatorFeatFusion2Head(6+quality_size+1, des_size) # +1 for the added in residual
if if_learn_offsets:
self.update_offsets = ErrorEstimator(6+quality_size+1+des_size, output_size=4) # +1 for the added in residual
print('----DES feat@Norm8PointNet!!!!!!')
else:
if self.if_goodCorresArch:
print('----goodCorresArch@Norm8PointNet!!!!!!')
self.input_weights = GoodCorresNet(4+quality_size, bn=False)
self.update_weights = GoodCorresNet(6+quality_size, bn=False)
else:
self.input_weights = ErrorEstimator(4+quality_size)
                self.update_weights = ErrorEstimator(4+quality_size+3+self.depth_size) # +3 for weights, epi_res and residual; depth_size adds 1 when tri depth is used
if if_learn_offsets:
self.update_offsets = ErrorEstimator(4+quality_size+2+self.depth_size, output_size=4, if_bn=False) # +1 for the added in residual
if is_test:
self.input_weights.eval()
self.update_weights.eval()
if if_learn_offsets:
self.update_offsets.eval()
self.norm = NormalizeAndExpand(is_cuda, is_test)
self.norm_K = NormalizeAndExpand_K(is_cuda, is_test)
self.norm_HW = NormalizeAndExpand_HW(self.image_size, is_cuda, is_test)
self.fit = Fit(is_cuda, is_test, if_cpu_svd, if_sample_loss=if_sample_loss)
self.depth = depth
self.mask = Variable(torch.ones(3)).cuda()
self.mask[-1] = 0
def get_input(self, data_batch, offsets=None, iter=None):
pts = data_batch['matches_xy_ori']
if offsets is not None:
# print('------ ', iter)
# print(pts.permute(0, 2, 1)[0, :2, :].clone().detach().cpu().numpy())
# print(offsets[0, :2, :].clone().detach().cpu().numpy())
pts = pts + offsets.permute(0, 2, 1)
# pts1, pts2, T1, T2 = self.norm(pts) # pts: [b, N, 2] # \in [-1, 1]
# pts1, pts2, T1, T2 = self.norm_K(pts, data_batch['K_invs']) # pts: [b, N, 2] # \in [-1, 1]
pts1, pts2, T1, T2 = self.norm_HW(pts)
# print(pts1.max(-1)[0].max(0)[0], pts1.min(-1)[0].min(0)[0])
# pts1_recover = torch.inverse(T1) @ pts1
# print(pts1_recover.max(-1)[0].max(0)[0], pts1_recover.min(-1)[0].min(0)[0])
pts1 = pts1.permute(0,2,1)
pts2 = pts2.permute(0,2,1)
if self.if_quality:
quality = data_batch['quality']
weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2, quality), 2).permute(0,2,1) # [0, 1]
else:
weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2), 2).permute(0,2,1) # [0, 1]
# if self.if_quality:
# quality = data_batch['quality']
# weight_in = torch.cat((pts1[:,:,:2], pts2[:,:,:2], quality), 2).permute(0,2,1) # [0, 1]
# else:
# weight_in = torch.cat((pts1[:,:,:2], pts2[:,:,:2]), 2).permute(0,2,1) # [0, 1]
# f1 = data_batch['Ks'][:, 0, 0]
# f2 = data_batch['Ks'][:, 1, 1]
# w2 = data_batch['Ks'][:, 0, 2]
# h2 = data_batch['Ks'][:, 1, 2]
# print(w2/f1)
# print(h2/f2)
# print(f1, f2)
return weight_in, pts1, pts2, T1, T2
def get_depth(self, data_batch, F_out, T1, T2):
F_ests = T2.permute(0,2,1) @ F_out @ T1
E_ests = data_batch['Ks'].transpose(1, 2) @ F_ests @ data_batch['Ks']
depth_list = []
for E_hat, K, match in zip(E_ests, data_batch['Ks'], data_batch['matches_xy_ori']):
K = K.cpu().numpy()
p1p2 = match.cpu().numpy()
x1 = p1p2[:, :2]
x2 = p1p2[:, 2:]
num_inlier, R, t, mask_new = cv2.recoverPose(E_hat.detach().cpu().numpy().astype(np.float64), x1, x2, focal=K[0, 0], pp=(K[0, 2], K[1, 2]))
R1 = np.eye(3)
t1 = np.zeros((3, 1))
M1 = np.hstack((R1, t1))
M2 = np.hstack((R, t))
# print(np.linalg.norm(t))
X_tri_homo = cv2.triangulatePoints(np.matmul(K, M1), np.matmul(K, M2), x1.T, x2.T)
X_tri = X_tri_homo[:3, :]/X_tri_homo[-1, :]
depth = X_tri[-1, :].T
depth_list.append(depth)
# print(depth.flatten()[:10])
depths = np.stack(depth_list) # [B, N]
return torch.from_numpy(depths).unsqueeze(1).float().cuda()
def forward(self, data_batch):
pts_normalized_in, pts1, pts2, T1, T2 = self.get_input(data_batch)
if self.if_des:
# des1, des2 = data_batch['feats_im1'], data_batch['feats_im2'] # [B, D, N]
# des_in = torch.cat((des1, des2), 1)
# des_in = data_batch['feats_im12_var']
des_in = data_batch['feats_im12_groupConv']
# logits = self.input_weights(pts_normalized_in, des_in)
logits = self.input_weights(torch.cat((pts_normalized_in, des_in), 1))
else:
logits = self.input_weights(pts_normalized_in)
weights = F.softmax(logits, dim=2)
# weights = torch.sigmoid(logits)
matches_good_unique_nums = data_batch['matches_good_unique_nums'] # [B]
# matches_good_unique_num = None
if self.if_tri_depth:
t_scene_scale = data_batch['t_scene_scale']
out_layers = []
out_topK_layers = []
epi_res_layers = []
residual_layers = []
weights_layers = [weights]
logits_layers = [logits]
out_sample_selected_batch_layers = []
weights_sample_selected_accu_batch_layers = []
for iter in range(self.depth-1):
out_dict = self.fit(pts1, pts2, weights, matches_good_unique_nums=matches_good_unique_nums)
out, residual = out_dict['out'], out_dict['residual']
residual_layers.append(residual)
out_layers.append(out)
out_topK_layers.append(out_dict['out_topK'])
out_sample_selected_batch_layers.append(out_dict['out_sample_selected_batch'])
weights_sample_selected_accu_batch_layers.append(out_dict['weights_sample_selected_accu_batch'])
if self.if_tri_depth:
tri_depths = self.get_depth(data_batch, out, T1, T2) # [B, 1, N]
tri_depths = torch.clamp(tri_depths * t_scene_scale, -150., 150.)
epi_res = utils_F.compute_epi_residual(pts1, pts2, out).unsqueeze(1)
epi_res_layers.append(epi_res)
if self.if_tri_depth:
net_in = torch.cat((pts_normalized_in, weights, epi_res, tri_depths), 1)
else:
# net_in = torch.cat((pts_normalized_in, weights, epi_res), 1)
net_in = torch.cat((pts_normalized_in, weights, epi_res), 1)
if self.if_learn_offsets:
if self.if_des:
offsets = self.update_offsets(net_in, des_in)
else:
offsets = self.update_offsets(net_in)
# if iter == 0:
offsets_accu = offsets
# else:
# offsets_accu += offsets
pts_normalized_in, pts1, pts2, T1, T2 = self.get_input(data_batch, offsets_accu, iter)
if self.if_tri_depth:
net_in = torch.cat((pts_normalized_in, weights, epi_res, tri_depths), 1)
else:
# net_in = torch.cat((pts_normalized_in, weights, epi_res), 1)
net_in = torch.cat((pts_normalized_in, weights, epi_res), 1)
if self.if_des:
logits = self.update_weights(net_in, des_in)
else:
logits = self.update_weights(net_in)
weights = F.softmax(logits, dim=2)
# weights = torch.sigmoid(logits)
weights_layers.append(weights)
logits_layers.append(logits)
out_dict = self.fit(pts1, pts2, weights, matches_good_unique_nums=matches_good_unique_nums)
out, residual = out_dict['out'], out_dict['residual']
residual_layers.append(residual)
out_layers.append(out)
out_topK_layers.append(out_dict['out_topK'])
out_sample_selected_batch_layers.append(out_dict['out_sample_selected_batch'])
weights_sample_selected_accu_batch_layers.append(out_dict['weights_sample_selected_accu_batch'])
preds = {
# "cls_logit": cls_logit,
"logits": logits.squeeze(1), # [batch_size, N]
'logits_layers': logits_layers,
'F_est': out,
'epi_res_layers': epi_res_layers,
'T1': T1,
'T2': T2,
'out_layers': out_layers,
'out_topK_layers': out_topK_layers,
'pts1': pts1,
'pts2': pts2,
'weights': weights,
'residual_layers': residual_layers,
'weights_layers': weights_layers,
'out_sample_selected_batch_layers': out_sample_selected_batch_layers,
'weights_sample_selected_accu_batch_layers': weights_sample_selected_accu_batch_layers
}
if self.if_learn_offsets:
preds.update({'offsets': offsets_accu})
if self.if_tri_depth:
preds.update({'tri_depths': tri_depths})
return preds
# class Norm8PointNet_bkg(nn.Module):
# def __init__(self, depth, if_quality, if_goodCorresArch=False, if_learn_offsets=False, if_des=False, des_size=None, quality_size=0, is_cuda=True, is_test=False, if_cpu_svd=False, **params):
# super(Norm8PointNet, self).__init__()
# print('====Loading Norm8PointNet@DeepFNetSampleLoss.py')
# if not if_quality:
# quality_size = 0
# self.if_quality = if_quality
# if if_quality:
# print('----Quality!!!!!!')
# self.if_des = if_des
# self.if_goodCorresArch = if_goodCorresArch
# if if_des:
# # self.input_weights = ErrorEstimatorDes(4+quality_size, des_size)
# # self.update_weights = ErrorEstimatorDes(6+quality_size, des_size)
# self.input_weights = ErrorEstimatorFeatFusion(4+quality_size, des_size*2)
# self.update_weights = ErrorEstimatorFeatFusion(6+quality_size, des_size*2)
# if if_learn_offsets:
# self.update_offsets = ErrorEstimatorFeatFusion(6+quality_size+1, des_size*2, output_size=4, if_bn=False)
# print('----DES feat@Norm8PointNet!!!!!!')
# else:
# if self.if_goodCorresArch:
# print('----goodCorresArch@Norm8PointNet!!!!!!')
# self.input_weights = GoodCorresNet(4+quality_size)
# self.update_weights = GoodCorresNet(6+quality_size)
# else:
# self.input_weights = ErrorEstimator(4+quality_size)
# self.update_weights = ErrorEstimator(6+quality_size)
# if if_learn_offsets:
# self.update_offsets = ErrorEstimator(6+quality_size+1, output_size=4, if_bn=False)
# if is_test:
# self.input_weights.eval()
# self.update_weights.eval()
# if if_learn_offsets:
# self.update_offsets.eval()
# self.norm = NormalizeAndExpand(is_cuda, is_test)
# self.norm_K = NormalizeAndExpand_K(is_cuda, is_test)
# self.fit = Fit(is_cuda, is_test, if_cpu_svd)
# print('----CPU svd!!!!!!' if if_cpu_svd else '----GPU svd!!!!!!')
# self.depth = depth
# self.mask = Variable(torch.ones(3)).cuda()
# self.mask[-1] = 0
# def forward(self, data_batch):
# pts = data_batch['matches_xy_ori']
# # pts1, pts2, T1, T2 = self.norm(pts) # pts: [b, N, 2] # \in [-1, 1]
# pts1, pts2, T1, T2 = self.norm_K(pts, data_batch['K_invs']) # pts: [b, N, 2] # \in [-1, 1]
# if self.if_des:
# # des1, des2 = data_batch['des1'].transpose(1, 2), data_batch['des2'].transpose(1, 2)
# des1, des2 = data_batch['feats_im1'], data_batch['feats_im2'] # [B, D, N]
# des_in = torch.cat((des1, des2), 1)
# pts1 = pts1.permute(0,2,1)
# pts2 = pts2.permute(0,2,1)
# if self.if_quality:
# quality = data_batch['quality']
# weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2, quality), 2).permute(0,2,1) # [0, 1]
# else:
# weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2), 2).permute(0,2,1) # [0, 1]
# if self.if_des:
# logits = self.input_weights(weight_in, des_in)
# else:
# logits = self.input_weights(weight_in)
# weights = F.softmax(logits, dim=2)
# # weights = torch.sigmoid(logits)
# out_layers = []
# epi_res_layers = []
# residual_layers = []
# weights_layers = [weights]
# for iter in range(self.depth-1):
# out, residual = self.fit(pts1, pts2, weights)
# out_layers.append(out)
# residual_layers.append(residual)
# epi_res = utils_F.compute_epi_residual(pts1, pts2, out).unsqueeze(1)
# epi_res_layers.append(epi_res)
# net_in = torch.cat((weight_in, weights, epi_res), 1)
# if self.if_des:
# logits = self.update_weights(net_in, des_in)
# else:
# logits = self.update_weights(net_in)
# weights = F.softmax(logits, dim=2)
# # weights = torch.sigmoid(logits)
# weights_layers.append(weights)
# out, residual = self.fit(pts1, pts2, weights, if_print=False)
# residual_layers.append(residual)
# preds = {
# # "cls_logit": cls_logit,
# "logits": logits.squeeze(1), # [batch_size, N]
# 'F_est': out,
# 'epi_res_layers': epi_res_layers,
# 'T1': T1,
# 'T2': T2,
# 'out_layers': out_layers,
# 'pts1': pts1,
# 'pts2': pts2,
# 'weights': weights,
# 'residual_layers': residual_layers,
# 'weights_layers': weights_layers
# }
# return preds
# class Norm8PointNetMixWeights(nn.Module):
# def __init__(self, depth, if_quality, if_des, if_goodCorresArch, quality_size=0, is_cuda=True, is_test=False):
# super(Norm8PointNetMixWeights, self).__init__()
# if not if_quality:
# quality_size = 0
# self.if_quality = if_quality
# if if_quality:
# print('----Quality!!!!!!')
# self.if_des = if_des
# self.if_goodCorresArch = if_goodCorresArch
# self.input_weights = ErrorEstimator(4+quality_size)
# self.update_weights = ErrorEstimator(6+quality_size)
# if is_test:
# self.input_weights.eval()
# self.update_weights.eval()
# self.norm = NormalizeAndExpand(is_cuda, is_test)
# self.norm_K = NormalizeAndExpand_K(is_cuda, is_test)
# self.fit = Fit(is_cuda, is_test)
# self.depth = depth
# self.mask = Variable(torch.ones(3)).cuda()
# self.mask[-1] = 0
# def forward(self, data_batch):
# pts = data_batch['matches_xy_ori']
# # pts1, pts2, T1, T2 = self.norm(pts) # pts: [b, N, 2] # \in [-1, 1]
# pts1, pts2, T1, T2 = self.norm_K(pts, data_batch['K_invs']) # pts: [b, N, 2] # \in [-1, 1]
# pts1 = pts1.permute(0,2,1)
# pts2 = pts2.permute(0,2,1)
# weights_im1 = data_batch['feats_im1'] # [B, 1, N]
# weights_im2 = data_batch['feats_im2'] # [B, 1, N]
# if self.if_quality:
# quality = data_batch['quality']
# weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2, quality), 2).permute(0,2,1) # [B, D, N]
# else:
# weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2), 2).permute(0,2,1) # [0, 1]
# logits = self.input_weights(weight_in)
# weights = F.softmax(logits, dim=2) # [B, 1, N]
# weights = weights * weights_im1 * weights_im2
# out_a = []
# for iter in range(self.depth-1):
# out = self.fit(pts1, pts2, weights)
# out_a.append(out)
# res = utils_F.compute_epi_residual(pts1, pts2, out, clamp_at=0.05).unsqueeze(1)
# # res_np = res.detach().cpu().numpy().squeeze()
# # print(res_np, res_np.shape, np.amax(res_np, 1), np.amin(res_np, 1), np.mean(res_np, 1), np.median(res_np, 1))
# net_in = torch.cat((weight_in, weights, res), 1)
# logits = self.update_weights(net_in)
# weights = F.softmax(logits, dim=2) * weights_im1 * weights_im2
# out = self.fit(pts1, pts2, weights)
# preds = {
# # "cls_logit": cls_logit,
# "logits": logits.squeeze(1), # [batch_size, N]
# 'F_est': out,
# 'res': weights.squeeze(1),
# 'T1': T1,
# 'T2': T2,
# 'out_a': out_a,
# 'pts1': pts1,
# 'pts2': pts2,
# 'weights': weights
# }
# return preds
# class NWeightMixer(nn.Module):
# def __init__(self, input_size):
# super(NWeightMixer, self).__init__()
# inplace = True
# hasbias = True
# learn_affine = True
# self.fw = nn.Sequential(
# nn.Conv1d(input_size, 16, kernel_size=1, bias=hasbias),
# # nn.InstanceNorm1d(64, affine=learn_affine),
# nn.LeakyReLU(inplace=inplace),
# nn.Conv1d(16,32, kernel_size=1, bias=hasbias),
# # nn.InstanceNorm1d(128, affine=learn_affine),
# nn.LeakyReLU(inplace=inplace),
# nn.Conv1d(32,16,kernel_size=1, bias=hasbias),
# # nn.InstanceNorm1d(1024, affine=learn_affine),
# nn.LeakyReLU(inplace=inplace),
# nn.Conv1d(16,1, kernel_size=1, bias=hasbias),
# nn.
# def forward(self, data):
# # print('ErrorEstimator')
# return self.fw(data)
|
pyez/gather_facts.py | rsmekala/junosautomation | 117 | 11115401 | <reponame>rsmekala/junosautomation
from jnpr.junos import Device
from pprint import pprint
dev = Device(host='xxxx', user='demo', password='<PASSWORD>')
dev.open()
pprint (dev.facts)
# As dev.facts is a dictionary, we can fetch any specific data
print dev.facts['serialnumber']
print dev.facts['version']
print dev.facts['version_info']
print dev.facts['version_info'].major
# close the NETCONF session when done
dev.close()
|
data/transcoder_evaluation_gfg/python/COUNT_PALINDROME_SUB_STRINGS_STRING.py | mxl1n/CodeGen | 241 | 11115417 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( str , n ) :
dp = [ [ 0 for x in range ( n ) ] for y in range ( n ) ]
P = [ [ False for x in range ( n ) ] for y in range ( n ) ]
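    # P[i][j] is True iff str[i..j] is a palindrome;
    # dp[i][j] counts palindromic substrings of length >= 2 within str[i..j]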
for i in range ( n ) :
P [ i ] [ i ] = True
for i in range ( n - 1 ) :
if ( str [ i ] == str [ i + 1 ] ) :
P [ i ] [ i + 1 ] = True
dp [ i ] [ i + 1 ] = 1
for gap in range ( 2 , n ) :
for i in range ( n - gap ) :
j = gap + i ;
if ( str [ i ] == str [ j ] and P [ i + 1 ] [ j - 1 ] ) :
P [ i ] [ j ] = True
if ( P [ i ] [ j ] == True ) :
dp [ i ] [ j ] = ( dp [ i ] [ j - 1 ] + dp [ i + 1 ] [ j ] + 1 - dp [ i + 1 ] [ j - 1 ] )
else :
dp [ i ] [ j ] = ( dp [ i ] [ j - 1 ] + dp [ i + 1 ] [ j ] - dp [ i + 1 ] [ j - 1 ] )
return dp [ 0 ] [ n - 1 ]
#TOFILL
if __name__ == '__main__':
param = [
(['E', 'E', 'J', 'P', 'T', 'U', 'X', 'Y', 'Z', 'e', 'f', 'h', 'l', 'm', 'n', 'o', 'z'],11,),
(['8', '7', '3', '4', '9', '5', '3', '1', '4', '0', '6', '8', '2', '5', '8', '3', '5', '2', '8', '6', '6', '3', '5', '7', '5', '5', '3', '7'],27,),
(['0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],23,),
(['f', 'E', 'e', 'z', 'B', 'o', 'i', 'v', 'K', 'u', 'P', 'C', 'z', 'f', 'k', 'J', 't', 'R', 't', 'A', 'f', 'G', 'D', 'X', 'H', 'e', 'p', 'l', 'l', 'k', 'Z', 'Y', 'u', 'g', 'H', 'C', 'f', 'J', 'H', 'W'],27,),
(['0', '0', '0', '1', '1', '1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3', '4', '4', '4', '4', '4', '4', '5', '5', '5', '5', '6', '6', '7', '7', '9', '9', '9', '9', '9', '9'],35,),
(['1', '0', '1', '1', '0', '0', '1', '1', '1', '0', '1', '0', '1', '1', '0', '1', '0', '1', '1', '1', '1', '1', '0', '1', '1', '0', '1', '0', '1', '1', '0', '0', '1', '0', '1', '0', '0', '0', '0', '0', '1', '1', '0', '1', '0', '1'],43,),
(['C', 'C', 'D', 'F', 'L', 'M', 'P', 'X', 'a', 'f', 'i', 'j', 'w'],9,),
(['7', '9', '0', '2', '8', '0', '7', '5', '9', '4', '5', '4', '8', '1', '9', '5', '3', '2', '4', '1', '2'],16,),
(['0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],32,),
(['m', 'X', 'N', 'M'],3,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |
dnnweaver2/scalar/ops.py | ZixuanJiang/dnnweaver2 | 159 | 11115421 | <filename>dnnweaver2/scalar/ops.py
from dnnweaver2.scalar.dtypes import Dtype
class ScalarOp(object):
def __init__(self, op_str, dtype):
self.op_str = op_str
self.dtype = dtype
def __str__(self):
if isinstance(self.dtype, Dtype):
return '{}({})'.format(self.op_str, self.dtype.__str__())
else:
ret = str(self.op_str)
ret += '('
ret += ','.join([x.__str__() for x in self.dtype])
ret += ')'
return ret
class ScalarOpTypes(object):
def __init__(self):
self.MulOp = {}
self.MacOp = {}
self.SqrOp = {}
self.CmpOp = {}
self.AddOp = {}
self.SubOp = {}
self.RshiftOp = {}
def MUL(self, dtypes):
assert len(dtypes) == 2
if dtypes not in self.MulOp:
self.MulOp[dtypes] = ScalarOp('Multiply', dtypes)
return self.MulOp[dtypes]
def MAC(self, dtypes):
assert len(dtypes) == 3
if dtypes not in self.MacOp:
self.MacOp[dtypes] = ScalarOp('Multiply-Accumulate', dtypes)
return self.MacOp[dtypes]
def SQR(self, dtypes):
assert isinstance(dtypes, Dtype)
if dtypes not in self.SqrOp:
self.SqrOp[dtypes] = ScalarOp('Square', dtypes)
return self.SqrOp[dtypes]
def CMP(self, dtypes):
assert isinstance(dtypes, Dtype), 'Got Dtypes: {}'.format(dtypes)
if dtypes not in self.CmpOp:
self.CmpOp[dtypes] = ScalarOp('Compare', dtypes)
return self.CmpOp[dtypes]
def ADD(self, dtypes):
assert len(dtypes) == 2
if dtypes not in self.AddOp:
self.AddOp[dtypes] = ScalarOp('Addition', dtypes)
return self.AddOp[dtypes]
def SUB(self, dtypes):
assert len(dtypes) == 2
if dtypes not in self.SubOp:
self.SubOp[dtypes] = ScalarOp('Subtract', dtypes)
return self.SubOp[dtypes]
def RSHIFT(self, dtypes):
assert isinstance(dtypes, Dtype), 'Got Dtypes: {}'.format(dtypes)
if dtypes not in self.RshiftOp:
self.RshiftOp[dtypes] = ScalarOp('Rshift', dtypes)
return self.RshiftOp[dtypes]
Ops = ScalarOpTypes()
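# Note: each accessor memoizes ops per dtype signature, so e.g. Ops.MUL((a, b))
# returns the same ScalarOp('Multiply', ...) instance for repeated equal dtypes.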
|
cosrlib/sources/webarchive.py | commonsearch/cosr-back | 141 | 11115436 | <reponame>commonsearch/cosr-back<filename>cosrlib/sources/webarchive.py
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tempfile
from cosrlib.sources import Source
from cosrlib.url import URL
import warc
from gzipstream import GzipStreamFile
try:
from http_parser.parser import HttpParser
except ImportError:
from http_parser.pyparser import HttpParser
class WebarchiveSource(Source):
""" Generic .warc Source """
def get_partitions(self):
# .txt file with one .warc path per line
if self.args.get("list"):
with open(self.args["list"], "rb") as f:
return [{
"path": x.strip(),
"source": "warc"
} for x in f.readlines()]
# Direct list of .warc filepaths
elif self.args.get("paths"):
return [{
"path": path,
"source": "warc"
} for path in self.args["paths"]]
# Single .warc
else:
return [{
"path": self.args["path"],
"source": "warc"
}]
def _warc_reader_from_file(self, filereader, filepath):
""" Creates a WARC record iterator from a file reader """
if filepath.endswith(".warc"):
return warc.WARCFile(fileobj=filereader)
else:
# TODO: investigate how we could use cloudflare's zlib
return warc.WARCFile(fileobj=GzipStreamFile(filereader))
def open_warc_stream(self, filepath):
""" Creates a WARC record iterator from the filepath given to the Source """
filereader = open(filepath, "rb")
return self._warc_reader_from_file(filereader, filepath)
def iter_items(self, partition):
""" Yields objects in the source's native format """
warc_stream = self.open_warc_stream(partition["path"])
for record in warc_stream:
if not record.url:
continue
if record['Content-Type'] != 'application/http; msgtype=response':
continue
url = URL(record.url, check_encoding=True)
do_parse, index_level = self.qualify_url(url)
if not do_parse:
continue
payload = record.payload.read()
parser = HttpParser()
parser.execute(payload, len(payload))
headers = parser.get_headers()
if 'text/html' not in headers.get("content-type", ""):
# print "Not HTML?", record.url, headers
continue
yield url, headers, "html", index_level, parser.recv_body()
def create_warc_from_corpus(documents, filename=None):
""" Used mainly in tests to generate small .warc files """
if filename is None:
fd, filename = tempfile.mkstemp(suffix=".warc")
os.close(fd)
f = warc.open(filename, "w")
for doc in documents:
headers = "Connection: close\r\nContent-Type: text/html"
if "headers" in doc:
headers = "\r\n".join(["%s: %s" % (k, v) for k, v in doc["headers"].iteritems()])
payload = "HTTP/1.1 200 OK\r\n" + headers + "\r\n\r\n" + doc["content"]
record = warc.WARCRecord(payload=payload, headers={
"Content-Type": "application/http; msgtype=response",
"WARC-Type": "response",
"WARC-Target-URI": doc["url"]
})
f.write_record(record)
f.close()
return filename
|
demo/tracking/deep_sort.py | shachargluska/centerpose | 245 | 11115471 | <gh_stars>100-1000
import time
import numpy as np
from .feature_extractor import Extractor
from .sort.detection import Detection
from .sort.nn_matching import NearestNeighborDistanceMetric
from .sort.preprocessing import non_max_suppression
from .sort.tracker import Tracker
class DeepSort(object):
def __init__(self, model_path):
self.min_confidence = 0.3
self.nms_max_overlap = 1.0
self.extractor = Extractor(model_path, use_cuda=True)
max_cosine_distance = 0.2
nn_budget = 100
metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
self.tracker = Tracker(metric)
def update(self, bbox_xywh, confidences, ori_img):
self.height, self.width = ori_img.shape[:2]
# generate detections
        features = self._get_features(bbox_xywh, ori_img)
detections = [Detection(bbox_xywh[i], conf, features[i]) for i,conf in enumerate(confidences) if conf>self.min_confidence]
# run on non-maximum supression
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
indices = non_max_suppression( boxes, self.nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# update tracker
self.tracker.predict()
self.tracker.update(detections)
# output bbox identities
outputs = []
for track in self.tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
box = track.to_tlwh()
x1,y1,x2,y2 = self._xywh_to_xyxy_centernet(box)
track_id = track.track_id
outputs.append(np.array([x1,y1,x2,y2,track_id], dtype=np.int))
if len(outputs) > 0:
outputs = np.stack(outputs,axis=0)
return outputs
    # for centernet (x1, y1, w, h -> x1, y1, x2, y2)
def _xywh_to_xyxy_centernet(self, bbox_xywh):
x1,y1,w,h = bbox_xywh
x1 = max(x1,0)
y1 = max(y1,0)
x2 = min(int(x1+w),self.width-1)
y2 = min(int(y1+h),self.height-1)
return int(x1),int(y1),x2,y2
    # for yolo (center_x, center_y, w, h -> x1, y1, x2, y2)
def _xywh_to_xyxy_yolo(self, bbox_xywh):
x,y,w,h = bbox_xywh
x1 = max(int(x-w/2),0)
x2 = min(int(x+w/2),self.width-1)
y1 = max(int(y-h/2),0)
y2 = min(int(y+h/2),self.height-1)
return x1,y1,x2,y2
def _get_features(self, bbox_xywh, ori_img):
features = []
for box in bbox_xywh:
x1,y1,x2,y2 = self._xywh_to_xyxy_centernet(box)
im = ori_img[y1:y2,x1:x2]
feature = self.extractor(im)[0]
features.append(feature)
if len(features):
features = np.stack(features, axis=0)
else:
features = np.array([])
return features
if __name__ == '__main__':
pass
|
explorer/python-api/explorer_python_api/app.py | johnnynetgevity/cardano-sl | 4,174 | 11115482 | <reponame>johnnynetgevity/cardano-sl<gh_stars>1000+
import atexit
import json
import logging
import os
import records
import requests
import pytz
import time
from flask import Flask, request
from flask.logging import default_handler
from prometheus_flask_exporter import PrometheusMetrics
from prometheus_client import CollectorRegistry
from explorer_python_api import db
# variables that are accessible from anywhere
commonDataStruct = {}
logger_name = __name__
logger = logging.getLogger(__name__)
logger.addHandler(default_handler)
def create_app():
app = Flask(__name__)
gunicorn_logger = logging.getLogger('gunicorn.error')
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
metrics_registry = CollectorRegistry()
metrics = PrometheusMetrics(app, registry=metrics_registry)
dbuser = os.environ.get('DBUSER', 'explorer_python_api')
dbname = os.environ.get('DBNAME', 'explorer_python_api')
dbsockpath = os.environ.get('DBSOCKPATH', '/tmp')
addr_max_len = os.environ.get('ADDRMAXLEN', '200')
dbstring = f'postgres:///{dbname}?user={dbuser}&host={dbsockpath}'
explorer_url = os.environ.get('EXPLORERURL', 'http://localhost:8100')
dbc = records.Database(dbstring)
@app.route('/api/addresses/summary/<address>')
@metrics.do_not_track()
@metrics.histogram('address_summary_hist', 'Address Summary Histogram',
labels={'status': lambda r: r.status_code})
def address_summary(address):
app.logger.debug("Address summary page accessed")
resp = getAddressSummary(address[:int(addr_max_len)])
if type(resp) == bytes:
return resp
return json.dumps(resp)
@app.route('/', defaults={'u_path': ''})
@app.route('/<path:u_path>')
@metrics.do_not_track()
@metrics.histogram('proxy_hist', 'Proxy Histogram',
labels={'status': lambda r: r.status_code})
def explorer_proxy(u_path):
app.logger.info(f'API proxied to Explorer: {u_path}')
return requests.get(f'{explorer_url}/{u_path}').content
def getAddressSummary(caAddress):
p1 = time.time()
caTxSentRecord = db.caTxSentRecord(dbc, caAddress)
caTxReceivedRecord = db.caTxReceivedRecord(dbc, caAddress)
caTxRecord = db.caTxRecord(dbc, caAddress)
caTxNum = len(caTxRecord)
if caTxNum == 0:
app.logger.info(f'{explorer_url}/api/addresses/summary/{caAddress}')
return requests.get(f'{explorer_url}/api/addresses/summary/{caAddress}').content
# These address metrics can be logged prior to any heavy processing
# Seeing these logs may help explain any timeouts
app.logger.debug(f'caAddress: {caAddress}')
app.logger.debug(f'caTxNum: {caTxNum}')
app.logger.debug(f'len(caTxSentRecord): {len(caTxSentRecord)}')
app.logger.debug(f'len(caTxReceivedRecord): {len(caTxReceivedRecord)}')
txs = []
caBalanceInput = 0
caBalanceOutput = 0
caBalanceFee = 0
ctbSelfInputSum = 0
ctbSelfOutputSum = 0
txLoopCount = 0
inputRecord = 0
outputRecord = 0
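        # inputRecord/outputRecord walk the sent/received records in lock-step
        # with caTxRecord; this assumes all three queries return rows grouped
        # and ordered consistently by tx id.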
for tx in caTxRecord:
txtemp = {}
txtempinput = []
txtempinputs = []
txtempoutput = []
txtempoutputs = []
ctbInputSum = 0
ctbOutputSum = 0
txtemp['ctbId'] = tx['ctsid']
txtemp['ctbTimeIssued'] = int(tx['ctstxtimeissued'].replace(tzinfo=pytz.utc).timestamp())
while inputRecord < len(caTxSentRecord) and caTxSentRecord[inputRecord]['ctsid'] == tx['ctsid']:
txinput = caTxSentRecord[inputRecord]
txtempinput.append(txinput['ctsinputaddr'])
txtempinput.append({ "getCoin": str(txinput['ctsinput']) })
ctbInputSum = ctbInputSum + txinput['ctsinput']
if txinput['ctsinputaddr'] == caAddress:
ctbSelfInputSum = ctbSelfInputSum + txinput['ctsinput']
txtempinputs.append(txtempinput)
txtempinput = []
inputRecord = inputRecord + 1
while outputRecord < len(caTxReceivedRecord) and caTxReceivedRecord[outputRecord]['ctsid'] == tx['ctsid']:
txoutput = caTxReceivedRecord[outputRecord]
txtempoutput.append(txoutput['ctsoutputaddr'])
txtempoutput.append({ "getCoin": str(txoutput['ctsoutput']) })
ctbOutputSum = ctbOutputSum + txoutput['ctsoutput']
if txoutput['ctsoutputaddr'] == caAddress:
ctbSelfOutputSum = ctbSelfOutputSum + txoutput['ctsoutput']
txtempoutputs.append(txtempoutput)
txtempoutput = []
outputRecord = outputRecord + 1
txtemp['ctbInputs'] = txtempinputs
txtemp['ctbOutputs'] = txtempoutputs
txtemp['ctbInputSum'] = { "getCoin": str(ctbInputSum) }
txtemp['ctbOutputSum'] = { "getCoin": str(ctbOutputSum) }
txtemp['ctsFees'] = { "getCoin": str(tx['ctsfees']) }
caBalanceFee = caBalanceFee + tx['ctsfees']
caBalanceInput = caBalanceInput + ctbInputSum
caBalanceOutput = caBalanceOutput + ctbOutputSum
txs.append(txtemp)
txLoopCount = txLoopCount + 1
caBalance = ctbSelfOutputSum - ctbSelfInputSum
addressReport = { "Right": {
"caAddress": caAddress,
"caType": "CPubKeyAddress",
"caTxNum": caTxNum,
"caBalance": { "getCoin": str(caBalance) },
"caTotalInput": { "getCoin": str(ctbSelfInputSum) },
"caTotalOutput": { "getCoin": str(ctbSelfOutputSum) },
"caTotalFee": { "getCoin": str(caBalanceFee) },
"caTxList": txs
}
}
app.logger.debug(f'caBalance: {caBalance}')
        app.logger.debug(f'caTotalInput: {ctbSelfInputSum}')
        app.logger.debug(f'caTotalOutput: {ctbSelfOutputSum}')
        app.logger.debug(f'caTotalFee: {caBalanceFee}')
p2 = time.time()
app.logger.info(f'[{p2-p1:.3f}s] API request for address summary: {caAddress}')
return addressReport
return app
app = create_app()
|
python-gurobi model/Netflow problem.py | woruoweikunlun/Python-Gurobi | 162 | 11115497 | <reponame>woruoweikunlun/Python-Gurobi
#!/usr/bin/python
# Copyright 2016, Gurobi Optimization, Inc.
# Solve a multi-commodity flow problem. Two products ('Pencils' and 'Pens')
# are produced in 2 cities ('Detroit' and 'Denver') and must be sent to
# warehouses in 3 cities ('Boston', 'New York', and 'Seattle') to
# satisfy demand ('inflow[h,i]').
#
# Flows on the transportation network must respect arc capacity constraints
# ('capacity[i,j]'). The objective is to minimize the sum of the arc
# transportation costs ('cost[i,j]').
from gurobipy import *
# Model data
commodities = ['Pencils', 'Pens']
nodes = ['Detroit', 'Denver', 'Boston', 'New York', 'Seattle']
arcs, capacity = multidict({
('Detroit', 'Boston'): 100,
('Detroit', 'New York'): 80,
('Detroit', 'Seattle'): 120,
('Denver', 'Boston'): 120,
('Denver', 'New York'): 120,
('Denver', 'Seattle'): 120 })
arcs = tuplelist(arcs)
cost = {
('Pencils', 'Detroit', 'Boston'): 10,
('Pencils', 'Detroit', 'New York'): 20,
('Pencils', 'Detroit', 'Seattle'): 60,
('Pencils', 'Denver', 'Boston'): 40,
('Pencils', 'Denver', 'New York'): 40,
('Pencils', 'Denver', 'Seattle'): 30,
('Pens', 'Detroit', 'Boston'): 20,
('Pens', 'Detroit', 'New York'): 20,
('Pens', 'Detroit', 'Seattle'): 80,
('Pens', 'Denver', 'Boston'): 60,
('Pens', 'Denver', 'New York'): 70,
('Pens', 'Denver', 'Seattle'): 30 }
inflow = {
('Pencils', 'Detroit'): 50,
('Pencils', 'Denver'): 60,
('Pencils', 'Boston'): -50,
('Pencils', 'New York'): -50,
('Pencils', 'Seattle'): -10,
('Pens', 'Detroit'): 60,
('Pens', 'Denver'): 40,
('Pens', 'Boston'): -40,
('Pens', 'New York'): -30,
('Pens', 'Seattle'): -30 }
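# Sign convention: positive inflow is supply at a plant, negative inflow is
# demand at a warehouse; each commodity's inflow sums to zero, so the network
# balances exactly.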
# Create optimization model
m = Model('netflow')
# Create variables
flow = {}
for h in commodities:
for i,j in arcs:
flow[h,i,j] = m.addVar(ub=capacity[i,j], obj=cost[h,i,j],
name='flow_%s_%s_%s' % (h, i, j))
m.update()
# Arc capacity constraints
for i,j in arcs:
m.addConstr(quicksum(flow[h,i,j] for h in commodities) <= capacity[i,j],
'cap_%s_%s' % (i, j))
# Flow conservation constraints
for h in commodities:
for j in nodes:
m.addConstr(
quicksum(flow[h,i,j] for i,j in arcs.select('*',j)) +
inflow[h,j] ==
quicksum(flow[h,j,k] for j,k in arcs.select(j,'*')),
'node_%s_%s' % (h, j))
# Compute optimal solution
m.optimize()
# Print solution
if m.status == GRB.Status.OPTIMAL:
solution = m.getAttr('x', flow)
for h in commodities:
print('\nOptimal flows for %s:' % h)
for i,j in arcs:
if solution[h,i,j] > 0:
print('%s -> %s: %g' % (i, j, solution[h,i,j]))
|
lvsr/ops.py | dendisuhubdy/attention-lvcsr | 295 | 11115600 | from __future__ import print_function
import math
try:
import fst
except ImportError:
print("No PyFST module, trying to work without it. If you want to run the "
"language model, please install openfst and PyFST")
import numpy
import theano
import itertools
from theano import tensor, Op
from theano.gradient import disconnected_type
from fuel.utils import do_not_pickle_attributes
from picklable_itertools.extras import equizip
from collections import defaultdict, deque
from toposort import toposort_flatten
from lvsr.error_rate import reward_matrix, gain_matrix
EPSILON = 0
MAX_STATES = 7
NOT_STATE = -1
def read_symbols(fname):
syms = fst.SymbolTable('eps')
with open(fname) as sf:
for line in sf:
s,i = line.strip().split()
syms[s] = int(i)
return syms
@do_not_pickle_attributes('fst')
class FST(object):
"""Picklable wrapper around FST."""
def __init__(self, path):
self.path = path
def load(self):
self.fst = fst.read(self.path)
self.isyms = dict(self.fst.isyms.items())
def __getitem__(self, state):
"""Returns all arcs of the state i"""
return self.fst[state]
def combine_weights(self, *args):
# Protection from underflow when -x is too small
        m = max(x for x in args if x is not None)  # ignore None placeholders (expand() may pass one)
return m - math.log(sum(math.exp(m - x) for x in args if x is not None))
def get_arcs(self, state, character):
return [(state, arc.nextstate, arc.ilabel, float(arc.weight))
for arc in self[state] if arc.ilabel == character]
def transition(self, states, character):
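        """Consume `character` from every weighted state in `states`.
        Returns a state -> weight map; weights are negative log scores
        combined with log-sum-exp (see combine_weights)."""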
arcs = list(itertools.chain(
*[self.get_arcs(state, character) for state in states]))
next_states = {}
for next_state in {arc[1] for arc in arcs}:
next_states[next_state] = self.combine_weights(
*[states[arc[0]] + arc[3] for arc in arcs
if arc[1] == next_state])
return next_states
def expand(self, states):
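        """Epsilon-closure: propagate weights along epsilon arcs, visiting
        states in topological order so each weight is final before reuse."""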
seen = set()
depends = defaultdict(list)
queue = deque()
for state in states:
queue.append(state)
seen.add(state)
while len(queue):
state = queue.popleft()
for arc in self.get_arcs(state, EPSILON):
depends[arc[1]].append((arc[0], arc[3]))
if arc[1] in seen:
continue
queue.append(arc[1])
seen.add(arc[1])
depends_for_toposort = {key: {state for state, weight in value}
for key, value in depends.items()}
order = toposort_flatten(depends_for_toposort)
next_states = states
for next_state in order:
next_states[next_state] = self.combine_weights(
*([next_states.get(next_state)] +
[next_states[prev_state] + weight
for prev_state, weight in depends[next_state]]))
return next_states
def explain(self, input_):
input_ = list(input_)
states = {self.fst.start: 0}
print("Initial states: {}".format(states))
states = self.expand(states)
print("Expanded states: {}".format(states))
for char, ilabel in zip(input_, [self.isyms[char] for char in input_]):
states = self.transition(states, ilabel)
print("{} consumed: {}".format(char, states))
states = self.expand(states)
print("Expanded states: {}".format(states))
result = None
for state, weight in states.items():
if numpy.isfinite(weight + float(self.fst[state].final)):
print("Finite state {} with path weight {} and its own weight {}".format(
state, weight, self.fst[state].final))
result = self.combine_weights(
result, weight + float(self.fst[state].final))
print("Total weight: {}".format(result))
return result
class FSTTransitionOp(Op):
"""Performs transition in an FST.
Given a state and an input symbol (character) returns the next state.
Parameters
----------
fst : FST instance
remap_table : dict
        Maps neural network characters to FST characters.
"""
__props__ = ()
def __init__(self, fst, remap_table):
self.fst = fst
self.remap_table = remap_table
def pad(self, arr, value):
return numpy.pad(arr, (0, MAX_STATES - len(arr)),
mode='constant', constant_values=value)
def perform(self, node, inputs, output_storage):
all_states, all_weights, all_inputs = inputs
# Each row of all_states contains a set of states
# padded with NOT_STATE.
all_next_states = []
all_next_weights = []
for states, weights, input_ in equizip(all_states, all_weights, all_inputs):
states_dict = dict(zip(states, weights))
del states_dict[NOT_STATE]
next_states_dict = self.fst.transition(
states_dict, self.remap_table[input_])
next_states_dict = self.fst.expand(next_states_dict)
if next_states_dict:
next_states, next_weights = zip(*next_states_dict.items())
else:
                # No reachable next state: no arc matches this input symbol
next_states, next_weights = [], []
all_next_states.append(self.pad(next_states, NOT_STATE))
all_next_weights.append(self.pad(next_weights, 0))
output_storage[0][0] = numpy.array(all_next_states, dtype='int64')
output_storage[1][0] = numpy.array(all_next_weights)
def make_node(self, states, weights, input_):
# check that the theano version has support for __props__
assert hasattr(self, '_props')
states = theano.tensor.as_tensor_variable(states)
weights = theano.tensor.as_tensor_variable(weights)
input_ = theano.tensor.as_tensor_variable(input_)
return theano.Apply(self,
[states, weights, input_],
[states.type(), weights.type()])
class FSTCostsOp(Op):
"""Returns transition costs for all possible input symbols.
Parameters
----------
fst : FST instance
remap_table : dict
        Maps neural network characters to FST characters.
no_transition_cost : float
Cost of going to the start state when no arc for an input
symbol is available.
Notes
-----
It is assumed that neural network characters start from zero.
"""
__props__ = ()
def __init__(self, fst, remap_table, no_transition_cost):
self.fst = fst
self.remap_table = remap_table
self.no_transition_cost = no_transition_cost
def perform(self, node, inputs, output_storage):
all_states, all_weights = inputs
all_costs = []
for states, weights in zip(all_states, all_weights):
states_dict = dict(zip(states, weights))
del states_dict[NOT_STATE]
costs = (numpy.ones(len(self.remap_table), dtype=theano.config.floatX)
* self.no_transition_cost)
if states_dict:
total_weight = self.fst.combine_weights(*states_dict.values())
for nn_character, fst_character in self.remap_table.items():
next_states_dict = self.fst.transition(states_dict, fst_character)
next_states_dict = self.fst.expand(next_states_dict)
if next_states_dict:
next_total_weight = self.fst.combine_weights(*next_states_dict.values())
costs[nn_character] = next_total_weight - total_weight
all_costs.append(costs)
output_storage[0][0] = numpy.array(all_costs)
def make_node(self, states, weights):
# check that the theano version has support for __props__
assert hasattr(self, '_props')
states = theano.tensor.as_tensor_variable(states)
weights = theano.tensor.as_tensor_variable(weights)
return theano.Apply(self,
[states, weights], [theano.tensor.matrix()])
class RewardOp(Op):
__props__ = ()
def __init__(self, eos_label, alphabet_size):
"""Computes matrices of rewards and gains."""
self.eos_label = eos_label
self.alphabet_size = alphabet_size
def perform(self, node, inputs, output_storage):
groundtruth, recognized = inputs
if (groundtruth.ndim != 2 or recognized.ndim != 2
or groundtruth.shape[1] != recognized.shape[1]):
raise ValueError
batch_size = groundtruth.shape[1]
all_rewards = numpy.zeros(
recognized.shape + (self.alphabet_size,), dtype='int64')
all_gains = numpy.zeros(
recognized.shape + (self.alphabet_size,), dtype='int64')
alphabet = list(range(self.alphabet_size))
for index in range(batch_size):
y = list(groundtruth[:, index])
y_hat = list(recognized[:, index])
try:
eos_pos = y.index(self.eos_label)
y = y[:eos_pos + 1]
            except ValueError:
# Sometimes groundtruth is in fact also a prediction
# and in this case it might not have EOS label
pass
if self.eos_label in y_hat:
y_hat_eos_pos = y_hat.index(self.eos_label)
y_hat_trunc = y_hat[:y_hat_eos_pos + 1]
else:
y_hat_trunc = y_hat
rewards_trunc = reward_matrix(
y, y_hat_trunc, alphabet, self.eos_label)
# pass freshly computed rewards to gain_matrix to speed things up
# a bit
gains_trunc = gain_matrix(y, y_hat_trunc, alphabet,
given_reward_matrix=rewards_trunc)
gains = numpy.ones((len(y_hat), len(alphabet))) * -1000
gains[:(gains_trunc.shape[0] - 1), :] = gains_trunc[:-1, :]
rewards = numpy.ones((len(y_hat), len(alphabet))) * -1
rewards[:(rewards_trunc.shape[0] - 1), :] = rewards_trunc[:-1, :]
all_rewards[:, index, :] = rewards
all_gains[:, index, :] = gains
output_storage[0][0] = all_rewards
output_storage[1][0] = all_gains
def grad(self, *args, **kwargs):
return disconnected_type(), disconnected_type()
def make_node(self, groundtruth, recognized):
recognized = tensor.as_tensor_variable(recognized)
groundtruth = tensor.as_tensor_variable(groundtruth)
return theano.Apply(
self, [groundtruth, recognized], [tensor.ltensor3(), tensor.ltensor3()])
|
pyobjus/consts/__init__.py | zenvarlab/pyobjus | 114 | 11115656 | <gh_stars>100-1000
'''
Tools to allow reading const ObjC values. See pyobjus.consts.corebluetooth for
an example of usage.
Under normal circumstances, this is just a convoluted way to generate a class
with attributes containing the default values.
But with 'PYOBJUS_DEV' in the environment, this will instead automatically
generate an ObjC source file, defining a class with getter methods returning
the requested values. That source will then be built using make_dylib, loaded
via load_dylib, and wrapped to provide the same interface as the normal class.
In addition, a report will be printed to the console showing the values from
the generated class::
ObjC Const Report - CBAdvertisementDataKeys
===========================================
LocalName = (NSString*)'kCBAdvDataLocalName'
ManufacturerData = (NSString*)'kCBAdvDataManufacturerData'
ServiceUUIDs = (NSString*)'kCBAdvDataServiceUUIDs'
This is done to provide the values so that the defaults can be set
appropriately for release.
Note that this has only been tested with NSString* values, and may need
adjustment to work with other types.
'''
from __future__ import print_function, absolute_import
from os import makedirs, environ
from os.path import expanduser, join, exists, getmtime
from hashlib import md5
from pyobjus import autoclass
from ..dylib_manager import make_dylib, load_dylib
try:
from six import with_metaclass
except ImportError:
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
const_m_template = '''\
{imports}
@interface {name} : NSObject
@end
@implementation {name}
{props}
@end
'''
const_m_import_template = '#import <{0}/{0}.h>'
const_m_prop_template = '''\
+ ({type}) get{key} {{
return {const};
}}
'''
def load_consts(name, frameworks=None, properties=None):
frameworks = ['Foundation'] + (frameworks or [])
properties = properties or {}
keys = []
consts = {}
types = {}
defaults = {}
for k, v in properties.items():
keys.append(k)
consts[k], types[k], defaults[k] = v
if 'PYOBJUS_DEV' in environ:
libdir = expanduser('~/.pyobjus/libs')
if not exists(libdir):
makedirs(libdir)
src_file = join(libdir, name + '.src')
m_file = join(libdir, name + '.m')
dylib_file = join(libdir, name + '.dylib')
        src_data = md5(str((name, frameworks, consts)).encode('utf-8')).hexdigest()  # hexdigest keeps the fingerprint a plain str for the text-mode I/O below
force_rebuild = True
if exists(src_file):
with open(src_file) as f:
prev_src_data = f.read()
if src_data == prev_src_data:
force_rebuild = False
if (force_rebuild or not exists(dylib_file) or
getmtime(dylib_file) < getmtime(src_file)):
with open(src_file, 'w') as f:
f.write(src_data)
with open(m_file, 'w') as f:
imports = '\n'.join([const_m_import_template.format(fw)
for fw in frameworks])
props = '\n'.join([const_m_prop_template.format(type=types[k],
key=k,
const=consts[k])
for k in keys])
f.write(const_m_template.format(imports=imports, name=name,
props=props))
try:
make_dylib(m_file, frameworks=frameworks)
except Exception:
# might just not be writable, in which case if the file
# exists we can load it below, otherwise load_dylib()
# will throw an exception anyway
print('failed to make dylib -', name)
try:
load_dylib(dylib_file)
objc_class = autoclass(name)
class wrapper(object):
def __getattr__(self, item):
try:
value = getattr(objc_class, 'get' + item)
except AttributeError:
return object.__getattribute__(self, item)
else:
value = value()
try:
if hasattr(value, 'cString'):
return value.cString()
elif hasattr(value, 'floatValue'):
return value.floatValue()
except Exception:
pass
return value
wrapper.__name__ = name
rv = wrapper()
print('ObjC Const Report -', name)
print('=' * (len(name) + 20))
for k in keys:
try:
v = repr(getattr(rv, k))
except Exception as e:
v = str(e)
print('{} = ({}){}'.format(k, types[k], v))
print()
return rv
except Exception:
# we don't care what the exception was, just use the defaults
print('failed to load dylib -', name)
return type(name, (object,), defaults)()
class Const(object):
def __init__(self, name, default='', type='NSString*'):
self.spec = name, type, default
class ObjcConstMeta(type):
def __new__(cls, name, bases, dct):
if name == 'ObjcConstType':
return super(ObjcConstMeta, cls).__new__(cls, name, bases, dct)
frameworks = []
props = {}
for k, v in dct.items():
if k == 'frameworks':
frameworks = v
elif isinstance(v, Const):
props[k] = v.spec
rv = load_consts(name, frameworks, props)
rv.__doc__ = dct.get('__doc__', '')
return rv
class ObjcConstType(with_metaclass(ObjcConstMeta)):
__abstract__ = True
frameworks = []
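# Illustrative subclass (key names taken from the report example in the module
# docstring; a sketch, not a shipped definition):
# class CBAdvertisementDataKeys(ObjcConstType):
#     frameworks = ['CoreBluetooth']
#     LocalName = Const('kCBAdvDataLocalName')
#     ManufacturerData = Const('kCBAdvDataManufacturerData')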
|
autofixture_tests/urls.py | jayvdb/django-autofixture | 332 | 11115674 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from django.http import HttpResponse
def handle404(request):
return HttpResponse('404')
def handle500(request):
return HttpResponse('500')
handler404 = 'autofixture_tests.urls.handle404'
handler500 = 'autofixture_tests.urls.handle500'
urlpatterns = [
]
|
adalink/errors.py | pomplesiegel/Adafruit_Adalink | 119 | 11115704 | <filename>adalink/errors.py<gh_stars>100-1000
# adalink Errors
#
# Common errors and exceptions raised by adalink.
#
# Author: <NAME>
class AdaLinkError(Exception):
"""Class to represent an error from AdaLink. Base-class for all AdaLink
errors.
"""
pass
|
autogl/module/nas/estimator/__init__.py | THUMNLab/AutoGL | 824 | 11115740 | <filename>autogl/module/nas/estimator/__init__.py
import importlib
import os
from .base import BaseEstimator
NAS_ESTIMATOR_DICT = {}
def register_nas_estimator(name):
def register_nas_estimator_cls(cls):
if name in NAS_ESTIMATOR_DICT:
raise ValueError(
"Cannot register duplicate NAS estimator ({})".format(name)
)
if not issubclass(cls, BaseEstimator):
raise ValueError(
"Model ({}: {}) must extend NAS estimator".format(name, cls.__name__)
)
NAS_ESTIMATOR_DICT[name] = cls
return cls
return register_nas_estimator_cls
from .one_shot import OneShotEstimator
from .train_scratch import TrainEstimator
def build_nas_estimator_from_name(name: str) -> BaseEstimator:
"""
Parameters
----------
name: ``str``
the name of nas estimator.
Returns
-------
BaseEstimator:
the NAS estimator built using default parameters
Raises
------
AssertionError
If an invalid name is passed in
"""
    assert name in NAS_ESTIMATOR_DICT, "NAS estimator module does not have name " + name
return NAS_ESTIMATOR_DICT[name]()
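# Illustrative lookup (the registered name is whatever one_shot/train_scratch
# pass to @register_nas_estimator; "oneshot" is assumed here):
#   estimator = build_nas_estimator_from_name("oneshot")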
__all__ = ["BaseEstimator", "OneShotEstimator", "TrainEstimator"]
|
tests/test_domaintools.py | target/huntlib | 116 | 11115759 | #!/usr/bin/env python
import os
import unittest
from multiprocessing import cpu_count
from unittest import TestCase
from huntlib.domaintools import DomainTools
import pandas as pd
import numpy as np
class TestDomainTools(TestCase):
_handle = None
_nonexistent_domain = "ladfownlasdfabwhxpaowanlsuwjn.com"
@classmethod
def setUpClass(self):
'''
Authenticate to DomainTools once, and reuse that connection for all the
tests in this module.
'''
# This uses the default creds stored in ~/.huntlibrc
dt = DomainTools()
self._handle = dt
def test_account_information(self):
limits = self._handle.account_information()
self.assertIsInstance(
limits,
dict,
f"The return type of account_information was {type(limits)} not 'dict'."
)
self.assertGreater(
len(limits),
0,
"account_information() did not return any information."
)
def test_available_api_calls(self):
api_calls = self._handle.available_api_calls()
self.assertIsInstance(
api_calls,
list,
f"The return type of available_api_calls was {type(api_calls)} not 'list'."
)
self.assertGreater(
len(api_calls),
0,
"available_api_calls() did not return any information."
)
def test_whois_domain(self):
whois = self._handle.whois('google.com')
self.assertIsInstance(
whois,
dict,
f"The return type of whois() was {type(whois)} not 'dict'."
)
self.assertIn(
"registrant",
whois,
"Couldn't find the 'registrant' field in whois data."
)
self.assertEqual(
"Google LLC",
whois['registrant'],
"The registrant information does not seem to be correct."
)
# Test against a domain that doesn't exist
whois = self._handle.whois(self._nonexistent_domain)
self.assertDictEqual(
whois,
{},
"The WHOIS for a non-existent domain was not empty."
)
def test_whois_ipv4(self):
whois = self._handle.whois('192.168.3.11')
self.assertIsInstance(
whois,
dict,
f"The return type of whois() was {type(whois)} not 'dict'."
)
self.assertIn(
"registrant",
whois,
"Couldn't find the 'registrant' field in whois data."
)
self.assertEqual(
"Google LLC",
whois['registrant'],
"The registrant information does not seem to be correct."
)
def test_parsed_whois_domain(self):
whois = self._handle.parsed_whois('google.com')
self.assertIsInstance(
whois,
dict,
f"The return type of whois() was {type(whois)} not 'dict'."
)
self.assertIn(
"registrant",
whois,
"Couldn't find the 'registrant_org' field in whois data."
)
self.assertEqual(
"Google LLC",
whois['registrant'],
"The registrant information does not seem to be correct."
)
self.assertIn(
'registration',
whois,
"Couldn't find the 'registration' field in the parsed whois data."
)
self.assertIn(
'created',
whois['registration'],
"Couldn't find the 'created' field in the 'registration' dict."
)
self.assertEqual(
"1997-09-15",
whois['registration']['created'],
"The registration create information does not seem to be correct."
)
# Test against a domain that doesn't exist
whois = self._handle.whois(self._nonexistent_domain)
self.assertDictEqual(
whois,
{},
"The parsed WHOIS for a non-existent domain was not empty."
)
def test_parsed_whois_ipv4(self):
whois = self._handle.parsed_whois('8.8.8.8')
self.assertIsInstance(
whois,
dict,
f"The return type of whois() was {type(whois)} not 'dict'."
)
def test_enrich(self):
df = pd.DataFrame(['google.com', 'microsoft.com', '8.8.8.8', 'wstwc.cn', self._nonexistent_domain], columns=["domain"])
enriched_df = self._handle.enrich(df, column='domain')
self.assertGreater(
enriched_df.shape[1],
10,
"Enriched DataFrame does not have the correct number of columns."
)
self.assertIn(
'dt_enrich.active',
enriched_df.columns,
"Could not find the 'dt_enrich.active' column in the enriched frame."
)
def test_brand_monitor(self):
# Because this depends so much on which domains are registered
# each day, it's hard to find a real term that has guaranteed
# matches. So we just look for anything with the letter 'a' in
        # it, which is virtually assured.
domains = self._handle.brand_monitor('a')
self.assertIsInstance(
domains,
list,
f"The return from brand_monitor() was a {type(domains)}, not a list."
)
self.assertGreater(
len(domains),
0,
"The brand_monitor search returned no results."
)
def test_domain_reputation(self):
# We can't predict a given domain name's exact reputation for
# testing purposes, but we can make a couple of assumptions.
# ASSUMPTION 1: We're using the domaintools API, and their own
# domain is whitelisted to give a consistent 0.0 risk score.
risk = self._handle.domain_reputation('domaintools.com')
self.assertEqual(
risk['risk_score'],
0.0,
"The 'domaintools.com' domain should have a 0.0 risk score."
)
# ASSUMPTION 2: Any given domain in a 'risky' TLD should have a positive,
# non-zero score
risk = self._handle.domain_reputation('domaintools.xyz')
self.assertGreater(
risk['risk_score'],
0.0,
"The non-existent domain should have a non-zero risk score."
)
# Finally, we just need to test that we get an empty dict when we pass in
# an IP, since the API endpoint doesn't actually support IPs
risk = self._handle.domain_reputation('8.8.8.8')
self.assertDictEqual(
risk,
{},
"Domain reputation lookup on an IP failed to return an empty dict."
)
# Test against a domain that doesn't exist
risk = self._handle.domain_reputation(self._nonexistent_domain)
self.assertDictEqual(
risk,
{},
"Domain reputation result for a non-existent domain was not empty."
)
def test_risk(self):
risk = self._handle.risk('wstwc.cn')
self.assertIsInstance(
risk,
dict,
"Risk scoring should have returned a dict."
)
self.assertIn(
"proximity",
risk,
"The returned risk data did not contain a 'proximity' risk value."
)
# Test against a domain that doesn't exist
risk = self._handle.risk(self._nonexistent_domain)
self.assertDictEqual(
risk,
dict(),
"The returned risk data for a non-existent domain was not empty."
)
|
unittest_reinvent/running_modes/lib_invent_tests/logger_tests/__init__.py | lilleswing/Reinvent-1 | 183 | 11115772 | <reponame>lilleswing/Reinvent-1
from unittest_reinvent.running_modes.lib_invent_tests.logger_tests.test_reinforcement_logger import \
TestReinforcementLogger |
test/test_khan_dl.py | rand-net/khan-dl | 835 | 11115779 | import unittest
import youtube_dl
import sys
sys.path.append("../khan_dl")
from khan_dl.khan_dl import *
class TestKhanDL(unittest.TestCase):
def test_get_courses(self):
print("test_get_courses")
khan_dl = KhanDL()
courses, courses_url = khan_dl.get_courses("https://www.khanacademy.org/math")
self.assertIsNotNone(courses)
self.assertIsNotNone(courses_url)
self.assertEqual(len(courses), len(courses_url))
def test_get_course_page(self):
print("test_get_course_page")
khan_dl = KhanDL()
khan_dl.course_url = "https://www.khanacademy.org/math/precalculus"
khan_dl.get_course_page()
self.assertIsNotNone(khan_dl.course_page)
def test_get_course_title(self):
print("test_get_course_title")
khan_dl = KhanDL()
khan_dl.course_url = "https://www.khanacademy.org/math/precalculus"
khan_dl.get_course_page()
khan_dl.get_course_title()
self.assertEqual(khan_dl.course_title, "Precalculus")
def test_get_course_unit_urls(self):
print("test_get_course_unit_urls")
khan_dl = KhanDL()
khan_dl.course_url = "https://www.khanacademy.org/math/precalculus"
khan_dl.get_course_page()
khan_dl.get_course_unit_urls()
self.assertEqual(len(khan_dl.course_unit_urls), 10)
def test_get_course_unit_titles(self):
print("test_get_course_unit_titles")
khan_dl = KhanDL()
khan_dl.course_url = "https://www.khanacademy.org/math/precalculus"
khan_dl.get_course_page()
khan_dl.get_course_unit_titles()
self.assertIsNotNone(khan_dl.course_unit_titles)
self.assertEqual(len(khan_dl.course_unit_titles), 10)
def test_get_course_unit_slugs(self):
print("test_get_course_unit_slugs")
khan_dl = KhanDL()
khan_dl.course_url = "https://www.khanacademy.org/math/precalculus"
khan_dl.get_course_page()
khan_dl.get_course_title()
khan_dl.get_course_unit_titles()
khan_dl.get_course_unit_slugs()
self.assertEqual(len(khan_dl.course_unit_slugs), 10)
def test_youtube_dl_down_playlist(self):
print("test_youtube_dl_down_playlist")
course_unit_url = (
"https://www.khanacademy.org/math/precalculus/x9e81a4f98389efdf:complex"
)
lesson_youtube_ids = []
youtube_dl_opts = {}
with youtube_dl.YoutubeDL(youtube_dl_opts) as ydl:
info_dict = ydl.extract_info(course_unit_url, download=False)
for video in info_dict["entries"]:
video_id = video.get("id", None)
lesson_youtube_ids.append(video_id)
self.assertIsNotNone(lesson_youtube_ids)
self.assertEqual(len(lesson_youtube_ids), 22)
def test_lesson_title_match_youtube_ids(self):
print("test_lesson_title_match_youtube_ids")
khan_dl = KhanDL()
khan_dl.course_url = "https://www.khanacademy.org/math/trigonometry"
khan_dl.get_course_page()
khan_dl.get_course_title()
khan_dl.get_course_unit_titles()
khan_dl.get_course_unit_slugs()
khan_dl.get_course_unit_urls()
khan_dl.get_course_all_slugs()
khan_dl.get_course_youtube_ids()
self.assertEqual(len(khan_dl.course_all_slugs), len(khan_dl.lesson_youtube_ids))
if __name__ == "__main__":
unittest.main()
|
boto3_type_annotations_with_docs/boto3_type_annotations/elastictranscoder/waiter.py | cowboygneox/boto3_type_annotations | 119 | 11115791 | from typing import Dict
from botocore.waiter import Waiter
class JobComplete(Waiter):
def wait(self, Id: str, WaiterConfig: Dict = None):
"""
Polls :py:meth:`ElasticTranscoder.Client.read_job` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/elastictranscoder-2012-09-25/ReadJob>`_
**Request Syntax**
::
waiter.wait(
Id='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type Id: string
:param Id: **[REQUIRED]**
The identifier of the job for which you want to get detailed information.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 30
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 120
:returns: None
"""
pass
|
blacksheep/server/authentication/jwt.py | q0w/BlackSheep | 420 | 11115823 | from typing import Optional, Sequence
from guardpost.asynchronous.authentication import AuthenticationHandler
from guardpost.authentication import Identity
from guardpost.jwks import KeysProvider
from guardpost.jwts import InvalidAccessToken, JWTValidator
from jwt.exceptions import InvalidTokenError
from blacksheep.baseapp import get_logger
from blacksheep.messages import Request
class JWTBearerAuthentication(AuthenticationHandler):
"""
AuthenticationHandler that can parse and verify JWT Bearer access tokens to identify
users.
JWTs are validated using public RSA keys, and keys can be fetched automatically from
OpenID Connect (OIDC) discovery, if an `authority` is provided.
It is possible to use several instances of this class, to support authentication
through several identity providers (e.g. Azure Active Directory, Auth0, Azure Active
Directory B2C).
"""
def __init__(
self,
*,
valid_audiences: Sequence[str],
valid_issuers: Optional[Sequence[str]] = None,
authority: Optional[str] = None,
require_kid: bool = True,
keys_provider: Optional[KeysProvider] = None,
keys_url: Optional[str] = None,
cache_time: float = 10800,
auth_mode: str = "JWT Bearer"
):
"""
Creates a new instance of JWTBearerAuthentication, which tries to
obtains the identity of the user from the "Authorization" request header,
handling JWT Bearer tokens. Only standard authorization headers starting
with the `Bearer ` string are handled.
Parameters
----------
valid_audiences : Sequence[str]
Sequence of acceptable audiences (aud).
valid_issuers : Optional[Sequence[str]]
Sequence of acceptable issuers (iss). Required if `authority` is not
provided. If authority is specified and issuers are not, then valid
issuers are set as [authority].
authority : Optional[str], optional
If provided, keys are obtained from a standard well-known endpoint.
This parameter is ignored if `keys_provider` is given.
require_kid : bool, optional
According to the specification, a key id is optional in JWK. However,
this parameter lets control whether access tokens missing `kid` in their
headers should be handled or rejected. By default True, thus only JWTs
having `kid` header are accepted.
keys_provider : Optional[KeysProvider], optional
If provided, the exact `KeysProvider` to be used when fetching keys.
By default None
keys_url : Optional[str], optional
If provided, keys are obtained from the given URL through HTTP GET.
This parameter is ignored if `keys_provider` is given.
cache_time : float, optional
If >= 0, JWKS are cached in memory and stored for the given amount in
seconds. By default 10800 (3 hours).
auth_mode : str, optional
When authentication succeeds, the declared authentication mode. By default,
"JWT Bearer".
"""
self.logger = get_logger()
if authority and not valid_issuers:
valid_issuers = [authority]
if not authority and not valid_issuers:
raise TypeError("Specify either an authority or valid issuers.")
assert valid_issuers is not None
self._validator = JWTValidator(
authority=authority,
require_kid=require_kid,
keys_provider=keys_provider,
keys_url=keys_url,
valid_issuers=valid_issuers,
valid_audiences=valid_audiences,
cache_time=cache_time,
)
self.auth_mode = auth_mode
self._validator.logger = self.logger
async def authenticate(self, context: Request) -> Optional[Identity]:
authorization_value = context.get_first_header(b"Authorization")
if not authorization_value:
context.identity = Identity({})
return None
if not authorization_value.startswith(b"Bearer "):
self.logger.debug(
"Invalid Authorization header, not starting with `Bearer `, "
"the header is ignored."
)
context.identity = Identity({})
return None
token = authorization_value[7:].decode()
try:
decoded = await self._validator.validate_jwt(token)
except (InvalidAccessToken, InvalidTokenError) as ex:
# pass, because the application might support more than one
# authentication method and several JWT Bearer configurations
self.logger.debug(
"JWT Bearer - access token not valid for this configuration: %s",
str(ex),
)
pass
else:
context.identity = Identity(decoded, self.auth_mode)
return context.identity
context.identity = Identity({})
return None
|
querybook/server/logic/result_store.py | shivammmmm/querybook | 1,144 | 11115832 | <filename>querybook/server/logic/result_store.py
import csv
from io import StringIO
import sys
from typing import List
from datetime import datetime
from app.db import with_session
from models.result_store import KeyValueStore
# HACK: https://stackoverflow.com/questions/15063936/csv-error-field-larger-than-field-limit-131072
csv.field_size_limit(sys.maxsize)
@with_session
def create_key_value_store(key, value, commit=True, session=None):
return KeyValueStore.create(
{"key": key, "value": value}, commit=commit, session=session
)
@with_session
def update_key_value_store(key, value, commit=True, session=None):
kvs = get_key_value_store(key, session=session)
kvs.value = value
kvs.updated_at = datetime.utcnow()
if commit:
session.commit()
else:
session.flush()
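    # Touching kvs.id here forces the flushed/expired primary key attribute to
    # load while the session is still available.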
kvs.id
return kvs
@with_session
def upsert_key_value_store(key, value, commit=True, session=None):
kvp = get_key_value_store(key, session=session)
if kvp:
return update_key_value_store(key, value, commit, session=session)
else:
return create_key_value_store(key, value, commit, session=session)
@with_session
def get_key_value_store(key, session=None):
return KeyValueStore.get(session=session, key=key)
@with_session
def delete_key_value_store(key, commit=True, session=None):
item = get_key_value_store(key=key, session=session)
if item:
session.delete(item)
if commit:
session.commit()
def string_to_csv(raw_csv_str: str) -> List[List[str]]:
# Remove NULL byte to make sure csv conversion works
raw_csv_str = raw_csv_str.replace("\x00", "")
result = []
if len(raw_csv_str) > 0:
raw_results = StringIO(raw_csv_str)
csv_reader = csv.reader(raw_results, delimiter=",")
result = [row for row in csv_reader]
return result
|
python/src/main/python/pygw/statistics/binning_strategy/numeric_range_field_value_binning_strategy.py | radiant-maxar/geowave | 280 | 11115855 | <reponame>radiant-maxar/geowave<filename>python/src/main/python/pygw/statistics/binning_strategy/numeric_range_field_value_binning_strategy.py<gh_stars>100-1000
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from pygw.base.type_conversions import StringArrayType
from pygw.config import geowave_pkg
from .field_value_binning_strategy import FieldValueBinningStrategy
class NumericRangeFieldValueBinningStrategy(FieldValueBinningStrategy):
"""
Statistic binning strategy that bins statistic values by the numeric representation of the value of a given field.
By default it will truncate decimal places and will bin by the integer. However, an "offset" and "interval" can be
provided to bin numbers at any regular step-sized increment from an origin value. A statistic using this binning
strategy can be constrained using numeric ranges (A Range can be used as a constraint).
"""
def __init__(self, fields=None, interval=1, offset=0, java_ref=None):
if java_ref is None:
java_ref = geowave_pkg.core.store.statistics.binning.NumericRangeFieldValueBinningStrategy(
float(interval), float(offset), StringArrayType().to_java(fields))
super().__init__(fields, java_ref)
|
configs/_base_/det_pipelines/psenet_pipeline.py | yuexy/mmocr | 2,261 | 11115894 | <gh_stars>1000+
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
dict(
type='LoadTextAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(
type='ScaleAspectJitter',
img_scale=[(3000, 736)],
ratio_range=(0.5, 3),
aspect_ratio_range=(1, 1),
multiscale_mode='value',
long_size_bound=1280,
short_size_bound=640,
resize_type='long_short_bound',
keep_ratio=False),
dict(type='PSENetTargets'),
dict(type='RandomFlip', flip_ratio=0.5, direction='horizontal'),
dict(type='RandomRotateTextDet'),
dict(
type='RandomCropInstances',
target_size=(640, 640),
instance_key='gt_kernels'),
dict(type='Pad', size_divisor=32),
dict(
type='CustomFormatBundle',
keys=['gt_kernels', 'gt_mask'],
visualize=dict(flag=False, boundary_key='gt_kernels')),
dict(type='Collect', keys=['img', 'gt_kernels', 'gt_mask'])
]
# for ctw1500
img_scale_test_ctw1500 = (1280, 1280)
test_pipeline_ctw1500 = [
dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale_test_ctw1500,
flip=False,
transforms=[
dict(type='Resize', img_scale=(1280, 1280), keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# for icdar2015
img_scale_test_icdar2015 = (2240, 2240)
test_pipeline_icdar2015 = [
dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale_test_icdar2015,
flip=False,
transforms=[
dict(type='Resize', img_scale=(1280, 1280), keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
|
contracts/migrations/0021_idv_piid_verbose_name.py | mepsd/CLAC | 126 | 11115898 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-10 18:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contracts', '0020_add_indexes'),
]
operations = [
migrations.AlterField(
model_name='contract',
name='idv_piid',
field=models.CharField(db_index=True, max_length=128, verbose_name='contract number'),
),
]
|
tools/kernel_names.py | datalayer-contrib/jupyterwidgets-tutorial | 342 | 11115912 | <reponame>datalayer-contrib/jupyterwidgets-tutorial
from argparse import ArgumentParser
from pathlib import Path
import nbformat
NB_VERSION = 4
def change_kernel_name(notebook_name, kernel_name, display_name=None):
"""
Change the name of the notebook kernel.
"""
dname = display_name if display_name else kernel_name
notebook = nbformat.read(notebook_name, NB_VERSION)
current_kname = notebook['metadata']['kernelspec']['name']
current_dname = notebook['metadata']['kernelspec']['display_name']
if current_kname == kernel_name and current_dname == dname:
print('not changing kernel of {}'.format(notebook_name))
return
notebook['metadata']['kernelspec']['name'] = kernel_name
notebook['metadata']['kernelspec']['display_name'] = dname
# print('\nHad this been a real operation, would have changed {}'.format(notebook_name))
# print('\t\tcurrent: {} to new: {}\n'.format(current_kname, kernel_name))
nbformat.write(notebook, notebook_name)
def get_kernel_name(notebook_name):
"""
Return the name of the kernel in the notebook.
"""
notebook = nbformat.read(notebook_name, NB_VERSION)
kname = notebook['metadata']['kernelspec']['name']
return kname
if __name__ == '__main__':
parser = ArgumentParser(description='Get or set kernel names for all '
'notebooks in a directory.')
parser.add_argument('-d', '--directory', default='.',
help='Directory in which to look for notebooks.')
parser.add_argument('-s', '--set',
dest='kernel_name',
metavar='kernel_name',
help="Set the kernel to this name for each notebook.")
parser.add_argument('--display-name',
help="Display name of the kernel (default is same as "
"kernel name).")
args = parser.parse_args()
directory = args.directory if args.directory else '.'
p = Path(directory)
notebooks = list(p.glob('**/*.ipynb'))
if not notebooks:
raise RuntimeError('No notebooks found at path {}'.format(directory))
for notebook in notebooks:
nb_str = str(notebook)
if args.kernel_name:
change_kernel_name(nb_str, args.kernel_name,
display_name=args.display_name)
else:
kname = get_kernel_name(nb_str)
print('{}\t\t\t\t{}'.format(nb_str, kname))
|
test/test_bindings.py | hujiawei-sjtu/sdf_tools | 159 | 11115932 | #! /usr/bin/env python
import unittest
import numpy as np
from sdf_tools import utils_2d
class TestSDFTools(unittest.TestCase):
def test_sdf_tools(self):
res = 0.05
x_width = 20
y_height = 40
grid_world = np.zeros([y_height, x_width], dtype=np.uint8)
grid_world[1, 3] = 1
center_x = 0
center_y = 0
sdf_origin = [center_x - x_width / 2, center_y - y_height / 2]
sdf, sdf_gradient = utils_2d.compute_sdf_and_gradient(grid_world, res, sdf_origin)
self.assertAlmostEqual(sdf[1, 3], -res)
self.assertAlmostEqual(sdf[2, 3], res)
self.assertAlmostEqual(sdf[0, 3], res)
self.assertAlmostEqual(sdf[1, 2], res)
self.assertAlmostEqual(sdf[1, 4], res)
self.assertGreater(sdf[3, 6], 3 * res)
self.assertEqual(sdf.shape, (y_height, x_width))
self.assertEqual(sdf_gradient.shape, (y_height, x_width, 2))
np.testing.assert_allclose(sdf_gradient[1, 4], [1.5, 0])
if __name__ == '__main__':
unittest.main()
|
sasila/system_normal/manager/spider_manager.py | iiiusky/Sasila | 327 | 11115959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import threading
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf-8')
class SpiderManager(object):
def __init__(self):
self.spider_list = dict()
def set_spider(self, spider):
self.spider_list[spider._spider_id] = spider
def del_spider(self, spider_id):
        if spider_id in self.spider_list:
self.spider_list[spider_id].stop()
del self.spider_list[spider_id]
def init_system(self):
pass
def get_all_spider(self):
        return json.dumps(list(self.spider_list.keys()))  # dict_keys is not JSON-serializable on Python 3
def find_spider(self, spider_id):
pass
def start_spider(self, spider_id):
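        # Only launch a new daemon worker thread when the spider is stopped.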
if self.spider_list[spider_id]._spider_status == "stopped":
thread = threading.Thread(target=self.spider_list[spider_id].start)
thread.setDaemon(True)
thread.start()
def restart_spider(self, spider_id):
thread = threading.Thread(target=self.spider_list[spider_id].restart)
thread.setDaemon(True)
thread.start()
def stop_spider(self, spider_id):
self.spider_list[spider_id].stop()
def get_spider_detail(self, spider_id):
return str(self.spider_list[spider_id]._process_count)
|
reclist/datasets.py | nsbits/reclist | 183 | 11116008 | import json
import tempfile
import zipfile
import os
from reclist.abstractions import RecDataset
from reclist.utils.config import *
class MovieLensDataset(RecDataset):
"""
MovieLens 25M Dataset
Reference: https://files.grouplens.org/datasets/movielens/ml-25m-README.html
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def load(self):
cache_dir = get_cache_directory()
filepath = os.path.join(cache_dir, "movielens_25m.zip")
if not os.path.exists(filepath) or self.force_download:
download_with_progress(MOVIELENS_DATASET_S3_URL, filepath)
with tempfile.TemporaryDirectory() as temp_dir:
with zipfile.ZipFile(filepath, "r") as zip_file:
zip_file.extractall(temp_dir)
with open(os.path.join(temp_dir, "dataset.json")) as f:
data = json.load(f)
self._x_train = data["x_train"]
self._y_train = None
self._x_test = data["x_test"]
self._y_test = data["y_test"]
self._catalog = self._convert_catalog_keys(data["catalog"])
def _convert_catalog_keys(self, catalog):
"""
        Convert catalog keys from string to integer type.
        JSON encodes all keys as strings, so the catalog dictionary
        is loaded with string representations of movie IDs.
"""
converted_catalog = {}
for k, v in catalog.items():
converted_catalog[int(k)] = v
return converted_catalog
class CoveoDataset(RecDataset):
"""
Coveo SIGIR data challenge dataset
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def load(self):
cache_directory = get_cache_directory()
filename = os.path.join(cache_directory, "coveo_sigir.zip") # TODO: make var somewhere
if not os.path.exists(filename) or self.force_download:
download_with_progress(COVEO_INTERACTION_DATASET_S3_URL, filename)
with tempfile.TemporaryDirectory() as temp_dir:
with zipfile.ZipFile(filename, 'r') as zip_ref:
zip_ref.extractall(temp_dir)
with open(os.path.join(temp_dir, 'dataset.json')) as f:
data = json.load(f)
self._x_train = data["x_train"]
self._y_train = None
self._x_test = data["x_test"]
self._y_test = data["y_test"]
self._catalog = data["catalog"]
class SpotifyDataset(RecDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def load(self):
data = self.load_spotify_playlist_dataset()
self._x_train = data["train"]
self._y_train = None
self._x_test = data['test']
self._y_test = None
self._catalog = data["catalog"]
# generate NEP dataset here for now
test_pairs = [(playlist[:-1], [playlist[-1]]) for playlist in self._x_test if len(playlist) > 1]
self._x_test, self._y_test = zip(*test_pairs)
def load_spotify_playlist_dataset(self):
cache_directory = get_cache_directory()
filename = os.path.join(cache_directory, "small_spotify_playlist.zip") # TODO: make var somewhere
if not os.path.exists(filename) or self.force_download:
download_with_progress(SPOTIFY_PLAYLIST_DATASET_S3_URL, filename)
with tempfile.TemporaryDirectory() as temp_dir:
with zipfile.ZipFile(filename, 'r') as zip_ref:
zip_ref.extractall(temp_dir)
with open(os.path.join(temp_dir, 'dataset.json')) as f:
data = json.load(f)
return data
|
test/test_kp_term_in_kp.py | Usama0121/flashtext | 5,330 | 11116018 | <filename>test/test_kp_term_in_kp.py
from collections import defaultdict
from flashtext import KeywordProcessor
import logging
import unittest
import json
import re
logger = logging.getLogger(__name__)
class TestKPDictionaryLikeFeatures(unittest.TestCase):
def setUp(self):
logger.info("Starting...")
def tearDown(self):
logger.info("Ending.")
def test_term_in_dictionary(self):
keyword_processor = KeywordProcessor()
keyword_processor.add_keyword('j2ee', 'Java')
keyword_processor.add_keyword('colour', 'color')
keyword_processor.get_keyword('j2ee')
self.assertEqual(keyword_processor.get_keyword('j2ee'),
'Java',
"get_keyword didn't return expected Keyword")
self.assertEqual(keyword_processor['colour'],
'color',
"get_keyword didn't return expected Keyword")
self.assertEqual(keyword_processor['Test'],
None,
"get_keyword didn't return expected Keyword")
self.assertTrue('colour' in keyword_processor,
"get_keyword didn't return expected Keyword")
self.assertFalse('Test' in keyword_processor,
"get_keyword didn't return expected Keyword")
def test_term_in_dictionary_case_sensitive(self):
keyword_processor = KeywordProcessor(case_sensitive=True)
keyword_processor.add_keyword('j2ee', 'Java')
keyword_processor.add_keyword('colour', 'color')
keyword_processor.get_keyword('j2ee')
self.assertEqual(keyword_processor.get_keyword('j2ee'),
'Java',
"get_keyword didn't return expected Keyword")
self.assertEqual(keyword_processor['colour'],
'color',
"get_keyword didn't return expected Keyword")
self.assertEqual(keyword_processor['J2ee'],
None,
"get_keyword didn't return expected Keyword")
self.assertTrue('colour' in keyword_processor,
"get_keyword didn't return expected Keyword")
self.assertFalse('Colour' in keyword_processor,
"get_keyword didn't return expected Keyword")
if __name__ == '__main__':
unittest.main()
|
Angel/Libraries/DevIL-SDK-1.7.8/srcs/BuildImageLibraries.py | Tifox/Grog-Knight | 171 | 11116045 | <filename>Angel/Libraries/DevIL-SDK-1.7.8/srcs/BuildImageLibraries.py<gh_stars>100-1000
#!/usr/bin/python
# This file builds the Mac versions of all the image libraries.
# It won't work on Windows as it's using the Unix build methods.
import os
import sys
import shutil
FILE_PATH = os.path.abspath(__file__)
ROOT_DIR = os.path.dirname(FILE_PATH)
INST_DIR = os.path.join(ROOT_DIR, "install")
BASE_CFLAGS = "-I%s/include -L%s/lib -m32" % (INST_DIR, INST_DIR)
BASE_CFLAGS += " -O -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.7.sdk -mmacosx-version-min=10.7 -arch i386"
BASE_CPPFLAGS = BASE_CFLAGS
BASE_LDFLAGS = "-L%s/lib -lstdc++ -m32 -arch i386" % (INST_DIR)
BASE_ARCHFLAGS = "-arch i386"
CLEAN = False
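# LIBLIST maps a short name to [archive filename, unpacked directory name,
# dylibs produced by the build].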
LIBLIST = {
"devil": ["DevIL-1.7.8.tar.gz", "devil-1.7.8", ["libIL.dylib", "libILU.dylib", "libILUT.dylib"]],
"jasper": ["jasper-1.900.1.zip", "jasper-1.900.1", ["libjasper.dylib"]],
"libjpeg": ["jpegsrc.v8a.tar.gz", "jpeg-8a", ["libjpeg.dylib"]],
"lcms": ["lcms-1.19.tar.gz", "lcms-1.19", ["liblcms.dylib"]],
"libpng": ["libpng-1.4.1.tar.gz", "libpng-1.4.1", ["libpng.dylib"]],
"libmng": ["libmng-1.0.10.tar.gz", "libmng-1.0.10", ["libmng.dylib"]],
"libtiff": ["tiff-3.9.2.tar.gz", "tiff-3.9.2", ["libtiff.dylib"]],
}
def resetStuff():
os.chdir(ROOT_DIR)
os.environ['CFLAGS'] = BASE_CFLAGS
os.environ['CPPFLAGS'] = BASE_CPPFLAGS
os.environ['LDFLAGS'] = BASE_LDFLAGS
os.environ['ARCHFLAGS'] = BASE_ARCHFLAGS
#####################################
# clean up if necessary #
#####################################
if "clean" in sys.argv:
CLEAN = True
if (CLEAN):
if (os.path.exists(INST_DIR)):
os.system("rm -rf %s" % INST_DIR)
for lib, data in LIBLIST.iteritems():
if (os.path.exists(data[1])):
print "Clearing out %s" % data[1]
os.system("rm -rf %s" % data[1])
sys.exit(0)
#####################################
# unarchive everything #
#####################################
for libname, data in LIBLIST.iteritems():
lib = data[0]
directory = data[1]
if os.path.exists(directory):
print "Skipping %s, %s already exists" % (lib, directory)
continue
if (libname == "libpng"):
print "Copying libpng from Angel libraries..."
os.system("cp -R ../../libpng-1.4.1 ./")
elif lib.endswith(".zip"):
print "Unzipping %s..." % lib
os.system("unzip %s > /dev/null" % lib)
elif (lib.endswith(".tar.gz") or lib.endswith(".tgz")):
print "Untarring %s..." % lib
os.system("tar -xzf %s > /dev/null" % lib)
else:
print "Problem, don't know how to unarchive %s" % lib
#####################################
# build libpng #
#####################################
if ("libpng" in LIBLIST.keys()):
resetStuff()
os.chdir(LIBLIST["libpng"][1])
config_string = "./configure --disable-dependency-tracking --prefix=%s --host=i686-apple-darwin10 " % (INST_DIR)
os.system(config_string)
os.system('make install')
os.system('install_name_tool -id @executable_path/../Frameworks/libpng.dylib %s/lib/libpng.dylib' % (INST_DIR))
#####################################
# build libjpeg #
#####################################
if ("libjpeg" in LIBLIST.keys()):
resetStuff()
os.chdir(LIBLIST["libjpeg"][1])
config_string = "./configure --prefix=%s --host=i686-apple-darwin10 " % (INST_DIR)
os.system(config_string)
os.system('make install')
os.system('install_name_tool -id @executable_path/../Frameworks/libjpeg.dylib %s/lib/libjpeg.dylib' % (INST_DIR))
#####################################
# build libtiff #
#####################################
if ("libtiff" in LIBLIST.keys()):
resetStuff()
os.chdir(LIBLIST["libtiff"][1])
config_string = "./configure --prefix=%s --enable-shared --host=i686-apple-darwin10 " % (INST_DIR)
os.system(config_string)
os.system('make install')
os.system('install_name_tool -id @executable_path/../Frameworks/libtiff.dylib %s/lib/libtiff.dylib' % (INST_DIR))
#####################################
# build liblcms #
#####################################
if ("lcms" in LIBLIST.keys()):
resetStuff()
os.chdir(LIBLIST["lcms"][1])
config_string = "./configure --prefix=%s --host=i686-apple-darwin10 " % (INST_DIR)
os.system(config_string)
os.system('make install')
os.system('install_name_tool -id @executable_path/../Frameworks/liblcms.dylib %s/lib/liblcms.dylib' % (INST_DIR))
#####################################
# build libmng #
#####################################
if ("libmng" in LIBLIST.keys()):
resetStuff()
os.chdir(LIBLIST["libmng"][1])
os.environ['LIBTOOLIZE'] = "glibtoolize"
os.system("ln -s makefiles/configure.in .")
os.system("ln -s makefiles/acinclude.m4 .")
os.system("ln -s makefiles/Makefile.am .")
# need to patch the configure file to build with latest autoconf
srcFile = "configure.in"
srcFileHandle = open(srcFile)
srcData = srcFileHandle.read()
srcFileHandle.close()
badString = "\nAM_C_PROTOTYPES"
goodString = ""
srcData = srcData.replace(badString, goodString)
srcFileHandle = open(srcFile, 'w')
srcFileHandle.write(srcData)
srcFileHandle.close()
os.system("autoreconf -fvi")
config_string = "./configure --prefix=%s --host=i686-apple-darwin10 " % (INST_DIR)
os.system(config_string)
os.system('make install')
os.system('install_name_tool -id @executable_path/../Frameworks/libmng.dylib %s/lib/libmng.dylib' % (INST_DIR))
#####################################
# build libjasper #
#####################################
if ("jasper" in LIBLIST.keys()):
resetStuff()
os.chdir(LIBLIST["jasper"][1])
config_string = "./configure --prefix=%s --enable-shared --host=i686-apple-darwin10 " % (INST_DIR)
os.system(config_string)
os.system('make install')
os.system('install_name_tool -id @executable_path/../Frameworks/libjasper.dylib %s/lib/libjasper.dylib' % (INST_DIR))
#####################################
# build libil #
#####################################
if ("devil" in LIBLIST.keys()):
resetStuff()
os.chdir(LIBLIST["devil"][1])
# ILU contains a bug that keeps it from building on OS X. Let's patch it.
srcFile = "src-ILU/ilur/ilur.c"
srcFileHandle = open(srcFile)
srcData = srcFileHandle.read()
srcFileHandle.close()
badString = """
#include <malloc.h>
"""
goodString = """
#if defined(__APPLE__)
#include "sys/malloc.h"
#else
#include <malloc.h>
#endif
"""
srcData = srcData.replace(badString, goodString)
srcFileHandle = open(srcFile, 'w')
srcFileHandle.write(srcData)
srcFileHandle.close()
# replace old png functions with new hotness
for oldFile in ['src-IL/src/il_icon.c', 'src-IL/src/il_png.c']:
oldFileHandle = open(oldFile)
oldData = oldFileHandle.read()
oldFileHandle.close()
badString = "png_set_gray_1_2_4_to_8"
goodString = "png_set_expand_gray_1_2_4_to_8"
oldData = oldData.replace(badString, goodString)
badString = "png_check_sig(Signature, 8);"
goodString = "!png_sig_cmp(Signature, 0, 8);"
newData = oldData.replace(badString, goodString)
oldFileHandle = open(oldFile, 'w')
oldFileHandle.write(newData)
oldFileHandle.close()
config_string = "./configure --prefix=%s --enable-ILU --enable-ILUT --host=i686-apple-darwin10" % (INST_DIR)
print(config_string)
os.system(config_string)
os.system('make install')
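    # fix up install names and inter-library references: ILU and ILUT link against
    # libIL, and ILUT additionally links against libILU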
for f in ['IL', 'ILU', 'ILUT']:
correction_string = "install_name_tool -id @executable_path/../Frameworks/lib%s.dylib %s/lib/lib%s.1.dylib" % (f, INST_DIR, f)
os.system(correction_string)
if (f == 'ILU' or f == 'ILUT'):
correction_string = "install_name_tool -change %s/lib/libIL.1.dylib @executable_path/../Frameworks/libIL.dylib %s/lib/lib%s.1.dylib" % (INST_DIR, INST_DIR, f)
os.system(correction_string)
if (f == 'ILUT'):
correction_string = "install_name_tool -change %s/lib/libILU.1.dylib @executable_path/../Frameworks/libILU.dylib %s/lib/lib%s.1.dylib" % (INST_DIR, INST_DIR, f)
os.system(correction_string)
move_string = "mv %s/lib/lib%s.1.dylib %s/lib/lib%s.dylib" % (INST_DIR, f, INST_DIR, f)
os.system(move_string)
#####################################
# copying #
#####################################
resetStuff()
libpath = os.path.join(INST_DIR, "lib")
exppath = os.path.join(INST_DIR, "exp")
if not os.path.exists(libpath):
os.makedirs(libpath)
if not os.path.exists(exppath):
os.makedirs(exppath)
os.chdir(libpath)
for libname, data in LIBLIST.items():
files = data[2]
for f in files:
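        # resolve symlinks so the real dylib, not a version-suffixed link, is exported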
true_lib = os.path.realpath(f)
shutil.copyfile(true_lib, os.path.join(exppath, f))
#####################################
# cleanup #
#####################################
# we don't care about the extra stuff that most of these libs install
# for dd in ["bin", "man", "share", "lib/pkgconfig", "lib/*.la"]:
# dd = os.path.join(INST_DIR, dd)
# os.system("rm -rf %s" % dd)
|
test/test_console_notifier.py | trae-horton/secret-bridge | 152 | 11116061 | <reponame>trae-horton/secret-bridge<filename>test/test_console_notifier.py<gh_stars>100-1000
import unittest
from pathlib import Path
from config import Config
from notifiers import Registry
from models.finding import Finding
class TestConsoleNotifier(unittest.TestCase):
def test_console_notifier(self):
config_path = Path(__file__).parent.parent / 'config.toml'
config = Config.load_file(config_path)
notifier = Registry.get("console")(config)
findings = [
Finding("testfile.py", 123, "test_secret_type",
"https://www.example.com")
]
notifier.process(findings, 'test-detector')
if __name__ == "__main__":
unittest.main()
|
ipypublish/__init__.py | parmentelat/ipypublish | 220 | 11116082 | from ipypublish.scripts import nb_setup # noqa: F401
__version__ = "0.10.12"
|
tests/basic_checks/issue_46_non_existant_signal_column.py | jmabry/pyaf | 377 | 11116106 | import numpy as np
import pandas as pd
import pyaf.ForecastEngine as autof
try:
df = pd.DataFrame([[0 , 0.54543]], columns = ['date' , 'signal'])
lEngine = autof.cForecastEngine()
lEngine.train(df , 'date' , 'signal_non_existant', 1);
raise Exception("NOT_OK")
except Exception as e:
# should fail
print(str(e));
assert(str(e) == "PYAF_ERROR_SIGNAL_COLUMN_NOT_FOUND signal_non_existant")
if(str(e) == "NOT_OK"):
raise
pass
|
src/brain.py | Atharv-cyber/Jarvis-1 | 251 | 11116117 | <gh_stars>100-1000
import re
import webbrowser
import os
import random
import urllib
import thread
import yaml
from src import google_tts
from src.wikipedia import wikipedia
from src import network
from src.some_functions import *
from src import common
speak_engine = google_tts.Google_TTS()
with open('config.yml', 'r') as f:
    config = yaml.safe_load(f)
class Brain():
'''
This class will load core things in Jarvis' brain
'''
def process(self, text):
words = text.lower().split(' ')
if 'open' in words:
speak_engine.say("I'm on it. Stand By.")
websites = config["config"]["websites"]
website_to_open = text[text.index('open') + 5:]
if website_to_open in websites:
url = websites[website_to_open]
webbrowser.open_new_tab(url)
if 'search' in words:
speak_engine.say("I'm looking for it. Please stand by!")
term_to_search = text[text.index('search') + 7:]
summary = wikipedia.summary(term_to_search)
summary = " ".join(re.findall('\w+.', summary))
summary = summary[:99]
speak_engine.say(summary)
return True
if 'where' in words and ('are' in words or 'am' in words) and ('we' in words or 'i' in words) or 'location' in words:
speak_engine.say("I am tracking the location. Stand by.")
speak_engine.say(network.currentLocation())
return True
if 'play' in words:
if 'a' in words and 'song' in words:
thread.start_new_thread(play_music, ())
return True
if 'current' in words and 'time' in words:
time = common.getCurrentTime()
speak_engine.say(time)
return True
'''Handling Mathematical/Computational queries'''
if 'add' in words or 'subtract' in words or 'multiply' in words or 'divide' in words:
try:
                nums = re.findall(r'\d+', text)
if len(nums) < 2:
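                    # digits may be spelled out ("add two and three"); convert
                    # number words to numerals and scan again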
mod_text = words_to_nums(text)
                    nums += re.findall(r'\d+', mod_text)
print nums
nums = map(int, nums)
if 'add' in words:
speak_engine.say("It is " + str(sum(nums)))
if 'subtract' in words:
speak_engine.say("It is " + str(nums[1] - nums[0]))
if 'multiply' in words:
speak_engine.say("It is " + str(nums[0] * nums[1]))
if 'divide' in words:
speak_engine.say("It is " + str(nums[0] / nums[1]))
            except Exception:
speak_engine.say(
"Perhaps my Mathematical part of brain is malfunctioning.")
return True
return False
|
pytorch-pretrained-bert/eval/evaluate_ae.py | lianapanatau/BERT-for-RRC-ABSA | 425 | 11116136 | <reponame>lianapanatau/BERT-for-RRC-ABSA
import argparse
import time
import json
import numpy as np
import math
import random
import xml.etree.ElementTree as ET
from subprocess import check_output
def label_rest_xml(fn, output_fn, corpus, label):
dom=ET.parse(fn)
root=dom.getroot()
pred_y=[]
    for zx, sent in enumerate(root.iter("sentence")):
tokens=corpus[zx]
lb=label[zx]
opins=ET.Element("Opinions")
token_idx, pt, tag_on=0, 0, False
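        # token_idx: current token; pt: character offset inside that token;
        # tag_on: True while we are inside a predicted aspect span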
start, end=-1, -1
for ix, c in enumerate(sent.find('text').text):
if token_idx<len(tokens) and pt>=len(tokens[token_idx] ):
pt=0
token_idx+=1
if token_idx<len(tokens) and lb[token_idx]==1 and pt==0 and c!=' ':
if tag_on:
end=ix
tag_on=False
opin=ET.Element("Opinion")
opin.attrib['target']=sent.find('text').text[start:end]
opin.attrib['from']=str(start)
opin.attrib['to']=str(end)
opins.append(opin)
start=ix
tag_on=True
elif token_idx<len(tokens) and lb[token_idx]==2 and pt==0 and c!=' ' and not tag_on:
start=ix
tag_on=True
elif token_idx<len(tokens) and (lb[token_idx]==0 or lb[token_idx]==1) and tag_on and pt==0:
end=ix
tag_on=False
opin=ET.Element("Opinion")
opin.attrib['target']=sent.find('text').text[start:end]
opin.attrib['from']=str(start)
opin.attrib['to']=str(end)
opins.append(opin)
elif token_idx>=len(tokens) and tag_on:
end=ix
tag_on=False
opin=ET.Element("Opinion")
opin.attrib['target']=sent.find('text').text[start:end]
opin.attrib['from']=str(start)
opin.attrib['to']=str(end)
opins.append(opin)
if c==' ':
pass
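            # the tokenizer rewrites quote characters as `` / '' (two characters each)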
elif tokens[token_idx][pt:pt+2]=='``' or tokens[token_idx][pt:pt+2]=="''":
pt+=2
else:
pt+=1
if tag_on:
tag_on=False
end=len(sent.find('text').text)
opin=ET.Element("Opinion")
opin.attrib['target']=sent.find('text').text[start:end]
opin.attrib['from']=str(start)
opin.attrib['to']=str(end)
opins.append(opin)
sent.append(opins )
dom.write(output_fn)
def label_laptop_xml(fn, output_fn, corpus, label):
dom=ET.parse(fn)
root=dom.getroot()
pred_y=[]
    for zx, sent in enumerate(root.iter("sentence")):
tokens=corpus[zx]
lb=label[zx]
opins=ET.Element("aspectTerms")
token_idx, pt, tag_on=0, 0, False
start, end=-1, -1
for ix, c in enumerate(sent.find('text').text):
if token_idx<len(tokens) and pt>=len(tokens[token_idx] ):
pt=0
token_idx+=1
if token_idx<len(tokens) and lb[token_idx]==1 and pt==0 and c!=' ':
if tag_on:
end=ix
tag_on=False
opin=ET.Element("aspectTerm")
opin.attrib['term']=sent.find('text').text[start:end]
opin.attrib['from']=str(start)
opin.attrib['to']=str(end)
opins.append(opin)
start=ix
tag_on=True
elif token_idx<len(tokens) and lb[token_idx]==2 and pt==0 and c!=' ' and not tag_on:
start=ix
tag_on=True
elif token_idx<len(tokens) and (lb[token_idx]==0 or lb[token_idx]==1) and tag_on and pt==0:
end=ix
tag_on=False
opin=ET.Element("aspectTerm")
opin.attrib['term']=sent.find('text').text[start:end]
opin.attrib['from']=str(start)
opin.attrib['to']=str(end)
opins.append(opin)
elif token_idx>=len(tokens) and tag_on:
end=ix
tag_on=False
opin=ET.Element("aspectTerm")
opin.attrib['term']=sent.find('text').text[start:end]
opin.attrib['from']=str(start)
opin.attrib['to']=str(end)
opins.append(opin)
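            # skip whitespace (ord 160 is a non-breaking space in the raw text)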
if c==' ' or ord(c)==160:
pass
elif tokens[token_idx][pt:pt+2]=='``' or tokens[token_idx][pt:pt+2]=="''":
pt+=2
else:
pt+=1
if tag_on:
tag_on=False
end=len(sent.find('text').text)
opin=ET.Element("aspectTerm")
opin.attrib['term']=sent.find('text').text[start:end]
opin.attrib['from']=str(start)
opin.attrib['to']=str(end)
opins.append(opin)
sent.append(opins )
dom.write(output_fn)
def evaluate(pred_fn, command, template):
with open(pred_fn) as f:
pred_json=json.load(f)
y_pred=[]
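    # decode BIO tags (0 = O, 1 = B, 2 = I); idx_map maps each model output
    # position back to the index of its raw token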
for ix, logit in enumerate(pred_json["logits"]):
pred=[0]*len(pred_json["raw_X"][ix])
for jx, idx in enumerate(pred_json["idx_map"][ix]):
lb=np.argmax(logit[jx])
if lb==1: #B
pred[idx]=1
elif lb==2: #I
if pred[idx]==0: #only when O->I (I->I and B->I ignored)
pred[idx]=2
y_pred.append(pred)
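    # hand the decoded spans to the official SemEval evaluator and parse its printed score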
if 'REST' in command:
command=command.split()
label_rest_xml(template, command[6], pred_json["raw_X"], y_pred)
acc=check_output(command ).split()
return float(acc[9][10:])
elif 'Laptops' in command:
command=command.split()
label_laptop_xml(template, command[4], pred_json["raw_X"], y_pred)
acc=check_output(command ).split()
return float(acc[15])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pred_json', type=str)
args = parser.parse_args()
if 'rest' in args.pred_json:
command="java -cp eval/A.jar absa16.Do Eval -prd ae/official_data/rest_pred.xml -gld ae/official_data/EN_REST_SB1_TEST.xml.gold -evs 2 -phs A -sbt SB1"
template="ae/official_data/EN_REST_SB1_TEST.xml.A"
elif 'laptop' in args.pred_json:
command="java -cp eval/eval.jar Main.Aspects ae/official_data/laptop_pred.xml ae/official_data/Laptops_Test_Gold.xml"
template="ae/official_data/Laptops_Test_Data_PhaseA.xml"
    print(evaluate(args.pred_json, command, template))
|
tests/test_metrics.py | Jeanselme/xgboost-survival-embeddings | 197 | 11116170 | import pandas as pd
import numpy as np
from xgbse.metrics import concordance_index, approx_brier_score, dist_calibration_score
from xgbse import XGBSEDebiasedBCE
from xgbse.non_parametric import get_time_bins, calculate_kaplan_vectorized
from tests.data import get_data
(
X_train,
X_test,
X_valid,
T_train,
T_test,
T_valid,
E_train,
E_test,
E_valid,
y_train,
y_test,
y_valid,
features,
) = get_data()
# generating Kaplan Meier for all tests
time_bins = get_time_bins(T_train, E_train, 100)
mean, high, low = calculate_kaplan_vectorized(
T_train.values.reshape(1, -1), E_train.values.reshape(1, -1), time_bins
)
km_survival = pd.concat([mean] * len(y_train))
km_survival = km_survival.reset_index(drop=True)
# generating xgbse predictions for all tests
xgbse_model = XGBSEDebiasedBCE()
xgbse_model.fit(
X_train,
y_train,
num_boost_round=1000,
validation_data=(X_valid, y_valid),
early_stopping_rounds=10,
verbose_eval=0,
time_bins=time_bins,
)
preds = xgbse_model.predict(X_test)
# generating dummy predictions
dummy_preds = pd.DataFrame({100: [0.5] * len(y_test)})
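# a constant 0.5 survival prediction yields an approximate Brier score of exactly 0.25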
# functions to make testing easier
def is_brier_score_return_correct_len():
return len(approx_brier_score(y_train, km_survival, aggregate=None)) == len(
km_survival.columns
)
def is_dist_cal_return_correct_len():
return len(dist_calibration_score(y_train, km_survival, returns="histogram")) == 10
def is_dist_cal_return_correct_type():
result = dist_calibration_score(y_train, km_survival, returns="all")
return type(result) == dict
# testing
def test_concordance_index():
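    # identical survival curves for every subject carry no ranking information,
    # so the C-index is exactly 0.5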
assert concordance_index(y_train, km_survival) == 0.5
assert concordance_index(y_test, preds) > 0.5
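    # observed times used directly as risk scores are perfectly anti-concordant (~0);
    # negating them makes them perfectly concordant (~1)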
assert np.isclose(
concordance_index(y_test, T_test.values, risk_strategy="precomputed"),
0,
atol=0.02,
)
assert np.isclose(
concordance_index(y_test, -T_test.values, risk_strategy="precomputed"),
1,
atol=0.02,
)
def test_approx_brier_score():
assert approx_brier_score(y_test, preds) < 0.25
assert approx_brier_score(y_train, km_survival) < 0.2
assert approx_brier_score(y_test, dummy_preds) == 0.25
assert is_brier_score_return_correct_len()
def test_dist_calibration_score():
assert dist_calibration_score(y_train, km_survival) > 0.90
assert dist_calibration_score(y_train, km_survival, returns="statistic") < 1.0
assert dist_calibration_score(y_train, km_survival, returns="max_deviation") < 0.01
assert is_dist_cal_return_correct_len()
assert is_dist_cal_return_correct_type()
|