max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
projects/mmdet3d_plugin/models/utils/dgcnn_attn.py | XiangTodayEatsWhat/detr3d | 237 | 12783093 | <reponame>XiangTodayEatsWhat/detr3d<filename>projects/mmdet3d_plugin/models/utils/dgcnn_attn.py
import math
import torch
import torch.nn as nn
from mmcv.cnn.bricks.registry import ATTENTION
from mmcv.runner.base_module import BaseModule
@ATTENTION.register_module()
class DGCNNAttn(BaseModule):
"""A warpper for DGCNN-type self-attention.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads. Same as
`nn.MultiheadAttention`.
        dropout (float): A Dropout layer on attn_output_weights. Default: 0.
init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
num_heads,
dropout=0.,
init_cfg=None,
**kwargs):
super(DGCNNAttn, self).__init__(init_cfg)
self.embed_dims = embed_dims
self.num_heads = num_heads
self.dropout = dropout
self.conv1 = nn.Sequential(nn.Conv2d(self.embed_dims*2, self.embed_dims, kernel_size=1, bias=False),
nn.BatchNorm2d(self.embed_dims),
nn.ReLU(inplace=True))
self.conv2 = nn.Sequential(nn.Conv2d(self.embed_dims*2, self.embed_dims, kernel_size=1, bias=False),
nn.BatchNorm2d(self.embed_dims),
nn.ReLU(inplace=True))
self.K = kwargs['K']
self.dropout = nn.Dropout(dropout)
def forward(self,
query,
key=None,
value=None,
residual=None,
query_pos=None,
key_pos=None,
attn_mask=None,
key_padding_mask=None,
**kwargs):
"""Forward function for `DGCNN`.
**kwargs allow passing a more general data flow when combining
with other operations in `DGCNN`.
Args:
query (Tensor): The input query with shape [num_queries, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
residual (Tensor): This tensor, with the same shape as x,
will be used for the residual link.
If None, `x` will be used. Defaults to None.
query_pos (Tensor): The positional encoding for query, with
the same shape as `x`. If not None, it will
be added to `x` before forward function. Defaults to None.
Returns:
Tensor: forwarded results with shape [num_queries, bs, embed_dims].
"""
if residual is None:
residual = query
if query_pos is not None:
query = query + query_pos
query = query.permute(1, 0, 2) # [bs, num_queries, embed_dims]
edge_feats = self.edge_feats(query, K=self.K)
edge_feats1 = self.conv1(edge_feats)
edge_feats1 = edge_feats1.max(dim=-1)[0]
out = edge_feats1
edge_feats1 = self.edge_feats(edge_feats1.permute(0, 2, 1))
edge_feats2 = self.conv2(edge_feats1)
edge_feats2 = edge_feats2.max(dim=-1)[0]
out = out + edge_feats2
out = out.permute(2, 0, 1)
return residual + self.dropout(out)
def edge_feats(self, query, K=16):
# (B, N, N)
affinity = torch.cdist(query, query)
# (B, N, K)
_, topk = torch.topk(affinity, k=K, dim=2)
B, N, C = query.size()
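        # Offset each batch's top-K indices by (batch index * N) so that one flat
        # gather over the (B*N, C) view fetches every query's K nearest neighbours
        # without looping over the batch in Python.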
idx_base = torch.arange(0, B, device=query.device).view(-1, 1, 1) * N
idx = topk + idx_base
idx = idx.view(-1)
query = query.reshape(B*N, C)
query_neighbor = query[idx, :].view(B, N, K, C)
query = query.reshape(B, N, 1, C).repeat(1, 1, K, 1)
out = torch.cat((query_neighbor, query), dim=-1).permute(0, 3, 1, 2).contiguous()
return out
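# Minimal smoke-test sketch (not part of the original module). It assumes mmcv
# is installed; the sizes and K below are illustrative only.
if __name__ == '__main__':
    attn = DGCNNAttn(embed_dims=256, num_heads=8, dropout=0.1, K=16)
    query = torch.randn(900, 2, 256)  # [num_queries, bs, embed_dims]
    out = attn(query)
    assert out.shape == (900, 2, 256)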
|
Lib/test/test_async.py | pyparallel/pyparallel | 652 | 12783125 | import os
import sys
import atexit
import unittest
import tempfile
import async
import _async
import socket
from socket import (
AF_INET,
SOCK_STREAM,
)
def tcpsock():
return socket.socket(AF_INET, SOCK_STREAM)
CHARGEN = [
r""" !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefg""",
r"""!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh""",
r""""#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghi""",
r"""#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghij""",
r"""$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijk""",
]
QOTD = 'An apple a day keeps the doctor away.\r\n'
ECHO_HOST = ('echo.snakebite.net', 7)
QOTD_HOST = ('qotd.snakebite.net', 17)
DISCARD_HOST = ('discard.snakebite.net', 9)
DAYTIME_HOST = ('daytime.snakebite.net', 13)
CHARGEN_HOST = ('chargen.snakebite.net', 19)
SERVICES_IP = socket.getaddrinfo(*ECHO_HOST)[0][4][0]
ECHO_IP = (SERVICES_IP, 7)
DISCARD_IP = (SERVICES_IP, 9)
DAYTIME_IP = (SERVICES_IP, 13)
CHARGEN_IP = (SERVICES_IP, 19)
NO_CB = None
NO_EB = None
HOST = '192.168.3.11'
ADDR = (HOST, 0)
TEMPDIR = None
def rmtempdir():
if TEMPDIR:
TEMPDIR.cleanup()
def gettempfile():
    # Renamed so the helper no longer shadows the stdlib `tempfile` module it uses.
    global TEMPDIR
    if not TEMPDIR:
        TEMPDIR = tempfile.TemporaryDirectory()
        assert os.path.isdir(TEMPDIR.name)
        atexit.register(rmtempdir)
    assert os.path.isdir(TEMPDIR.name)
    f = tempfile.NamedTemporaryFile(dir=TEMPDIR.name, delete=False)
    assert os.path.isfile(f.name)
    return f
def tempfilename():
    f = gettempfile()
    f.close()
    return f.name
class TestBasic(unittest.TestCase):
def test_calling_run_with_no_events_fails(self):
self.assertRaises(AsyncRunCalledWithoutEventsError, _async.run_once)
class TestSubmitWork(unittest.TestCase):
def test_submit_simple_work(self):
def f(i):
return i * 2
def cb(r):
_async.call_from_main_thread(
self.assertEqual,
(r, 4),
)
_async.submit_work(f, 2, None, cb, None)
_async.run()
def test_value_error_in_callback(self):
def f():
return laksjdflaskjdflsakjdfsalkjdf
_async.submit_work(f, None, None, None, None)
self.assertRaises(NameError, _async.run)
def test_value_error_in_callback_then_run(self):
def f():
return laksjdflaskjdflsakjdfsalkjdf
_async.submit_work(f, None, None, None, None)
self.assertRaises(NameError, _async.run)
_async.run()
def test_multiple_value_errors_in_callback_then_run(self):
def f():
return laksjdflaskjdflsakjdfsalkjdf
_async.submit_work(f, None, None, None, None)
_async.submit_work(f, None, None, None, None)
self.assertRaises(NameError, _async.run)
self.assertRaises(NameError, _async.run)
_async.run()
def test_call_from_main_thread(self):
d = {}
def f(i):
_async.call_from_main_thread_and_wait(
d.__setitem__,
('foo', i*2),
)
return _async.call_from_main_thread_and_wait(
d.__getitem__, 'foo'
)
def cb(r):
_async.call_from_main_thread(
self.assertEqual,
(r, 4),
)
_async.submit_work(f, 2, None, cb, None)
_async.run()
def test_call_from_main_thread_decorator(self):
@async.call_from_main_thread
def f():
self.assertFalse(_async.is_parallel_thread)
_async.submit_work(f, None, None, None, None)
_async.run()
def test_submit_simple_work_errback_invoked(self):
def f():
return laksjdflaskjdflsakjdfsalkjdf
def test_e(et, ev, eb):
try:
f()
except NameError as e2:
self.assertEqual(et, e2.__class__)
self.assertEqual(ev, e2.args[0])
self.assertEqual(eb.__class__, e2.__traceback__.__class__)
else:
self.assertEqual(0, 1)
def cb(r):
_async.call_from_main_thread(self.assertEqual, (0, 1))
def eb(e):
_async.call_from_main_thread_and_wait(test_e, e)
_async.submit_work(f, None, None, cb, eb)
_async.run()
class TestSubmitFileIO(unittest.TestCase):
def test_write(self):
n = tempfilename()
        f = open(n, 'wb')
_async.submit_io(f.write, b'foo', None, None, None)
_async.run()
f.close()
        with open(n, 'rb') as f:
            self.assertEqual(f.read(), b'foo')
def test_read(self):
@async.call_from_main_thread
def cb(d):
self.assertEqual(d, b'foo')
n = tempfilename()
        with open(n, 'wb') as f:
            f.write(b'foo')
        f = open(n, 'rb')
_async.submit_io(f.read, None, None, cb, None)
_async.run()
class TestConnectSocketIO(unittest.TestCase):
def test_backlog(self):
sock = tcpsock()
port = sock.bind(ADDR)
sock.listen(100)
self.assertEqual(sock.backlog, 100)
sock.close()
def test_connect(self):
@async.call_from_main_thread
def cb():
self.assertEqual(1, 1)
sock = tcpsock()
_async.connect(sock, DISCARD_IP, 1, None, cb, NO_EB)
_async.run()
def test_connect_with_data(self):
@async.call_from_main_thread
def cb(sock):
self.assertEqual(1, 1)
sock = tcpsock()
_async.connect(sock, DISCARD_IP, 1, b'buf', cb, NO_EB)
_async.run()
def test_connect_then_recv(self):
@async.call_from_main_thread
def _check(data):
self.assertEqual(data, QOTD)
def read_cb(sock, data):
_check(data)
def connect_cb(sock):
_async.recv(sock, read_cb, NO_EB)
sock = tcpsock()
_async.connect(sock, QOTD_IP, 1, None, connect_cb, NO_EB)
_async.run()
def test_connect_with_data_then_recv(self):
@async.call_from_main_thread
def _check(data):
self.assertEqual(data, b'hello')
def read_cb(sock, data):
_check(data)
def connect_cb(sock):
_async.recv(sock, read_cb, NO_EB)
sock = tcpsock()
_async.connect(sock, ECHO_IP, 1, b'hello', connect_cb, NO_EB)
_async.run()
def test_connect_then_send_then_recv(self):
@async.call_from_main_thread
def _check(data):
self.assertEqual(data, b'hello')
def read_cb(sock, data):
_check(data)
def connect_cb(sock):
_async.recv(sock, read_cb, NO_EB)
_async.send(sock, b'hello', NO_CB, NO_EB)
sock = tcpsock()
_async.connect(sock, ECHO_IP, 1, None, connect_cb, NO_EB)
_async.run()
def test_recv_before_connect_with_data_then_recv(self):
@async.call_from_main_thread
def _check(data):
self.assertEqual(data, b'hello')
def read_cb(sock, data):
_check(data)
sock = tcpsock()
_async.recv(sock, read_cb, NO_EB)
_async.connect(sock, ECHO_IP, 1, b'hello', NO_CB, NO_EB)
_async.run()
def test_recv_before_connect_then_send_then_recv(self):
@async.call_from_main_thread
def _check(data):
self.assertEqual(data, b'hello')
def read_cb(sock, data):
_check(data)
def connect_cb(sock):
_async.send(sock, b'hello', NO_CB, NO_EB)
sock = tcpsock()
_async.recv(sock, read_cb, NO_EB)
_async.connect(sock, ECHO_IP, 1, None, connect_cb, NO_EB)
_async.run()
class TestAcceptSocketIO(unittest.TestCase):
def test_accept(self):
@async.call_from_main_thread
def new_connection(sock, data):
self.assertEqual(data, b'hello')
sock = tcpsock()
port = sock.bind(ADDR)
addr = sock.getsockname()
sock.listen(1)
_async.accept(sock, new_connection, NO_EB)
client = tcpsock()
_async.connect(client, addr, 1, b'hello', NO_CB, NO_EB)
_async.run()
sock.close()
def test_accept_backlog2(self):
counter = 0
        @async.call_from_main_thread
        def new_connection(sock, data):
            nonlocal counter
            self.assertEqual(data, b'hello')
            counter += 1
sock = tcpsock()
port = sock.bind(ADDR)
addr = sock.getsockname()
sock.listen(2)
_async.accept(sock, new_connection, NO_EB)
client = tcpsock()
_async.connect(client, addr, 2, b'hello', NO_CB, NO_EB)
_async.run()
self.assertEqual(counter, 2)
if __name__ == '__main__':
unittest.main()
# vim:set ts=8 sw=4 sts=4 tw=78 et:
|
5 - unit testing/vehicle_info_test.py | mickeybeurskens/betterpython | 523 | 12783179 | import unittest
from vehicle_info_after import VehicleInfo
class TestVehicleInfoMethods(unittest.TestCase):
pass
# def test_compute_tax_non_electric(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertEqual(v.compute_tax(), 500)
# def test_compute_tax_electric(self):
# v = VehicleInfo("BMW", True, 10000)
# self.assertEqual(v.compute_tax(), 200)
# def test_compute_tax_exemption(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertEqual(v.compute_tax(5000), 250)
# def test_compute_tax_exemption_negative(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertRaises(ValueError, v.compute_tax, -5000)
# def test_compute_tax_exemption_high(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertEqual(v.compute_tax(20000), 0)
# def test_can_lease_false(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertEqual(v.can_lease(5000), False)
# def test_can_lease_true(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertEqual(v.can_lease(15000), True)
# def test_can_lease_negative_income(self):
# v = VehicleInfo("BMW", False, 10000)
# self.assertRaises(ValueError, v.can_lease, -5000)
# run the actual unittests
unittest.main()
|
src/azure-cli/azure/cli/command_modules/lab/_params.py | YuanyuanNi/azure-cli | 3,287 | 12783203 | <filename>src/azure-cli/azure/cli/command_modules/lab/_params.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.command_modules.lab.validators import validate_artifacts, validate_template_id
from azure.cli.core.util import get_json_object
def load_arguments(self, _):
with self.argument_context('lab custom-image create') as c:
c.argument('name', options_list=['--name', '-n'])
with self.argument_context('lab vm create') as c:
c.argument('name', options_list=['--name', '-n'])
# Authentication related arguments
for arg_name in ['admin_username', 'admin_password', 'authentication_type', 'ssh_key', 'generate_ssh_keys',
'saved_secret']:
c.argument(arg_name, arg_group='Authentication')
c.argument('generate_ssh_keys', action='store_true')
# Add Artifacts from json object
c.argument('artifacts', type=get_json_object)
# Image related arguments
c.ignore('os_type', 'gallery_image_reference', 'custom_image_id')
# Network related arguments
for arg_name in ['ip_configuration', 'subnet', 'vnet_name']:
c.argument(arg_name, arg_group='Network')
c.ignore('lab_subnet_name', 'lab_virtual_network_id', 'disallow_public_ip_address', 'network_interface')
# Creating VM in the different location then lab is an officially unsupported scenario
c.ignore('location')
c.argument('allow_claim', action='store_true')
with self.argument_context('lab vm list') as c:
for arg_name in ['filters', 'all', 'claimable', 'environment']:
c.argument(arg_name, arg_group='Filter')
for arg_name in ['all', 'claimable']:
c.argument(arg_name, action='store_true')
with self.argument_context('lab vm claim') as c:
c.argument('name', options_list=['--name', '-n'], id_part='child_name_1')
c.argument('lab_name', id_part='name')
with self.argument_context('lab vm apply-artifacts') as c:
c.argument('artifacts', type=get_json_object, validator=validate_artifacts)
c.argument('name', options_list=['--name', '-n'])
with self.argument_context('lab formula') as c:
c.argument('name', options_list=['--name', '-n'])
with self.argument_context('lab secret') as c:
from azure.mgmt.devtestlabs.models import Secret
c.argument('name', options_list=['--name', '-n'])
c.argument('secret', options_list=['--value'], type=lambda x: Secret(value=x))
c.ignore('user_name')
with self.argument_context('lab formula export-artifacts') as c:
# Exporting artifacts does not need expand filter
c.ignore('expand')
with self.argument_context('lab environment') as c:
c.argument('name', options_list=['--name', '-n'])
c.ignore('user_name')
with self.argument_context('lab environment create') as c:
c.argument('arm_template', validator=validate_template_id)
c.argument('parameters', type=get_json_object)
with self.argument_context('lab arm-template') as c:
c.argument('name', options_list=['--name', '-n'])
with self.argument_context('lab arm-template show') as c:
c.argument('export_parameters', action='store_true')
|
modules/src/dictionary.py | rampreeth/JARVIS-on-Messenger | 1,465 | 12783212 | <reponame>rampreeth/JARVIS-on-Messenger
import os
import requests
import requests_cache
import config
from templates.text import TextTemplate
WORDS_API_KEY = os.environ.get('WORDS_API_KEY', config.WORDS_API_KEY)
def process(input, entities):
output = {}
try:
word = entities['word'][0]['value']
with requests_cache.enabled('dictionary_cache', backend='sqlite', expire_after=86400):
r = requests.get('https://wordsapiv1.p.mashape.com/words/' + word + '/definitions', headers={
'X-Mashape-Key': WORDS_API_KEY
})
data = r.json()
output['input'] = input
output['output'] = TextTemplate(
'Definition of ' + word + ':\n' + data['definitions'][0]['definition']).get_message()
output['success'] = True
    except Exception:
error_message = 'I couldn\'t find that definition.'
error_message += '\nPlease ask me something else, like:'
error_message += '\n - define comfort'
error_message += '\n - cloud definition'
error_message += '\n - what does an accolade mean?'
output['error_msg'] = TextTemplate(error_message).get_message()
output['success'] = False
return output
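# Hedged usage sketch (not in the original file). The entity layout mirrors what
# `process` indexes (entities['word'][0]['value']); the exact wit.ai payload
# shape and a valid WORDS_API_KEY are assumptions here.
if __name__ == '__main__':
    result = process('define comfort', {'word': [{'value': 'comfort'}]})
    print(result['success'], result.get('output') or result.get('error_msg'))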
|
packages/syft/src/syft/proto/core/auth/signed_message_pb2.py | vishalbelsare/PySyft | 8,428 | 12783243 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/core/auth/signed_message.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# third party
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
# syft absolute
from syft.proto.core.common import (
common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n$proto/core/auth/signed_message.proto\x12\x0esyft.core.auth\x1a%proto/core/common/common_object.proto\x1a\x1bgoogle/protobuf/empty.proto"\x80\x01\n\rSignedMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x10\n\x08obj_type\x18\x02 \x01(\t\x12\x11\n\tsignature\x18\x03 \x01(\x0c\x12\x12\n\nverify_key\x18\x04 \x01(\x0c\x12\x0f\n\x07message\x18\x05 \x01(\x0c"\x1f\n\tVerifyKey\x12\x12\n\nverify_key\x18\x01 \x01(\x0c"0\n\tVerifyAll\x12#\n\x03\x61ll\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Emptyb\x06proto3'
)
_SIGNEDMESSAGE = DESCRIPTOR.message_types_by_name["SignedMessage"]
_VERIFYKEY = DESCRIPTOR.message_types_by_name["VerifyKey"]
_VERIFYALL = DESCRIPTOR.message_types_by_name["VerifyAll"]
SignedMessage = _reflection.GeneratedProtocolMessageType(
"SignedMessage",
(_message.Message,),
{
"DESCRIPTOR": _SIGNEDMESSAGE,
"__module__": "proto.core.auth.signed_message_pb2"
# @@protoc_insertion_point(class_scope:syft.core.auth.SignedMessage)
},
)
_sym_db.RegisterMessage(SignedMessage)
VerifyKey = _reflection.GeneratedProtocolMessageType(
"VerifyKey",
(_message.Message,),
{
"DESCRIPTOR": _VERIFYKEY,
"__module__": "proto.core.auth.signed_message_pb2"
# @@protoc_insertion_point(class_scope:syft.core.auth.VerifyKey)
},
)
_sym_db.RegisterMessage(VerifyKey)
VerifyAll = _reflection.GeneratedProtocolMessageType(
"VerifyAll",
(_message.Message,),
{
"DESCRIPTOR": _VERIFYALL,
"__module__": "proto.core.auth.signed_message_pb2"
# @@protoc_insertion_point(class_scope:syft.core.auth.VerifyAll)
},
)
_sym_db.RegisterMessage(VerifyAll)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_SIGNEDMESSAGE._serialized_start = 125
_SIGNEDMESSAGE._serialized_end = 253
_VERIFYKEY._serialized_start = 255
_VERIFYKEY._serialized_end = 286
_VERIFYALL._serialized_start = 288
_VERIFYALL._serialized_end = 336
# @@protoc_insertion_point(module_scope)
|
Models/regressionTemplateTF/model.py | UTS-AnimalLogicAcademy/nuke-ML-server | 123 | 12783247 | # Copyright (c) 2020 Foundry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import print_function
import sys
import os
import time
import scipy.misc
import numpy as np
import cv2
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # For TF 2.x compatibility
from models.baseModel import BaseModel
from models.common.model_builder import baseline_model
from models.common.util import print_, get_ckpt_list, linear_to_srgb, srgb_to_linear
import message_pb2
class Model(BaseModel):
"""Load your trained model and do inference in Nuke"""
def __init__(self):
super(Model, self).__init__()
self.name = 'Regression Template TF'
self.n_levels = 3
self.scale = 0.5
dir_path = os.path.dirname(os.path.realpath(__file__))
self.checkpoints_dir = os.path.join(dir_path, 'checkpoints')
self.patch_size = 50
self.output_param_number = 1
# Initialise checkpoint name to the latest checkpoint
ckpt_names = get_ckpt_list(self.checkpoints_dir)
if not ckpt_names: # empty list
self.checkpoint_name = ''
else:
latest_ckpt = tf.compat.v1.train.latest_checkpoint(self.checkpoints_dir)
if latest_ckpt is not None:
self.checkpoint_name = latest_ckpt.split('/')[-1]
else:
self.checkpoint_name = ckpt_names[-1]
self.prev_ckpt_name = self.checkpoint_name
# Silence TF log when creating tf.Session()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Define options
self.gamma_to_predict = 1.0
self.predict = False
self.options = ('checkpoint_name', 'gamma_to_predict',)
self.buttons = ('predict',)
# Define inputs/outputs
self.inputs = {'input': 3}
self.outputs = {'output': 3}
def load(self, model):
# Check if empty or invalid checkpoint name
if self.checkpoint_name=='':
ckpt_names = get_ckpt_list(self.checkpoints_dir)
if not ckpt_names:
raise ValueError("No checkpoints found in {}".format(self.checkpoints_dir))
else:
raise ValueError("Empty checkpoint name, try an available checkpoint in {} (ex: {})"
.format(self.checkpoints_dir, ckpt_names[-1]))
print_("Loading trained model checkpoint...\n", 'm')
# Load from given checkpoint file name
self.saver.restore(self.sess, os.path.join(self.checkpoints_dir, self.checkpoint_name))
print_("...Checkpoint {} loaded\n".format(self.checkpoint_name), 'm')
def inference(self, image_list):
"""Do an inference on the model with a set of inputs.
# Arguments:
image_list: The input image list
Return the result of the inference.
"""
image = image_list[0]
image = linear_to_srgb(image).copy()
if not hasattr(self, 'sess'):
# Initialise tensorflow graph
tf.compat.v1.reset_default_graph()
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth=True
self.sess=tf.compat.v1.Session(config=config)
# Input is stacked histograms of original and gamma-graded images.
input_shape = [1, 2, 100]
# Initialise input placeholder size
self.input = tf.compat.v1.placeholder(tf.float32, shape=input_shape)
self.model = baseline_model(
input_shape=input_shape[1:],
output_param_number=self.output_param_number)
self.infer_op = self.model(self.input)
# Load latest model checkpoint
self.saver = tf.compat.v1.train.Saver()
self.load(self.model)
self.prev_ckpt_name = self.checkpoint_name
# If checkpoint name has changed, load new checkpoint
if self.prev_ckpt_name != self.checkpoint_name or self.checkpoint_name == '':
self.load(self.model)
# If checkpoint correctly loaded, update previous checkpoint name
self.prev_ckpt_name = self.checkpoint_name
# Preprocess image same way we preprocessed it for training
# Here for gamma correction compute histograms
def histogram(x, value_range=[0.0, 1.0], nbins=100):
"""Return histogram of tensor x"""
h, w, c = x.shape
hist = tf.histogram_fixed_width(x, value_range, nbins=nbins)
hist = tf.divide(hist, h * w * c)
return hist
with tf.compat.v1.Session() as sess:
# Convert to grayscale
img_gray = tf.image.rgb_to_grayscale(image)
img_gray = tf.image.resize(img_gray, [self.patch_size, self.patch_size])
# Apply gamma correction
img_gray_grade = tf.math.pow(img_gray, self.gamma_to_predict)
img_grade = tf.math.pow(image, self.gamma_to_predict)
# Compute histograms
img_hist = histogram(img_gray)
img_grade_hist = histogram(img_gray_grade)
hists_op = tf.stack([img_hist, img_grade_hist], axis=0)
hists, img_grade = sess.run([hists_op, img_grade])
res_img = srgb_to_linear(img_grade)
hists_batch = np.expand_dims(hists, 0)
start = time.time()
# Run model inference
inference = self.sess.run(self.infer_op, feed_dict={self.input: hists_batch})
duration = time.time() - start
print('Inference duration: {:4.3f}s'.format(duration))
res = inference[-1]
print("Predicted gamma: {}".format(res))
# If predict button is pressed in Nuke
if self.predict:
script_msg = message_pb2.FieldValuePairAttrib()
script_msg.name = "PythonScript"
# Create a Python script message to run in Nuke
python_script = self.nuke_script(res)
script_msg_val = script_msg.values.add()
script_msg_str = script_msg_val.string_attributes.add()
script_msg_str.values.extend([python_script])
return [res_img, script_msg]
return [res_img]
def nuke_script(self, res):
"""Return the Python script function to create a pop up window in Nuke."""
popup_msg = "Predicted gamma: {}".format(res)
script = "nuke.message('{}')\n".format(popup_msg)
        return script
|
pyscreenshot/plugins/xwd.py | ponty/pyscreenshot | 416 | 12783252 | import logging
from easyprocess import EasyProcess
from pyscreenshot.plugins.backend import CBackend
from pyscreenshot.tempexport import RunProgError, read_func_img
from pyscreenshot.util import extract_version
log = logging.getLogger(__name__)
PROGRAM = "xwd"
# wikipedia: https://en.wikipedia.org/wiki/Xwd
# xwd | xwdtopnm | pnmtopng > Screenshot.png
# xwdtopnm is buggy: https://bugs.launchpad.net/ubuntu/+source/netpbm-free/+bug/1379480
# solution : imagemagick convert
# xwd -root -display :0 | convert xwd:- file.png
# TODO: xwd sometimes grabs the wrong window so this backend will be not added now
def read_xwd_img():
def run_prog(fpng, bbox=None):
fxwd = fpng + ".xwd"
pxwd = EasyProcess([PROGRAM, "-root", "-out", fxwd])
pxwd.call()
if pxwd.return_code != 0:
raise RunProgError(pxwd.stderr)
pconvert = EasyProcess(["convert", "xwd:" + fxwd, fpng])
pconvert.call()
if pconvert.return_code != 0:
raise RunProgError(pconvert.stderr)
im = read_func_img(run_prog)
return im
class XwdWrapper(CBackend):
name = "xwd"
is_subprocess = True
def grab(self, bbox=None):
im = read_xwd_img()
if bbox:
im = im.crop(bbox)
return im
def backend_version(self):
return extract_version(EasyProcess([PROGRAM, "-version"]).call().stdout)
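# Minimal usage sketch (not in the original module): grabbing the full screen
# assumes an X11 session with both `xwd` and ImageMagick's `convert` on PATH.
if __name__ == "__main__":
    img = XwdWrapper().grab()
    img.save("screenshot.png")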
|
nevergrad/optimization/test_special.py | risto-trajanov/nevergrad | 3,217 | 12783260 | <reponame>risto-trajanov/nevergrad
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import collections
import typing as tp
import pytest
import numpy as np
from .optimizerlib import registry
from . import test_optimizerlib
KEY = "NEVERGRAD_SPECIAL_TESTS"
if not os.environ.get(KEY, ""):
pytest.skip(f"These tests only run if {KEY} is set in the environment", allow_module_level=True)
@pytest.mark.parametrize("dimension", (2, 4, 7, 77))
@pytest.mark.parametrize("num_workers", (1,))
@pytest.mark.parametrize("scale", (4.0,))
@pytest.mark.parametrize("baseline", ["MetaModel", "CMA", "ECMA"])
@pytest.mark.parametrize("budget", [400, 4000])
@pytest.mark.parametrize("ellipsoid", [True, False])
def test_metamodel_sqp_chaining(
dimension: int, num_workers: int, scale: float, budget: int, ellipsoid: bool, baseline: str
) -> None:
"""The test can operate on the sphere or on an elliptic funciton."""
target = test_optimizerlib.QuadFunction(scale=scale, ellipse=ellipsoid)
baseline = baseline if dimension > 1 else "OnePlusOne"
chaining = "ChainMetaModelSQP"
# In both cases we compare MetaModel and CMA for a same given budget.
# But we expect MetaModel to be clearly better only for a larger budget in the ellipsoid case.
contextual_budget = budget if ellipsoid else 3 * budget
contextual_budget *= 5 * int(max(1, np.sqrt(scale)))
num_trials = 27
successes = 0.0
durations: tp.Dict[str, float] = collections.defaultdict(int)
for _ in range(num_trials):
if successes >= num_trials / 2:
break
# Let us run the comparison.
recoms: tp.Dict[str, np.ndarray] = {}
for name in (chaining, baseline):
opt = registry[name](dimension, contextual_budget, num_workers=num_workers)
t0 = time.time()
recoms[name] = opt.minimize(target).value
durations[name] += time.time() - t0
if target(recoms[baseline]) < target(recoms[chaining]):
successes += 1
if target(recoms[baseline]) == target(recoms[chaining]):
successes += 0.5
if successes <= num_trials // 2:
print(
f"ChainMetaModelSQP fails ({successes}/{num_trials}) for d={dimension}, scale={scale}, "
f"num_workers={num_workers}, ellipsoid={ellipsoid}, budget={budget}, vs {baseline}"
)
raise AssertionError("ChaingMetaModelSQP fails by performance.")
print(
f"ChainMetaModelSQP wins for d={dimension}, scale={scale}, num_workers={num_workers}, "
f"ellipsoid={ellipsoid}, budget={budget}, vs {baseline}"
)
assert durations[chaining] < 7 * durations[baseline], "Computationally more than 7x more expensive."
@pytest.mark.parametrize("args", test_optimizerlib.get_metamodel_test_settings(special=True))
@pytest.mark.parametrize("baseline", ("CMA", "ECMA"))
def test_metamodel_special(baseline: str, args: tp.Tuple[tp.Any, ...]) -> None:
"""The test can operate on the sphere or on an elliptic funciton."""
kwargs = dict(zip(test_optimizerlib.META_TEST_ARGS, args))
test_optimizerlib.check_metamodel(baseline=baseline, **kwargs)
|
test/unit/test_noise_gates.py | stjordanis/pyquil | 677 | 12783312 | import numpy as np
from pyquil.gates import RZ, RX, I, CZ, ISWAP, CPHASE
from pyquil.noise_gates import _get_qvm_noise_supported_gates, THETA
def test_get_qvm_noise_supported_gates_from_compiler_isa(compiler_isa):
gates = _get_qvm_noise_supported_gates(compiler_isa)
for q in [0, 1, 2]:
for g in [
I(q),
RX(np.pi / 2, q),
RX(-np.pi / 2, q),
RX(np.pi, q),
RX(-np.pi, q),
RZ(THETA, q),
]:
assert g in gates
assert CZ(0, 1) in gates
assert CZ(1, 0) in gates
assert ISWAP(1, 2) in gates
assert ISWAP(2, 1) in gates
assert CPHASE(THETA, 2, 0) in gates
assert CPHASE(THETA, 0, 2) in gates
ASPEN_8_QUBITS_NO_RX = {8, 9, 10, 18, 19, 28, 29, 31}
ASPEN_8_QUBITS_NO_RZ = {8, 9, 10, 18, 19, 28, 29, 31}
ASPEN_8_EDGES_NO_CZ = {(0, 1), (10, 11), (1, 2), (21, 22), (17, 10), (12, 25)}
def test_get_qvm_noise_supported_gates_from_aspen8_isa(qcs_aspen8_quantum_processor, noise_model_dict):
gates = _get_qvm_noise_supported_gates(qcs_aspen8_quantum_processor.to_compiler_isa())
for q in range(len(qcs_aspen8_quantum_processor._isa.architecture.nodes)):
if q not in ASPEN_8_QUBITS_NO_RX:
for g in [
RX(np.pi / 2, q),
RX(-np.pi / 2, q),
RX(np.pi, q),
RX(-np.pi, q),
]:
assert g in gates
if q not in ASPEN_8_QUBITS_NO_RZ:
assert RZ(THETA, q) in gates
for edge in qcs_aspen8_quantum_processor._isa.architecture.edges:
if (
edge.node_ids[0],
edge.node_ids[1],
) in ASPEN_8_EDGES_NO_CZ:
continue
assert CZ(edge.node_ids[0], edge.node_ids[1]) in gates
assert CZ(edge.node_ids[1], edge.node_ids[0]) in gates
|
setup.py | fdvty/open-box | 184 | 12783317 | <gh_stars>100-1000
#!/usr/bin/env python
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import importlib.util
from pathlib import Path
from distutils.core import setup
from setuptools import find_packages
requirements = dict()
for extra in ["dev", "main"]:
# Skip `package @ git+[repo_url]` because not supported by pypi
if (3, 6) <= sys.version_info < (3, 7):
requirements[extra] = [r
for r in Path("requirements/%s_py36.txt" % extra).read_text().splitlines()
if '@' not in r
]
else:
requirements[extra] = [r
for r in Path("requirements/%s.txt" % extra).read_text().splitlines()
if '@' not in r
]
# Find version number
spec = importlib.util.spec_from_file_location("openbox.pkginfo", str(Path(__file__).parent / "openbox" / "pkginfo.py"))
pkginfo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(pkginfo)
version = pkginfo.version
package_name = pkginfo.package_name
# Get the platform info.
def get_platform():
platforms = {
'linux': 'Linux',
'linux1': 'Linux',
'linux2': 'Linux',
'darwin': 'OSX',
'win32': 'Windows'
}
if sys.platform not in platforms:
raise ValueError('Unsupported platform - %s.' % sys.platform)
return platforms[sys.platform]
platform = get_platform()
# Get readme strings.
def readme() -> str:
return open("README.md", encoding='utf-8').read()
setup(
name=package_name,
version=version,
description="Efficient and generalized blackbox optimization (BBO) system",
long_description=readme(),
long_description_content_type="text/markdown",
url='https://github.com/PKU-DAIR/open-box',
author="<NAME> from <EMAIL> <EMAIL>",
packages=find_packages(),
license="MIT",
install_requires=requirements["main"],
extras_require={"dev": requirements["dev"]},
package_data={"open-box": ["py.typed"]},
include_package_data=True,
python_requires='>=3.6.0',
entry_points={
"console_scripts": [
"openbox = openbox.__main__:main",
]
}
)
|
img2dataset/logger.py | rom1504/img2dataset | 482 | 12783349 | """logging utils for the downloader"""
import wandb
import time
from collections import Counter
import fsspec
import json
from multiprocessing import Process, Queue
import queue
class CappedCounter:
"""Maintain a counter with a capping to avoid memory issues"""
def __init__(self, max_size=10 ** 5):
self.max_size = max_size
self.counter = Counter()
def increment(self, key):
if len(self.counter) >= self.max_size:
self._keep_most_frequent()
self.counter[key] += 1
def _keep_most_frequent(self):
self.counter = Counter(dict(self.counter.most_common(int(self.max_size / 2))))
def most_common(self, k):
return self.counter.most_common(k)
def update(self, counter):
self.counter.update(counter.counter)
if len(self.counter) >= self.max_size:
self._keep_most_frequent()
def dump(self):
return self.counter
@classmethod
def load(cls, d, max_size=10 ** 5):
c = CappedCounter(max_size)
c.counter = Counter(d)
return c
class Logger:
"""logger which logs when number of calls reaches a value or a time interval has passed"""
def __init__(self, processes_count=1, min_interval=0):
"""Log only every processes_count and if min_interval (seconds) have elapsed since last log"""
# wait for all processes to return
self.processes_count = processes_count
self.processes_returned = 0
# min time (in seconds) before logging a new table (avoids too many logs)
self.min_interval = min_interval
self.last = time.perf_counter()
# keep track of whether we logged the last call
self.last_call_logged = False
self.last_args = None
self.last_kwargs = None
def __call__(self, *args, **kwargs):
self.processes_returned += 1
if self.processes_returned % self.processes_count == 0 and time.perf_counter() - self.last > self.min_interval:
self.do_log(*args, **kwargs)
self.last = time.perf_counter()
self.last_call_logged = True
else:
self.last_call_logged = False
self.last_args = args
self.last_kwargs = kwargs
def do_log(self, *args, **kwargs):
raise NotImplementedError()
def sync(self):
"""Ensure last call is logged"""
if not self.last_call_logged:
self.do_log(*self.last_args, **self.last_kwargs)
# reset for next file
self.processes_returned = 0
class SpeedLogger(Logger):
"""Log performance metrics"""
def __init__(self, prefix, enable_wandb, **logger_args):
super().__init__(**logger_args)
self.prefix = prefix
self.start = time.perf_counter()
self.count = 0
self.success = 0
self.failed_to_download = 0
self.failed_to_resize = 0
self.enable_wandb = enable_wandb
def __call__(
self, duration, count, success, failed_to_download, failed_to_resize
): # pylint: disable=arguments-differ
self.count += count
self.success += success
self.failed_to_download += failed_to_download
self.failed_to_resize += failed_to_resize
super().__call__(duration, self.count, self.success, self.failed_to_download, self.failed_to_resize)
def do_log(
self, duration, count, success, failed_to_download, failed_to_resize
): # pylint: disable=arguments-differ
img_per_sec = count / duration
success_ratio = 1.0 * success / count
failed_to_download_ratio = 1.0 * failed_to_download / count
failed_to_resize_ratio = 1.0 * failed_to_resize / count
print(
" - ".join(
[
f"{self.prefix:<7}",
f"success: {success_ratio:.3f}",
f"failed to download: {failed_to_download_ratio:.3f}",
f"failed to resize: {failed_to_resize_ratio:.3f}",
f"images per sec: {img_per_sec:.0f}",
f"count: {count}",
]
)
)
if self.enable_wandb:
wandb.log(
{
f"{self.prefix}/img_per_sec": img_per_sec,
f"{self.prefix}/success": success_ratio,
f"{self.prefix}/failed_to_download": failed_to_download_ratio,
f"{self.prefix}/failed_to_resize": failed_to_resize_ratio,
f"{self.prefix}/count": count,
}
)
class StatusTableLogger(Logger):
"""Log status table to W&B, up to `max_status` most frequent items"""
def __init__(self, max_status=100, min_interval=60, enable_wandb=False, **logger_args):
super().__init__(min_interval=min_interval, **logger_args)
# avoids too many errors unique to a specific website (SSL certificates, etc)
self.max_status = max_status
self.enable_wandb = enable_wandb
def do_log(self, status_dict, count): # pylint: disable=arguments-differ
if self.enable_wandb:
status_table = wandb.Table(
columns=["status", "frequency", "count"],
data=[[k, 1.0 * v / count, v] for k, v in status_dict.most_common(self.max_status)],
)
wandb.run.log({"status": status_table})
def write_stats(
output_folder,
shard_id,
count,
successes,
failed_to_download,
failed_to_resize,
start_time,
end_time,
status_dict,
oom_shard_count,
):
"""Write stats to disk"""
stats = {
"count": count,
"successes": successes,
"failed_to_download": failed_to_download,
"failed_to_resize": failed_to_resize,
"duration": end_time - start_time,
"status_dict": status_dict.dump(),
}
fs, output_path = fsspec.core.url_to_fs(output_folder)
shard_name = "{shard_id:0{oom_shard_count}d}".format(shard_id=shard_id, oom_shard_count=oom_shard_count)
json_file = f"{output_path}/{shard_name}_stats.json"
with fs.open(json_file, "w") as f:
json.dump(stats, f, indent=4)
# https://docs.python.org/3/library/multiprocessing.html
# logger process that reads stats files regularly, aggregates and send to wandb / print to terminal
class LoggerProcess(Process):
"""Logger process that reads stats files regularly, aggregates and send to wandb / print to terminal"""
def __init__(self, output_folder, enable_wandb, wandb_project, config_parameters, processes_count, log_interval=60):
super().__init__()
self.log_interval = log_interval
self.enable_wandb = enable_wandb
self.fs, self.output_path = fsspec.core.url_to_fs(output_folder)
self.stats_files = set()
self.wandb_project = wandb_project
self.config_parameters = config_parameters
self.processes_count = processes_count
self.q = Queue()
def run(self):
"""Run logger process"""
if self.enable_wandb:
self.current_run = wandb.init(project=self.wandb_project, config=self.config_parameters, anonymous="allow")
else:
self.current_run = None
self.total_speed_logger = SpeedLogger(
"total", processes_count=self.processes_count, enable_wandb=self.enable_wandb
)
self.status_table_logger = StatusTableLogger(
processes_count=self.processes_count, enable_wandb=self.enable_wandb
)
start_time = time.perf_counter()
last_check = 0
total_status_dict = CappedCounter()
while True:
time.sleep(0.1)
try:
self.q.get(False)
last_one = True
except queue.Empty as _:
last_one = False
if not last_one and time.perf_counter() - last_check < self.log_interval:
continue
try:
# read stats files
stats_files = self.fs.glob(self.output_path + "/*.json")
# get new stats files
new_stats_files = set(stats_files) - self.stats_files
if len(new_stats_files) == 0:
if last_one:
self.finish()
return
# read new stats files
for stats_file in new_stats_files:
with self.fs.open(stats_file, "r") as f:
stats = json.load(f)
SpeedLogger("worker", enable_wandb=self.enable_wandb)(
duration=stats["duration"],
count=stats["count"],
success=stats["successes"],
failed_to_download=stats["failed_to_download"],
failed_to_resize=stats["failed_to_resize"],
)
self.total_speed_logger(
duration=time.perf_counter() - start_time,
count=stats["count"],
success=stats["successes"],
failed_to_download=stats["failed_to_download"],
failed_to_resize=stats["failed_to_resize"],
)
status_dict = CappedCounter.load(stats["status_dict"])
total_status_dict.update(status_dict)
self.status_table_logger(total_status_dict, self.total_speed_logger.count)
self.stats_files.add(stats_file)
last_check = time.perf_counter()
if last_one:
self.finish()
return
except Exception as e: # pylint: disable=broad-except
print(e)
self.finish()
return
def finish(self):
"""Finish logger process"""
self.total_speed_logger.sync()
self.status_table_logger.sync()
if self.current_run is not None:
self.current_run.finish()
def join(self, timeout=None):
"""Stop logger process"""
self.q.put("stop")
super().join()
self.q.close()
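# Illustrative sketch (not part of the original file, and it assumes the module's
# own dependencies such as wandb and fsspec are installed): CappedCounter keeps at
# most `max_size` distinct keys, halving to the most frequent ones on overflow.
if __name__ == "__main__":
    c = CappedCounter(max_size=4)
    for status in ["success", "success", "timeout", "dns error", "ssl error"]:
        c.increment(status)
    print(c.most_common(2))  # e.g. [('success', 2), ('timeout', 1)]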
|
src/orders/migrations/0007_OrderPromoCodes.py | denkasyanov/education-backend | 151 | 12783352 | <gh_stars>100-1000
# Generated by Django 2.2.13 on 2020-09-30 13:14
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0006_PromoCodeComments'),
]
operations = [
migrations.AddField(
model_name='order',
name='promocode',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='orders.PromoCode'),
),
]
|
examples/simple.py | ldn-softdev/pyeapi | 126 | 12783385 | #!/usr/bin/env python
from __future__ import print_function
import pyeapi
connection = pyeapi.connect(host='192.168.1.16')
output = connection.execute(['enable', 'show version'])
print('My system MAC address is', output['result'][1]['systemMacAddress'])
|
i3pystatus/pomodoro.py | fkusei/i3pystatus | 413 | 12783389 | import subprocess
from datetime import datetime, timedelta
from i3pystatus import IntervalModule
from i3pystatus.core.desktop import DesktopNotification
STOPPED = 0
RUNNING = 1
BREAK = 2
class Pomodoro(IntervalModule):
"""
This plugin shows Pomodoro timer.
Left click starts/restarts timer.
Right click stops it.
Example color settings.
.. code-block:: python
color_map = {
'stopped': '#2ECCFA',
'running': '#FFFF00',
'break': '#37FF00'
}
"""
settings = (
('sound',
'Path to sound file to play as alarm. Played by "aplay" utility'),
('pomodoro_duration',
'Working (pomodoro) interval duration in seconds'),
('break_duration', 'Short break duration in seconds'),
('long_break_duration', 'Long break duration in seconds'),
('short_break_count', 'Short break count before first long break'),
('format', 'format string, available formatters: current_pomodoro, '
'total_pomodoro, time'),
('inactive_format', 'format string to display when no timer is running'),
('color', 'dictionary containing a mapping of statuses to colours')
)
inactive_format = 'Start Pomodoro'
color_map = {
'stopped': '#2ECCFA',
'running': '#FFFF00',
'break': '#37FF00'
}
color = None
sound = None
interval = 1
short_break_count = 3
format = '☯ {current_pomodoro}/{total_pomodoro} {time}'
pomodoro_duration = 25 * 60
break_duration = 5 * 60
long_break_duration = 15 * 60
on_rightclick = "stop"
on_leftclick = "start"
def init(self):
# state could be either running/break or stopped
self.state = STOPPED
self.current_pomodoro = 0
self.total_pomodoro = self.short_break_count + 1 # and 1 long break
self.time = None
if self.color is not None and type(self.color) == dict:
self.color_map.update(self.color)
def run(self):
if self.time and datetime.utcnow() >= self.time:
if self.state == RUNNING:
self.state = BREAK
if self.current_pomodoro == self.short_break_count:
self.time = datetime.utcnow() + \
timedelta(seconds=self.long_break_duration)
else:
self.time = datetime.utcnow() + \
timedelta(seconds=self.break_duration)
text = 'Go for a break!'
else:
self.state = RUNNING
self.time = datetime.utcnow() + \
timedelta(seconds=self.pomodoro_duration)
text = 'Back to work!'
self.current_pomodoro = (self.current_pomodoro + 1) % self.total_pomodoro
self._alarm(text)
if self.state == RUNNING or self.state == BREAK:
min, sec = divmod((self.time - datetime.utcnow()).total_seconds(), 60)
text = '{:02}:{:02}'.format(int(min), int(sec))
sdict = {
'time': text,
'current_pomodoro': self.current_pomodoro + 1,
'total_pomodoro': self.total_pomodoro
}
color = self.color_map['running'] if self.state == RUNNING else self.color_map['break']
text = self.format.format(**sdict)
else:
text = self.inactive_format
color = self.color_map['stopped']
self.output = {
'full_text': text,
'color': color
}
def start(self):
self.state = RUNNING
self.time = datetime.utcnow() + timedelta(seconds=self.pomodoro_duration)
self.current_pomodoro = 0
def stop(self):
self.state = STOPPED
self.time = None
def _alarm(self, text):
notification = DesktopNotification(title='Alarm!', body=text)
notification.display()
if self.sound is not None:
subprocess.Popen(['aplay',
self.sound,
'-q'],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
|
tests/test_runtime.py | NathanDeMaria/aws-lambda-r-runtime | 134 | 12783400 | import base64
import json
import re
import unittest
import boto3
from tests import get_version, get_function_name, is_local
from tests.sam import LocalLambdaServer, start_local_lambda
class TestRuntimeLayer(unittest.TestCase):
lambda_server: LocalLambdaServer = None
@classmethod
def setUpClass(cls):
if is_local():
cls.lambda_server = start_local_lambda(template_path="test-template.yaml",
parameter_overrides={'Version': get_version()},
)
def get_client(self):
return self.lambda_server.get_client() if is_local() else boto3.client('lambda')
def test_script(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("ExampleFunction"),
Payload=json.dumps({'x': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertEqual(2, result)
def test_lowercase_extension(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("LowerCaseExtensionFunction"),
Payload=json.dumps({'x': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertEqual(2, result)
def test_multiple_arguments(self):
lambda_client = self.get_client()
payload = {'x': 'bar', 'y': 1}
response = lambda_client.invoke(FunctionName=get_function_name("MultipleArgumentsFunction"),
Payload=json.dumps(payload),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertDictEqual(payload, result)
@unittest.skipIf(is_local(), 'Lambda local does not support log retrieval')
def test_debug_logging(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("LoggingFunction"),
LogType='Tail',
Payload=json.dumps({'x': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertEqual(1, result)
log = base64.b64decode(response['LogResult']).decode('utf-8')
self.assertIn("runtime:Sourcing 'script.R'", log)
self.assertIn("runtime:Invoking function 'handler_with_debug_logging' with parameters:\n$x\n[1] 1", log)
self.assertIn("runtime:Function returned:\n[1] 1", log)
self.assertIn("runtime:Posted result:\n", log)
@unittest.skipIf(is_local(), 'Lambda local does not support log retrieval')
def test_no_debug_logging(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("ExampleFunction"),
LogType='Tail',
Payload=json.dumps({'x': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertEqual(2, result)
log = base64.b64decode(response['LogResult']).decode('utf-8')
self.assertNotIn("Sourcing ", log)
self.assertNotIn("Invoking function ", log)
self.assertNotIn("Function returned:", log)
self.assertNotIn("Posted result:", log)
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_missing_source_file(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("MissingSourceFileFunction"),
Payload=json.dumps({'y': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('Source file does not exist: missing.[R|r]', json_payload['errorMessage'])
self.assertEqual('simpleError', json_payload['errorType'])
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_missing_function(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("MissingFunctionFunction"),
Payload=json.dumps({'y': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('Function "handler_missing" does not exist', json_payload['errorMessage'])
self.assertEqual('simpleError', json_payload['errorType'])
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_function_as_variable(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("HandlerAsVariableFunction"),
Payload=json.dumps({'y': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('Function "handler_as_variable" does not exist', json_payload['errorMessage'])
self.assertEqual('simpleError', json_payload['errorType'])
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_missing_argument(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("ExampleFunction"))
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('argument "x" is missing, with no default', json_payload['errorMessage'])
self.assertEqual('simpleError', json_payload['errorType'])
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_unused_argument(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("ExampleFunction"),
Payload=json.dumps({'x': 1, 'y': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('unused argument (y = 1)', json_payload['errorMessage'])
self.assertEqual('simpleError', json_payload['errorType'])
# @unittest.skipIf(is_local(), 'Fails locally with "argument list too long"')
@unittest.skip('Fails with timeout')
def test_long_argument(self):
lambda_client = self.get_client()
payload = {x: x for x in range(0, 100000)}
response = lambda_client.invoke(FunctionName=get_function_name("VariableArgumentsFunction"),
Payload=json.dumps(payload),
)
raw_payload = response['Payload'].read().decode('utf-8')
result = json.loads(raw_payload)
self.assertEqual(1, result)
@unittest.skipIf(is_local(), 'Lambda local does not pass errors properly')
def test_missing_library(self):
lambda_client = self.get_client()
response = lambda_client.invoke(FunctionName=get_function_name("MissingLibraryFunction"),
Payload=json.dumps({'y': 1}),
)
raw_payload = response['Payload'].read().decode('utf-8')
json_payload = json.loads(raw_payload)
self.assertEqual('Unhandled', response['FunctionError'])
self.assertIn('there is no package called ‘Matrix’', json_payload['errorMessage'])
error_type = 'packageNotFoundError' if get_version() == '3_6_0' else 'simpleError'
self.assertEqual(error_type, json_payload['errorType'])
@classmethod
def tearDownClass(cls):
if is_local():
cls.lambda_server.kill()
|
tools/check_python_format.py | cockroachzl/recommenders-addons | 584 | 12783416 | <filename>tools/check_python_format.py
#!/usr/bin/env python
from subprocess import check_call, CalledProcessError
def check_bash_call(string):
check_call(["bash", "-c", string])
def _run_format():
files_changed = False
try:
check_bash_call(
"find . -name '*.py' -print0 | xargs -0 yapf --style=./.yapf -dr")
except CalledProcessError:
check_bash_call(
"find . -name '*.py' -print0 | xargs -0 yapf --style=./.yapf -ir")
files_changed = True
if files_changed:
print("Some files have changed.")
print("Please use 'yapf --style=google -ri ./**/*.py' before commit.")
else:
print("No formatting needed.")
if files_changed:
exit(1)
def run_format():
try:
_run_format()
except CalledProcessError as error:
print("Yapf check returned exit code", error.returncode)
exit(error.returncode)
if __name__ == "__main__":
run_format()
|
examples/csj/s0/csj_tools/wn.2.prep.text.py | treeaaa/wenet | 1,166 | 12783422 | import os
import sys
# train test1 test2 test3
def readtst(tstfn):
outlist = list()
with open(tstfn) as br:
for aline in br.readlines():
aline = aline.strip()
outlist.append(aline)
return outlist
def split_train_tests_xml(xmlpath, test1fn, test2fn, test3fn):
test1list = readtst(test1fn)
test2list = readtst(test2fn)
test3list = readtst(test3fn)
outtrainlist = list() # full path ".xml.simp" files
outt1list = list() # test 1, full path ".xml.simp" files
outt2list = list()
outt3list = list()
for afile in os.listdir(xmlpath):
if not afile.endswith('.xml.simp'):
continue
afile2 = xmlpath + '/' + afile
aid = afile.split('.')[0]
if aid in test1list:
outt1list.append(afile2)
elif aid in test2list:
outt2list.append(afile2)
elif aid in test3list:
outt3list.append(afile2)
else:
outtrainlist.append(afile2)
return outtrainlist, outt1list, outt2list, outt3list
def all_wavs(wavpath):
wavlist = list()
for afile in os.listdir(wavpath):
if not afile.endswith('.wav'):
continue
afile2 = wavpath + '/' + afile
wavlist.append(afile2)
return wavlist
def gen_text(xmllist, outpath):
# id \t text
# e.g., /workspace/asr/wenet/examples/csj/s0/data/xml/S11M1689.xml.simp
# ID = S11M1689_stime_etime
outtxtfn = os.path.join(outpath, 'text')
with open(outtxtfn, 'w') as bw:
for xmlfn in xmllist:
aid = xmlfn.split('/')[-1]
aid2 = aid.split('.')[0]
with open(xmlfn) as br:
for aline in br.readlines():
aline = aline.strip()
# stime \t etime \t text1 \t text2 \t text3 \t text4 \t text5
cols = aline.split('\t')
# TODO different between "< 7" and "< 4"? strange
# -> use "< 4", DO NOT use "< 7" !
if len(cols) < 4:
continue
stime = cols[0]
etime = cols[1]
atxt = cols[3].replace(' ', '')
afullid = '{}_{}_{}'.format(aid2, stime, etime)
aoutline = '{}\t{}\n'.format(afullid, atxt)
bw.write(aoutline)
def parse_xml_set(xmllist):
outset = set()
for xml in xmllist:
aid = xml.split('/')[-1]
aid2 = aid.split('.')[0]
outset.add(aid2)
return outset
def gen_wav_scp(xmllist, wavlist, outpath):
# xmlset = pure id set, alike 'S04F1228'
# can be from train, test1, test2, or test3
xmlset = parse_xml_set(xmllist)
outwavscpfn = os.path.join(outpath, 'wav.scp')
with open(outwavscpfn, 'w') as bw:
for wav in wavlist:
# wav is alike "/workspace/asr/wenet/examples/csj/s0/data
# /wav/S04F1228.wav_00458.875_00459.209.wav"
aid = wav.split('/')[-1]
cols = aid.split('_')
aid2 = cols[0].split('.')[0]
if aid2 not in xmlset:
continue
stime = cols[1]
etime = cols[2].replace('.wav', '')
afullid = '{}_{}_{}'.format(aid2, stime, etime)
wavabspath = os.path.abspath(wav)
aoutline = '{}\t{}\n'.format(afullid, wavabspath)
bw.write(aoutline)
def prep_text_wavscp(
xmlpath, wavpath, test1fn, test2fn, test3fn,
outtrainpath, out1path, out2path, out3path):
trainlist, t1list, t2list, t3list = split_train_tests_xml(
xmlpath,
test1fn,
test2fn,
test3fn)
wavlist = all_wavs(wavpath)
gen_text(trainlist, outtrainpath)
gen_text(t1list, out1path)
gen_text(t2list, out2path)
gen_text(t3list, out3path)
gen_wav_scp(trainlist, wavlist, outtrainpath)
gen_wav_scp(t1list, wavlist, out1path)
gen_wav_scp(t2list, wavlist, out2path)
gen_wav_scp(t3list, wavlist, out3path)
if __name__ == '__main__':
if len(sys.argv) < 10:
print(
"Usage: {}".format(sys.argv[0]) + "<xmlpath> " +
"<wavpath> <test1fn> <test2fn> <test3fn> " +
"<outtrainpath> <out1path> <out2path> <out3path>")
exit(1)
xmlpath = sys.argv[1]
wavpath = sys.argv[2]
test1fn = sys.argv[3]
test2fn = sys.argv[4]
test3fn = sys.argv[5]
outtrainpath = sys.argv[6]
out1path = sys.argv[7]
out2path = sys.argv[8]
out3path = sys.argv[9]
prep_text_wavscp(xmlpath, wavpath, test1fn,
test2fn, test3fn, outtrainpath,
out1path, out2path, out3path)
|
hal_fuzz/hal_fuzz/handlers/stm32f4_hal/stm32f4_i2c.py | diagprov/hal-fuzz | 117 | 12783451 | <filename>hal_fuzz/hal_fuzz/handlers/stm32f4_hal/stm32f4_i2c.py
import sys
from unicorn.arm_const import *
from ...util import *
import sys
from ..fuzz import fuzz_remaining, get_fuzz
from ...models.i2c import I2CModel
def HAL_I2C_Init(uc):
pass
def HAL_I2C_Mem_Read(uc):
# HAL_StatusTypeDef __fastcall HAL_I2C_Mem_Read(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size, uint32_t Timeout)
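    # AAPCS calling convention: the first four arguments arrive in R0-R3, and the
    # remaining ones (pData, Size, Timeout) are read off the emulated stack.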
device_id = uc.reg_read(UC_ARM_REG_R0)
dev_addr = uc.reg_read(UC_ARM_REG_R1)
mem_addr = uc.reg_read(UC_ARM_REG_R2)
mem_addr_size = uc.reg_read(UC_ARM_REG_R3)
dst_buf = struct.unpack("<I", uc.mem_read(uc.reg_read(UC_ARM_REG_SP), 4))[0]
dst_buf_size = struct.unpack("<I", uc.mem_read(uc.reg_read(UC_ARM_REG_SP) + 0x4, 4))[0]
timeout = struct.unpack("<I", uc.mem_read(uc.reg_read(UC_ARM_REG_SP) + 0x8, 4))[0]
assert(dst_buf != 0)
assert(dst_buf_size < 1000)
assert(mem_addr < 65535)
assert(dst_buf_size >= mem_addr_size)
#stuff = I2CModel.rx(device_id, dev_addr, mem_addr_size)
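    # Rather than emulating a real I2C peripheral (see the commented-out
    # I2CModel.rx call above), the read is satisfied with mem_addr_size bytes
    # pulled from the fuzzer's input stream.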
stuff = get_fuzz(mem_addr_size)
uc.mem_write(dst_buf, stuff)
uc.reg_write(UC_ARM_REG_R0, 0)
print(b"<<< " + stuff)
def HAL_I2C_Mem_Write(uc):
uc.reg_write(UC_ARM_REG_R0, 0)
|
python/gpu-enabled-multiprocessing.py | GangababuManam/tensorflow-101 | 832 | 12783462 | <reponame>GangababuManam/tensorflow-101<gh_stars>100-1000
import pandas as pd
import multiprocessing
from multiprocessing import Pool
def train(index, df):
import tensorflow as tf
import keras
    from keras.models import Sequential
    from keras.layers import Dense, Activation
#------------------------------
#this block enables GPU enabled multiprocessing
core_config = tf.ConfigProto()
core_config.gpu_options.allow_growth = True
session = tf.Session(config=core_config)
keras.backend.set_session(session)
#------------------------------
#prepare input and output values
df = df.drop(columns=['index'])
data = df.drop(columns=['target']).values
target = df['target']
#------------------------------
model = Sequential()
model.add(Dense(5 #num of hidden units
, input_shape=(data.shape[1],))) #num of features in input layer
model.add(Activation('sigmoid'))
model.add(Dense(1))#number of nodes in output layer
model.add(Activation('sigmoid'))
model.compile(loss='mse', optimizer=keras.optimizers.Adam())
#------------------------------
model.fit(data, target, epochs = 5000, verbose = 1)
model.save("model_for_%s.hdf5" % index)
#------------------------------
#finally, close sessions
session.close()
keras.backend.clear_session()
#-----------------------------
#main program
# with the 'spawn' start method every worker re-imports this module, so the
# process-spawning code must live under the __main__ guard below
if __name__ == '__main__':
    multiprocessing.set_start_method('spawn', force=True)
    df = pd.read_csv("dataset.csv")
    my_tuple = [(i, df[df['index'] == i]) for i in range(0, 20)]
    with Pool(10) as pool:
        pool.starmap(train, my_tuple)
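# This script expects a "dataset.csv" whose columns include an integer 'index'
# in [0, 19] (one group per parallel task), a numeric 'target' column and any
# number of feature columns. A possible way to build a synthetic file with
# these columns (feature names 'f0'..'f2' are only an example):
#   import numpy as np
#   n = 2000
#   synth = pd.DataFrame(np.random.rand(n, 3), columns=['f0', 'f1', 'f2'])
#   synth['target'] = (synth['f0'] + synth['f1'] > 1.0).astype(int)
#   synth['index'] = np.arange(n) % 20
#   synth.to_csv("dataset.csv", index=False)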
|
src/networks/lenet.py | francesco-p/FACIL | 243 | 12783471 | from torch import nn
import torch.nn.functional as F
class LeNet(nn.Module):
"""LeNet-like network for tests with MNIST (28x28)."""
def __init__(self, in_channels=1, num_classes=10, **kwargs):
super().__init__()
# main part of the network
self.conv1 = nn.Conv2d(in_channels, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 16, 120)
self.fc2 = nn.Linear(120, 84)
# last classifier layer (head) with as many outputs as classes
self.fc = nn.Linear(84, num_classes)
# and `head_var` with the name of the head, so it can be removed when doing incremental learning experiments
self.head_var = 'fc'
def forward(self, x):
out = F.relu(self.conv1(x))
out = F.max_pool2d(out, 2)
out = F.relu(self.conv2(out))
out = F.max_pool2d(out, 2)
out = out.view(out.size(0), -1)
out = F.relu(self.fc1(out))
out = F.relu(self.fc2(out))
out = self.fc(out)
return out
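# Minimal usage sketch: instantiate the network and run a forward pass on a
# random MNIST-sized batch to check the tensor shapes (the 4-sample batch and
# the print are only illustrative, not part of the FACIL training pipeline).
if __name__ == '__main__':
    import torch
    model = LeNet(in_channels=1, num_classes=10)
    dummy = torch.randn(4, 1, 28, 28)  # batch of 4 grayscale 28x28 images
    logits = model(dummy)
    print(logits.shape)  # torch.Size([4, 10])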
|
egg/platform_arm.py | TheMartianObserver/nsimd | 247 | 12783490 | <reponame>TheMartianObserver/nsimd<filename>egg/platform_arm.py
# Copyright (c) 2020 Agenium Scale
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file gives the implementation of platform ARM, i.e. ARM SIMD.
# Reading this file is rather straightforward. ARM SIMD extensions are rather
# coherent and consistent. It implements the following architectures:
# - ARMv7 -> 128 bits registers without f16 and f64 support
# - Aarch32 -> 128 bits registers with optional f16 and without f64 support
# - Aarch64 -> 128 bits registers with optional f16 and f64 support
# - SVE -> up to 2048 bits registers
# The first three SIMD extensions are collectively called NEON. Aarch32 and
# Aarch64 correspond respectively to ARMv8 32-bit and 64-bit chips. Note that
# the ARM documentation says that ARMv7 and Aarch32 are different, but they
# seem to differ by only a handful of intrinsics which are not in the scope
# of NSIMD, so we have implemented the following:
#
# - ARMv7 \ -> neon128
# - Aarch32 /
# - Aarch64 -> aarch64
# - SVE -> sve
import common
# -----------------------------------------------------------------------------
# Helpers
def neon_typ(typ):
prefix = {'i': 'int', 'u': 'uint', 'f': 'float'}
return '{}{}x{}_t'.format(prefix[typ[0]], typ[1:], 128 // int(typ[1:]))
def half_neon64_typ(typ):
prefix = {'i': 'int', 'u': 'uint', 'f': 'float'}
return '{}{}x{}_t'.format(prefix[typ[0]], typ[1:], 64 // int(typ[1:]))
def sve_typ(typ):
prefix = {'i': 'svint', 'u': 'svuint', 'f': 'svfloat'}
return '{}{}_t'.format(prefix[typ[0]], typ[1:])
def suf(typ):
if typ[0] == 'i':
return 's{}'.format(typ[1:])
else:
return typ
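# Illustrative values for the helpers above:
#   neon_typ('f32') -> 'float32x4_t'
#   half_neon64_typ('u16') -> 'uint16x4_t'
#   sve_typ('i64') -> 'svint64_t'
#   suf('i32') -> 's32', suf('u8') -> 'u8', suf('f64') -> 'f64'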
neon = ['neon128', 'aarch64']
fixed_sized_sve = ['sve128', 'sve256', 'sve512', 'sve1024', 'sve2048']
sve = ['sve'] + fixed_sized_sve
fmtspec = {}
def convert_from_predicate(opts, op):
if opts.sve_emulate_bool:
return '''svsel({op},
svdup_n_u{typnbits}_x({svtrue}, (u{typnbits})~0),
svdup_n_u{typnbits}_x({svtrue}, 0))'''. \
format(op=op, **fmtspec)
else:
return op
def convert_to_predicate(opts, op):
if opts.sve_emulate_bool:
# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve
# it needs to be deleted when the bug is corrected
return '''svcmpeq({svtrue}, (svuint{typnbits}_t){op},
svdup_n_u{typnbits}_x({svtrue},
(u{typnbits})~0))'''.format(op=op, **fmtspec)
else:
return op
# -----------------------------------------------------------------------------
# Implementation of mandatory functions for this module
def get_simd_exts():
return ['neon128', 'aarch64', 'sve', 'sve128', 'sve256', 'sve512',
'sve1024', 'sve2048']
def get_prev_simd_ext(simd_ext):
if simd_ext in ['neon128', 'aarch64']:
return 'cpu'
elif simd_ext in sve:
return 'aarch64'
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
def emulate_fp16(simd_ext):
if not simd_ext in get_simd_exts():
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
if simd_ext in sve:
return False
else:
return True
def get_type(opts, simd_ext, typ, nsimd_typ):
if simd_ext in neon:
if typ == 'f64':
if simd_ext == 'neon128':
return 'typedef struct {{ double v0; double v1; }} {};'. \
format(nsimd_typ)
else:
return 'typedef {} {};'.format(neon_typ('f64'), nsimd_typ)
elif typ == 'f16':
return '''
#ifdef NSIMD_ARM_FP16
typedef float16x8_t {nsimd_typ};
#else
typedef struct {{ float32x4_t v0; float32x4_t v1; }}
{nsimd_typ};
#endif
'''.format(nsimd_typ=nsimd_typ) # extra \n are necessary
else:
return 'typedef {} {};'.format(neon_typ(typ), nsimd_typ)
elif simd_ext == 'sve':
return 'typedef {} {};'.format(sve_typ(typ), nsimd_typ)
elif simd_ext in fixed_sized_sve:
return 'typedef {} {} __attribute__((arm_sve_vector_bits({})));'. \
format(sve_typ(typ), nsimd_typ, simd_ext[3:])
else:
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
def get_logical_type(opts, simd_ext, typ, nsimd_typ):
if typ not in common.types:
raise ValueError('Unknown type "{}"'.format(typ))
if simd_ext not in get_simd_exts():
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
if typ in common.ftypes + common.itypes:
typ2 = 'u{}'.format(typ[1:]);
else:
typ2 = typ
if simd_ext == 'neon128':
if typ == 'f16':
return \
'''
#ifdef NSIMD_ARM_FP16
typedef uint16x8_t {nsimd_typ};
#else
typedef struct {{ uint32x4_t v0; uint32x4_t v1; }} {nsimd_typ};
#endif
'''.format(nsimd_typ=nsimd_typ) # extra \n are necessary
elif typ == 'f64':
return 'typedef struct {{ u64 v0; u64 v1; }} {};'.format(nsimd_typ)
else:
return get_type(opts, simd_ext, typ2, nsimd_typ)
if simd_ext == 'aarch64':
if typ == 'f16':
return get_logical_type(opts, 'neon128', 'f16', nsimd_typ)
else:
return get_type(opts, simd_ext, typ2, nsimd_typ)
elif simd_ext in sve:
if opts.sve_emulate_bool:
return get_type(opts, simd_ext, 'u' + typ[1:], nsimd_typ)
elif simd_ext in fixed_sized_sve:
return \
'typedef svbool_t {} __attribute__((arm_sve_vector_bits({})));'. \
format(nsimd_typ, simd_ext[3:])
else:
return 'typedef svbool_t {};'.format(nsimd_typ)
def get_nb_registers(simd_ext):
if simd_ext in neon:
return '16'
elif simd_ext in sve:
return '32'
else:
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
def get_native_soa_typ(simd_ext, typ, deg):
prefix = { 'i': 'int', 'u': 'uint', 'f': 'float' }[typ[0]]
if simd_ext in sve:
return 'sv{}x{}_t'.format(prefix + typ[1:], deg)
else:
return '{}{}x{}x{}_t'.format(prefix, typ[1:], 128 // int(typ[1:]),
deg)
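# For instance, get_native_soa_typ('neon128', 'i16', 2) gives 'int16x8x2_t'
# and get_native_soa_typ('sve', 'f32', 3) gives 'svfloat32x3_t'.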
def get_SoA_type(simd_ext, typ, deg, nsimd_typ):
if simd_ext != 'sve':
raise ValueError('SIMD extension must be "sve"')
prefix = { 'i': 'int', 'u': 'uint', 'f': 'float' }[typ[0]]
return 'typedef {} {};'.format(get_native_soa_typ(simd_ext, typ, deg),
nsimd_typ)
def has_compatible_SoA_types(simd_ext):
if simd_ext not in neon + sve:
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
return False
# -----------------------------------------------------------------------------
def get_additional_include(func, platform, simd_ext):
ret = '''#include <nsimd/cpu/cpu/{}.h>
'''.format(func)
if simd_ext in sve:
ret += '''#include <nsimd/arm/aarch64/{}.h>
'''.format(func)
if func in ['load2u', 'load3u', 'load4u', 'load2a', 'load3a', 'load4a']:
deg = func[4]
ret += '''#if NSIMD_CXX > 0
extern "C" {{
#endif
NSIMD_INLINE nsimd_{simd_ext}_vu16x{deg}
nsimd_{func}_{simd_ext}_u16(const u16*);
# if NSIMD_CXX > 0
}} // extern "C"
#endif
'''.format(func=func, deg=deg, simd_ext=simd_ext)
if func in ['mask_storea1', 'mask_storeu1', 'masko_loada1',
'masko_loadu1', 'maskz_loada1', 'maskz_loadu1'] and \
simd_ext not in sve:
ret += '''#include <nsimd/scalar_utilities.h>
'''
if func == 'mask_for_loop_tail' and simd_ext not in sve:
ret += '''#include <nsimd/arm/{simd_ext}/set1.h>
#include <nsimd/arm/{simd_ext}/set1l.h>
#include <nsimd/arm/{simd_ext}/iota.h>
#include <nsimd/arm/{simd_ext}/lt.h>
'''.format(simd_ext=simd_ext)
if simd_ext == 'neon128' and func == 'notl':
ret += '''#include <nsimd/arm/neon128/notb.h>
'''
if simd_ext in neon and func == 'ne':
ret += '''#include <nsimd/arm/{simd_ext}/eq.h>
# include <nsimd/arm/{simd_ext}/notl.h>
'''.format(simd_ext=simd_ext)
if simd_ext in neon and func in ['fms', 'fnms']:
ret += '''#include <nsimd/arm/{simd_ext}/ne.h>
#include <nsimd/arm/{simd_ext}/fma.h>
#include <nsimd/arm/{simd_ext}/fnma.h>
'''.format(simd_ext=simd_ext)
if func == 'shra':
ret += '''#include <nsimd/arm/{simd_ext}/shr.h>
'''.format(simd_ext=simd_ext)
if func in ['loadlu', 'loadla']:
ret += '''#include <nsimd/arm/{simd_ext}/eq.h>
# include <nsimd/arm/{simd_ext}/set1.h>
# include <nsimd/arm/{simd_ext}/{load}.h>
# include <nsimd/arm/{simd_ext}/notl.h>
'''.format(load='load' + func[5], simd_ext=simd_ext)
if func in ['storelu', 'storela']:
ret += '''#include <nsimd/arm/{simd_ext}/if_else1.h>
# include <nsimd/arm/{simd_ext}/set1.h>
# include <nsimd/arm/{simd_ext}/{store}.h>
'''.format(store='store' + func[6], simd_ext=simd_ext)
if func == 'to_logical':
ret += '''#include <nsimd/arm/{simd_ext}/reinterpret.h>
#include <nsimd/arm/{simd_ext}/ne.h>
''' .format(simd_ext=simd_ext)
if func == 'zip':
ret += '''#include <nsimd/arm/{simd_ext}/ziplo.h>
#include <nsimd/arm/{simd_ext}/ziphi.h>
'''.format(simd_ext=simd_ext)
if func == 'unzip':
ret += '''#include <nsimd/arm/{simd_ext}/unziplo.h>
#include <nsimd/arm/{simd_ext}/unziphi.h>
'''.format(simd_ext=simd_ext)
if func == 'adds':
ret += '''#include <nsimd/arm/{simd_ext}/add.h>
'''.format(simd_ext=simd_ext)
if func == 'subs':
ret += '''#include <nsimd/arm/{simd_ext}/sub.h>
'''.format(simd_ext=simd_ext)
if func in ['gather', 'scatter'] and simd_ext == 'sve':
ret += '''#include <nsimd/arm/sve/len.h>
'''
return ret
# -----------------------------------------------------------------------------
# Emulators
def emulate_op1(op, simd_ext, typ):
if simd_ext in neon:
le = 128 // int(typ[1:]);
return '''int i;
{typ} buf[{le}];
vst1q_{suf}(buf, {in0});
for (i=0; i < {le}; i += nsimd_len_cpu_{typ}()) {{
nsimd_storeu_cpu_{typ}( & buf[i], nsimd_{op}_cpu_{typ}(
nsimd_loadu_cpu_{typ}(&buf[i])));}}
return vld1q_{suf}(buf); '''. \
format(op=op, le=le, **fmtspec)
if simd_ext in sve:
le = 2048 // int(typ[1:]);
return '''int i;
{typ} buf[{le}];
svst1_{suf}({svtrue}, buf, {in0});
                  for (i=0; i < nsimd_len_{simd_ext}_{typ}();
i += nsimd_len_cpu_{typ}()) {{
nsimd_storeu_cpu_{typ}( & buf[i], nsimd_{op}_cpu_{typ}(
nsimd_loadu_cpu_{typ}(&buf[i])));}}
return svld1_{suf}({svtrue}, buf); '''. \
format(op=op, le=le, **fmtspec)
def emulate_op2(op, simd_ext, typ):
if simd_ext in neon:
le = 128 // int(typ[1:]);
return '''int i;
{typ} buf0[{le}], buf1[{le}];
vst1q_{suf}(buf0, {in0});
vst1q_{suf}(buf1, {in1});
for (i=0; i < {le}; i++) {{
buf0[i] = ({typ})(buf0[i] {op} buf1[i]);}}
return vld1q_{suf}(buf0); '''. \
format(op=op, le=le, **fmtspec)
if simd_ext in sve:
le = 2048 // int(typ[1:]);
return '''int i;
{typ} buf0[{le}], buf1[{le}];
svst1_{suf}({svtrue}, buf0, {in0});
svst1_{suf}({svtrue}, buf1, {in1});
for (i=0; i < nsimd_len_{simd_ext}_{typ}(); i++) {{
buf0[i] = ({typ})(buf0[i] {op} buf1[i]);}}
return svld1_{suf}({svtrue}, buf0); '''. \
format(op=op, le=le, **fmtspec)
def emulate_lop2_neon(opts, op, simd_ext, typ):
le = 128 // int(typ[1:]);
ltyp = get_logical_type(opts, simd_ext, typ)
lsuf = suf(ltyp)
return '''int i;
{ltyp} buf0[{le}], buf1[{le}];
vst1q_{lsuf}(buf0, {in0});
vst1q_{lsuf}(buf1, {in1});
for (i = 0; i < {le}; i++) {{
buf0[i] = buf0[i] {op} buf1[i] ? ({ltyp})-1 : 0;
}}
return vld1q_{lsuf}(buf0);'''. \
format(op=op, le=le, ltyp=ltyp, lsuf=lsuf, **fmtspec)
def emulate_op3_neon(op, simd_ext, typ):
le = 128 // int(typ[1:]);
return '''int i;
{typ} buf0[{le}], buf1[{le}], buf2[{le}];
vst1q_{suf}(buf0, {in0});
vst1q_{suf}(buf1, {in1});
vst1q_{suf}(buf2, {in2});
for (i = 0; i < {le}; i += nsimd_len_cpu_{typ}()) {{
nsimd_storeu_cpu_{typ}(&buf0[i], nsimd_{op}_cpu_{typ}(
nsimd_loadu_cpu_{typ}(&buf0[i]),
nsimd_loadu_cpu_{typ}(&buf1[i]),
nsimd_loadu_cpu_{typ}(&buf2[i])));
}}
return vld1q_{suf}(buf0);'''.format(op=op, le=le, **fmtspec)
def emulate_f64_neon(simd_ext, op, params):
fmtspec2 = fmtspec.copy()
fmtspec2['op'] = op
fmtspec2['buf_ret_decl'] = 'nsimd_cpu_{}f64 buf_ret;'. \
format('v' if params[0] == 'v' else 'vl')
fmtspec2['buf_decl'] = '\n'.join(['nsimd_cpu_{}f64 buf{};'. \
format('v' if p[1] == 'v' else 'vl', p[0]) \
for p in common.enum(params[1:])])
fmtspec2['bufs'] = ','.join(['buf{}'.format(i) \
for i in range(0, len(params) - 1)])
fmtspec2['ret_decl'] = 'nsimd_{}_{}f64 ret;'. \
format(simd_ext, 'v' if params[0] == 'v' else 'vl')
buf_set = '\n'.join('''buf{i}.v0 = {ini}.v0;
buf{i}.v1 = {ini}.v1;'''. \
format(i=i, ini=fmtspec['in{}'.format(i)]) \
for i in range(0, len(params) - 1))
return '''{buf_ret_decl}
{buf_decl}
{ret_decl}
{buf_set}
buf_ret = nsimd_{op}_cpu_f64({bufs});
ret.v0 = buf_ret.v0;
ret.v1 = buf_ret.v1;
return ret;'''.format(buf_set=buf_set, **fmtspec2)
# -----------------------------------------------------------------------------
def f16f64(simd_ext, typ, op, armop, arity, forced_intrinsics = ''):
fmtspec2 = fmtspec.copy()
tmpl = ', '.join(['{{in{}}}.v{{{{i}}}}'.format(i).format(**fmtspec) \
for i in range(0, arity)])
fmtspec2['args1'] = tmpl.format(i='0')
fmtspec2['args2'] = tmpl.format(i='1')
fmtspec2['armop'] = armop
fmtspec2['op'] = op
if simd_ext in neon and typ == 'f16':
if forced_intrinsics != '':
fmtspec2['intrinsics'] = forced_intrinsics
else:
temp = ', '.join(['{{in{}}}'.format(i).format(**fmtspec) \
for i in range(0, arity)])
fmtspec2['intrinsics'] = 'return v{}q_f16({});'.format(armop, temp)
return '''#ifdef NSIMD_ARM_FP16
{intrinsics}
#else
nsimd_{simd_ext}_vf16 ret;
ret.v0 = nsimd_{op}_{simd_ext}_f32({args1});
ret.v1 = nsimd_{op}_{simd_ext}_f32({args2});
return ret;
#endif'''.format(**fmtspec2)
elif simd_ext == 'neon128' and typ == 'f64':
return emulate_f64_neon(simd_ext, op, ['v'] * (arity + 1))
return ''
# -----------------------------------------------------------------------------
# Lenghts
def max_len(simd_ext, typ):
if simd_ext == 'sve':
return 2048 // int(typ[1:])
elif simd_ext in fixed_sized_sve:
return int(simd_ext[3:]) // int(typ[1:])
else:
return 128 // int(typ[1:])
def real_len(simd_ext, typ):
if simd_ext == 'sve':
return 'nsimd_len_sve_{typ}()'.format(**fmtspec)
else:
return max_len(simd_ext, typ)
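# Examples: max_len('neon128', 'i8') == 16, max_len('sve256', 'f64') == 4 and
# max_len('sve', 'f32') == 64 (the 2048-bit upper bound). real_len only
# differs for plain 'sve', whose width is known at runtime via
# nsimd_len_sve_<typ>().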
# -----------------------------------------------------------------------------
# Loads of degree 1, 2, 3 and 4
def load1234(opts, simd_ext, typ, deg):
if simd_ext in neon:
if deg == 1:
normal = 'return vld{deg}q_{suf}({in0});'. \
format(deg=deg, **fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
/* Note that we can do much better but is it useful? */
nsimd_{simd_ext}_vf16 ret;
f32 buf[4];
buf[0] = nsimd_u16_to_f32(*(u16*){in0});
buf[1] = nsimd_u16_to_f32(*((u16*){in0} + 1));
buf[2] = nsimd_u16_to_f32(*((u16*){in0} + 2));
buf[3] = nsimd_u16_to_f32(*((u16*){in0} + 3));
ret.v0 = vld1q_f32(buf);
buf[0] = nsimd_u16_to_f32(*((u16*){in0} + 4));
buf[1] = nsimd_u16_to_f32(*((u16*){in0} + 5));
buf[2] = nsimd_u16_to_f32(*((u16*){in0} + 6));
buf[3] = nsimd_u16_to_f32(*((u16*){in0} + 7));
ret.v1 = vld1q_f32(buf);
return ret;
#endif'''.format(normal=normal, **fmtspec)
elif typ == 'f64' and simd_ext == 'neon128':
return \
'''nsimd_neon128_vf64 ret;
ret.v0 = *{in0};
ret.v1 = *({in0} + 1);
return ret;'''.format(**fmtspec)
else:
return normal
else:
normal = \
'''nsimd_{simd_ext}_v{typ}x{deg} ret;
{soa_typ} buf = vld{deg}q_{suf}({in0});
{assignment}
return ret;'''. \
format(deg=deg, soa_typ=get_native_soa_typ(simd_ext, typ, deg),
assignment='\n'.join(['ret.v{i} = buf.val[{i}];'. \
format(i=i) for i in range(0, deg)]), **fmtspec)
if typ == 'f16':
assignment = \
'''vst1q_u16(buf, temp.val[{{i}}]);
ret.v{{i}} = nsimd_loadu_{simd_ext}_f16((f16 *)buf);'''. \
format(**fmtspec)
return \
'''{soa_typ} temp = vld{deg}q_u16((u16 *){in0});
u16 buf[8];
nsimd_{simd_ext}_vf16x{deg} ret;
{assignment}
return ret;'''. \
format(deg=deg, assignment='\n'.join([assignment. \
format(i=i) for i in range(0, deg)]),
soa_typ=get_native_soa_typ(simd_ext, 'u16', deg),
**fmtspec)
elif typ in 'f64' and simd_ext == 'neon128':
return \
'nsimd_neon128_vf64x{} ret;\n'.format(deg) + \
'\n'.join(['ret.v{i}.v0 = *({in0} + {i});'. \
format(i=i, **fmtspec) for i in range(0, deg)]) + \
'\n'.join(['ret.v{i}.v1 = *({in0} + {ipd});'. \
format(i=i, ipd=i + deg, **fmtspec) \
for i in range(0, deg)]) + \
'\nreturn ret;\n'
elif typ in ['i64', 'u64'] and simd_ext == 'neon128':
return \
'''nsimd_neon128_v{typ}x{deg} ret;
{typ} buf[2];'''.format(deg=deg, **fmtspec) + \
'\n'.join(['''buf[0] = *({in0} + {i});
buf[1] = *({in0} + {ipd});
ret.v{i} = vld1q_{suf}(buf);'''. \
format(i=i, ipd=i + deg, **fmtspec) \
for i in range(0, deg)]) + \
'\nreturn ret;\n'
else:
return normal
else:
if deg == 1:
return 'return svld{deg}_{suf}({svtrue}, {in0});'. \
format(deg=deg, **fmtspec)
else:
return \
'''nsimd_{simd_ext}_v{typ}x{deg} ret;
{sve_typ} buf = svld{deg}_{suf}({svtrue}, {in0});
{assignment}
return ret;'''.format(assignment=\
'\n'.join(['ret.v{i} = svget{deg}_{suf}(buf, {i});'. \
format(i=i, deg=deg, **fmtspec) \
for i in range(deg)]),
sve_typ=get_native_soa_typ('sve', typ, deg),
deg=deg, **fmtspec)
# -----------------------------------------------------------------------------
# Mask loads
def maskoz_load(oz, simd_ext, typ):
if simd_ext in sve:
return 'return svsel_{suf}({in0}, svld1_{suf}({in0}, {in1}), {oz});'. \
format(oz='{in2}'.format(**fmtspec) if oz == 'o' \
else 'svdup_n_{suf}(({typ})0)'.format(**fmtspec),
**fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''nsimd_neon128_vf64 ret;
if ({in0}.v0) {{
ret.v0 = {in1}[0];
}} else {{
ret.v0 = {oz0};
}}
if ({in0}.v1) {{
ret.v1 = {in1}[1];
}} else {{
ret.v1 = {oz1};
}}
return ret;'''.format(
oz0 = '0.0f' if oz == 'z' else '{in2}.v0'.format(**fmtspec),
oz1 = '0.0f' if oz == 'z' else '{in2}.v1'.format(**fmtspec),
**fmtspec)
le = 128 // int(typ[1:])
normal = '''int i;
{typ} buf[{le}];
u{typnbits} mask[{le}];
vst1q_{suf}(buf, {oz});
vst1q_u{typnbits}(mask, {in0});
for (i = 0; i < {le}; i++) {{
if (mask[i]) {{
buf[i] = {in1}[i];
}}
}}
return vld1q_{suf}(buf);'''. \
format(oz='vdupq_n_{suf}(({typ})0)'.format(**fmtspec) \
if oz == 'z' else '{in2}'.format(**fmtspec),
le=le, **fmtspec)
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{normal}
#else
int i;
nsimd_{simd_ext}_vf16 ret;
f32 buf[8];
u32 mask[8];
vst1q_f32(buf, {oz0});
vst1q_f32(buf + 4, {oz1});
vst1q_u32(mask, {in0}.v0);
vst1q_u32(mask + 4, {in0}.v1);
for (i = 0; i < 8; i++) {{
if (mask[i]) {{
buf[i] = nsimd_f16_to_f32({in1}[i]);
}}
}}
ret.v0 = vld1q_f32(buf);
ret.v1 = vld1q_f32(buf + 4);
return ret;
#endif'''. \
format(oz0='vdupq_n_f32(0.0f)'.format(**fmtspec) \
if oz == 'z' else '{in2}.v0'.format(**fmtspec),
oz1='vdupq_n_f32(0.0f)'.format(**fmtspec) \
if oz == 'z' else '{in2}.v1'.format(**fmtspec),
normal=normal, **fmtspec)
return normal
# -----------------------------------------------------------------------------
# Stores of degree 1, 2, 3 and 4
def store1234(opts, simd_ext, typ, deg):
if simd_ext in neon:
if deg == 1:
normal = 'vst{deg}q_{suf}({in0}, {in1});'. \
format(deg=deg, **fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
f32 buf[4];
vst1q_f32(buf, {in1}.v0);
*((u16*){in0} ) = nsimd_f32_to_u16(buf[0]);
*((u16*){in0} + 1) = nsimd_f32_to_u16(buf[1]);
*((u16*){in0} + 2) = nsimd_f32_to_u16(buf[2]);
*((u16*){in0} + 3) = nsimd_f32_to_u16(buf[3]);
vst1q_f32(buf, {in1}.v1);
*((u16*){in0} + 4) = nsimd_f32_to_u16(buf[0]);
*((u16*){in0} + 5) = nsimd_f32_to_u16(buf[1]);
*((u16*){in0} + 6) = nsimd_f32_to_u16(buf[2]);
*((u16*){in0} + 7) = nsimd_f32_to_u16(buf[3]);
#endif'''.format(normal=normal, **fmtspec)
elif typ == 'f64' and simd_ext == 'neon128':
return \
'''*{in0} = {in1}.v0;
*({in0} + 1) = {in1}.v1;'''.format(**fmtspec)
else:
return normal
else:
normal = \
'''{soa_typ} buf;
{assignment}
vst{deg}q_{suf}({in0}, buf);'''. \
format(deg=deg, assignment='\n'.join([
'buf.val[{{}}] = {{in{}}};'.format(i). \
format(i - 1, **fmtspec) for i in range(1, deg + 1)]),
soa_typ=get_native_soa_typ(simd_ext, typ, deg),
**fmtspec)
if typ == 'f16':
assignment = \
'''nsimd_storeu_{{simd_ext}}_f16((f16 *)buf, {{in{}}});
temp.val[{{}}] = vld1q_u16(buf);'''
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
{soa_typ} temp;
u16 buf[8];
{assignment}
vst{deg}q_u16((u16 *){in0}, temp);
#endif'''. \
format(assignment='\n'.join([assignment.format(i). \
format(i - 1, **fmtspec) \
for i in range(1, deg + 1)]),
deg=deg, normal=normal,
soa_typ=get_native_soa_typ(simd_ext, 'u16', deg),
**fmtspec)
elif typ == 'f64' and simd_ext == 'neon128':
return \
'\n'.join(['*({{in0}} + {}) = {{in{}}}.v0;'. \
format(i - 1, i).format(**fmtspec) \
for i in range(1, deg + 1)]) + '\n' + \
'\n'.join(['*({{in0}} + {}) = {{in{}}}.v1;'. \
format(i + deg - 1, i).format(**fmtspec) \
for i in range(1, deg + 1)])
elif typ in ['i64', 'u64'] and simd_ext == 'neon128':
return \
'{typ} buf[{biglen}];'.format(biglen=2 * deg, **fmtspec) + \
'\n'.join(['vst1q_{{suf}}(buf + {im1x2}, {{in{i}}});'. \
format(im1x2=2 * (i - 1), i=i).format(**fmtspec) \
for i in range(1, deg + 1)]) + \
'\n'.join(['''*({in0} + {i}) = buf[{ix2}];
*({in0} + {ipd}) = buf[{ix2p1}];'''. \
format(i=i, ipd=i + deg, ix2=i * 2,
ix2p1=2 * i + 1, **fmtspec) \
for i in range(0, deg)])
else:
return normal
else:
if deg == 1:
return 'svst{deg}_{suf}({svtrue}, {in0}, {in1});'. \
format(deg=deg, **fmtspec)
fill_soa_typ = \
'\n'.join(['tmp = svset{{deg}}_{{suf}}(tmp, {im1}, {{in{i}}});'. \
format(im1=i - 1, i=i).format(deg=deg, **fmtspec) \
for i in range(1, deg + 1)])
return \
'''{soa_typ} tmp = svundef{deg}_{suf}();
{fill_soa_typ}
svst{deg}_{suf}({svtrue}, {in0}, tmp);'''. \
format(soa_typ=get_native_soa_typ('sve', typ, deg), deg=deg,
fill_soa_typ=fill_soa_typ, **fmtspec)
# -----------------------------------------------------------------------------
# Mask stores
def mask_store(simd_ext, typ):
if simd_ext in sve:
return 'svst1_{suf}({in0}, {in1}, {in2});'.format(**fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''if ({in0}.v0) {{
{in1}[0] = {in2}.v0;
}}
if ({in0}.v1) {{
{in1}[1] = {in2}.v1;
}}'''.format(**fmtspec)
le = 128 // int(typ[1:])
normal = '''int i;
{typ} buf[{le}];
u{typnbits} mask[{le}];
vst1q_{suf}(buf, {in2});
vst1q_u{typnbits}(mask, {in0});
for (i = 0; i < {le}; i++) {{
if (mask[i]) {{
{in1}[i] = buf[i];
}}
}}'''.format(le=le, **fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
f32 buf[8];
u32 mask[8];
int i;
vst1q_u32(mask, {in0}.v0);
vst1q_u32(mask + 4, {in0}.v1);
vst1q_f32(buf, {in2}.v0);
vst1q_f32(buf + 4, {in2}.v1);
for (i = 0; i < 8; i++) {{
if (mask[i]) {{
{in1}[i] = nsimd_f32_to_f16(buf[i]);
}}
}}
#endif'''.format(normal=normal, **fmtspec)
return normal
# -----------------------------------------------------------------------------
# Length
def len1(simd_ext, typ):
if simd_ext in neon:
return 'return {};'.format(128 // int(typ[1:]))
elif simd_ext == 'sve':
return 'return (int)svcntp_b{typnbits}({svtrue}, {svtrue});'. \
format(**fmtspec)
elif simd_ext in fixed_sized_sve:
return 'return {};'.format(int(simd_ext[3:]) // int(typ[1:]))
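# Examples of the generated bodies: len1('neon128', 'f32') yields "return 4;"
# and len1('sve512', 'u8') yields "return 64;"; only plain 'sve' computes the
# length at runtime with svcntp.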
# -----------------------------------------------------------------------------
# Add/sub
def addsub(op, simd_ext, typ):
ret = f16f64(simd_ext, typ, op, op, 2)
if ret != '':
return ret
if simd_ext in neon:
return 'return v{op}q_{suf}({in0}, {in1});'. \
format(op=op, **fmtspec)
else:
return 'return sv{op}_{suf}_x({svtrue}, {in0}, {in1});'. \
format(op=op, **fmtspec)
# -----------------------------------------------------------------------------
# Multiplication
def mul2(simd_ext, typ):
ret = f16f64(simd_ext, typ, 'mul', 'mul', 2)
if ret != '':
return ret
elif simd_ext in neon and typ in ['i64', 'u64']:
return emulate_op2('*', simd_ext, typ)
else:
if simd_ext in neon:
return 'return vmulq_{suf}({in0}, {in1});'.format(**fmtspec)
else:
return 'return svmul_{suf}_x({svtrue}, {in0}, {in1});'. \
format(**fmtspec)
# -----------------------------------------------------------------------------
# Division
def div2(simd_ext, typ):
if simd_ext == 'aarch64' and typ in ['f32', 'f64']:
return 'return vdivq_{suf}({in0}, {in1});'.format(**fmtspec)
elif simd_ext in sve and \
typ in ['f16', 'f32', 'f64', 'i32', 'u32', 'i64', 'u64']:
return 'return svdiv_{suf}_x({svtrue}, {in0}, {in1});'. \
format(**fmtspec)
else:
ret = f16f64(simd_ext, typ, 'div', 'div', 2)
if ret != '':
return ret
return emulate_op2('/', simd_ext, typ)
# -----------------------------------------------------------------------------
# Binary operators: and, or, xor, andnot
def binop2(op, simd_ext, typ):
armop = {'orb': 'orr', 'xorb': 'eor', 'andb': 'and', 'andnotb': 'bic'}
if typ in common.iutypes:
if simd_ext in neon:
return 'return v{armop}q_{suf}({in0}, {in1});'. \
format(armop=armop[op], **fmtspec)
else:
return 'return sv{armop}_{suf}_x({svtrue}, {in0}, {in1});'. \
format(armop=armop[op], **fmtspec)
# From here only float types
if typ == 'f16':
intrinsics = \
'''return vreinterpretq_f16_u16(v{armop}q_u16(vreinterpretq_u16_f16(
{in0}), vreinterpretq_u16_f16({in1})));'''. \
format(armop=armop[op], **fmtspec)
else:
intrinsics = ''
ret = f16f64(simd_ext, typ, op, armop[op], 2, intrinsics)
if ret != '':
return ret
if simd_ext in neon:
return \
'''return vreinterpretq_f{typnbits}_u{typnbits}(v{armop}q_u{typnbits}(
vreinterpretq_u{typnbits}_f{typnbits}({in0}),
vreinterpretq_u{typnbits}_f{typnbits}({in1})));'''. \
format(armop=armop[op], **fmtspec)
else:
return \
'''return svreinterpret_f{typnbits}_u{typnbits}(
sv{armop}_u{typnbits}_x({svtrue},
svreinterpret_u{typnbits}_f{typnbits}({in0}),
svreinterpret_u{typnbits}_f{typnbits}({in1})));'''. \
format(armop=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# Binary not
def not1(simd_ext, typ):
if typ in common.iutypes:
if simd_ext in neon:
if typ in ['i8', 'u8', 'i16', 'u16', 'i32', 'u32']:
return 'return vmvnq_{suf}({in0});'.format(**fmtspec)
else:
return \
'''return vreinterpretq_{suf}_u32(vmvnq_u32(
vreinterpretq_u32_{suf}({in0})));'''. \
format(**fmtspec)
if simd_ext in sve:
return 'return svnot_{suf}_x({svtrue}, {in0});'.format(**fmtspec)
# From here only float types
if typ == 'f16':
intrinsics = \
'''return vreinterpretq_f16_u16(vmvnq_u16(vreinterpretq_u16_f16(
{in0})));'''.format(**fmtspec)
else:
intrinsics = ''
ret = f16f64(simd_ext, typ, 'notb', 'mvn', 1, intrinsics)
if ret != '':
return ret
if simd_ext in neon:
return \
'''return vreinterpretq_{suf}_u32(vmvnq_u32(
vreinterpretq_u32_{suf}({in0})));'''. \
format(**fmtspec)
else:
return \
'''return svreinterpret_{suf}_u{typnbits}(svnot_u{typnbits}_x(
{svtrue}, svreinterpret_u{typnbits}_{suf}({in0})));'''. \
format(**fmtspec)
# -----------------------------------------------------------------------------
# Logical operators: and, or, xor, andnot
def lop2(opts, op, simd_ext, typ):
armop = {'orl': 'orr', 'xorl': 'eor', 'andl': 'and', 'andnotl': 'bic'}
if simd_ext in neon:
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
return v{armop}q_u16({in0}, {in1});
#else
nsimd_{simd_ext}_vlf16 ret;
ret.v0 = v{armop}q_u32({in0}.v0, {in1}.v0);
ret.v1 = v{armop}q_u32({in0}.v1, {in1}.v1);
return ret;
#endif'''.format(armop=armop[op], **fmtspec)
elif simd_ext == 'neon128' and typ == 'f64':
if op == 'andnotl':
return '''nsimd_{simd_ext}_vlf64 ret;
ret.v0 = {in0}.v0 & (~{in1}.v0);
ret.v1 = {in0}.v1 & (~{in1}.v1);
return ret;'''.format(**fmtspec)
else:
cpuop = {'orl': '|', 'xorl': '^', 'andl': '&'}
return '''nsimd_{simd_ext}_vlf64 ret;
ret.v0 = {in0}.v0 {cpuop} {in1}.v0;
ret.v1 = {in0}.v1 {cpuop} {in1}.v1;
return ret;'''.format(cpuop=cpuop[op], **fmtspec)
else:
return 'return v{armop}q_u{typnbits}({in0}, {in1});'. \
format(armop=armop[op], **fmtspec)
else:
if opts.sve_emulate_bool:
# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve
# it needs to be deleted when the bug is corrected
return \
'''return sv{armop}_x({svtrue},
(svuint{typnbits}_t){in0},
(svuint{typnbits}_t){in1});'''. \
format(armop=armop[op], **fmtspec)
else:
return '''return sv{armop}_z({svtrue}, {in0}, {in1});'''. \
format(armop=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# Logical not
def lnot1(opts, simd_ext, typ):
if simd_ext in neon:
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
return vmvnq_u16({in0});
#else
nsimd_{simd_ext}_vlf16 ret;
ret.v0 = vmvnq_u32({in0}.v0);
ret.v1 = vmvnq_u32({in0}.v1);
return ret;
#endif'''.format(**fmtspec)
elif simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vlf64 ret;
ret.v0 = ~{in0}.v0;
ret.v1 = ~{in0}.v1;
return ret;'''.format(**fmtspec)
elif typ in ['i64', 'u64', 'f64']:
return '''return vreinterpretq_u{typnbits}_u32(vmvnq_u32(
vreinterpretq_u32_u{typnbits}({in0})));'''. \
format(**fmtspec)
else:
return 'return vmvnq_u{typnbits}({in0});'.format(**fmtspec)
elif simd_ext in sve:
if opts.sve_emulate_bool:
# TODO: the cast is a workaround to avoid a bug in gcc trunk for sve
# it needs to be deleted when the bug is corrected
return 'return svnot_x({svtrue}, (svuint{typnbits}_t){in0});'.format(**fmtspec)
else:
return 'return svnot_z({svtrue}, {in0});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# Square root
def sqrt1(simd_ext, typ):
if simd_ext == 'neon128':
if typ in 'f16':
return '''nsimd_neon128_vf16 ret;
ret.v0 = nsimd_sqrt_neon128_f32({in0}.v0);
ret.v1 = nsimd_sqrt_neon128_f32({in0}.v1);
return ret;'''.format(**fmtspec)
elif typ == 'f64':
return f16f64('neon128', 'f64', 'sqrt', 'sqrt', 1)
else:
return emulate_op1('sqrt', simd_ext, typ)
elif simd_ext == 'aarch64':
if typ == 'f16':
return f16f64('aarch64', 'f16', 'sqrt', 'sqrt', 1)
else:
return 'return vsqrtq_{suf}({in0});'.format(**fmtspec)
else:
return 'return svsqrt_{suf}_x({svtrue}, {in0});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# Shifts
def shl_shr(op, simd_ext, typ):
if simd_ext in neon:
sign = '-' if op == 'shr' else ''
if typ in common.utypes:
return '''return vshlq_{suf}({in0}, vdupq_n_s{typnbits}(
(i{typnbits})({sign}{in1})));'''. \
format(sign=sign, **fmtspec)
else:
return \
'''return vreinterpretq_s{typnbits}_u{typnbits}(vshlq_u{typnbits}(
vreinterpretq_u{typnbits}_s{typnbits}({in0}),
vdupq_n_s{typnbits}((i{typnbits})({sign}{in1}))));'''. \
format(sign=sign, **fmtspec)
else:
armop = 'lsl' if op == 'shl' else 'lsr'
if op == 'shr' and typ in common.itypes:
return \
'''return svreinterpret_{suf}_{suf2}(sv{armop}_{suf2}_x({svtrue},
svreinterpret_{suf2}_{suf}({in0}),
svdup_n_u{typnbits}((u{typnbits}){in1})));'''. \
format(suf2=common.bitfield_type[typ], armop=armop,
**fmtspec)
else:
return '''return sv{armop}_{suf}_x({svtrue}, {in0},
svdup_n_u{typnbits}((u{typnbits}){in1}));'''. \
format(armop=armop, **fmtspec)
def shra(simd_ext, typ):
if typ in common.utypes:
return '''return nsimd_shr_{simd_ext}_{typ}({in0}, {in1});'''. \
format(**fmtspec)
if simd_ext in neon:
return '''return vshlq_{suf}(
{in0}, vdupq_n_s{typnbits}((i{typnbits})-{in1}));'''.\
format(**fmtspec)
elif simd_ext in sve:
if typ[0] == 'i':
return '''return svasr_n_{suf}_x({svtrue}, {in0},
(u{typnbits}){in1});'''.\
format(**fmtspec)
elif typ[0] == 'u':
return 'return svlsl_n_{suf}_x({svtrue}, {in0}, (u64){in1});'.\
format(**fmtspec)
# -----------------------------------------------------------------------------
# Set1
def set1(simd_ext, typ):
if simd_ext in neon:
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vdupq_n_f16({in0});
#else
nsimd_{simd_ext}_vf16 ret;
f32 f = nsimd_f16_to_f32({in0});
ret.v0 = nsimd_set1_{simd_ext}_f32(f);
ret.v1 = nsimd_set1_{simd_ext}_f32(f);
return ret;
#endif'''.format(**fmtspec)
elif simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vf64 ret;
ret.v0 = {in0};
ret.v1 = {in0};
return ret;'''.format(**fmtspec)
else:
return 'return vdupq_n_{suf}({in0});'.format(**fmtspec)
else:
return 'return svdup_n_{suf}({in0});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# Set1l
def lset1(simd_ext, typ):
if simd_ext in sve:
return '''if ({in0}) {{
return svptrue_b{typnbits}();
}} else {{
return svpfalse_b();
}}'''.format(**fmtspec)
    # getting here means no SVE: only NEON128 and AARCH64 remain
mask = 'vdupq_n_u{typnbits}((u{typnbits}){{}})'.format(**fmtspec)
normal = '''if ({in0}) {{
return {ones};
}} else {{
return {zeros};
}}'''.format(ones=mask.format('-1'), zeros=mask.format('0'),
**fmtspec)
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{normal}
#else
nsimd_{simd_ext}_vlf16 ret;
ret.v0 = nsimd_set1l_{simd_ext}_f32({in0});
ret.v1 = ret.v0;
return ret;
#endif'''.format(normal=normal, **fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''nsimd_neon128_vlf64 ret;
ret.v0 = (u64)({in0} ? -1 : 0);
ret.v1 = ret.v0;
return ret;'''.format(**fmtspec)
return normal
# -----------------------------------------------------------------------------
# Comparison operators: ==, <, <=, >, >=
def cmp2(opts, op, simd_ext, typ):
binop = {'eq': '==', 'lt': '<', 'le': '<=', 'gt': '>', 'ge': '>='}
armop = {'eq': 'eq', 'lt': 'lt', 'le': 'le', 'gt': 'gt', 'ge': 'ge'}
if simd_ext in neon:
emul_f16 = '''nsimd_{simd_ext}_vlf16 ret;
ret.v0 = nsimd_{op}_{simd_ext}_f32({in0}.v0, {in1}.v0);
ret.v1 = nsimd_{op}_{simd_ext}_f32({in0}.v1, {in1}.v1);
return ret;'''.format(op=op, **fmtspec)
normal = 'return vc{armop}q_{suf}({in0}, {in1});'. \
format(armop=armop[op], **fmtspec)
if typ == 'f16':
if simd_ext == 'neon128':
return emul_f16
else:
return \
'''#ifdef NSIMD_ARM_FP16
{}
#else
{}
#endif'''.format(normal, emul_f16)
if simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_{simd_ext}_vl{typ} ret;
ret.v0 = {in0}.v0 {op} {in1}.v0 ? (u64)-1 : 0;
ret.v1 = {in0}.v1 {op} {in1}.v1 ? (u64)-1 : 0;
return ret;'''.format(op=binop[op], **fmtspec)
elif simd_ext == 'neon128' and typ in ['i64', 'u64']:
return '''{typ} buf0[2], buf1[2];
u64 ret[2];
vst1q_{suf}(buf0, {in0});
vst1q_{suf}(buf1, {in1});
ret[0] = buf0[0] {op} buf1[0] ? (u64)-1 : 0;
ret[1] = buf0[1] {op} buf1[1] ? (u64)-1 : 0;
return vld1q_u64(ret);'''. \
format(op=binop[op], **fmtspec)
else:
return normal
elif simd_ext in sve:
if opts.sve_emulate_bool:
# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve
# it needs to be deleted when the bug is corrected
comp = 'svcmp{op}_{suf}({svtrue}, ({svetyp}){in0}, ({svetyp}){in1})'. \
format(op=armop[op], **fmtspec)
return 'return {};'.format(convert_from_predicate(opts, comp))
else:
return 'return svcmp{op}_{suf}({svtrue}, {in0}, {in1});'. \
format(op=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# Not equal
def neq2(opts, simd_ext, typ):
if simd_ext in neon:
return '''return nsimd_notl_{simd_ext}_{typ}(
nsimd_eq_{simd_ext}_{typ}({in0}, {in1}));'''. \
format(**fmtspec)
elif simd_ext in sve:
comp='svcmpne_{suf}({svtrue}, {in0}, {in1})'. \
format(**fmtspec)
return 'return {};'.format(convert_from_predicate(opts, comp))
# -----------------------------------------------------------------------------
# If_else
def if_else3(opts, simd_ext, typ):
if simd_ext in neon:
intrinsic = 'return vbslq_{suf}({in0}, {in1}, {in2});'. \
format(**fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{intrinsic}
#else
nsimd_{simd_ext}_vf16 ret;
ret.v0 = nsimd_if_else1_{simd_ext}_f32(
{in0}.v0, {in1}.v0, {in2}.v0);
ret.v1 = nsimd_if_else1_{simd_ext}_f32(
{in0}.v1, {in1}.v1, {in2}.v1);
return ret;
#endif'''.format(intrinsic=intrinsic, **fmtspec)
elif simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vf64 ret;
ret.v0 = {in0}.v0 != 0u ? {in1}.v0 : {in2}.v0;
ret.v1 = {in0}.v1 != 0u ? {in1}.v1 : {in2}.v1;
return ret;'''.format(**fmtspec)
else:
return intrinsic
elif simd_ext in sve:
if opts.sve_emulate_bool:
# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve
# it needs to be deleted when the bug is corrected
return 'return svsel_{suf}({cond}, ({svetyp}){in1}, ({svetyp}){in2});' \
.format(cond=convert_to_predicate(opts,
'{in0}'.format(**fmtspec)),
**fmtspec)
else:
return 'return svsel_{suf}({in0}, {in1}, {in2});' \
.format(**fmtspec)
# -----------------------------------------------------------------------------
# Minimum and maximum
def minmax2(op, simd_ext, typ):
ret = f16f64(simd_ext, typ, op, op, 2)
if ret != '':
return ret
if simd_ext in neon:
if typ in ['i64', 'u64']:
binop = '<' if op == 'min' else '>'
return '''{typ} buf0[2], buf1[2];
vst1q_{suf}(buf0, {in0});
vst1q_{suf}(buf1, {in1});
buf0[0] = buf0[0] {binop} buf1[0] ? buf0[0] : buf1[0];
buf0[1] = buf0[1] {binop} buf1[1] ? buf0[1] : buf1[1];
return vld1q_{suf}(buf0);'''. \
format(binop=binop, **fmtspec)
else:
return 'return v{op}q_{suf}({in0}, {in1});'. \
format(op=op, **fmtspec)
else:
return 'return sv{op}_{suf}_x({svtrue}, {in0}, {in1});'. \
format(op=op, **fmtspec)
# -----------------------------------------------------------------------------
# Abs
def abs1(simd_ext, typ):
if typ in common.utypes:
return 'return {in0};'.format(**fmtspec)
elif simd_ext in neon:
if typ == 'f16':
return f16f64(simd_ext, 'f16', 'abs', 'abs', 1)
elif (typ in ['i8', 'i16', 'i32', 'f32']) or \
(simd_ext == 'aarch64' and typ in ['i64', 'f64']):
return 'return vabsq_{suf}({in0});'.format(**fmtspec)
elif typ == 'i64':
return emulate_op1('abs', 'neon128', 'i64')
else:
return f16f64(simd_ext, 'f64', 'abs', 'abs', 1)
else:
return 'return svabs_{suf}_x({svtrue}, {in0});'. \
format(**fmtspec)
# -----------------------------------------------------------------------------
# Round, trunc, ceil and round_to_even
def round1(op, simd_ext, typ):
if typ in common.iutypes:
return 'return {in0};'.format(**fmtspec)
armop = {'floor': 'rndm', 'ceil': 'rndp', 'trunc': 'rnd',
'round_to_even': 'rndn'}
if simd_ext == 'neon128':
ret = f16f64('neon128', typ, op, 'v{armop}q_{suf}'. \
format(armop=armop, **fmtspec), 1)
if ret != '':
return ret
return emulate_op1(op, 'neon128', typ);
elif simd_ext == 'aarch64':
if typ == 'f16':
return f16f64('aarch64', 'f16', op, armop[op], 1)
else:
return 'return v{armop}q_{suf}({in0});'. \
format(armop=armop[op], **fmtspec)
else:
armop = {'floor': 'rintm', 'ceil': 'rintp', 'trunc': 'rintz',
'round_to_even': 'rintn'}
return 'return sv{armop}_{suf}_x({svtrue}, {in0});'. \
format(armop=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# FMA and FNMA
def fmafnma3(op, simd_ext, typ):
if typ in common.ftypes and simd_ext == 'aarch64':
armop = {'fma': 'fma', 'fnma': 'fms'}
else:
armop = {'fma': 'mla', 'fnma': 'mls'}
if simd_ext in neon:
normal = 'return v{armop}q_{suf}({in2}, {in1}, {in0});'. \
format(armop=armop[op], **fmtspec)
emul = emulate_op3_neon(op, simd_ext, typ)
if typ == 'f16':
using_f32 = \
'''nsimd_{simd_ext}_vf16 ret;
ret.v0 = nsimd_{op}_{simd_ext}_f32({in0}.v0, {in1}.v0, {in2}.v0);
ret.v1 = nsimd_{op}_{simd_ext}_f32({in0}.v1, {in1}.v1, {in2}.v1);
return ret;'''.format(op=op, **fmtspec)
if simd_ext == 'aarch64':
return \
'''#ifdef NSIMD_ARM_FP16
{}
#else
{}
#endif'''.format(emul, using_f32)
else:
return using_f32
elif simd_ext == 'neon128' and typ == 'f64':
return emulate_f64_neon('neon128', op, ['v'] * 4)
elif simd_ext == 'aarch64' and typ == 'f64':
return normal
elif typ in ['i64', 'u64']:
return emul
else:
return normal
else:
return 'return sv{armop}_{suf}_x({svtrue}, {in2}, {in1}, {in0});'. \
format(armop=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# FMS and FNMS
def fmsfnms3(op, simd_ext, typ):
if typ in common.iutypes:
return \
'''return nsimd_neg_{simd_ext}_{typ}(nsimd_{op2}_{simd_ext}_{typ}(
{in0}, {in1}, {in2}));'''. \
format(op2='fma' if op == 'fnms' else 'fnma', **fmtspec)
if simd_ext in neon:
return \
'''return nsimd_{op2}_{simd_ext}_{typ}({in0}, {in1},
nsimd_neg_{simd_ext}_{typ}({in2}));'''. \
format(op2='fma' if op == 'fms' else 'fnma', **fmtspec)
else:
armop = {'fnms': 'nmla', 'fms': 'nmls'}
return 'return sv{armop}_{suf}_x({svtrue}, {in2}, {in1}, {in0});'. \
format(armop=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# Neg
def neg1(simd_ext, typ):
if simd_ext in neon:
normal = 'return vnegq_{suf}({in0});'.format(**fmtspec)
if typ == 'f16':
return f16f64(simd_ext, 'f16', 'neg', 'neg', 1)
elif typ in ['i8', 'i16', 'i32', 'f32']:
return normal
elif typ in ['u8', 'u16', 'u32']:
return \
'''return vreinterpretq_{suf}_s{typnbits}(
vnegq_s{typnbits}(
vreinterpretq_s{typnbits}_{suf}({in0})));'''. \
format(**fmtspec)
elif simd_ext == 'neon128' and typ in ['i64', 'u64']:
return emulate_op1('neg', simd_ext, typ)
elif simd_ext == 'neon128' and typ == 'f64':
return \
'''nsimd_neon128_vf64 ret;
ret.v0 = -{in0}.v0;
ret.v1 = -{in0}.v1;
return ret;'''.format(**fmtspec)
elif simd_ext == 'aarch64' and typ in ['f64', 'i64']:
return normal
elif simd_ext == 'aarch64' and typ == 'u64':
return \
'''return vreinterpretq_u64_s64(vnegq_s64(
vreinterpretq_s64_u64({in0})));'''. \
format(**fmtspec)
else:
if typ in common.utypes:
return \
'''return svreinterpret_{suf}_s{typnbits}(
svneg_s{typnbits}_x({svtrue},
svreinterpret_s{typnbits}_{suf}({in0})));'''. \
format(**fmtspec)
else:
return 'return svneg_{suf}_x({svtrue}, {in0});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# Reciprocals
def recs1(op, simd_ext, typ):
cte = '({typ})1'.format(**fmtspec) if typ != 'f16' \
else 'nsimd_f32_to_f16(1.0f)'
if op in ['rec', 'rec11']:
return \
'''return nsimd_div_{simd_ext}_{typ}(
nsimd_set1_{simd_ext}_{typ}({cte}), {in0});'''. \
format(cte=cte, **fmtspec)
elif op == 'rsqrt11':
return \
'''return nsimd_div_{simd_ext}_{typ}(
nsimd_set1_{simd_ext}_{typ}({cte}),
nsimd_sqrt_{simd_ext}_{typ}({in0}));'''. \
format(cte=cte, **fmtspec)
elif op in ['rec8', 'rsqrt8']:
armop = 'recpe' if op == 'rec8' else 'rsqrte'
if simd_ext in sve:
return 'return sv{armop}_{suf}({in0});'. \
format(armop=armop, **fmtspec)
else:
ret = f16f64(simd_ext, typ, op, armop, 1)
if ret != '':
return ret
return 'return v{armop}q_{suf}({in0});'. \
format(armop=armop, **fmtspec)
# Rec11 and rsqrt11
# According to http://infocenter.arm.com/help/topic/com.arm.doc.faqs/ka14282.html
# reciprocal estimates only work when the input lies in some small interval,
# so we comment these out for now and return full-precision reciprocals.
# def rec11rsqrt11(op, simd_ext, typ):
# armop = {'rec11': 'recpe', 'rsqrt11': 'rsqrte'}
# if simd_ext in neon:
# ret = f16f64(simd_ext, typ, op, armop[op], 1)
# if ret != '':
# return ret
# return 'return v{armop}q_{suf}({in0});'. \
# format(armop=armop[op], **fmtspec)
# else:
# return 'return sv{armop}_{suf}({in0});'. \
# format(armop=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# Load of logicals
def loadl(aligned, simd_ext, typ):
return \
'''/* This can surely be improved but it is not our priority. */
return nsimd_notl_{simd_ext}_{typ}(nsimd_eq_{simd_ext}_{typ}(
nsimd_load{align}_{simd_ext}_{typ}(
{in0}), nsimd_set1_{simd_ext}_{typ}({zero})));'''. \
format(align='a' if aligned else 'u',
zero = 'nsimd_f32_to_f16(0.0f)' if typ == 'f16'
else '({})0'.format(typ), **fmtspec)
# -----------------------------------------------------------------------------
# Store of logicals
def storel(aligned, simd_ext, typ):
return \
'''/* This can surely be improved but it is not our priority. */
nsimd_store{align}_{simd_ext}_{typ}({in0},
nsimd_if_else1_{simd_ext}_{typ}({in1},
nsimd_set1_{simd_ext}_{typ}({one}),
nsimd_set1_{simd_ext}_{typ}({zero})));'''. \
format(align = 'a' if aligned else 'u',
one = 'nsimd_f32_to_f16(1.0f)' if typ == 'f16'
else '({})1'.format(typ),
zero = 'nsimd_f32_to_f16(0.0f)' if typ == 'f16'
else '({})0'.format(typ), **fmtspec)
# -----------------------------------------------------------------------------
# All and any
def allany1(opts, op, simd_ext, typ):
binop = '&&' if op == 'all' else '||'
if simd_ext == 'neon128':
if typ == 'f16':
return \
'''return nsimd_{op}_neon128_f32({in0}.v0) {binop}
nsimd_{op}_neon128_f32({in0}.v1);'''. \
format(op=op, binop=binop, **fmtspec)
elif typ == 'f64':
return 'return {in0}.v0 {binop} {in0}.v1;'. \
format(binop=binop, **fmtspec)
else:
return 'return ' + \
binop.join(['vgetq_lane_u{typnbits}({in0}, {i})'. \
format(i=i, **fmtspec) \
for i in range(0, 128 // int(fmtspec['typnbits']))]) + \
';'
elif simd_ext == 'aarch64':
armop = {'all': 'min', 'any': 'max'}
normal = 'return v{armop}vq_u{typnbits}({in0}) != 0;'. \
format(armop=armop[op], **fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
return nsimd_{op}_aarch64_f32({in0}.v0) {binop}
nsimd_{op}_aarch64_f32({in0}.v1);
#endif'''.format(normal=normal, op=op, binop=binop, **fmtspec)
elif typ in ['i64', 'u64', 'f64']:
return \
'return v{armop}vq_u32(vreinterpretq_u32_u64({in0})) != 0;'. \
format(armop=armop[op], **fmtspec)
else:
return normal
elif simd_ext in sve:
if op == 'any':
operand= convert_to_predicate(opts, '{in0}'.format(**fmtspec))
return '''return svptest_any({svtrue}, {operand});'''. \
format(operand=operand, **fmtspec)
else:
operand='svnot_z({svtrue}, {op})'. \
format(op=convert_to_predicate(opts, '{in0}'.format(**fmtspec)),
**fmtspec)
return '''return !svptest_any({svtrue}, {operand});'''. \
format(operand=operand, **fmtspec)
# -----------------------------------------------------------------------------
# nbtrue
def nbtrue1(opts, simd_ext, typ):
if simd_ext == 'neon128':
if typ == 'f16':
return \
'''return nsimd_nbtrue_neon128_f32({in0}.v0) +
nsimd_nbtrue_neon128_f32({in0}.v1);'''. \
format(**fmtspec)
elif typ == 'f64':
return 'return -(int)((i64){in0}.v0 + (i64){in0}.v1);'. \
format(**fmtspec)
else:
return \
'''nsimd_neon128_vi{typnbits} temp =
vreinterpretq_s{typnbits}_u{typnbits}({in0});
return -(int)('''.format(**fmtspec) + \
'+'.join(['vgetq_lane_s{typnbits}(temp, {i})'. \
format(i=i, **fmtspec) \
for i in range(0, 128 // int(fmtspec['typnbits']))]) + \
');'
elif simd_ext == 'aarch64':
normal = \
'''return -(int)vaddvq_s{typnbits}(
vreinterpretq_s{typnbits}_u{typnbits}({in0}));'''. \
format(**fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
return nsimd_nbtrue_aarch64_f32({in0}.v0) +
nsimd_nbtrue_aarch64_f32({in0}.v1);
#endif'''.format(normal=normal, **fmtspec)
elif typ in ['i64', 'u64', 'f64']:
return \
'''return -(vaddvq_s32(vreinterpretq_s32_u64({in0})) >> 1);'''. \
format(**fmtspec)
else:
return normal
elif simd_ext in sve:
return 'return (int)svcntp_b{typnbits}({svtrue}, {op});'. \
format(op=convert_to_predicate(opts, '{in0}'.format(**fmtspec)),
**fmtspec)
# -----------------------------------------------------------------------------
# Reinterpret logical
def reinterpretl1(simd_ext, from_typ, to_typ):
if from_typ == to_typ or simd_ext in sve:
return 'return {in0};'.format(**fmtspec)
to_f16_with_f32 = \
'''nsimd_{simd_ext}_vlf16 ret;
u32 buf[4];
buf[0] = (vgetq_lane_u16({in0}, 0) ? (u32)-1 : 0);
buf[1] = (vgetq_lane_u16({in0}, 1) ? (u32)-1 : 0);
buf[2] = (vgetq_lane_u16({in0}, 2) ? (u32)-1 : 0);
buf[3] = (vgetq_lane_u16({in0}, 3) ? (u32)-1 : 0);
ret.v0 = vld1q_u32(buf);
buf[0] = (vgetq_lane_u16({in0}, 4) ? (u32)-1 : 0);
buf[1] = (vgetq_lane_u16({in0}, 5) ? (u32)-1 : 0);
buf[2] = (vgetq_lane_u16({in0}, 6) ? (u32)-1 : 0);
buf[3] = (vgetq_lane_u16({in0}, 7) ? (u32)-1 : 0);
ret.v1 = vld1q_u32(buf);
return ret;'''.format(**fmtspec)
from_f16_with_f32 = \
'''u16 buf[8];
buf[0] = (vgetq_lane_u32({in0}.v0, 0) ? (u16)-1 : 0);
buf[1] = (vgetq_lane_u32({in0}.v0, 1) ? (u16)-1 : 0);
buf[2] = (vgetq_lane_u32({in0}.v0, 2) ? (u16)-1 : 0);
buf[3] = (vgetq_lane_u32({in0}.v0, 3) ? (u16)-1 : 0);
buf[4] = (vgetq_lane_u32({in0}.v1, 0) ? (u16)-1 : 0);
buf[5] = (vgetq_lane_u32({in0}.v1, 1) ? (u16)-1 : 0);
buf[6] = (vgetq_lane_u32({in0}.v1, 2) ? (u16)-1 : 0);
buf[7] = (vgetq_lane_u32({in0}.v1, 3) ? (u16)-1 : 0);
return vld1q_u16(buf);'''.format(**fmtspec)
if simd_ext == 'neon128':
if to_typ == 'f16':
return to_f16_with_f32
elif from_typ == 'f16':
return from_f16_with_f32
elif to_typ == 'f64':
return '''nsimd_neon128_vlf64 ret;
ret.v0 = vgetq_lane_u64({in0}, 0);
ret.v1 = vgetq_lane_u64({in0}, 1);
return ret;'''.format(**fmtspec)
elif from_typ == 'f64':
return '''u64 buf[2];
buf[0] = {in0}.v0;
buf[1] = {in0}.v1;
return vld1q_u64(buf);'''.format(**fmtspec)
else:
return 'return {in0};'.format(**fmtspec)
elif simd_ext == 'aarch64':
if to_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return {in0};
#else
{using_f32}
#endif'''.format(using_f32=to_f16_with_f32, **fmtspec)
elif from_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return {in0};
#else
{using_f32}
#endif'''.format(using_f32=from_f16_with_f32, **fmtspec)
else:
return 'return {in0};'.format(**fmtspec)
# -----------------------------------------------------------------------------
# Convert
def convert1(simd_ext, from_typ, to_typ):
fmtspec2 = fmtspec.copy()
fmtspec2['to_suf'] = suf(to_typ)
fmtspec2['from_suf'] = suf(from_typ)
if from_typ == to_typ:
return 'return {in0};'.format(**fmtspec)
if from_typ in common.iutypes and to_typ in common.iutypes:
if simd_ext in neon:
return 'return vreinterpretq_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
else:
return 'return svreinterpret_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
if simd_ext in sve:
return 'return svcvt_{to_suf}_{from_suf}_x({svtrue}, {in0});'. \
format(**fmtspec2)
to_f16_with_f32 = \
'''nsimd_{simd_ext}_vf16 ret;
f32 buf[4];
buf[0] = (f32)vgetq_lane_{from_suf}({in0}, 0);
buf[1] = (f32)vgetq_lane_{from_suf}({in0}, 1);
buf[2] = (f32)vgetq_lane_{from_suf}({in0}, 2);
buf[3] = (f32)vgetq_lane_{from_suf}({in0}, 3);
ret.v0 = vld1q_f32(buf);
buf[0] = (f32)vgetq_lane_{from_suf}({in0}, 4);
buf[1] = (f32)vgetq_lane_{from_suf}({in0}, 5);
buf[2] = (f32)vgetq_lane_{from_suf}({in0}, 6);
buf[3] = (f32)vgetq_lane_{from_suf}({in0}, 7);
ret.v1 = vld1q_f32(buf);
return ret;'''.format(**fmtspec2)
from_f16_with_f32 = \
'''{to_typ} buf[8];
buf[0] = ({to_typ})vgetq_lane_f32({in0}.v0, 0);
buf[1] = ({to_typ})vgetq_lane_f32({in0}.v0, 1);
buf[2] = ({to_typ})vgetq_lane_f32({in0}.v0, 2);
buf[3] = ({to_typ})vgetq_lane_f32({in0}.v0, 3);
buf[4] = ({to_typ})vgetq_lane_f32({in0}.v1, 0);
buf[5] = ({to_typ})vgetq_lane_f32({in0}.v1, 1);
buf[6] = ({to_typ})vgetq_lane_f32({in0}.v1, 2);
buf[7] = ({to_typ})vgetq_lane_f32({in0}.v1, 3);
return vld1q_{to_suf}(buf);'''.format(**fmtspec2)
if simd_ext == 'neon128':
if to_typ == 'f16':
return to_f16_with_f32
elif from_typ == 'f16':
return from_f16_with_f32
elif to_typ == 'f64':
return '''nsimd_neon128_vf64 ret;
ret.v0 = (f64)vgetq_lane_{from_suf}({in0}, 0);
ret.v1 = (f64)vgetq_lane_{from_suf}({in0}, 1);
return ret;'''.format(**fmtspec2)
elif from_typ == 'f64':
return '''{to_typ} buf[2];
buf[0] = ({to_typ}){in0}.v0;
buf[1] = ({to_typ}){in0}.v1;
return vld1q_{to_suf}(buf);'''.format(**fmtspec2)
else:
return 'return vcvtq_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
elif simd_ext == 'aarch64':
if to_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vcvtq_{to_suf}_{from_suf}({in0});
#else
{using_f32}
#endif'''.format(using_f32=to_f16_with_f32, **fmtspec2)
elif from_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vcvtq_{to_suf}_{from_suf}({in0});
#else
{using_f32}
#endif'''.format(using_f32=from_f16_with_f32, **fmtspec2)
else:
return 'return vcvtq_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
# -----------------------------------------------------------------------------
# Reinterpret
def reinterpret1(simd_ext, from_typ, to_typ):
fmtspec2 = fmtspec.copy()
fmtspec2['to_suf'] = suf(to_typ)
fmtspec2['from_suf'] = suf(from_typ)
if from_typ == to_typ:
return 'return {in0};'.format(**fmtspec)
if simd_ext in sve:
return 'return svreinterpret_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
to_f16_with_f32 = \
'''nsimd_{simd_ext}_vf16 ret;
f32 buf[4];
buf[0] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 0));
buf[1] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 1));
buf[2] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 2));
buf[3] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 3));
ret.v0 = vld1q_f32(buf);
buf[0] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 4));
buf[1] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 5));
buf[2] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 6));
buf[3] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 7));
ret.v1 = vld1q_f32(buf);
return ret;'''.format(**fmtspec2)
from_f16_with_f32 = \
'''{to_typ} buf[8];
buf[0] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 0));
buf[1] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 1));
buf[2] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 2));
buf[3] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 3));
buf[4] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 0));
buf[5] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 1));
buf[6] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 2));
buf[7] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 3));
return vld1q_{to_suf}(buf);'''.format(**fmtspec2)
if simd_ext == 'neon128':
if to_typ == 'f16':
return to_f16_with_f32
elif from_typ == 'f16':
return from_f16_with_f32
elif to_typ == 'f64':
return '''nsimd_neon128_vf64 ret;
union {{ f64 to; {from_typ} from; }} buf;
buf.from = vgetq_lane_{from_suf}({in0}, 0);
ret.v0 = buf.to;
buf.from = vgetq_lane_{from_suf}({in0}, 1);
ret.v1 = buf.to;
return ret;'''.format(**fmtspec2)
elif from_typ == 'f64':
return '''union {{ f64 from; {to_typ} to; }} buf_;
{to_typ} buf[2];
buf_.from = {in0}.v0;
buf[0] = buf_.to;
buf_.from = {in0}.v1;
buf[1] = buf_.to;
return vld1q_{to_suf}(buf);'''.format(**fmtspec2)
else:
return 'return vreinterpretq_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
elif simd_ext == 'aarch64':
if to_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vreinterpretq_{to_suf}_{from_suf}({in0});
#else
{using_f32}
#endif'''.format(using_f32=to_f16_with_f32, **fmtspec2)
elif from_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vreinterpretq_{to_suf}_{from_suf}({in0});
#else
{using_f32}
#endif'''.format(using_f32=from_f16_with_f32, **fmtspec2)
else:
return 'return vreinterpretq_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
# -----------------------------------------------------------------------------
# reverse
def reverse1(simd_ext, typ):
armtyp = suf(typ)
if simd_ext in sve:
return '''return svrev_{suf}( {in0} );'''.format(**fmtspec)
elif simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vf64 ret;
ret.v0 = {in0}.v1;
ret.v1 = {in0}.v0;
return ret;'''.format(**fmtspec)
elif typ in [ 'i64', 'u64', 'f64' ]:
return '''return vcombine_{armtyp}(vget_high_{armtyp}({in0}),
vget_low_{armtyp}({in0}));'''. \
format(armtyp=armtyp, **fmtspec)
elif typ == 'f16':
return '''nsimd_{simd_ext}_vf16 ret;
                   ret.v0 = nsimd_reverse_{simd_ext}_f32({in0}.v1);
                   ret.v1 = nsimd_reverse_{simd_ext}_f32({in0}.v0);
return ret;'''.format(**fmtspec)
else:
return '''{in0} = vrev64q_{armtyp}({in0});
return vcombine_{armtyp}(vget_high_{armtyp}({in0}),
vget_low_{armtyp}({in0}));'''. \
format(armtyp=armtyp, **fmtspec)
# -----------------------------------------------------------------------------
# Horizontal sum
def addv(simd_ext, typ):
if simd_ext == 'neon128':
if typ == 'f64':
return 'return ({typ})({in0}.v0 + {in0}.v1);'.format(**fmtspec)
elif typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{t} tmp = vadd_{suf}(vget_low_{suf}({in0}),
vget_high_{suf}({in0}));
                   tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 2));
                   tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 1));
return vget_lane_{suf}(tmp, 0);
#else
float32x2_t tmp0 = vadd_f32(vget_low_f32({in0}.v0),
vget_high_f32({in0}.v0));
tmp0 = vadd_f32(tmp0, vext_f32(tmp0, tmp0, 1));
float32x2_t tmp1 = vadd_f32(vget_low_f32({in0}.v1),
vget_high_f32({in0}.v1));
tmp1 = vadd_f32(tmp1, vext_f32(tmp1, tmp1, 1));
return nsimd_f32_to_f16(vget_lane_f32(tmp0, 0) +
vget_lane_f32(tmp1, 0));
#endif''' .format(t=half_neon64_typ(typ), **fmtspec)
elif typ == 'f32':
return \
'''{t} tmp = vadd_{suf}(vget_low_{suf}({in0}),
vget_high_{suf}({in0}));
tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 1));
return vget_lane_{suf}(tmp, 0);'''. \
format(t=half_neon64_typ(typ), **fmtspec)
elif typ[0] in ['i', 'u']:
le = 128 // int(typ[1:]);
return \
'''{typ} res = ({typ})0;
{typ} buf[{le}];
vst1q_{suf}(buf, {in0});
for (int i = 0; i < {le}; i++) {{
res += buf[i];
}}
return res;'''. \
format(le=le, **fmtspec)
elif simd_ext == 'aarch64':
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{t} tmp = vadd_{suf}(vget_low_{suf}({in0}),
vget_high_{suf}({in0}));
                   tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 2));
                   tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 1));
return vget_lane_{suf}(tmp, 0);
#else
float32x2_t tmp0 = vadd_f32(vget_low_f32({in0}.v0),
vget_high_f32({in0}.v0));
tmp0 = vadd_f32(tmp0, vext_f32(tmp0, tmp0, 1));
float32x2_t tmp1 = vadd_f32(vget_low_f32({in0}.v1),
vget_high_f32({in0}.v1));
tmp1 = vadd_f32(tmp1, vext_f32(tmp1, tmp1, 1));
return nsimd_f32_to_f16(vget_lane_f32(tmp0, 0) +
vget_lane_f32(tmp1, 0));
#endif''' .format(t=half_neon64_typ(typ), **fmtspec)
elif typ in ['f32', 'f64']:
return 'return vaddvq_{suf}({in0});'.format(**fmtspec)
elif simd_ext in sve:
return 'return svaddv_{suf}({svtrue}, {in0});' .format(**fmtspec)
# -----------------------------------------------------------------------------
# Up convert
def upcvt1(simd_ext, from_typ, to_typ):
    # For integer upcasts the intrinsic suffix follows the *source* signedness
    # (2's complement makes the destination signedness irrelevant):
    #   _s : signed   -> bigger signed or bigger unsigned
    #   _u : unsigned -> bigger signed or bigger unsigned
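    # For illustration only (a sketch of the emitted C, assuming the usual a0 argument
    # naming coming from common.in0), the same-signedness NEON path below expands
    # i8 -> i16 to roughly:
    #   nsimd_neon128_vi16x2 ret;
    #   ret.v0 = vmovl_s8(vget_low_s8(a0));
    #   ret.v1 = vmovl_s8(vget_high_s8(a0));
    #   return ret;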
if simd_ext in neon:
if from_typ == 'f16' and to_typ == 'f32':
return \
'''#ifdef NSIMD_ARM_FP16
nsimd_{simd_ext}_vf32x2 ret;
ret.v0 = vcvt_f32_f16(vget_low_{suf}({in0}));
ret.v1 = vcvt_f32_f16(vget_high_{suf}({in0}));
return ret;
#else
nsimd_{simd_ext}_vf32x2 ret;
ret.v0 = {in0}.v0;
ret.v1 = {in0}.v1;
return ret;
#endif'''.format(**fmtspec)
elif from_typ == 'f32' and to_typ == 'f64':
if simd_ext == 'neon128':
return \
'''nsimd_neon128_vf64x2 ret;
f32 buf[4];
vst1q_f32(buf, {in0});
ret.v0.v0 = (f64)buf[0];
ret.v0.v1 = (f64)buf[1];
ret.v1.v0 = (f64)buf[2];
ret.v1.v1 = (f64)buf[3];
return ret;'''.format(**fmtspec)
else:
return \
'''nsimd_aarch64_vf64x2 ret;
ret.v0 = vcvt_f64_f32(vget_low_{suf}({in0}));
ret.v1 = vcvt_f64_f32(vget_high_{suf}({in0}));
return ret;'''.format(**fmtspec)
elif (from_typ in common.itypes and to_typ in common.itypes) or \
(from_typ in common.utypes and to_typ in common.utypes):
return '''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = vmovl_{suf}(vget_low_{suf}({in0}));
ret.v1 = vmovl_{suf}(vget_high_{suf}({in0}));
return ret;'''.format(**fmtspec)
elif (from_typ in common.itypes and to_typ in common.utypes) or \
(from_typ in common.utypes and to_typ in common.itypes):
return '''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = vreinterpretq_{suf_to_typ}_{suf_int_typ}(
vmovl_{suf}(vget_low_{suf}({in0})));
ret.v1 = vreinterpretq_{suf_to_typ}_{suf_int_typ}(
vmovl_{suf}(vget_high_{suf}({in0})));
return ret;'''. \
format(suf_to_typ=suf(to_typ),
suf_int_typ=suf(from_typ[0] + to_typ[1:]),
**fmtspec)
else:
return \
'''nsimd_{simd_ext}_v{to_typ}x2 ret;
nsimd_{simd_ext}_v{int_typ}x2 tmp;
tmp = nsimd_upcvt_{simd_ext}_{int_typ}_{from_typ}({in0});
ret.v0 = nsimd_cvt_{simd_ext}_{to_typ}_{int_typ}(tmp.v0);
ret.v1 = nsimd_cvt_{simd_ext}_{to_typ}_{int_typ}(tmp.v1);
return ret;'''. \
format(int_typ=from_typ[0] + to_typ[1:], **fmtspec)
# Getting here means that we deal with SVE
if (from_typ in common.itypes and to_typ in common.itypes) or \
(from_typ in common.utypes and to_typ in common.utypes):
return '''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = svunpklo_{suf_to_typ}({in0});
ret.v1 = svunpkhi_{suf_to_typ}({in0});
return ret;'''.format(suf_to_typ=suf(to_typ), **fmtspec)
elif (from_typ in common.itypes and to_typ in common.utypes) or \
(from_typ in common.utypes and to_typ in common.itypes):
return \
'''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = svreinterpret_{suf_to_typ}_{suf_int_typ}(
svunpklo_{suf_int_typ}({in0}));
ret.v1 = svreinterpret_{suf_to_typ}_{suf_int_typ}(
svunpkhi_{suf_int_typ}({in0}));
return ret;'''. \
format(suf_to_typ=suf(to_typ),
suf_int_typ=suf(from_typ[0] + to_typ[1:]), **fmtspec)
elif from_typ in common.iutypes and to_typ in common.ftypes:
return \
'''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = svcvt_{suf_to_typ}_{suf_int_typ}_x(
{svtrue}, svunpklo_{suf_int_typ}({in0}));
ret.v1 = svcvt_{suf_to_typ}_{suf_int_typ}_x(
{svtrue}, svunpkhi_{suf_int_typ}({in0}));
return ret;'''. \
format(suf_to_typ=suf(to_typ),
suf_int_typ=suf(from_typ[0] + to_typ[1:]), **fmtspec)
else:
return \
'''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = svcvt_{suf_to_typ}_{suf}_x({svtrue}, svzip1_{suf}(
{in0}, {in0}));
ret.v1 = svcvt_{suf_to_typ}_{suf}_x({svtrue}, svzip2_{suf}(
{in0}, {in0}));
return ret;'''.format(suf_to_typ=suf(to_typ), **fmtspec)
# -----------------------------------------------------------------------------
# Down convert
def downcvt1(simd_ext, from_typ, to_typ):
if simd_ext in neon:
if from_typ == 'f64' and to_typ == 'f32':
if simd_ext == 'neon128':
return '''f32 buf[4];
buf[0] = (f32){in0}.v0;
buf[1] = (f32){in0}.v1;
buf[2] = (f32){in1}.v0;
buf[3] = (f32){in1}.v1;
return vld1q_f32(buf);'''.format(**fmtspec)
else:
return '''return vcombine_f32(vcvt_f32_f64({in0}),
vcvt_f32_f64({in1}));'''. \
format(**fmtspec)
elif from_typ == 'f32' and to_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vcombine_f16(vcvt_f16_f32({in0}),
vcvt_f16_f32({in1}));
#else
nsimd_{simd_ext}_vf16 ret;
ret.v0 = {in0};
ret.v1 = {in1};
return ret;
#endif'''.format(**fmtspec)
elif (from_typ in common.itypes and to_typ in common.itypes) or \
(from_typ in common.utypes and to_typ in common.utypes):
return '''return vcombine_{suf_to_typ}(vmovn_{suf}({in0}),
vmovn_{suf}({in1}));'''. \
format(suf_to_typ=suf(to_typ), **fmtspec)
        elif (from_typ in common.itypes and to_typ in common.utypes) or \
             (from_typ in common.utypes and to_typ in common.itypes):
            return '''return vreinterpretq_{suf_to_typ}_{suf_int_typ}(
                               vcombine_{suf_int_typ}(vmovn_{suf}({in0}),
                                 vmovn_{suf}({in1})));'''. \
                                 format(suf_to_typ=suf(to_typ),
                                        suf_int_typ=suf(from_typ[0] + to_typ[1:]),
                                        **fmtspec)
else:
return \
'''return nsimd_downcvt_{simd_ext}_{to_typ}_{int_typ}(
nsimd_cvt_{simd_ext}_{int_typ}_{from_typ}({in0}),
nsimd_cvt_{simd_ext}_{int_typ}_{from_typ}({in1}));'''.\
format(int_typ=to_typ[0] + from_typ[1:], **fmtspec)
# Getting here means that we deal with SVE
if from_typ in common.iutypes and to_typ in common.iutypes:
return '''return svuzp1_{suf_to_typ}(
svreinterpret_{suf_to_typ}_{suf}({in0}),
svreinterpret_{suf_to_typ}_{suf}({in1}));'''. \
format(suf_to_typ=suf(to_typ), **fmtspec)
elif from_typ in common.ftypes and to_typ in common.iutypes:
return \
'''return svuzp1_{suf_to_typ}(svreinterpret_{suf_to_typ}_{suf_int_typ}(
svcvt_{suf_int_typ}_{suf}_x({svtrue}, {in0})),
svreinterpret_{suf_to_typ}_{suf_int_typ}(
svcvt_{suf_int_typ}_{suf}_x({svtrue}, {in1})));'''. \
format(suf_to_typ=suf(to_typ),
suf_int_typ=suf(to_typ[0] + from_typ[1:]),
**fmtspec)
else:
return \
'''return svuzp1_{suf_to_typ}(svcvt_{suf_to_typ}_{suf}_x(
{svtrue}, {in0}), svcvt_{suf_to_typ}_{suf}_x(
{svtrue}, {in1}));'''. \
format(suf_to_typ=suf(to_typ), **fmtspec)
# -----------------------------------------------------------------------------
# adds
def adds(simd_ext, from_typ):
if from_typ in common.ftypes:
return 'return nsimd_add_{simd_ext}_{from_typ}({in0}, {in1});'. \
format(**fmtspec)
if simd_ext in neon:
return 'return vqaddq_{suf}({in0}, {in1});'.format(**fmtspec)
else:
return 'return svqadd_{suf}({in0}, {in1});'.format(**fmtspec)
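# For reference, a sketch of what the NEON branch above generates for i8 (assuming the
# usual a0/a1 argument naming coming from common.in0/common.in1):
#   return vqaddq_s8(a0, a1);
# i.e. saturating integer addition; floating-point types simply defer to the plain add.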
# -----------------------------------------------------------------------------
# subs
def subs(simd_ext, from_typ):
if from_typ in common.ftypes:
return 'return nsimd_sub_{simd_ext}_{from_typ}({in0}, {in1});'. \
format(**fmtspec)
elif simd_ext in neon:
return 'return vqsubq_{suf}({in0}, {in1});'.format(**fmtspec)
else:
return 'return svqsub_{suf}({in0}, {in1});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# to_mask
def to_mask1(opts, simd_ext, typ):
if typ in common.itypes + common.ftypes:
normal = 'return vreinterpretq_{suf}_u{typnbits}({in0});'. \
format(**fmtspec)
else:
normal = 'return {in0};'.format(**fmtspec)
emulate_f16 = '''nsimd_{simd_ext}_vf16 ret;
ret.v0 = nsimd_to_mask_{simd_ext}_f32({in0}.v0);
ret.v1 = nsimd_to_mask_{simd_ext}_f32({in0}.v1);
return ret;'''.format(**fmtspec)
if simd_ext == 'neon128' and typ == 'f16':
return emulate_f16
elif simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vf64 ret;
ret.v0 = nsimd_scalar_reinterpret_f64_u64({in0}.v0);
ret.v1 = nsimd_scalar_reinterpret_f64_u64({in0}.v1);
return ret;'''.format(**fmtspec)
elif simd_ext == 'aarch64' and typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{normal}
#else
{emulate_f16}
#endif'''.format(normal=normal, emulate_f16=emulate_f16)
elif simd_ext in sve:
if opts.sve_emulate_bool:
return 'return svreinterpret_{suf}_u{typnbits}({in0});'. \
format(**fmtspec)
else:
utyp = 'u{}'.format(fmtspec['typnbits'])
return '''return svreinterpret_{suf}_{utyp}(svsel_{utyp}(
{in0}, svdup_n_{utyp}(({utyp})-1),
svdup_n_{utyp}(({utyp})0)));'''. \
format(utyp=utyp, **fmtspec)
else:
return normal
# -----------------------------------------------------------------------------
# iota
def iota(simd_ext, typ):
if simd_ext in sve:
if typ in common.iutypes:
return 'return svindex_{suf}(0, 1);'.format(**fmtspec)
else:
return \
'''return svcvt_{suf}_s{typnbits}_x({svtrue},
svindex_s{typnbits}(0, 1));'''.format(**fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''nsimd_neon128_vf64 ret;
ret.v0 = 0.0;
ret.v1 = 1.0;
return ret;'''.format(**fmtspec)
typ2 = 'f32' if typ == 'f16' else typ
le = 128 // int(typ[1:])
iota = ', '.join(['({typ2}){i}'.format(typ2=typ2, i=i) \
for i in range(le)])
normal = '''{typ} buf[{le}] = {{ {iota} }};
return vld1q_{suf}(buf);'''. \
format(le=le, iota=iota, **fmtspec)
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{normal}
#else
f32 buf[8] = {{ {iota} }};
nsimd_{simd_ext}_vf16 ret;
ret.v0 = vld1q_f32(buf);
ret.v1 = vld1q_f32(buf + 4);
return ret;
#endif'''.format(iota=iota, normal=normal, **fmtspec)
return normal
# -----------------------------------------------------------------------------
# mask_for_loop_tail
def mask_for_loop_tail(simd_ext, typ):
if typ == 'f16':
threshold = 'nsimd_f32_to_f16((f32)({in1} - {in0}))'.format(**fmtspec)
else:
threshold = '({typ})({in1} - {in0})'.format(**fmtspec)
if simd_ext == 'sve':
le = 'nsimd_len_sve_{typ}()'.format(**fmtspec)
elif simd_ext in fixed_sized_sve:
le = int(simd_ext[3:]) // int(typ[1:])
else:
le = 128 // int(typ[1:])
return '''if ({in0} >= {in1}) {{
return nsimd_set1l_{simd_ext}_{typ}(0);
}}
if ({in1} - {in0} < {le}) {{
nsimd_{simd_ext}_v{typ} n =
nsimd_set1_{simd_ext}_{typ}({threshold});
return nsimd_lt_{simd_ext}_{typ}(
nsimd_iota_{simd_ext}_{typ}(), n);
}} else {{
return nsimd_set1l_{simd_ext}_{typ}(1);
}}'''.format(le=le, threshold=threshold, **fmtspec)
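# Sketch of the intended caller pattern for the mask generated above (names are
# illustrative only, nothing here is emitted by this function):
#   for (i = i0; i < n; i += len) {
#     /* mask = nsimd_mask_for_loop_tail_<simd_ext>_<typ>(i, n); */
#     /* use the mask for the final, possibly partial, iteration */
#   }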
# -----------------------------------------------------------------------------
# to_logical
def to_logical1(opts, simd_ext, typ):
if typ in common.iutypes:
return '''return nsimd_ne_{simd_ext}_{typ}({in0},
nsimd_set1_{simd_ext}_{typ}(({typ})0));'''. \
format(**fmtspec)
normal_fp = \
'''return nsimd_reinterpretl_{simd_ext}_{suf}_{utyp}(
nsimd_ne_{simd_ext}_{utyp}(
nsimd_reinterpret_{simd_ext}_{utyp}_{typ}(
{in0}), nsimd_set1_{simd_ext}_{utyp}(({utyp})0)));'''. \
format(utyp='u{}'.format(fmtspec['typnbits']), **fmtspec)
if typ in ['f32', 'f64'] or (typ == 'f16' and simd_ext in sve):
return normal_fp
emulate_fp16 = \
'''nsimd_{simd_ext}_vlf16 ret;
ret.v0 = nsimd_to_logical_{simd_ext}_f32({in0}.v0);
ret.v1 = nsimd_to_logical_{simd_ext}_f32({in0}.v1);
return ret;'''.format(**fmtspec)
if simd_ext == 'aarch64':
return '''#ifdef NSIMD_ARM_FP16
{normal_fp}
#else
{emulate_fp16}
#endif'''.format(normal_fp=normal_fp,
emulate_fp16=emulate_fp16)
elif simd_ext == 'neon128':
return emulate_fp16
# -----------------------------------------------------------------------------
# unpack functions
def zip_unzip_half(func, simd_ext, typ):
if simd_ext == 'aarch64' or simd_ext in sve:
if typ =='f16' and simd_ext == 'aarch64':
if func in ['zip1', 'zip2']:
return '''\
#ifdef NSIMD_ARM_FP16
return {s}v{op}{q}_{suf}({in0}, {in1});
#else
nsimd_{simd_ext}_v{typ} ret;
ret.v0 = {s}vzip1{q}_f32({in0}.v{i}, {in1}.v{i});
ret.v1 = {s}vzip2{q}_f32({in0}.v{i}, {in1}.v{i});
return ret;
#endif
'''.format(op=func,
i = '0' if func in ['zip1', 'uzp1'] else '1',
s = 's' if simd_ext in sve else '',
q = '' if simd_ext in sve else 'q', **fmtspec)
else:
return '''\
#ifdef NSIMD_ARM_FP16
return {s}v{op}{q}_{suf}({in0}, {in1});
#else
nsimd_{simd_ext}_v{typ} ret;
ret.v0 = {s}v{func}{q}_f32({in0}.v0, {in0}.v1);
ret.v1 = {s}v{func}{q}_f32({in1}.v0, {in1}.v1);
return ret;
#endif'''.format(op=func, func=func,
s = 's' if simd_ext in sve else '',
q = '' if simd_ext in sve else 'q', **fmtspec)
else:
return 'return {s}v{op}{q}_{suf}({in0}, {in1});'. \
format(op=func, s = 's' if simd_ext in sve else '',
q = '' if simd_ext in sve else 'q', **fmtspec)
elif simd_ext == 'neon128':
armop = {'zip1': 'zipq', 'zip2': 'zipq', 'uzp1': 'uzpq',
'uzp2': 'uzpq'}
prefix = { 'i': 'int', 'u': 'uint', 'f': 'float' }
neon_typ = '{}{}x{}x2_t'. \
format(prefix[typ[0]], typ[1:], 128 // int(typ[1:]))
if typ == 'f16':
if func in ['zip1', 'zip2']:
return '''\
nsimd_{simd_ext}_v{typ} ret;
float32x4x2_t tmp = v{op}_f32({in0}.v{i}, {in1}.v{i});
ret.v0 = tmp.val[0];
ret.v1 = tmp.val[1];
return ret;
'''.format(i = '0' if func == 'zip1' else '1',
op=armop[func], **fmtspec)
else:
return '''\
nsimd_{simd_ext}_v{typ} ret;
float32x4x2_t tmp0 = vuzpq_f32({in0}.v0, {in0}.v1);
float32x4x2_t tmp1 = vuzpq_f32({in1}.v0, {in1}.v1);
ret.v0 = tmp0.val[{i}];
ret.v1 = tmp1.val[{i}];
return ret;
'''.format(i = '0' if func == 'uzp1' else '1', **fmtspec)
elif typ in ['i64', 'u64']:
return '''\
{typ} buf0[2], buf1[2];
{typ} ret[2];
vst1q_{suf}(buf0, {in0});
vst1q_{suf}(buf1, {in1});
ret[0] = buf0[{i}];
ret[1] = buf1[{i}];
return vld1q_{suf}(ret);'''. \
format(**fmtspec, i= '0' if func in ['zip1', 'uzp1'] else '1')
elif typ == 'f64' :
return '''\
nsimd_{simd_ext}_v{typ} ret;
ret.v0 = {in0}.v{i};
ret.v1 = {in1}.v{i};
return ret;'''. \
format(**fmtspec, i= '0' if func in ['zip1', 'uzp1'] else '1')
else :
return '''\
{neon_typ} res;
res = v{op}_{suf}({in0}, {in1});
return res.val[{i}];'''. \
format(neon_typ=neon_typ, op=armop[func], **fmtspec,
i = '0' if func in ['zip1', 'uzp1'] else '1')
def zip_unzip(func, simd_ext, typ):
lo_hi = '''\
nsimd_{simd_ext}_v{typ}x2 ret;
ret.v0 = nsimd_{func}lo_{simd_ext}_{typ}({in0}, {in1});
ret.v1 = nsimd_{func}hi_{simd_ext}_{typ}({in0}, {in1});
return ret;
'''.format(func='zip' if func == 'zip' else 'unzip', **fmtspec)
if simd_ext == 'aarch64' or simd_ext in sve:
content = '''\
nsimd_{simd_ext}_v{typ}x2 ret;
ret.v0 = {s}v{func}1{q}_{suf}({in0}, {in1});
ret.v1 = {s}v{func}2{q}_{suf}({in0}, {in1});
return ret;'''.format(s = 's' if simd_ext in sve else '',
q = '' if simd_ext in sve else 'q',
func=func, **fmtspec)
if typ == 'f16':
return '''\
#ifdef NSIMD_ARM_FP16
{c}
#else
{default}
#endif'''.\
format(c=content, default=lo_hi, s = 's' if simd_ext in sve else '',
**fmtspec)
else:
return content
else:
prefix = { 'i': 'int', 'u': 'uint', 'f': 'float' }
neon_typ = '{}{}x{}x2_t'.\
format(prefix[typ[0]], typ[1:], 128 // int(typ[1:]))
content = '''\
nsimd_{simd_ext}_v{typ}x2 ret;
{neon_typ} tmp = v{func}q_{suf}({in0}, {in1});
ret.v0 = tmp.val[0];
ret.v1 = tmp.val[1];
return ret;'''\
.format(func=func, neon_typ=neon_typ, **fmtspec)
if typ in ['u64', 'i64', 'f64']:
return lo_hi
elif typ == 'f16':
return '''\
#ifdef NSIMD_ARM_FP16
{content}
#else
{default}
#endif'''.\
format(content=content, default=lo_hi,
f='zip' if func == 'zip' else 'unzip', **fmtspec)
else:
return content
# -----------------------------------------------------------------------------
# gather
def gather(simd_ext, typ):
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
if simd_ext in sve:
emul = '''int i;
{typ} buf[{le}];
i{typnbits} offset_buf[{le}];
svst1_s{typnbits}({svtrue}, offset_buf, {in1});
for (i = 0; i < {real_le}; i++) {{
buf[i] = {in0}[offset_buf[i]];
}}
return svld1_{suf}({svtrue}, buf);'''. \
format(le=le, real_le=real_le, **fmtspec)
else:
emul = \
'''nsimd_{simd_ext}_v{typ} ret;
ret = vdupq_n_{suf}({in0}[vgetq_lane_s{typnbits}({in1}, 0)]);'''. \
format(**fmtspec) + ''.join([
'''ret = vsetq_lane_{suf}({in0}[
vgetq_lane_s{typnbits}({in1}, {i})], ret, {i});\n'''. \
format(i=i, **fmtspec) for i in range(1, le)]) + \
'return ret;'
if typ == 'f16':
if simd_ext in sve:
return emul
return '''#ifdef NSIMD_ARM_FP16
{emul}
#else
nsimd_{simd_ext}_vf16 ret;
f32 buf[8];
'''.format(emul=emul, **fmtspec) + \
''.join(['buf[{i}] = nsimd_f16_to_f32({in0}[' \
'vgetq_lane_s16({in1}, {i})]);\n'. \
format(i=i, **fmtspec) for i in range(4)]) + \
''.join(['buf[4 + {i}] = nsimd_f16_to_f32({in0}[' \
'vgetq_lane_s16({in1}, 4 + {i})]);\n'. \
format(i=i, **fmtspec) for i in range(4)]) + \
''' ret.v0 = vld1q_f32(buf);
ret.v1 = vld1q_f32(buf + 4);
return ret;
#endif'''.format(**fmtspec)
if simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vf64 ret;
i64 offset_buf[2];
vst1q_s64(offset_buf, {in1});
ret.v0 = {in0}[offset_buf[0]];
ret.v1 = {in0}[offset_buf[1]];
return ret;'''.format(**fmtspec)
if simd_ext in neon or typ in ['i8', 'u8', 'i16', 'u16']:
return emul
# getting here means SVE
return 'return svld1_gather_s{typnbits}index_{suf}({svtrue}, {in0}, ' \
'{in1});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# linear gather
def gather_linear(simd_ext, typ):
if simd_ext in sve:
if typ in ['i8', 'u8', 'i16', 'u16', 'f16']:
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
return '''{typ} buf[{le}];
int i;
for (i = 0; i < {real_le}; i++) {{
buf[i] = {in0}[i * {in1}];
}}
return svld1_{suf}({svtrue}, buf);'''. \
format(le=le, real_le=real_le, **fmtspec)
else:
return 'return svld1_gather_s{typnbits}index_{suf}({svtrue}, ' \
'{in0}, svindex_s{typnbits}(0, (i{typnbits}){in1}));'. \
format(**fmtspec)
# getting here means neon128 and aarch64
intrinsic = '''nsimd_{simd_ext}_v{typ} ret;
ret = vdupq_n_{suf}({in0}[0]);
'''.format(**fmtspec) + ''.join([
'ret = vsetq_lane_{suf}({in0}[{i} * {in1}], ret, {i});\n'. \
format(i=i, **fmtspec) \
for i in range(1, 128 // int(fmtspec['typnbits']))]) + \
'''return ret;'''
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{intrinsic}
#else
nsimd_{simd_ext}_vf16 ret;
f32 buf[8];
int i;
for (i = 0; i < 8; i++) {{
buf[i] = nsimd_f16_to_f32({in0}[i * {in1}]);
}}
ret.v0 = vld1q_f32(buf);
ret.v1 = vld1q_f32(buf + 4);
return ret;
#endif'''.format(intrinsic=intrinsic, **fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''nsimd_neon128_vf64 ret;
ret.v0 = {in0}[0];
ret.v1 = {in0}[{in1}];
return ret;'''.format(**fmtspec)
return intrinsic
# -----------------------------------------------------------------------------
# masked gather
def maskoz_gather(oz, simd_ext, typ):
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
if simd_ext in sve:
utyp = 'u{typnbits}'.format(**fmtspec)
store = '''svst1_s{typnbits}({svtrue}, offset_buf, {in2});
svst1_{utyp}({svtrue}, mask, svsel_{utyp}(
{in0}, svdup_n_{utyp}(({utyp})-1), svdup_n_{utyp}(
({utyp})0)));
'''.format(utyp=utyp, **fmtspec)
if oz == 'z':
store += 'svst1_{suf}({svtrue}, buf, svdup_n_{suf}(({typ})0));'. \
format(**fmtspec)
else:
store += 'svst1_{suf}({svtrue}, buf, {in3});'.format(**fmtspec)
load = 'svld1_{suf}({svtrue}, buf)'.format(**fmtspec)
else:
store = '''vst1q_s{typnbits}(offset_buf, {in2});
vst1q_u{typnbits}(mask, {in0});'''.format(**fmtspec)
if oz == 'z':
store += 'vst1q_{suf}(buf, vdupq_n_{suf}(({typ})0));'. \
format(**fmtspec)
else:
store += 'vst1q_{suf}(buf, {in3});'.format(**fmtspec)
load = 'vld1q_{suf}(buf)'.format(**fmtspec)
emul = '''int i;
{typ} buf[{le}];
u{typnbits} mask[{le}];
i{typnbits} offset_buf[{le}];
{store}
for (i = 0; i < {real_le}; i++) {{
if (mask[i]) {{
buf[i] = {in1}[offset_buf[i]];
}}
}}
return {load};'''. \
format(le=le, real_le=real_le, store=store, load=load, **fmtspec)
if typ == 'f16':
if simd_ext in sve:
return emul
if oz == 'z':
oz0 = 'vdupq_n_f32(0.0f)'
oz1 = oz0
else:
oz0 = '{in3}.v0'.format(**fmtspec)
oz1 = '{in3}.v1'.format(**fmtspec)
return '''#ifdef NSIMD_ARM_FP16
{emul}
#else
nsimd_{simd_ext}_vf16 ret;
int i;
f32 buf[{le}];
u32 mask[{le}];
i16 offset_buf[{le}];
vst1q_s16(offset_buf, {in2});
vst1q_f32(buf, {oz0});
vst1q_f32(buf + {leo2}, {oz1});
vst1q_u32(mask, {in0}.v0);
vst1q_u32(mask + {leo2}, {in0}.v1);
for (i = 0; i < {le}; i++) {{
if (mask[i]) {{
buf[i] = nsimd_f16_to_f32({in1}[offset_buf[i]]);
}}
}}
ret.v0 = vld1q_f32(buf);
ret.v1 = vld1q_f32(buf + {leo2});
return ret;
#endif'''.format(emul=emul, leo2=le // 2, le=le, oz0=oz0,
oz1=oz1, **fmtspec)
if simd_ext == 'neon128' and typ == 'f64':
oz0 = '0.0' if oz == 'z' else '{in3}.v0'.format(**fmtspec)
oz1 = '0.0' if oz == 'z' else '{in3}.v1'.format(**fmtspec)
return '''nsimd_neon128_vf64 ret;
i64 offset_buf[2];
vst1q_s64(offset_buf, {in2});
if ({in0}.v0) {{
ret.v0 = {in1}[offset_buf[0]];
}} else {{
ret.v0 = {oz0};
}}
if ({in0}.v1) {{
ret.v1 = {in1}[offset_buf[1]];
}} else {{
ret.v1 = {oz1};
}}
return ret;'''.format(oz0=oz0, oz1=oz1, **fmtspec)
if simd_ext in neon or typ in ['i8', 'u8', 'i16', 'u16']:
return emul
# getting here means SVE
oz0 = 'svdup_n_{suf}(({typ})0)'.format(**fmtspec) if oz == 'z' \
else '{in3}'.format(**fmtspec)
return '''return svsel_{suf}({in0}, svld1_gather_s{typnbits}index_{suf}(
{in0}, {in1}, {in2}), {oz0});'''. \
format(oz0=oz0, **fmtspec)
# -----------------------------------------------------------------------------
# scatter
def scatter(simd_ext, typ):
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
if simd_ext in sve:
emul = '''int i;
{typ} buf[{le}];
i{typnbits} offset_buf[{le}];
svst1_s{typnbits}({svtrue}, offset_buf, {in1});
svst1_{suf}({svtrue}, buf, {in2});
for (i = 0; i < {real_le}; i++) {{
{in0}[offset_buf[i]] = buf[i];
}}'''.format(le=le, real_le=real_le, **fmtspec)
else:
emul = '\n'.join(['{in0}[vgetq_lane_s{typnbits}({in1}, {i})] = ' \
'vgetq_lane_{suf}({in2}, {i});\n'. \
format(i=i, **fmtspec) for i in range(int(le))])
if typ == 'f16':
if simd_ext in sve:
return emul
return '''#ifdef NSIMD_ARM_FP16
{emul}
#else
'''.format(emul=emul) + \
'\n'.join(['{in0}[vgetq_lane_s16({in1}, {i})] = ' \
'nsimd_f32_to_f16(vgetq_lane_f32({in2}.v0, '
'{i}));\n'.format(i=i, **fmtspec) \
for i in range(4)]) + \
'\n'.join(['{in0}[vgetq_lane_s16({in1}, 4 + {i})] = ' \
'nsimd_f32_to_f16(vgetq_lane_f32({in2}.v1, '
'{i}));\n'.format(i=i, **fmtspec) \
for i in range(4)]) + \
'''
#endif'''
if simd_ext == 'neon128' and typ == 'f64':
return '''i64 offset_buf[2];
vst1q_s64(offset_buf, {in1});
{in0}[offset_buf[0]] = {in2}.v0;
{in0}[offset_buf[1]] = {in2}.v1;'''.format(**fmtspec)
if simd_ext in neon or typ in ['i8', 'u8', 'i16', 'u16']:
return emul
# getting here means SVE
return 'svst1_scatter_s{typnbits}index_{suf}({svtrue}, {in0}, ' \
'{in1}, {in2});'.format(le=le, **fmtspec)
# -----------------------------------------------------------------------------
# linear scatter
def scatter_linear(simd_ext, typ):
if simd_ext in sve:
if typ in ['i8', 'u8', 'i16', 'u16', 'f16']:
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
return '''{typ} buf[{le}];
int i;
svst1_{suf}({svtrue}, buf, {in2});
for (i = 0; i < {real_le}; i++) {{
{in0}[i * {in1}] = buf[i];
}}'''.format(le=le, real_le=real_le, **fmtspec)
else:
return 'svst1_scatter_s{typnbits}index_{suf}({svtrue}, {in0}, ' \
'svindex_s{typnbits}(0, (i{typnbits}){in1}), {in2});'. \
format(**fmtspec)
# getting here means neon128 and aarch64
intrinsic = '\n'.join([
'{in0}[{i} * {in1}] = vgetq_lane_{suf}({in2}, {i});'. \
format(i=i, **fmtspec) for i in range(128 // int(fmtspec['typnbits']))])
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{intrinsic}
#else
f32 buf[8];
int i;
vst1q_f32(buf, {in2}.v0);
vst1q_f32(buf + 4, {in2}.v1);
for (i = 0; i < 8; i++) {{
{in0}[i * {in1}] = nsimd_f32_to_f16(buf[i]);
}}
#endif'''.format(intrinsic=intrinsic, **fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''{in0}[0] = {in2}.v0;
{in0}[{in1}] = {in2}.v1;'''.format(**fmtspec)
return intrinsic
# -----------------------------------------------------------------------------
# mask_scatter
def mask_scatter(simd_ext, typ):
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
if simd_ext in sve:
store = '''svst1_s{typnbits}({svtrue}, offset_buf, {in2});
svst1_u{typnbits}({svtrue}, mask, svsel_u{typnbits}(
{in0}, svdup_n_u{typnbits}((u{typnbits})1),
svdup_n_u{typnbits}((u{typnbits})0)));
svst1_{suf}({svtrue}, buf, {in3});'''.format(**fmtspec)
else:
store = '''vst1q_s{typnbits}(offset_buf, {in2});
vst1q_{suf}(buf, {in3});
vst1q_u{typnbits}(mask, {in0});'''.format(**fmtspec)
emul = '''int i;
{typ} buf[{le}];
u{typnbits} mask[{le}];
i{typnbits} offset_buf[{le}];
{store}
for (i = 0; i < {real_le}; i++) {{
if (mask[i]) {{
{in1}[offset_buf[i]] = buf[i];
}}
}}'''.format(le=le, real_le=real_le, store=store, **fmtspec)
if typ == 'f16':
if simd_ext in sve:
return emul
return '''#ifdef NSIMD_ARM_FP16
{emul}
#else
int i;
f32 buf[{le}];
u32 mask[{le}];
i16 offset_buf[{le}];
vst1q_s16(offset_buf, {in2});
vst1q_f32(buf, {in3}.v0);
vst1q_f32(buf + {leo2}, {in3}.v1);
vst1q_u32(mask, {in0}.v0);
vst1q_u32(mask + {leo2}, {in0}.v1);
for (i = 0; i < {le}; i++) {{
if (mask[i]) {{
{in1}[offset_buf[i]] = nsimd_f32_to_f16(buf[i]);
}}
}}
#endif'''.format(emul=emul, le=le, leo2=le // 2, **fmtspec)
if simd_ext == 'neon128' and typ == 'f64':
return '''i64 offset_buf[2];
vst1q_s64(offset_buf, {in2});
if ({in0}.v0) {{
{in1}[offset_buf[0]] = {in3}.v0;
}}
if ({in0}.v1) {{
{in1}[offset_buf[1]] = {in3}.v1;
}}'''.format(**fmtspec)
if simd_ext in neon or typ in ['i8', 'u8', 'i16', 'u16']:
return emul
# getting here means SVE
return 'svst1_scatter_s{typnbits}index_{suf}({in0}, {in1}, ' \
'{in2}, {in3});'.format(le=le, **fmtspec)
# -----------------------------------------------------------------------------
# get_impl function
def get_impl(opts, func, simd_ext, from_typ, to_typ):
global fmtspec
simd_ext2 = simd_ext if not simd_ext in fixed_sized_sve else 'sve'
fmtspec = {
'simd_ext': simd_ext,
'simd_ext2': simd_ext2,
'typ': from_typ,
'from_typ': from_typ,
'to_typ': to_typ,
'suf': suf(from_typ),
'in0': common.in0,
'in1': common.in1,
'in2': common.in2,
'in3': common.in3,
'in4': common.in4,
'in5': common.in5,
'typnbits': from_typ[1:],
'svtrue': 'svptrue_b{}()'.format(from_typ[1:]),
'svetyp': sve_typ(from_typ),
}
impls = {
'loada': lambda: load1234(opts, simd_ext, from_typ, 1),
'masko_loada1': lambda: maskoz_load('o', simd_ext, from_typ),
'maskz_loada1': lambda: maskoz_load('z', simd_ext, from_typ),
'load2a': lambda: load1234(opts, simd_ext, from_typ, 2),
'load3a': lambda: load1234(opts, simd_ext, from_typ, 3),
'load4a': lambda: load1234(opts, simd_ext, from_typ, 4),
'loadu': lambda: load1234(opts, simd_ext, from_typ, 1),
'masko_loadu1': lambda: maskoz_load('o', simd_ext, from_typ),
'maskz_loadu1': lambda: maskoz_load('z', simd_ext, from_typ),
'load2u': lambda: load1234(opts, simd_ext, from_typ, 2),
'load3u': lambda: load1234(opts, simd_ext, from_typ, 3),
'load4u': lambda: load1234(opts, simd_ext, from_typ, 4),
'storea': lambda: store1234(opts, simd_ext, from_typ, 1),
'mask_storea1': lambda: mask_store(simd_ext, from_typ),
'store2a': lambda: store1234(opts, simd_ext, from_typ, 2),
'store3a': lambda: store1234(opts, simd_ext, from_typ, 3),
'store4a': lambda: store1234(opts, simd_ext, from_typ, 4),
'storeu': lambda: store1234(opts, simd_ext, from_typ, 1),
'mask_storeu1': lambda: mask_store(simd_ext, from_typ),
'store2u': lambda: store1234(opts, simd_ext, from_typ, 2),
'store3u': lambda: store1234(opts, simd_ext, from_typ, 3),
'store4u': lambda: store1234(opts, simd_ext, from_typ, 4),
'gather': lambda: gather(simd_ext, from_typ),
'gather_linear': lambda: gather_linear(simd_ext, from_typ),
'maskz_gather': lambda: maskoz_gather('z', simd_ext, from_typ),
'masko_gather': lambda: maskoz_gather('o', simd_ext, from_typ),
'scatter': lambda: scatter(simd_ext, from_typ),
'scatter_linear': lambda: scatter_linear(simd_ext, from_typ),
'mask_scatter': lambda: mask_scatter(simd_ext, from_typ),
'andb': lambda: binop2("andb", simd_ext2, from_typ),
'xorb': lambda: binop2("xorb", simd_ext2, from_typ),
'orb': lambda: binop2("orb", simd_ext2, from_typ),
'andl': lambda: lop2(opts, "andl", simd_ext2, from_typ),
'xorl': lambda: lop2(opts, "xorl", simd_ext2, from_typ),
'orl': lambda: lop2(opts, "orl", simd_ext2, from_typ),
'notb': lambda: not1(simd_ext2, from_typ),
'notl': lambda: lnot1(opts, simd_ext2, from_typ),
'andnotb': lambda: binop2("andnotb", simd_ext2, from_typ),
'andnotl': lambda: lop2(opts, "andnotl", simd_ext2, from_typ),
'add': lambda: addsub("add", simd_ext2, from_typ),
'sub': lambda: addsub("sub", simd_ext2, from_typ),
'adds': lambda: adds(simd_ext2, from_typ),
'subs': lambda: subs(simd_ext2, from_typ),
'div': lambda: div2(simd_ext2, from_typ),
'sqrt': lambda: sqrt1(simd_ext2, from_typ),
'len': lambda: len1(simd_ext, from_typ),
'mul': lambda: mul2(simd_ext2, from_typ),
'shl': lambda: shl_shr("shl", simd_ext2, from_typ),
'shr': lambda: shl_shr("shr", simd_ext2, from_typ),
'shra': lambda: shra(simd_ext2, from_typ),
'set1': lambda: set1(simd_ext2, from_typ),
'set1l': lambda: lset1(simd_ext2, from_typ),
'eq': lambda: cmp2(opts, "eq", simd_ext2, from_typ),
'lt': lambda: cmp2(opts, "lt", simd_ext2, from_typ),
'le': lambda: cmp2(opts, "le", simd_ext2, from_typ),
'gt': lambda: cmp2(opts, "gt", simd_ext2, from_typ),
'ge': lambda: cmp2(opts, "ge", simd_ext2, from_typ),
'ne': lambda: neq2(opts, simd_ext2, from_typ),
'if_else1': lambda: if_else3(opts, simd_ext2, from_typ),
'min': lambda: minmax2("min", simd_ext2, from_typ),
'max': lambda: minmax2("max", simd_ext2, from_typ),
'loadla': lambda: loadl(True, simd_ext2, from_typ),
'loadlu': lambda: loadl(False, simd_ext2, from_typ),
'storela': lambda: storel(True, simd_ext2, from_typ),
'storelu': lambda: storel(False, simd_ext2, from_typ),
'abs': lambda: abs1(simd_ext2, from_typ),
'fma': lambda: fmafnma3("fma", simd_ext2, from_typ),
'fnma': lambda: fmafnma3("fnma", simd_ext2, from_typ),
'fms': lambda: fmsfnms3("fms", simd_ext2, from_typ),
'fnms': lambda: fmsfnms3("fnms", simd_ext2, from_typ),
'ceil': lambda: round1("ceil", simd_ext2, from_typ),
'floor': lambda: round1("floor", simd_ext2, from_typ),
'trunc': lambda: round1("trunc", simd_ext2, from_typ),
'round_to_even': lambda: round1("round_to_even", simd_ext2, from_typ),
'all': lambda: allany1(opts, "all", simd_ext2, from_typ),
'any': lambda: allany1(opts, "any", simd_ext2, from_typ),
'reinterpret': lambda: reinterpret1(simd_ext2, from_typ, to_typ),
'reinterpretl': lambda: reinterpretl1(simd_ext2, from_typ, to_typ),
'cvt': lambda: convert1(simd_ext2, from_typ, to_typ),
'rec11': lambda: recs1("rec11", simd_ext2, from_typ),
'rec8': lambda: recs1("rec8", simd_ext2, from_typ),
'rsqrt11': lambda: recs1("rsqrt11", simd_ext2, from_typ),
'rsqrt8': lambda: recs1("rsqrt8", simd_ext2, from_typ),
'rec': lambda: recs1("rec", simd_ext2, from_typ),
'neg': lambda: neg1(simd_ext2, from_typ),
'nbtrue': lambda: nbtrue1(opts, simd_ext2, from_typ),
'reverse': lambda: reverse1(simd_ext2, from_typ),
'addv': lambda: addv(simd_ext2, from_typ),
'upcvt': lambda: upcvt1(simd_ext2, from_typ, to_typ),
'downcvt': lambda: downcvt1(simd_ext2, from_typ, to_typ),
'to_logical': lambda: to_logical1(opts, simd_ext2, from_typ),
'to_mask': lambda: to_mask1(opts, simd_ext2, from_typ),
'ziplo': lambda: zip_unzip_half("zip1", simd_ext2, from_typ),
'ziphi': lambda: zip_unzip_half("zip2", simd_ext2, from_typ),
'unziplo': lambda: zip_unzip_half("uzp1", simd_ext2, from_typ),
'unziphi': lambda: zip_unzip_half("uzp2", simd_ext2, from_typ),
'zip' : lambda: zip_unzip("zip", simd_ext2, from_typ),
'unzip' : lambda: zip_unzip("uzp", simd_ext2, from_typ),
'mask_for_loop_tail': lambda : mask_for_loop_tail(simd_ext, from_typ),
'iota': lambda : iota(simd_ext2, from_typ)
}
if simd_ext not in get_simd_exts():
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
if not from_typ in common.types:
raise ValueError('Unknown type "{}"'.format(from_typ))
if not func in impls:
return common.NOT_IMPLEMENTED
else:
return impls[func]()
|
anuga/parallel/tests/test_sequential_dist_sw_flow.py | samcom12/anuga_core | 136 | 12783516 | <reponame>samcom12/anuga_core
"""
Simple water flow example using ANUGA
Water driven up a linear slope and time varying boundary,
similar to a beach environment
This is a very simple test of the parallel algorithm using the simplified parallel API
"""
from __future__ import print_function
from __future__ import division
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
from past.utils import old_div
from future.utils import raise_
import unittest
import os
import sys
#import pypar
import numpy as num
import anuga
from anuga import Domain
from anuga import Reflective_boundary
from anuga import Dirichlet_boundary
from anuga import Time_boundary
from anuga import Transmissive_boundary
from anuga import rectangular_cross_domain
from anuga import distribute, myid, numprocs, send, receive, barrier, finalize
from anuga.parallel.sequential_distribute import sequential_distribute_dump
from anuga.parallel.sequential_distribute import sequential_distribute_load
import anuga.utilities.plot_utils as util
#--------------------------------------------------------------------------
# Setup parameters
#--------------------------------------------------------------------------
yieldstep = 0.25
finaltime = 1.0
nprocs = 4
N = 29
M = 29
verbose = False
new_parameters = {}
new_parameters['ghost_layer_width'] = 2
#---------------------------------
# Setup Functions
#---------------------------------
def topography(x,y):
return old_div(-x,2)
###########################################################################
# Setup Test
##########################################################################
def run_simulation(parallel=False, verbose=False):
#--------------------------------------------------------------------------
# Setup computational domain and quantities
#--------------------------------------------------------------------------
if myid == 0:
domain = rectangular_cross_domain(M, N)
domain.set_name('odomain') # Set sww filename
domain.set_datadir('.')
domain.set_quantity('elevation', topography) # Use function for elevation
domain.set_quantity('friction', 0.0) # Constant friction
domain.set_quantity('stage', expression='elevation') # Dry initial stage
else:
domain = None
#--------------------------------------------------------------------------
# Create pickled partition
#--------------------------------------------------------------------------
if myid == 0:
if verbose: print('DUMPING PARTITION DATA')
sequential_distribute_dump(domain, numprocs, verbose=verbose, parameters=new_parameters)
#--------------------------------------------------------------------------
# Create the parallel domains
#--------------------------------------------------------------------------
if parallel:
if myid == 0 and verbose : print('DISTRIBUTING TO PARALLEL DOMAIN')
pdomain = distribute(domain, verbose=verbose, parameters=new_parameters)
pdomain.set_name('pdomain')
if myid == 0 and verbose : print('LOADING IN PARALLEL DOMAIN')
sdomain = sequential_distribute_load(filename='odomain', verbose = verbose)
sdomain.set_name('sdomain')
if myid == 0 and verbose: print('EVOLVING pdomain')
setup_and_evolve(pdomain, verbose=verbose)
if myid == 0 and verbose: print('EVOLVING sdomain')
setup_and_evolve(sdomain, verbose=verbose)
if myid == 0:
if verbose: print('EVOLVING odomain')
setup_and_evolve(domain, verbose=verbose)
if myid == 0 and verbose:
parameter_file=open('odomain.txt', 'w')
from pprint import pprint
pprint(domain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.close()
parameter_file=open('sdomain.txt', 'w')
from pprint import pprint
pprint(sdomain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.close()
parameter_file=open('pdomain.txt', 'w')
from pprint import pprint
pprint(pdomain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.close()
assert num.allclose(pdomain.quantities['stage'].centroid_values, sdomain.quantities['stage'].centroid_values)
assert num.allclose(pdomain.quantities['stage'].vertex_values, sdomain.quantities['stage'].vertex_values)
assert num.allclose(pdomain.vertex_coordinates, sdomain.vertex_coordinates)
assert num.allclose(pdomain.centroid_coordinates, sdomain.centroid_coordinates)
#---------------------------------
# Now compare the merged sww files
#---------------------------------
if myid == 0:
if verbose: print('COMPARING SWW FILES')
odomain_v = util.get_output('odomain.sww')
odomain_c = util.get_centroids(odomain_v)
pdomain_v = util.get_output('pdomain.sww')
pdomain_c = util.get_centroids(pdomain_v)
sdomain_v = util.get_output('sdomain.sww')
sdomain_c = util.get_centroids(sdomain_v)
# Test some values against the original ordering
if verbose:
order = 2
print('PDOMAIN CENTROID VALUES')
print(num.linalg.norm(odomain_c.x-pdomain_c.x,ord=order))
print(num.linalg.norm(odomain_c.y-pdomain_c.y,ord=order))
print(num.linalg.norm(odomain_c.stage[-1]-pdomain_c.stage[-1],ord=order))
print(num.linalg.norm(odomain_c.xmom[-1]-pdomain_c.xmom[-1],ord=order))
print(num.linalg.norm(odomain_c.ymom[-1]-pdomain_c.ymom[-1],ord=order))
print(num.linalg.norm(odomain_c.xvel[-1]-pdomain_c.xvel[-1],ord=order))
print(num.linalg.norm(odomain_c.yvel[-1]-pdomain_c.yvel[-1],ord=order))
print('SDOMAIN CENTROID VALUES')
print(num.linalg.norm(odomain_c.x-sdomain_c.x,ord=order))
print(num.linalg.norm(odomain_c.y-sdomain_c.y,ord=order))
print(num.linalg.norm(odomain_c.stage[-1]-sdomain_c.stage[-1],ord=order))
print(num.linalg.norm(odomain_c.xmom[-1]-sdomain_c.xmom[-1],ord=order))
print(num.linalg.norm(odomain_c.ymom[-1]-sdomain_c.ymom[-1],ord=order))
print(num.linalg.norm(odomain_c.xvel[-1]-sdomain_c.xvel[-1],ord=order))
print(num.linalg.norm(odomain_c.yvel[-1]-sdomain_c.yvel[-1],ord=order))
print('PDOMAIN VERTEX VALUES')
print(num.linalg.norm(odomain_v.stage[-1]-pdomain_v.stage[-1],ord=order))
print(num.linalg.norm(odomain_v.xmom[-1]-pdomain_v.xmom[-1],ord=order))
print(num.linalg.norm(odomain_v.ymom[-1]-pdomain_v.ymom[-1],ord=order))
print(num.linalg.norm(odomain_v.xvel[-1]-pdomain_v.xvel[-1],ord=order))
print(num.linalg.norm(odomain_v.yvel[-1]-pdomain_v.yvel[-1],ord=order))
print('SDOMAIN VERTEX VALUES')
print(num.linalg.norm(odomain_v.stage[-1]-sdomain_v.stage[-1],ord=order))
print(num.linalg.norm(odomain_v.xmom[-1]-sdomain_v.xmom[-1],ord=order))
print(num.linalg.norm(odomain_v.ymom[-1]-sdomain_v.ymom[-1],ord=order))
print(num.linalg.norm(odomain_v.xvel[-1]-sdomain_v.xvel[-1],ord=order))
print(num.linalg.norm(odomain_v.yvel[-1]-sdomain_v.yvel[-1],ord=order))
assert num.allclose(odomain_c.stage,pdomain_c.stage)
assert num.allclose(odomain_c.xmom,pdomain_c.xmom)
assert num.allclose(odomain_c.ymom,pdomain_c.ymom)
assert num.allclose(odomain_c.xvel,pdomain_c.xvel)
assert num.allclose(odomain_c.yvel,pdomain_c.yvel)
assert num.allclose(odomain_v.x,pdomain_v.x)
assert num.allclose(odomain_v.y,pdomain_v.y)
assert num.linalg.norm(odomain_v.x-pdomain_v.x,ord=0) == 0
assert num.linalg.norm(odomain_v.y-pdomain_v.y,ord=0) == 0
assert num.linalg.norm(odomain_v.stage[-1]-pdomain_v.stage[-1],ord=0) < 100
assert num.linalg.norm(odomain_v.xmom[-1]-pdomain_v.xmom[-1],ord=0) < 100
assert num.linalg.norm(odomain_v.ymom[-1]-pdomain_v.ymom[-1],ord=0) < 100
assert num.linalg.norm(odomain_v.xvel[-1]-pdomain_v.xvel[-1],ord=0) < 100
assert num.linalg.norm(odomain_v.yvel[-1]-pdomain_v.yvel[-1],ord=0) < 100
assert num.allclose(odomain_c.x,sdomain_c.x)
assert num.allclose(odomain_c.y,sdomain_c.y)
assert num.allclose(odomain_c.stage,sdomain_c.stage)
assert num.allclose(odomain_c.xmom,sdomain_c.xmom)
assert num.allclose(odomain_c.ymom,sdomain_c.ymom)
assert num.allclose(odomain_c.xvel,sdomain_c.xvel)
assert num.allclose(odomain_c.yvel,sdomain_c.yvel)
assert num.allclose(odomain_v.x,sdomain_v.x)
assert num.allclose(odomain_v.y,sdomain_v.y)
order = 0
assert num.linalg.norm(odomain_v.x-sdomain_v.x,ord=order) == 0
assert num.linalg.norm(odomain_v.y-sdomain_v.y,ord=order) == 0
assert num.linalg.norm(odomain_v.stage[-1]-sdomain_v.stage[-1],ord=order) < 100
assert num.linalg.norm(odomain_v.xmom[-1]-sdomain_v.xmom[-1],ord=order) < 100
assert num.linalg.norm(odomain_v.ymom[-1]-sdomain_v.ymom[-1],ord=order) < 100
assert num.linalg.norm(odomain_v.xvel[-1]-sdomain_v.xvel[-1],ord=order) < 100
assert num.linalg.norm(odomain_v.yvel[-1]-sdomain_v.yvel[-1],ord=order) < 100
# COMPARE CENTROID PDOMAIN SDOMAIN
assert num.allclose(pdomain_c.x,sdomain_c.x)
assert num.allclose(pdomain_c.y,sdomain_c.y)
assert num.allclose(pdomain_c.stage[-1],sdomain_c.stage[-1])
assert num.allclose(pdomain_c.xmom[-1],sdomain_c.xmom[-1])
assert num.allclose(pdomain_c.ymom[-1],sdomain_c.ymom[-1])
assert num.allclose(pdomain_c.xvel[-1],sdomain_c.xvel[-1])
assert num.allclose(pdomain_c.yvel[-1],sdomain_c.yvel[-1])
# COMPARE VERTEX PDOMAIN SDOMAIN
assert num.allclose(pdomain_v.x,sdomain_v.x)
assert num.allclose(pdomain_v.y,sdomain_v.y)
assert num.allclose(pdomain_v.stage[-1],sdomain_v.stage[-1])
assert num.allclose(pdomain_v.xmom[-1],sdomain_v.xmom[-1])
assert num.allclose(pdomain_v.ymom[-1],sdomain_v.ymom[-1])
assert num.allclose(pdomain_v.xvel[-1],sdomain_v.xvel[-1])
assert num.allclose(pdomain_v.yvel[-1],sdomain_v.yvel[-1])
import os
os.remove('odomain.sww')
os.remove('pdomain.sww')
os.remove('sdomain.sww')
os.remove('odomain_P3_0.pickle')
os.remove('odomain_P3_1.pickle')
os.remove('odomain_P3_2.pickle')
#os.remove('odomain_P4_3.pickle')
import glob
[ os.remove(fl) for fl in glob.glob('*.npy') ]
def setup_and_evolve(domain, verbose=False):
#--------------------------------------------------------------------------
# Setup domain parameters
#--------------------------------------------------------------------------
domain.set_flow_algorithm('DE0')
#domain.set_store_vertices_uniquely()
#------------------------------------------------------------------------------
# Setup boundary conditions
# This must currently happen *AFTER* domain has been distributed
#------------------------------------------------------------------------------
Br = Reflective_boundary(domain) # Solid reflective wall
Bd = Dirichlet_boundary([-0.2,0.,0.]) # Constant boundary values
# Associate boundary tags with boundary objects
domain.set_boundary({'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})
#------------------------------------------------------------------------------
# Evolve
#------------------------------------------------------------------------------
for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
if myid == 0 and verbose : domain.write_time()
#if myid == 0 and verbose : print domain.quantities['stage'].get_maximum_value()
domain.sww_merge(delete_old=True)
# Test an nprocs-way run of the shallow water equations
# against the sequential code.
class Test_parallel_sw_flow(unittest.TestCase):
def test_parallel_sw_flow(self):
if verbose : print("Expect this test to fail if not run from the parallel directory.")
cmd = anuga.mpicmd(os.path.abspath(__file__))
result = os.system(cmd)
assert_(result == 0)
# Because we are doing assertions outside of the TestCase class
# the PyUnit defined assert_ function can't be used.
def assert_(condition, msg="Assertion Failed"):
if condition == False:
#pypar.finalize()
raise_(AssertionError, msg)
if __name__=="__main__":
if numprocs == 1:
runner = unittest.TextTestRunner()
suite = unittest.makeSuite(Test_parallel_sw_flow, 'test')
runner.run(suite)
else:
from anuga.utilities.parallel_abstraction import global_except_hook
import sys
sys.excepthook = global_except_hook
#------------------------------------------
        # Run the code and compare sequential
# results at 4 gauge stations
#------------------------------------------
if myid ==0 and verbose: print('PARALLEL START')
run_simulation(parallel=True, verbose=verbose)
finalize()
|
src/copy_mechanism/copy_layer.py | Ravi-0809/question-generation | 212 | 12783527 | from typing import Callable
from tensorflow.python.layers import base
from tensorflow.python.eager import context
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import utils as layers_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.contrib.layers import fully_connected
import tensorflow as tf
import sys
from helpers.misc_utils import debug_tensor, debug_shape
from helpers.ops import safe_log
FLAGS = tf.app.flags.FLAGS
class CopyLayer(base.Layer):
"""Densely-connected layer class.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then it is
flattened prior to the initial matrix multiply by `kernel`.
Arguments:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such cases.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer instance (or name) for the kernel matrix.
bias_initializer: Initializer instance (or name) for the bias.
kernel_regularizer: Regularizer instance for the kernel matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
kernel_constraint: Constraint function for the kernel matrix.
bias_constraint: Constraint function for the bias.
kernel: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
"""
def __init__(self, embedding_dim,
units,
switch_units=64,
activation=None,
use_bias=False,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
source_provider: Callable[[], tf.Tensor] = None,
source_provider_sl: Callable[[], tf.Tensor] = None,
condition_encoding: Callable[[], tf.Tensor] = None,
output_mask: Callable[[], tf.Tensor] = None,
training_mode=False,
vocab_size=None,
context_as_set=False,
max_copy_size=None,
mask_oovs=False,
**kwargs):
super(CopyLayer, self).__init__(trainable=trainable, name=name,
activity_regularizer=activity_regularizer,
**kwargs)
self.vocab_size = vocab_size
self.source_provider = source_provider
self.source_provider_sl = source_provider_sl
self.embedding_dim = embedding_dim
self.units = units
self.switch_units = switch_units
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.kernel_constraint = kernel_constraint
self.bias_constraint = bias_constraint
self.input_spec = base.InputSpec(min_ndim=2)
self.training_mode=training_mode
# self.output_mask=output_mask
self.max_copy_size=max_copy_size
self.mask_oovs = mask_oovs
self.context_as_set=context_as_set
self.condition_encoding = condition_encoding
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
# print("building copy layer")
# print(input_shape)
self.built = True
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype) # batch x len_source+emb_dim
# inputs = debug_shape(inputs, "inputs")
# print(inputs)
# [batch_size, emb_dim + len_source] in eval,
# [len_target, batch_size,emb_dim + len_source] in train
source = self.source_provider() # [batch_size, len_source]
# source = debug_shape(source,"src")
source_sl = self.source_provider_sl()
condition_encoding = self.condition_encoding()
# condition_encoding = debug_shape(condition_encoding, "cond enc")
batch_size = tf.shape(source)[0]
len_source = tf.shape(source)[1]
shape = tf.shape(inputs)
is_eval = len(inputs.get_shape()) == 2
beam_width = tf.constant(1) if is_eval else shape[1]
# len_target = tf.Print(len_target, [len_target, batch_size, shape[-1]], "input reshape")
# inputs = tf.reshape(inputs, [-1, shape[-1]]) # [len_target * batch_size, len_source + emb_dim]
inputs_new = tf.reshape(inputs,
[batch_size*beam_width, shape[-1]]) # [len_target, batch_size, len_source + emb_dim]
# inputs_new = debug_shape(inputs_new, "inputs_new")
# -- [len_target, batch_size, embedding_dim] attention, []
# -- [len_target, batch_size, len_source] alignments
# attention, alignments = tf.split(inputs, [self.embedding_dim, -1], axis=1)
attention, alignments = tf.split(inputs_new, num_or_size_splits=[self.embedding_dim, -1], axis=-1)
# [len_target, batch_size, vocab_size]
if FLAGS.out_vocab_cpu:
with tf.device('/cpu:*'):
shortlist = tf.layers.dense(attention, self.vocab_size, activation=tf.nn.softmax, use_bias=False)
else:
shortlist = tf.layers.dense(attention, self.vocab_size, activation=tf.nn.softmax, use_bias=False)
# attention = debug_shape(attention, "attn")
# alignments = debug_shape(alignments, "align ("+str(self.units)+" desired)")
# alignments = debug_tensor(alignments, "alignments")
# print(alignments)
# shortlist = debug_shape(shortlist, "shortlist")
# TEMP: kill OOVs
s = tf.shape(shortlist)
mask = tf.concat([tf.ones((s[0],1)),tf.zeros((s[0],1)),tf.ones((s[0],s[1]-2))], axis=1)
shortlist = tf.cond(self.mask_oovs, lambda: shortlist * mask, lambda: shortlist)
# pad the alignments to the longest possible source st output vocab is fixed size
# TODO: Check for non zero alignments outside the seq length
# alignments_padded = debug_shape(alignments_padded, "align padded")
        # switch takes s_t, v_t and y_{t-1} as inputs
        # v_t = concat(weighted context encoding at t; condition encoding)
        # s_t = hidden state at t
        # y_{t-1} is the previously generated token
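        # Roughly, the output distribution assembled further down is the pointer-generator
        # style mixture:
        #   p(y_t) = concat((1 - switch) * shortlist_softmax, switch * copy_alignments)
        # optionally with mass on tokens shared between the two vocabularies merged when
        # FLAGS.combine_vocab is set.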
condition_encoding_tiled = tf.contrib.seq2seq.tile_batch(condition_encoding, multiplier=beam_width)
vt = tf.concat([attention, condition_encoding_tiled], axis=1)
# NOTE: this is missing the previous input y_t-1 and s_t
switch_input = tf.concat([vt],axis=1)
switch_h1 = tf.layers.dropout(tf.layers.dense(switch_input, self.switch_units, activation=tf.nn.tanh, kernel_initializer=tf.glorot_uniform_initializer()), rate=0.3, training=self.training_mode)
switch_h2 = tf.layers.dropout(tf.layers.dense(switch_h1, self.switch_units, activation=tf.nn.tanh, kernel_initializer=tf.glorot_uniform_initializer()), rate=0.3, training=self.training_mode)
self.switch = tf.layers.dense(switch_h2, 1, activation=tf.sigmoid, kernel_initializer=tf.glorot_uniform_initializer())
# switch = debug_shape(switch, "switch")
if FLAGS.disable_copy:
self.switch = 0
elif FLAGS.disable_shortlist:
self.switch = 1
# if self.output_mask is not None:
# alignments = self.output_mask() * alignments
source_tiled = tf.contrib.seq2seq.tile_batch(source, multiplier=beam_width)
source_tiled_sl = tf.contrib.seq2seq.tile_batch(source_sl, multiplier=beam_width)
shortlist = (1-self.switch)*shortlist
alignments = self.switch*alignments
# Take any tokens that are the same in either vocab and combine their probabilities
        # old approach: multiply by a big sparse matrix - not very memory efficient
        # opt1: multiply the copy dist by a vocab x copy matrix and add it to the vocab part
# opt2: do an nd_gather to copy the relevant prob mass, then mask carefully to remove it
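        # The code below follows opt2: scatter_nd moves the copy probability of any source
        # position whose token already has a shortlist id onto that id, that mass is added
        # to the shortlist, and the corresponding copy positions are zeroed so nothing is
        # double-counted.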
if FLAGS.combine_vocab:
# copy everything in real shortlist except special toks
# print(len_source, self.max_copy_size)
source_tiled_sl_padded = tf.pad(source_tiled_sl, [[0, 0], [0, self.max_copy_size-tf.shape(source_tiled_sl)[-1]]], 'CONSTANT', constant_values=0)
# attempt 2!
batch_ix = tf.tile(tf.expand_dims(tf.range(batch_size*beam_width),axis=-1),[1,len_source])
# seq_ix = tf.tile(tf.expand_dims(tf.range(len_source),axis=0),[batch_size*beam_width,1])
tgt_indices = tf.reshape(tf.concat([tf.expand_dims(batch_ix,-1),tf.expand_dims(source_tiled_sl,-1)], axis=2),[-1,2])
ident_indices = tf.where(tf.greater(source_tiled_sl, -1)) # get ixs of all elements
# ident_indices = tf.where()
# tgt_indices = debug_tensor(tgt_indices)
# get the copy probs at each point in the source..
updates = tf.reshape(tf.gather_nd(alignments, ident_indices),[-1])
# and send them to the their shortlist index
sum_part = tf.scatter_nd(tgt_indices, updates, [batch_size*beam_width, self.vocab_size+self.max_copy_size])
# then zero out the ix's that got copied
align_zeroed = alignments * tf.cast(tf.greater_equal(source_tiled_sl,self.vocab_size),tf.float32)
align_moved = alignments * tf.cast(tf.less(source_tiled_sl,self.vocab_size),tf.float32) # ie only let through stuff that *isnt* in SL
# and add the correct pieces together
alignments = align_zeroed
shortlist = shortlist + sum_part[:,:self.vocab_size]
# result = tf.Print(result, [tf.reduce_sum(result[:,:self.vocab_size],-1)], "result sl sum")
# shortlist = tf.Print(shortlist, [tf.reduce_sum(align_moved,-1)], "sum align_moved")
# shortlist = tf.Print(shortlist, [tf.reduce_sum(sum_part[:,:self.vocab_size],-1)], "sum sum_part")
# convert position probs to ids
if self.context_as_set:
# print(source) # batch x seq
# print(alignments) # batch x seq
pos_to_id = tf.one_hot(source_tiled-self.vocab_size, depth=self.max_copy_size) # batch x seq x vocab
if FLAGS.maxout_pointer:
copy_dist = tf.reduce_max(pos_to_id * tf.expand_dims(alignments, 2), axis=1)
else:
copy_dist = tf.squeeze(tf.matmul(tf.expand_dims(alignments,1), pos_to_id), axis=1)
else:
copy_dist=alignments
copy_dist_padded = tf.pad(copy_dist, [[0, 0], [0, self.max_copy_size-tf.shape(copy_dist)[-1]]], 'CONSTANT', constant_values=0)
result = tf.concat([shortlist,copy_dist_padded], axis=1) # this used to be safe_log'd
# if FLAGS.combine_vocab:
# result = tf.Print(result, [tf.reduce_sum(result,-1)], "result sum")
target_shape = tf.concat([shape[:-1], [-1]], 0)
result =tf.reshape(result, target_shape)
return result
# return tf.Print(result, [tf.reduce_max(switch), tf.reduce_max(shortlist),
# tf.reduce_max(alignments)], summarize=10)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
# print(input_shape)
if input_shape[-1].value is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units+self.vocab_size if not self.context_as_set else self.vocab_size+self.max_copy_size)
# this for older tf versions
def _compute_output_shape(self, input_shape):
return self.compute_output_shape(input_shape)
def dense(
inputs, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the densely-connected layer.
This layer implements the operation:
  `outputs = activation(inputs * kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Note: if the `inputs` tensor has a rank greater than 2, then it is
flattened prior to the initial matrix multiply by `kernel`.
Arguments:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = CopyLayer(units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse)
print("inside copy layer, yaaay!")
sys.exit(0)
return layer.apply(inputs)
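# Illustrative note (not part of the original file): the layer above emits one
# distribution of length vocab_size + max_copy_size. The first vocab_size
# entries are the generator ("shortlist") probabilities scaled by (1 - switch);
# the remaining entries are copy probabilities over source positions scaled by
# switch. A hypothetical call through the functional wrapper above (argument
# names are assumptions, not taken from the original code):
#
#   output_dist = dense(decoder_outputs, units=max_copy_size, activation=None)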
|
nso/test_api.py | caputomarcos/network-programmability-stream | 120 | 12783544 |
#!/usr/bin/env python3
import ncs
from ncs.maagic import Root
from typing import Iterator, Tuple
NSO_USERNAME = 'admin'
NSO_CONTEXT = 'python'
# NSO_GROUPS = ['ncsadmin']
def get_device_name(nso: Root) -> Iterator[Tuple[str, str]]:
for device in nso.devices.device:
# print device.config.ios__cached_show.version.model
breakpoint()
yield (device.name, device.ios__cached_show.version.model)
def main() -> None:
with ncs.maapi.single_read_trans(NSO_USERNAME, NSO_CONTEXT) as transaction:
nso = ncs.maagic.get_root(transaction)
devices = nso.devices.device
# print(devices["isp1-pe1"].config.ios__ntp.server.peer_list)
# breakpoint()
for device in devices:
device.config.ios__ntp.server.peer_list.append({"name": "192.168.3.11"})
# device.config.ios__ntp.server.ip = "192.168.3.11"
# print(device.name)
# print(device.config.ios__ntp)
# print(device.config.ios__cached_show.version)
transaction.apply()
if __name__ == '__main__':
main()
|
voctogui/voctogui.py | 0xflotus/voctomix | 521 | 12783558 | #!/usr/bin/env python3
import gi
# import GStreamer and GLib-Helper classes
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
gi.require_version('GstVideo', '1.0')
gi.require_version('GstNet', '1.0')
from gi.repository import Gtk, Gdk, Gst, GstVideo
import signal
import logging
import sys
import os
sys.path.insert(0, '.')
from vocto.debug import gst_log_messages
# check min-version
minGst = (1, 5)
minPy = (3, 0)
Gst.init([])
if Gst.version() < minGst:
raise Exception('GStreamer version', Gst.version(),
'is too old, at least', minGst, 'is required')
if sys.version_info < minPy:
raise Exception('Python version', sys.version_info,
'is too old, at least', minPy, 'is required')
Gdk.init([])
Gtk.init([])
# select Adwaita:Dark theme
settings = Gtk.Settings.get_default()
settings.set_property("gtk-theme-name", "Adwaita")
settings.set_property("gtk-application-prefer-dark-theme", True) # if you want use dark theme, set second arg to True
# main class
class Voctogui(object):
def __init__(self):
self.log = logging.getLogger('Voctogui')
from lib.args import Args
from lib.ui import Ui
# Load UI file
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ui/voctogui.ui')
self.log.info('Loading ui-file from file %s', path)
if os.path.isfile(path):
self.ui = Ui(path)
else:
raise Exception("Can't find any .ui-Files to use in {}".format(path))
#
# search for a .css style sheet file and load it
#
css_provider = Gtk.CssProvider()
context = Gtk.StyleContext()
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ui/voctogui.css')
self.log.info('Loading css-file from file %s', path)
if os.path.isfile(path):
css_provider.load_from_path(path)
else:
raise Exception("Can't find .css file '{}'".format(path))
context.add_provider_for_screen(
Gdk.Screen.get_default(),
css_provider,
Gtk.STYLE_PROVIDER_PRIORITY_USER
)
self.ui.setup()
def run(self):
self.log.info('Setting UI visible')
self.ui.show()
try:
self.log.info('Running.')
Gtk.main()
self.log.info('Connection lost. Exiting.')
except KeyboardInterrupt:
self.log.info('Terminated via Ctrl-C')
def quit(self):
self.log.info('Quitting.')
Gtk.main_quit()
# run mainclass
def main():
# parse command-line args
from lib import args
args.parse()
from lib.args import Args
docolor = (Args.color == 'always') \
or (Args.color == 'auto' and sys.stderr.isatty())
from lib.loghandler import LogHandler
handler = LogHandler(docolor, Args.timestamp)
logging.root.addHandler(handler)
levels = { 3 : logging.DEBUG, 2 : logging.INFO, 1 : logging.WARNING, 0 : logging.ERROR }
logging.root.setLevel(levels[Args.verbose])
gst_levels = { 3 : Gst.DebugLevel.DEBUG, 2 : Gst.DebugLevel.INFO, 1 : Gst.DebugLevel.WARNING, 0 : Gst.DebugLevel.ERROR }
gst_log_messages(gst_levels[Args.gstreamer_log])
# make killable by ctrl-c
logging.debug('setting SIGINT handler')
signal.signal(signal.SIGINT, signal.SIG_DFL)
logging.info('Python Version: %s', sys.version_info)
logging.info('GStreamer Version: %s', Gst.version())
logging.debug('loading Config')
from lib import config
config.load()
from lib.config import Config
    # establish a synchronous connection to the server
import lib.connection as Connection
Connection.establish(Config.getHost())
# fetch config from server
Config.fetchServerConfig()
# Warn when connecting to a non-local core without preview-encoders enabled
# The list-comparison is not complete
# (one could use a local hostname or the local system ip),
# but it's only here to warn that one might be making a mistake
localhosts = ['::1',
'127.0.0.1',
'localhost']
if not Config.getPreviewsEnabled() and Config.getHost() not in localhosts:
logging.warning(
'Connecting to `%s` (which looks like a remote host) '
        'might not work without enabling the preview encoders '
'(set `[previews] enabled=true` on the core) or it might saturate '
'your ethernet link between the two machines.',
Config.getHost()
)
import lib.connection as Connection
import lib.clock as ClockManager
# obtain network-clock
ClockManager.obtainClock(Connection.ip)
# switch connection to nonblocking, event-driven mode
Connection.enterNonblockingMode()
# init main-class and main-loop
    # (this binds all event-handlers on the Connection)
logging.debug('initializing Voctogui')
voctogui = Voctogui()
# start the Mainloop and show the Window
logging.debug('running Voctogui')
voctogui.run()
if __name__ == '__main__':
try:
main()
except RuntimeError as e:
logging.error(str(e))
sys.exit(1)
|
src/genie/libs/parser/iosxr/tests/ShowSpanningTreePvrsTag/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 12783626 |
expected_output = {
'pvrstag': {
'foo': {
'domain': 'foo',
'interfaces': {
'GigabitEthernet0/0/0/0': {
'interface': 'GigabitEthernet0/0/0/0',
'vlans': {
'5': {
'preempt_delay': True,
'preempt_delay_state': 'Sending startup BPDU until 13:38:03',
'sub_interface': 'GigabitEthernet0/0/0/0.5',
'sub_interface_state': 'Up',
'max_age': 20,
'root_priority': 0,
'root_bridge': '0000.0000.0000',
'root_cost': 1,
'bridge_priority': 32768,
'bridge_id': '0255.1dff.3c70',
'port_priority': 128,
'port_id': 1,
'hello_time': 2,
'active': True,
'counters': {
'bdpu_sent': 6,
'topology_changes': 0,
},
},
},
},
'GigabitEthernet0/0/0/1': {
'interface': 'GigabitEthernet0/0/0/1',
'vlans': {
'5': {
'preempt_delay': True,
'preempt_delay_state': 'Sending standard BPDU',
'sub_interface': 'GigabitEthernet0/0/0/1.5',
'sub_interface_state': 'Up',
'max_age': 20,
'root_priority': 0,
'root_bridge': '0000.0000.0000',
'root_cost': 0,
'bridge_priority': 32768,
'bridge_id': '021a.9eff.5645',
'port_priority': 128,
'port_id': 1,
'hello_time': 2,
'active': True,
'counters': {
'bdpu_sent': 7,
'topology_changes': 0,
},
},
},
},
},
},
},
}
|
anuga/file_conversion/sts2sww_mesh.py | GeoscienceAustralia/anuga_core | 136 | 12783633 | from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
import os
import numpy as num
from anuga.file.netcdf import NetCDFFile
import pylab as P
import anuga
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.shallow_water.boundaries import Reflective_boundary
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.shallow_water.forcing import *
from anuga.utilities.numerical_tools import ensure_numeric
from anuga.file.sww import Write_sww
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, \
netcdf_float
def sts2sww_mesh(basename_in, basename_out=None,
spatial_thinning=1, verbose=False):
from anuga.mesh_engine.mesh_engine import NoTrianglesError
from anuga.pmesh.mesh import Mesh
if verbose:
print("Starting sts2sww_mesh")
mean_stage=0.
zscale=1.
    if basename_in[-4:] == '.sts':
stsname = basename_in
else:
stsname = basename_in + '.sts'
if verbose: print("Reading sts NetCDF file: %s" %stsname)
infile = NetCDFFile(stsname, netcdf_mode_r)
cellsize = infile.cellsize
ncols = infile.ncols
nrows = infile.nrows
no_data = infile.no_data
refzone = infile.zone
x_origin = infile.xllcorner
y_origin = infile.yllcorner
origin = num.array([x_origin, y_origin])
x = infile.variables['x'][:]
y = infile.variables['y'][:]
times = infile.variables['time'][:]
wind_speed_full = infile.variables['wind_speed'][:]
wind_angle_full = infile.variables['wind_angle'][:]
pressure_full = infile.variables['barometric_pressure'][:]
infile.close()
number_of_points = nrows*ncols
points_utm = num.zeros((number_of_points,2),num.float)
points_utm[:,0]=x+x_origin
points_utm[:,1]=y+y_origin
thinned_indices=[]
for i in range(number_of_points):
if (old_div(i,ncols)==0 or old_div(i,ncols)==ncols-1 or (old_div(i,ncols))%(spatial_thinning)==0):
if ( i%(spatial_thinning)==0 or i%nrows==0 or i%nrows==nrows-1 ):
thinned_indices.append(i)
#Spatial thinning
points_utm=points_utm[thinned_indices]
number_of_points = points_utm.shape[0]
number_of_timesteps = wind_speed_full.shape[0]
wind_speed = num.empty((number_of_timesteps,number_of_points),dtype=float)
wind_angle = num.empty((number_of_timesteps,number_of_points),dtype=float)
barometric_pressure = num.empty((number_of_timesteps,number_of_points),dtype=float)
if verbose:
print("Total number of points: ", nrows*ncols)
print("Number of thinned points: ", number_of_points)
for i in range(number_of_timesteps):
wind_speed[i] = wind_speed_full[i,thinned_indices]
wind_angle[i] = wind_angle_full[i,thinned_indices]
barometric_pressure[i] = pressure_full[i,thinned_indices]
#P.plot(points_utm[:,0],points_utm[:,1],'ro')
#P.show()
if verbose:
print("Generating sww triangulation of gems data")
mesh = Mesh()
mesh.add_vertices(points_utm)
mesh.auto_segment(smooth_indents=True, expand_pinch=True)
mesh.auto_segment(mesh.shape.get_alpha() * 1.1)
try:
mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
except NoTrianglesError:
# This is a bit of a hack, going in and changing the data structure.
mesh.holes = []
mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
mesh_dic = mesh.Mesh2MeshList()
points_utm=ensure_numeric(points_utm)
assert num.alltrue(ensure_numeric(mesh_dic['generatedpointlist'])
== ensure_numeric(points_utm))
volumes = mesh_dic['generatedtrianglelist']
# Write sww intro and grid stuff.
    if basename_out is not None and basename_out[-4:] == '.sww':
swwname = basename_out
else:
swwname = basename_in + '.sww'
    if verbose: print('Output to %s' % swwname)
if verbose:
print("Writing sww wind and pressure field file")
outfile = NetCDFFile(swwname, netcdf_mode_w)
sww = Write_sww([], ['wind_speed','wind_angle','barometric_pressure'])
sww.store_header(outfile, times, len(volumes), len(points_utm),
verbose=verbose, sww_precision='d')
outfile.mean_stage = mean_stage
outfile.zscale = zscale
sww.store_triangulation(outfile, points_utm, volumes,
refzone,
new_origin=origin, #check effect of this line
verbose=verbose)
if verbose:
print('Converting quantities')
# Read in a time slice from the sts file and write it to the SWW file
#print wind_angle[0,:10]
for i in range(len(times)):
sww.store_quantities(outfile,
slice_index=i,
verbose=verbose,
wind_speed=wind_speed[i,:],
wind_angle=wind_angle[i,:],
barometric_pressure=barometric_pressure[i,:],
sww_precision=num.float)
if verbose:
sww.verbose_quantities(outfile)
outfile.close()
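# Illustrative usage sketch (not part of the original file). The base name is a
# placeholder; spatial_thinning=4 keeps roughly every fourth grid point.
#
#   sts2sww_mesh('wind_and_pressure_field', spatial_thinning=4, verbose=True)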
|
hs_core/tests/api/native/test_hs_requests.py | tommac7/hydroshare | 178 | 12783690 |
from django.test import TestCase
from hs_core.hydroshare import hs_requests
from django.conf import settings
class TestRewrite(TestCase):
""" Test local rewriting that bypasses firewalls and hits local nginx server """
def setUp(self):
self.prod_fqdn = getattr(settings, "PROD_FQDN_OR_IP", "www.hydroshare.org")
self.fqdn = getattr(settings, "FQDN_OR_IP", "www.hydroshare.org")
self.nginx_ip = hs_requests.get_nginx_ip()
def test_localize_outer(self):
""" rewrite requests to outer host"""
self.assertEqual(hs_requests.localize_url("https://{}/foo/bar/".format(self.fqdn)),
"https://{}/foo/bar/".format(self.nginx_ip))
self.assertEqual(hs_requests.localize_url("http://{}/foo/bar/".format(self.fqdn)),
"http://{}/foo/bar/".format(self.nginx_ip))
def test_localize_www(self):
""" rewrite requests to production host"""
self.assertEqual(hs_requests.localize_url("https://{}/foo/bar/".format(self.prod_fqdn)),
"https://{}/foo/bar/".format(self.nginx_ip))
self.assertEqual(hs_requests.localize_url("http://{}/foo/bar/".format(self.prod_fqdn)),
"http://{}/foo/bar/".format(self.nginx_ip))
def test_do_not_localize_others(self):
""" don't rewrite other host addresses """
self.assertEqual(hs_requests.localize_url("https://{}/foo/bar/".format("www.foo.com")),
"https://{}/foo/bar/".format("www.foo.com"))
self.assertEqual(hs_requests.localize_url("http://{}/foo/bar/".format("www.foo.com")),
"http://{}/foo/bar/".format("www.foo.com"))
|
i3pystatus/clock.py | fkusei/i3pystatus | 413 | 12783696 | import errno
import os
import locale
from datetime import datetime
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
from i3pystatus import IntervalModule
class Clock(IntervalModule):
"""
This class shows a clock.
.. note:: Optionally requires `pytz` for time zone data when using time
zones other than local time.
Format can be passed in four different ways:
- single string, no timezone, just the strftime-format
- one two-tuple, first is the format, second the timezone
- list of strings - no timezones
- list of two tuples, first is the format, second is timezone
Use mousewheel to cycle between formats.
For complete time format specification see:
::
man strftime
All available timezones are located in directory:
::
/usr/share/zoneinfo/
.. rubric:: Format examples
::
# one format, local timezone
format = '%a %b %-d %b %X'
# multiple formats, local timezone
format = [ '%a %b %-d %b %X', '%X' ]
# one format, specified timezone
format = ('%a %b %-d %b %X', 'Europe/Bratislava')
# multiple formats, specified timezones
format = [ ('%a %b %-d %b %X', 'America/New_York'), ('%X', 'Etc/GMT+9') ]
"""
settings = (
("format", "`None` means to use the default, locale-dependent format."),
("color", "RGB hexadecimal code color specifier, default to #ffffff"),
)
format = None
color = "#ffffff"
interval = 1
on_upscroll = ["scroll_format", 1]
on_downscroll = ["scroll_format", -1]
def init(self):
env_lang = os.environ.get('LC_TIME', None)
if env_lang is None:
env_lang = os.environ.get('LANG', None)
if env_lang is not None:
if env_lang.find('.') != -1:
lang = tuple(env_lang.split('.', 1))
else:
lang = (env_lang, None)
else:
lang = (None, None)
if lang != locale.getlocale(locale.LC_TIME):
# affects language of *.strftime() in whole program
locale.setlocale(locale.LC_TIME, lang)
if self.format is None:
if lang[0] == 'en_US':
# MDY format - United States of America
self.format = ["%a %b %-d %X"]
else:
# DMY format - almost all other countries
self.format = ["%a %-d %b %X"]
elif isinstance(self.format, str) or isinstance(self.format, tuple):
self.format = [self.format]
self.system_tz = self._get_system_tz()
self.format = [self._expand_format(fmt) for fmt in self.format]
self.current_format_id = 0
def _expand_format(self, fmt):
if isinstance(fmt, tuple):
if len(fmt) == 1:
return (fmt[0], None)
else:
if not HAS_PYTZ:
raise RuntimeError("Need `pytz` for timezone data")
return (fmt[0], pytz.timezone(fmt[1]))
return (fmt, self.system_tz)
def _get_system_tz(self):
'''
Get the system timezone for use when no timezone is explicitly provided
Requires pytz, if not available then no timezone will be set when not
explicitly provided.
'''
if not HAS_PYTZ:
return None
def _etc_localtime():
try:
with open('/etc/localtime', 'rb') as fp:
return pytz.tzfile.build_tzinfo('system', fp)
except OSError as exc:
if exc.errno != errno.ENOENT:
self.logger.error(
'Unable to read from /etc/localtime: %s', exc.strerror
)
except pytz.UnknownTimeZoneError:
self.logger.error(
'/etc/localtime contains unrecognized tzinfo'
)
return None
def _etc_timezone():
try:
with open('/etc/timezone', 'r') as fp:
tzname = fp.read().strip()
return pytz.timezone(tzname)
except OSError as exc:
if exc.errno != errno.ENOENT:
self.logger.error(
'Unable to read from /etc/localtime: %s', exc.strerror
)
except pytz.UnknownTimeZoneError:
self.logger.error(
'/etc/timezone contains unrecognized timezone \'%s\'',
tzname
)
return None
return _etc_localtime() or _etc_timezone()
def run(self):
time = datetime.now(self.format[self.current_format_id][1])
self.output = {
"full_text": time.strftime(self.format[self.current_format_id][0]),
"color": self.color,
"urgent": False,
}
def scroll_format(self, step=1):
self.current_format_id = (self.current_format_id + step) % len(self.format)
|
scripts/announcement.py | vishalbelsare/jina | 15,179 | 12783701 | import re
import sys
meetup_svg = '.github/images/meetup.svg'
readme_md = 'README.md'
conf_py = 'docs/conf.py'
def rm_announce():
# remove all announcement
with open(readme_md) as fp:
_old = fp.read()
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>\g<2>',
_old,
flags=re.DOTALL,
)
with open(readme_md, 'w') as fp:
fp.write(_new)
with open(conf_py) as fp:
_old = fp.read()
_new = re.sub(
r'(# start-announce\s*?\n).*(\n\s*?# end-announce)',
rf'\g<1>\g<2>',
_old,
flags=re.DOTALL,
)
with open(conf_py, 'w') as fp:
fp.write(_new)
if len(sys.argv) < 3:
rm_announce()
else:
text = sys.argv[1]
url = sys.argv[2]
if not text or not url:
rm_announce()
else:
announce_url = f'''
"announcement": \'\'\'
<a href="{url}">{text}</a>
\'\'\',
'''
meetup_svg_url = f'<a href="{url}"><img src="https://github.com/jina-ai/jina/blob/master/{meetup_svg}?raw=true"></a>'
# update meetup_svg
with open(meetup_svg) as fp:
_old = fp.read()
_new = re.sub(r'(<a href=").*(")', rf'\g<1>{url}\g<2>', _old)
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>{text}\g<2>',
_new,
flags=re.DOTALL,
)
with open(meetup_svg, 'w') as fp:
fp.write(_new)
# update readme_md
with open(readme_md) as fp:
_old = fp.read()
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>{meetup_svg_url}\g<2>',
_old,
flags=re.DOTALL,
)
with open(readme_md, 'w') as fp:
fp.write(_new)
# update conf
with open(conf_py) as fp:
_old = fp.read()
_new = re.sub(
r'(# start-announce\s*?\n).*(\n\s*?# end-announce)',
rf'\g<1>{announce_url}\g<2>',
_old,
flags=re.DOTALL,
)
with open(conf_py, 'w') as fp:
fp.write(_new)
|
deps/ts_proto_deps.bzl | heartless-clown/rules_proto | 249 | 12783715 |
"""
GENERATED FILE - DO NOT EDIT (created via @build_stack_rules_proto//cmd/depsgen)
"""
load("@build_bazel_rules_nodejs//:index.bzl", "npm_install", "yarn_install")
def _maybe(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)
def ts_proto_deps():
npm_ts_proto() # via <TOP>
npm_tsc() # via <TOP>
def npm_ts_proto():
_maybe(
npm_install,
name = "npm_ts_proto",
package_json = "@build_stack_rules_proto//plugin/stephenh/ts-proto:package.json",
package_lock_json = "@build_stack_rules_proto//plugin/stephenh/ts-proto:package-lock.json",
symlink_node_modules = False,
)
def npm_tsc():
_maybe(
yarn_install,
name = "npm_tsc",
package_json = "@build_stack_rules_proto//rules/ts:package.json",
yarn_lock = "@build_stack_rules_proto//rules/ts:yarn.lock",
frozen_lockfile = True,
)
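# Illustrative note (not part of the original file): the macro above is normally
# invoked from a consuming WORKSPACE, e.g.
#
#   load("@build_stack_rules_proto//deps:ts_proto_deps.bzl", "ts_proto_deps")
#   ts_proto_deps()
#
# The load label assumes the file keeps its deps/ts_proto_deps.bzl path.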
|
languages/python/sqlalchemy-oso/tests/test_partial.py | connec/oso | 2,167 | 12783747 |
from polar import Variable
from sqlalchemy.orm import Session
from sqlalchemy_oso.partial import partial_to_filter
from .models import User
def test_partial_to_query_filter(oso, engine):
oso.load_str('ok(_: User{username:"gwen"});')
session = Session(bind=engine)
gwen = User(username="gwen")
session.add(gwen)
steve = User(username="steve")
session.add(steve)
result = oso.query_rule("ok", Variable("actor"), accept_expression=True)
partial = next(result)["bindings"]["actor"]
filter = partial_to_filter(partial, session, User, oso.get_class)
q = list(session.query(User).filter(filter))
assert q == [gwen]
|
src/mesh_edit.py | mbirkholzupc/hmd | 259 | 12783757 |
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import lsqr, cg, eigsh
import matplotlib.pyplot as plt
import scipy.io as sio
import pickle
import sparseqr
import time
WEIGHT = 1.0
##############################################################
## Laplacian Mesh Editing ##
##############################################################
#Purpose: To return a sparse matrix representing a Laplacian matrix with
#the graph Laplacian (D - A) in the upper square part and anchors as the
#lower rows
#Inputs: mesh (polygon mesh object), anchorsIdx (indices of the anchor points)
#Returns: L (An (N+K) x N sparse matrix, where N is the number of vertices
#and K is the number of anchors)
def getLaplacianMatrixUmbrella(mesh, anchorsIdx):
n = mesh.n_vertices() # N x 3
k = anchorsIdx.shape[0]
I = []
J = []
V = []
vv_idx_list = list(mesh.vertex_vertex_indices())
# Build sparse Laplacian Matrix coordinates and values
for i in range(n):
        idx_nbr = list(filter(lambda x: x != -1, vv_idx_list[i]))  # list() so len() and the list concatenation below also work on Python 3
num_nbr = len(idx_nbr)
I = I + ([i] * (num_nbr + 1)) # repeated row
J = J + idx_nbr + [i] # column indices and this row
V = V + ([-1] * num_nbr) + [num_nbr] # negative weights and row degree
# augment Laplacian matrix with anchor weights
for i in range(k):
I = I + [n + i]
J = J + [anchorsIdx[i]]
V = V + [WEIGHT] # default anchor weight
L = sparse.coo_matrix((V, (I, J)), shape=(n + k, n)).tocsr()
return L
# Modified for openmesh.mesh, Note that only suitable for watertight model
#Purpose: To return a sparse matrix representing a laplacian matrix with
#cotangent weights in the upper square part and anchors as the lower rows
#Inputs: mesh (polygon mesh object), anchorsIdx (indices of the anchor points)
#Returns: L (An (N+K) x N sparse matrix, where N is the number of vertices
#and K is the number of anchors)
def getLaplacianMatrixCotangent(mesh, anchorsIdx):
n = mesh.n_vertices() # N x 3
k = anchorsIdx.shape[0]
I = []
J = []
V = []
#l = mesh.vertex_vertex_indices()
for v in mesh.vertices():
weights = []
p_this = mesh.point(v)
p_nbrs = []
id_this = v.idx()
id_nbrs = []
for vv in mesh.vv(v):
p_nbrs.append(mesh.point(vv))
id_nbrs.append(vv.idx())
num_nbr = len(id_nbrs)
for i in range(num_nbr):
u = p_this - p_nbrs[(i+num_nbr-1)%num_nbr]
v = p_nbrs[(i+num_nbr)%num_nbr]- p_nbrs[(i+num_nbr-1)%num_nbr]
cotangent_1 = (np.dot(u, v)
/np.sqrt(np.sum(np.square(np.cross(u, v)))))
u = p_this - p_nbrs[(i+num_nbr+1)%num_nbr]
v = p_nbrs[(i+num_nbr)%num_nbr]- p_nbrs[(i+num_nbr+1)%num_nbr]
cotangent_2 = (np.dot(u, v)
/np.sqrt(np.sum(np.square(np.cross(u, v)))))
weights.append(-0.5 * (cotangent_1 + cotangent_2)) # cotangent weights
I = I + ([id_this] * (num_nbr + 1)) # repeated row
J = J + id_nbrs + [id_this] # column indices and this row
V = V + weights + [(-1 * np.sum(weights))] # n negative weights and row vertex sum
# augment Laplacian matrix with anchor weights
for i in range(k):
I = I + [n + i]
J = J + [anchorsIdx[i]]
V = V + [WEIGHT] # default anchor weight
L = sparse.coo_matrix((V, (I, J)), shape=(n + k, n)).tocsr()
return L
#Purpose: Given a mesh, to perform Laplacian mesh editing by solving the system
#of delta coordinates and anchors in the least squared sense
#Inputs: mesh (polygon mesh object), anchors (a K x 3 numpy array of anchor
#coordinates), anchorsIdx (a parallel array of the indices of the anchors)
#Returns: Nothing (should update mesh.VPos)
def solveLaplacianMesh(mesh, anchors, anchorsIdx, cotangent=True):
n = mesh.n_vertices()
k = anchorsIdx.shape[0]
operator = (getLaplacianMatrixUmbrella, getLaplacianMatrixCotangent)
L = operator[1](mesh, anchorsIdx) if cotangent else operator[0](mesh, anchorsIdx)
delta = np.array(L.dot(mesh.points()))
# augment delta solution matrix with weighted anchors
for i in range(k):
delta[n + i, :] = WEIGHT * anchors[i, :]
# update mesh vertices with least-squares solution
for i in range(3):
#mesh.points()[:, i] = lsqr(L, delta[:, i])[0]
mesh.points()[:, i] = sparseqr.solve(L, delta[:, i], tolerance = 1e-8)
return mesh
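# Illustrative usage sketch (not part of the original file): pin a few vertices
# of an openmesh TriMesh to displaced positions and let the Laplacian system
# pull the remaining vertices along. The anchor indices and offset are placeholders.
def _example_laplacian_edit(mesh, anchor_indices, offset=(0.0, 0.0, 0.1)):
    anchor_indices = np.asarray(anchor_indices)
    # anchors = current positions of the chosen vertices, shifted by the offset
    anchors = mesh.points()[anchor_indices] + np.array(offset)
    return solveLaplacianMesh(mesh, anchors, anchor_indices, cotangent=True)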
##############################################################
## High Speed Laplacian Mesh Editing ##
##############################################################
# using umbrella weights for higher speed
class fast_deform():
def __init__(self,
f_ijv_pkl = '../predef/dsa_IJV.pkl',
f_achr_pkl = '../predef/dsa_achr.pkl',
weight = 1.0,
):
self.weight = weight
with open (f_ijv_pkl, 'rb') as fp:
dic_IJV = pickle.load(fp)
I = dic_IJV['I']
J = dic_IJV['J']
V = dic_IJV['V']
self.n = dic_IJV['num_vert']
with open (f_achr_pkl, 'rb') as fp:
dic_achr = pickle.load(fp)
#achr_id = dic_achr['achr_id']
self.k = dic_achr['achr_num']
if weight != 1.0:
num_V = len(V)
for i in range(num_V-self.k,num_V):
V[i] = V[i] * self.weight
self.L = sparse.coo_matrix((V, (I, J)), shape=(self.n + self.k, self.n)).tocsr()
def deform(self, mesh, anchors):
#t_start = time.time()
delta = np.array(self.L.dot(mesh.points()))
#t_end = time.time()
#print("delta computation time is %.5f seconds." % (t_end - t_start))
#t_start = time.time()
# augment delta solution matrix with weighted anchors
for i in range(self.k):
delta[self.n + i, :] = self.weight * anchors[i, :]
#t_end = time.time()
#print("give anchor value computation time is %.5f seconds." % (t_end - t_start))
#t_start = time.time()
# update mesh vertices with least-squares solution
for i in range(3):
mesh.points()[:, i] = sparseqr.solve(self.L, delta[:, i], tolerance = 1e-8)
#mesh.points()[:, i] = lsqr(self.L, delta[:, i])[0]
#t_end = time.time()
#print("sparse lsqr time is %.5f seconds." % (t_end - t_start))
return mesh
##############################################################
## High Speed Laplacian Mesh Editing for DSA ##
##############################################################
class fast_deform_dsa():
def __init__(self,
f_ijv_pkl = '../predef/dsa_IJV.pkl',
f_achr_pkl = '../predef/dsa_achr.pkl',
weight = 1.0,
):
self.weight = weight
with open (f_ijv_pkl, 'rb') as fp:
dic_IJV = pickle.load(fp)
self.I = dic_IJV['I']
self.J = dic_IJV['J']
self.V = dic_IJV['V']
self.n = dic_IJV['num_vert']
with open (f_achr_pkl, 'rb') as fp:
dic_achr = pickle.load(fp)
#achr_id = dic_achr['achr_id']
self.k = dic_achr['achr_num']
self.num_V = len(self.V)
if self.weight != 1.0:
for i in range(self.num_V-self.k, self.num_V):
self.V[i] = self.V[i] * self.weight
# for inactive index, zero means inactive, non-zeros means active
def deform(self, verts, achr_verts, active_index = []):
if active_index != []:
for i in range(len(active_index)):
if active_index[i] == 0:
self.V[self.num_V-self.k+i] = 0
self.L = sparse.coo_matrix((self.V, (self.I, self.J)),
shape=(self.n + self.k, self.n)).tocsr()
delta = np.array(self.L.dot(verts))
# augment delta solution matrix with weighted anchors
for i in range(self.k):
delta[self.n + i, :] = self.weight * achr_verts[i, :]
# update mesh vertices with least-squares solution
deformed_verts = np.zeros(verts.shape)
for i in range(3):
deformed_verts[:, i] = sparseqr.solve(self.L,
delta[:, i],
tolerance = 1e-8
)
return deformed_verts
##############################################################
## High Speed Laplacian Mesh Editing for Joint Adapt ##
##############################################################
class fast_deform_dja():
def __init__(self,
f_ijv_pkl = '../predef/dja_IJV.pkl',
f_achr_pkl = '../predef/dja_achr.pkl',
weight = 1.0,
):
self.weight = weight
with open (f_ijv_pkl, 'rb') as fp:
dic_IJV = pickle.load(fp)
self.I = dic_IJV['I']
self.J = dic_IJV['J']
self.V = dic_IJV['V']
self.n = dic_IJV['num_vert']
with open (f_achr_pkl, 'rb') as fp:
dic_achr = pickle.load(fp)
#achr_id = dic_achr['achr_id']
self.k = dic_achr['achr_num']
self.num_V = len(self.V)
if self.weight != 1.0:
for i in range(self.num_V-self.k, self.num_V):
self.V[i] = self.V[i] * self.weight
# for inactive index, zero means inactive, non-zeros means active
def deform(self, verts, achr_verts):
self.L = sparse.coo_matrix((self.V, (self.I, self.J)),
shape=(self.n + self.k, self.n)).tocsr()
delta = np.array(self.L.dot(verts))
# augment delta solution matrix with weighted anchors
for i in range(self.k):
delta[self.n + i, :] = self.weight * achr_verts[i, :]
# update mesh vertices with least-squares solution
deformed_verts = np.zeros(verts.shape)
for i in range(3):
deformed_verts[:, i] = sparseqr.solve(self.L,
delta[:, i],
tolerance = 1e-8
)
return deformed_verts
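# Illustrative usage sketch (not part of the original file): the precomputed
# deformers above are constructed once and then reused per frame. The pickle
# paths baked into the constructors are assumed to exist under ../predef/.
def _example_joint_adapt(verts, joint_anchor_verts):
    deformer = fast_deform_dja(weight=10.0)
    return deformer.deform(verts, joint_anchor_verts)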
|
models/nlp/electra/utils.py | kevinyang8/deep-learning-models | 129 | 12783790 |
from colorama import Fore, Style
def colorize(token: str, color: str) -> str:
return f"{color}{token}{Style.RESET_ALL}"
def colorize_gen(tokenizer, true_ids, gen_ids, mask):
gen_ids = gen_ids.numpy()
true_ids = true_ids.numpy()
mask = mask.numpy()
tokens = tokenizer.convert_ids_to_tokens(gen_ids)
styled_tokens = tokens.copy()
for i in range(len(tokens)):
if mask[i]:
styled_tokens[i] = colorize(
tokens[i], color=Fore.GREEN if (true_ids[i] == gen_ids[i]) else Fore.RED
)
else:
styled_tokens[i] = tokens[i]
return " ".join(styled_tokens)
def colorize_dis(tokenizer, gen_ids, dis_preds):
gen_ids = gen_ids.numpy()
dis_preds = dis_preds.numpy()
tokens = tokenizer.convert_ids_to_tokens(gen_ids)
styled_tokens = tokens.copy()
for i in range(len(tokens)):
if dis_preds[i]:
styled_tokens[i] = colorize(tokens[i], color=Fore.YELLOW)
else:
styled_tokens[i] = tokens[i]
return " ".join(styled_tokens)
|
stable_nalu/layer/gumbel_nalu.py | wlm2019/Neural-Arithmetic-Units | 147 | 12783801 |
from .gumbel_nac import GumbelNACLayer
from .gumbel_mnac import GumbelMNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class GumbelNALULayer(AbstractNALULayer):
"""Implements the Gumbel NALU (Neural Arithmetic Logic Unit)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, **kwargs):
super().__init__(GumbelNACLayer, GumbelMNACLayer, in_features, out_features, **kwargs)
class GumbelNALUCell(AbstractRecurrentCell):
"""Implements the Gumbel NALU (Neural Arithmetic Logic Unit) as a recurrent cell
Arguments:
input_size: number of ingoing features
hidden_size: number of outgoing features
"""
def __init__(self, input_size, hidden_size, **kwargs):
super().__init__(GumbelNALULayer, GumbelMNACLayer, input_size, hidden_size, **kwargs)
|
Basset/pretrained_model_reloaded_th.py | Luma-1994/lama | 137 | 12783812 |
import torch
import torch.nn as nn
from functools import reduce
from torch.autograd import Variable
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
return list(map(self.lambda_func,self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
return reduce(self.lambda_func,self.forward_prepare(input))
def get_model(load_weights = True):
# alphabet seems to be fine:
"""
https://github.com/davek44/Basset/tree/master/src/dna_io.py#L145-L148
seq = seq.replace('A','0')
seq = seq.replace('C','1')
seq = seq.replace('G','2')
seq = seq.replace('T','3')
"""
pretrained_model_reloaded_th = nn.Sequential( # Sequential,
nn.Conv2d(4,300,(19, 1)),
nn.BatchNorm2d(300),
nn.ReLU(),
nn.MaxPool2d((3, 1),(3, 1)),
nn.Conv2d(300,200,(11, 1)),
nn.BatchNorm2d(200),
nn.ReLU(),
nn.MaxPool2d((4, 1),(4, 1)),
nn.Conv2d(200,200,(7, 1)),
nn.BatchNorm2d(200),
nn.ReLU(),
nn.MaxPool2d((4, 1),(4, 1)),
Lambda(lambda x: x.view(x.size(0),-1)), # Reshape,
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(2000,1000)), # Linear,
nn.BatchNorm1d(1000,1e-05,0.1,True),#BatchNorm1d,
nn.ReLU(),
nn.Dropout(0.3),
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(1000,1000)), # Linear,
nn.BatchNorm1d(1000,1e-05,0.1,True),#BatchNorm1d,
nn.ReLU(),
nn.Dropout(0.3),
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(1000,164)), # Linear,
nn.Sigmoid(),
)
if load_weights:
sd = torch.load('model_files/pretrained_model_reloaded_th.pth')
pretrained_model_reloaded_th.load_state_dict(sd)
return pretrained_model_reloaded_th
model = get_model(load_weights = False)
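# Illustrative sketch (not part of the original file): Basset consumes one-hot
# encoded 600 bp sequences shaped (batch, 4, 600, 1) and returns 164 per-cell-type
# accessibility probabilities; the random input below is only a shape check.
if __name__ == "__main__":
    model.eval()  # put BatchNorm/Dropout into inference mode
    dummy_dna = torch.rand(2, 4, 600, 1)
    with torch.no_grad():
        preds = model(dummy_dna)
    print(preds.shape)  # torch.Size([2, 164])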
|
tests/codec.py | axsguard/sstp-server | 223 | 12783822 |
#!/usr/bin/env python3
import os
import timeit
from sstpd.codec import escape, PppDecoder
decoder = PppDecoder()
def get_enscaped():
frames = [os.urandom(1500) for i in range(2)]
return b''.join([escape(f) for f in frames])
def prof_unescape():
return timeit.timeit('decoder.unescape(data)',
setup='data = get_enscaped()',
globals=globals())
def codec_test():
frame = os.urandom(1500)
escaped = escape(frame)
print("escaped: %d bytes " % len(escaped))
unescaped = PppDecoder().unescape(escaped)
assert len(unescaped) == 1
print("unescaped: %d bytes" % len(unescaped[0]))
assert unescaped[0] == frame
def main():
codec_test()
print('Test unescape...')
print('\t%f' % prof_unescape())
if __name__ == '__main__':
main()
|
skmob/utils/tests/test_gislib.py | FilippoSimini/scikit-mobility | 489 | 12783823 | from skmob.utils import gislib
import math
class TestClustering:
def setup_method(self):
self.point_1 = (43.8430139, 10.5079940)
self.point_2 = (43.5442700, 10.3261500)
self.decimal = 43.8430139
self.DMS = (43, 50, 34.85)
def test_get_distance(self):
output = gislib.getDistance(self.point_1, self.point_2)
assert (math.isclose(output, 36.293701213))
support = gislib.getDistanceByHaversine(self.point_1, self.point_2)
assert (math.isclose(support, output))
output = gislib.getDistance(self.point_1, self.point_1)
assert (math.isclose(output, 0))
def test_get_distance_by_haversine(self):
output = gislib.getDistanceByHaversine(self.point_1, self.point_2)
assert (math.isclose(output, 36.293701213))
output = gislib.getDistanceByHaversine(self.point_1, self.point_1)
assert (math.isclose(output, 0))
# def test_decimal_to_DMS(self):
# output = gislib.DecimalToDMS(self.decimal)
# assert (output[0] == 43)
# assert (output[1] == 50)
# assert (math.isclose(output[2], 34.85))
def test_DMS_to_decimal(self):
output = gislib.DMSToDecimal(self.DMS[0], self.DMS[1], self.DMS[2])
assert (math.isclose(output, 43.84301388888))
def test_get_coordinates_for_distance(self):
output = gislib.getCoordinatesForDistance(self.point_1[0], self.point_1[1], 15)
assert (math.isclose(output[0], 0.134989200863))
assert (math.isclose(output[1], 0.187162559305))
# def test_is_within_distance(self):
# assert (gislib.isWithinDistance(self.point_1, self.point_2, 20))
# assert (gislib.isWithinDistance(self.point_1, self.point_2, 40) is False)
|
examples/settings.py | fakegit/googlevoice-1 | 156 | 12783867 | import pprint
from googlevoice import Voice
def run():
voice = Voice()
voice.login()
pprint.pprint(voice.settings)
__name__ == '__main__' and run()
|
examples/1_clap_for_everything.py | InnovativeInventor/pykeybasebot | 117 | 12783869 |
#!/usr/bin/env python3
###################################
# WHAT IS IN THIS EXAMPLE?
#
# This bot listens in one channel and reacts to every text message.
###################################
import asyncio
import logging
import os
import sys
import pykeybasebot.types.chat1 as chat1
from pykeybasebot import Bot
logging.basicConfig(level=logging.DEBUG)
if "win32" in sys.platform:
# Windows specific event-loop policy
asyncio.set_event_loop_policy(
asyncio.WindowsProactorEventLoopPolicy() # type: ignore
)
async def handler(bot, event):
if event.msg.content.type_name != chat1.MessageTypeStrings.TEXT.value:
return
channel = event.msg.channel
msg_id = event.msg.id
await bot.chat.react(channel, msg_id, ":clap:")
listen_options = {"filter-channel": {"name": "yourbot,someoneelse"}}
bot = Bot(username="yourbot", paperkey=os.environ["KEYBASE_PAPERKEY"], handler=handler)
asyncio.run(bot.start(listen_options))
|
pycaption/scc/translator.py | vpaul-dev/pycaption-github-release-notes | 183 | 12783900 | from pycaption.scc.constants import CHARACTERS, SPECIAL_CHARS, EXTENDED_CHARS
ALL_CHARACTERS = {**CHARACTERS, **SPECIAL_CHARS, **EXTENDED_CHARS}
COMMAND_LABELS = {
"9420": "Resume Caption Loading",
"9429": "Resume Direct Captioning",
"9425": "Roll-Up Captions--2 Rows",
"9426": "Roll-Up Captions--3 Rows",
"94a7": "Roll-Up Captions--4 Rows",
"942a": "Text Restart",
"94ab": "Resume Text Display",
"942c": "Erase Displayed Memory",
"94ae": "Erase Non-displayed Memory",
"942f": "End Of Caption",
"9140": "row 01, column 00, with plain white text.",
"91c1": "row 01, column 00, with white underlined text.",
"91c2": "row 01, column 00, with plain green text.",
"9143": "row 01, column 00, with green underlined text.",
"91c4": "row 01, column 00, with plain blue text.",
"9145": "row 01, column 00, with blue underlined text.",
"9146": "row 01, column 00, with plain cyan text.",
"91c7": "row 01, column 00, with cyan underlined text.",
"91c8": "row 01, column 00, with plain red text.",
"9149": "row 01, column 00, with red underlined text.",
"914a": "row 01, column 00, with plain yellow text.",
"91cb": "row 01, column 00, with yellow underlined text.",
"914c": "row 01, column 00, with plain magenta text.",
"91cd": "row 01, column 00, with magenta underlined text.",
"91ce": "row 01, column 00, with white italicized text.",
"914f": "row 01, column 00, with white underlined italicized text.",
"91d0": "row 01, column 00, with plain white text.",
"9151": "row 01, column 00, with white underlined text.",
"9152": "row 01, column 04, with plain white text.",
"91d3": "row 01, column 04, with white underlined text.",
"9154": "row 01, column 08, with plain white text.",
"91d5": "row 01, column 08, with white underlined text.",
"91d6": "row 01, column 12, with plain white text.",
"9157": "row 01, column 12, with white underlined text.",
"9158": "row 01, column 16, with plain white text.",
"91d9": "row 01, column 16, with white underlined text.",
"91da": "row 01, column 20, with plain white text.",
"915b": "row 01, column 20, with white underlined text.",
"91dc": "row 01, column 24, with plain white text.",
"915d": "row 01, column 24, with white underlined text.",
"915e": "row 01, column 28, with plain white text.",
"91df": "row 01, column 28, with white underlined text.",
"91e0": "row 02, column 00, with plain white text.",
"9161": "row 02, column 00, with white underlined text.",
"9162": "row 02, column 00, with plain green text.",
"91e3": "row 02, column 00, with green underlined text.",
"9164": "row 02, column 00, with plain blue text.",
"91e5": "row 02, column 00, with blue underlined text.",
"91e6": "row 02, column 00, with plain cyan text.",
"9167": "row 02, column 00, with cyan underlined text.",
"9168": "row 02, column 00, with plain red text.",
"91e9": "row 02, column 00, with red underlined text.",
"91ea": "row 02, column 00, with plain yellow text.",
"916b": "row 02, column 00, with yellow underlined text.",
"91ec": "row 02, column 00, with plain magenta text.",
"916d": "row 02, column 00, with magenta underlined text.",
"916e": "row 02, column 00, with white italicized text.",
"91ef": "row 02, column 00, with white underlined italicized text.",
"9170": "row 02, column 00, with plain white text.",
"91f1": "row 02, column 00, with white underlined text.",
"91f2": "row 02, column 04, with plain white text.",
"9173": "row 02, column 04, with white underlined text.",
"91f4": "row 02, column 08, with plain white text.",
"9175": "row 02, column 08, with white underlined text.",
"9176": "row 02, column 12, with plain white text.",
"91f7": "row 02, column 12, with white underlined text.",
"91f8": "row 02, column 16, with plain white text.",
"9179": "row 02, column 16, with white underlined text.",
"917a": "row 02, column 20, with plain white text.",
"91fb": "row 02, column 20, with white underlined text.",
"91fc": "row 02, column 24, with plain white text.",
"91fd": "row 02, column 24, with white underlined text.",
"91fe": "row 02, column 28, with plain white text.",
"917f": "row 02, column 28, with white underlined text.",
"9240": "row 03, column 00, with plain white text.",
"92c1": "row 03, column 00, with white underlined text.",
"92c2": "row 03, column 00, with plain green text.",
"9243": "row 03, column 00, with green underlined text.",
"92c4": "row 03, column 00, with plain blue text.",
"9245": "row 03, column 00, with blue underlined text.",
"9246": "row 03, column 00, with plain cyan text.",
"92c7": "row 03, column 00, with cyan underlined text.",
"92c8": "row 03, column 00, with plain red text.",
"9249": "row 03, column 00, with red underlined text.",
"924a": "row 03, column 00, with plain yellow text.",
"92cb": "row 03, column 00, with yellow underlined text.",
"924c": "row 03, column 00, with plain magenta text.",
"92cd": "row 03, column 00, with magenta underlined text.",
"92ce": "row 03, column 00, with white italicized text.",
"924f": "row 03, column 00, with white underlined italicized text.",
"92d0": "row 03, column 00, with plain white text.",
"9251": "row 03, column 00, with white underlined text.",
"9252": "row 03, column 04, with plain white text.",
"92d3": "row 03, column 04, with white underlined text.",
"9254": "row 03, column 08, with plain white text.",
"92d5": "row 03, column 08, with white underlined text.",
"92d6": "row 03, column 12, with plain white text.",
"9257": "row 03, column 12, with white underlined text.",
"9258": "row 03, column 16, with plain white text.",
"92d9": "row 03, column 16, with white underlined text.",
"92da": "row 03, column 20, with plain white text.",
"925b": "row 03, column 20, with white underlined text.",
"92dc": "row 03, column 24, with plain white text.",
"925d": "row 03, column 24, with white underlined text.",
"925e": "row 03, column 28, with plain white text.",
"92df": "row 03, column 28, with white underlined text.",
"92e0": "row 04, column 00, with plain white text.",
"9261": "row 04, column 00, with white underlined text.",
"9262": "row 04, column 00, with plain green text.",
"92e3": "row 04, column 00, with green underlined text.",
"9264": "row 04, column 00, with plain blue text.",
"92e5": "row 04, column 00, with blue underlined text.",
"92e6": "row 04, column 00, with plain cyan text.",
"9267": "row 04, column 00, with cyan underlined text.",
"9268": "row 04, column 00, with plain red text.",
"92e9": "row 04, column 00, with red underlined text.",
"92ea": "row 04, column 00, with plain yellow text.",
"926b": "row 04, column 00, with yellow underlined text.",
"92ec": "row 04, column 00, with plain magenta text.",
"926d": "row 04, column 00, with magenta underlined text.",
"926e": "row 04, column 00, with white italicized text.",
"92ef": "row 04, column 00, with white underlined italicized text.",
"9270": "row 04, column 00, with plain white text.",
"92f1": "row 04, column 00, with white underlined text.",
"92f2": "row 04, column 04, with plain white text.",
"9273": "row 04, column 04, with white underlined text.",
"92f4": "row 04, column 08, with plain white text.",
"9275": "row 04, column 08, with white underlined text.",
"9276": "row 04, column 12, with plain white text.",
"92f7": "row 04, column 12, with white underlined text.",
"92f8": "row 04, column 16, with plain white text.",
"9279": "row 04, column 16, with white underlined text.",
"927a": "row 04, column 20, with plain white text.",
"92fb": "row 04, column 20, with white underlined text.",
"92fc": "row 04, column 24, with plain white text.",
"92fd": "row 04, column 24, with white underlined text.",
"92fe": "row 04, column 28, with plain white text.",
"927f": "row 04, column 28, with white underlined text.",
"1540": "row 05, column 00, with plain white text.",
"15c1": "row 05, column 00, with white underlined text.",
"15c2": "row 05, column 00, with plain green text.",
"1543": "row 05, column 00, with green underlined text.",
"15c4": "row 05, column 00, with plain blue text.",
"1545": "row 05, column 00, with blue underlined text.",
"1546": "row 05, column 00, with plain cyan text.",
"15c7": "row 05, column 00, with cyan underlined text.",
"15c8": "row 05, column 00, with plain red text.",
"1549": "row 05, column 00, with red underlined text.",
"154a": "row 05, column 00, with plain yellow text.",
"15cb": "row 05, column 00, with yellow underlined text.",
"154c": "row 05, column 00, with plain magenta text.",
"15cd": "row 05, column 00, with magenta underlined text.",
"15ce": "row 05, column 00, with white italicized text.",
"154f": "row 05, column 00, with white underlined italicized text.",
"15d0": "row 05, column 00, with plain white text.",
"1551": "row 05, column 00, with white underlined text.",
"1552": "row 05, column 04, with plain white text.",
"15d3": "row 05, column 04, with white underlined text.",
"1554": "row 05, column 08, with plain white text.",
"15d5": "row 05, column 08, with white underlined text.",
"15d6": "row 05, column 12, with plain white text.",
"1557": "row 05, column 12, with white underlined text.",
"1558": "row 05, column 16, with plain white text.",
"15d9": "row 05, column 16, with white underlined text.",
"15da": "row 05, column 20, with plain white text.",
"155b": "row 05, column 20, with white underlined text.",
"15dc": "row 05, column 24, with plain white text.",
"155d": "row 05, column 24, with white underlined text.",
"155e": "row 05, column 28, with plain white text.",
"15df": "row 05, column 28, with white underlined text.",
"15e0": "row 06, column 00, with plain white text.",
"1561": "row 06, column 00, with white underlined text.",
"15462": "row 06, column 00, with plain green text.",
"15e3": "row 06, column 00, with green underlined text.",
"1564": "row 06, column 00, with plain blue text.",
"15e5": "row 06, column 00, with blue underlined text.",
"15e6": "row 06, column 00, with plain cyan text.",
"1567": "row 06, column 00, with cyan underlined text.",
"1568": "row 06, column 00, with plain red text.",
"15e9": "row 06, column 00, with red underlined text.",
"15ea": "row 06, column 00, with plain yellow text.",
"156b": "row 06, column 00, with yellow underlined text.",
"15ec": "row 06, column 00, with plain magenta text.",
"156d": "row 06, column 00, with magenta underlined text.",
"156e": "row 06, column 00, with white italicized text.",
"15ef": "row 06, column 00, with white underlined italicized text.",
"1570": "row 06, column 00, with plain white text.",
"15f1": "row 06, column 00, with white underlined text.",
"15f2": "row 06, column 04, with plain white text.",
"1573": "row 06, column 04, with white underlined text.",
"15f4": "row 06, column 08, with plain white text.",
"1575": "row 06, column 08, with white underlined text.",
"1576": "row 06, column 12, with plain white text.",
"15f7": "row 06, column 12, with white underlined text.",
"15f8": "row 06, column 16, with plain white text.",
"1579": "row 06, column 16, with white underlined text.",
"157a": "row 06, column 20, with plain white text.",
"15fb": "row 06, column 20, with white underlined text.",
"15fc": "row 06, column 24, with plain white text.",
"15fd": "row 06, column 24, with white underlined text.",
"15fe": "row 06, column 28, with plain white text.",
"157f": "row 06, column 28, with white underlined text.",
"1640": "row 07, column 00, with plain white text.",
"16c1": "row 07, column 00, with white underlined text.",
"16c2": "row 07, column 00, with plain green text.",
"1643": "row 07, column 00, with green underlined text.",
"16c4": "row 07, column 00, with plain blue text.",
"1645": "row 07, column 00, with blue underlined text.",
"1646": "row 07, column 00, with plain cyan text.",
"16c7": "row 07, column 00, with cyan underlined text.",
"16c8": "row 07, column 00, with plain red text.",
"1649": "row 07, column 00, with red underlined text.",
"164a": "row 07, column 00, with plain yellow text.",
"16cb": "row 07, column 00, with yellow underlined text.",
"164c": "row 07, column 00, with plain magenta text.",
"16cd": "row 07, column 00, with magenta underlined text.",
"16ce": "row 07, column 00, with white italicized text.",
"164f": "row 07, column 00, with white underlined italicized text.",
"16d0": "row 07, column 00, with plain white text.",
"1651": "row 07, column 00, with white underlined text.",
"1652": "row 07, column 04, with plain white text.",
"16d3": "row 07, column 04, with white underlined text.",
"1654": "row 07, column 08, with plain white text.",
"16d5": "row 07, column 08, with white underlined text.",
"16d6": "row 07, column 12, with plain white text.",
"1657": "row 07, column 12, with white underlined text.",
"1658": "row 07, column 16, with plain white text.",
"16d9": "row 07, column 16, with white underlined text.",
"16da": "row 07, column 20, with plain white text.",
"165b": "row 07, column 20, with white underlined text.",
"16dc": "row 07, column 24, with plain white text.",
"165d": "row 07, column 24, with white underlined text.",
"165e": "row 07, column 28, with plain white text.",
"16df": "row 07, column 28, with white underlined text.",
"16e0": "row 08, column 00, with plain white text.",
"1661": "row 08, column 00, with white underlined text.",
"16462": "row 08, column 00, with plain green text.",
"16e3": "row 08, column 00, with green underlined text.",
"1664": "row 08, column 00, with plain blue text.",
"16e5": "row 08, column 00, with blue underlined text.",
"16e6": "row 08, column 00, with plain cyan text.",
"1667": "row 08, column 00, with cyan underlined text.",
"1668": "row 08, column 00, with plain red text.",
"16e9": "row 08, column 00, with red underlined text.",
"16ea": "row 08, column 00, with plain yellow text.",
"166b": "row 08, column 00, with yellow underlined text.",
"16ec": "row 08, column 00, with plain magenta text.",
"166d": "row 08, column 00, with magenta underlined text.",
"166e": "row 08, column 00, with white italicized text.",
"16ef": "row 08, column 00, with white underlined italicized text.",
"1670": "row 08, column 00, with plain white text.",
"16f1": "row 08, column 00, with white underlined text.",
"16f2": "row 08, column 04, with plain white text.",
"1673": "row 08, column 04, with white underlined text.",
"16f4": "row 08, column 08, with plain white text.",
"1675": "row 08, column 08, with white underlined text.",
"1676": "row 08, column 12, with plain white text.",
"16f7": "row 08, column 12, with white underlined text.",
"16f8": "row 08, column 16, with plain white text.",
"1679": "row 08, column 16, with white underlined text.",
"167a": "row 08, column 20, with plain white text.",
"16fb": "row 08, column 20, with white underlined text.",
"16fc": "row 08, column 24, with plain white text.",
"16fd": "row 08, column 24, with white underlined text.",
"16fe": "row 08, column 28, with plain white text.",
"167f": "row 08, column 28, with white underlined text.",
"9740": "row 09, column 00, with plain white text.",
"97c1": "row 09, column 00, with white underlined text.",
"97c2": "row 09, column 00, with plain green text.",
"9743": "row 09, column 00, with green underlined text.",
"97c4": "row 09, column 00, with plain blue text.",
"9745": "row 09, column 00, with blue underlined text.",
"9746": "row 09, column 00, with plain cyan text.",
"97c7": "row 09, column 00, with cyan underlined text.",
"97c8": "row 09, column 00, with plain red text.",
"9749": "row 09, column 00, with red underlined text.",
"974a": "row 09, column 00, with plain yellow text.",
"97cb": "row 09, column 00, with yellow underlined text.",
"974c": "row 09, column 00, with plain magenta text.",
"97cd": "row 09, column 00, with magenta underlined text.",
"97ce": "row 09, column 00, with white italicized text.",
"974f": "row 09, column 00, with white underlined italicized text.",
"97d0": "row 09, column 00, with plain white text.",
"9751": "row 09, column 00, with white underlined text.",
"9752": "row 09, column 04, with plain white text.",
"97d3": "row 09, column 04, with white underlined text.",
"9754": "row 09, column 08, with plain white text.",
"97d5": "row 09, column 08, with white underlined text.",
"97d6": "row 09, column 12, with plain white text.",
"9757": "row 09, column 12, with white underlined text.",
"9758": "row 09, column 16, with plain white text.",
"97d9": "row 09, column 16, with white underlined text.",
"97da": "row 09, column 20, with plain white text.",
"975b": "row 09, column 20, with white underlined text.",
"97dc": "row 09, column 24, with plain white text.",
"975d": "row 09, column 24, with white underlined text.",
"975e": "row 09, column 28, with plain white text.",
"97df": "row 09, column 28, with white underlined text.",
"97e0": "row 10, column 00, with plain white text.",
"9761": "row 10, column 00, with white underlined text.",
"9762": "row 10, column 00, with plain green text.",
"97e3": "row 10, column 00, with green underlined text.",
"9764": "row 10, column 00, with plain blue text.",
"97e5": "row 10, column 00, with blue underlined text.",
"97e6": "row 10, column 00, with plain cyan text.",
"9767": "row 10, column 00, with cyan underlined text.",
"9768": "row 10, column 00, with plain red text.",
"97e9": "row 10, column 00, with red underlined text.",
"97ea": "row 10, column 00, with plain yellow text.",
"976b": "row 10, column 00, with yellow underlined text.",
"97ec": "row 10, column 00, with plain magenta text.",
"976d": "row 10, column 00, with magenta underlined text.",
"976e": "row 10, column 00, with white italicized text.",
"97ef": "row 10, column 00, with white underlined italicized text.",
"9770": "row 10, column 00, with plain white text.",
"97f1": "row 10, column 00, with white underlined text.",
"97f2": "row 10, column 04, with plain white text.",
"9773": "row 10, column 04, with white underlined text.",
"97f4": "row 10, column 08, with plain white text.",
"9775": "row 10, column 08, with white underlined text.",
"9776": "row 10, column 12, with plain white text.",
"97f7": "row 10, column 12, with white underlined text.",
"97f8": "row 10, column 16, with plain white text.",
"9779": "row 10, column 16, with white underlined text.",
"977a": "row 10, column 20, with plain white text.",
"97fb": "row 10, column 20, with white underlined text.",
"97fc": "row 10, column 24, with plain white text.",
"97fd": "row 10, column 24, with white underlined text.",
"97fe": "row 10, column 28, with plain white text.",
"977f": "row 10, column 28, with white underlined text.",
"1040": "row 11, column 00, with plain white text.",
"10c1": "row 11, column 00, with white underlined text.",
"10c2": "row 11, column 00, with plain green text.",
"1043": "row 11, column 00, with green underlined text.",
"10c4": "row 11, column 00, with plain blue text.",
"1045": "row 11, column 00, with blue underlined text.",
"1046": "row 11, column 00, with plain cyan text.",
"10c7": "row 11, column 00, with cyan underlined text.",
"10c8": "row 11, column 00, with plain red text.",
"1049": "row 11, column 00, with red underlined text.",
"104a": "row 11, column 00, with plain yellow text.",
"10cb": "row 11, column 00, with yellow underlined text.",
"104c": "row 11, column 00, with plain magenta text.",
"10cd": "row 11, column 00, with magenta underlined text.",
"10ce": "row 11, column 00, with white italicized text.",
"104f": "row 11, column 00, with white underlined italicized text.",
"10d0": "row 11, column 00, with plain white text.",
"1051": "row 11, column 00, with white underlined text.",
"1052": "row 11, column 04, with plain white text.",
"10d3": "row 11, column 04, with white underlined text.",
"1054": "row 11, column 08, with plain white text.",
"10d5": "row 11, column 08, with white underlined text.",
"10d6": "row 11, column 12, with plain white text.",
"1057": "row 11, column 12, with white underlined text.",
"1058": "row 11, column 16, with plain white text.",
"10d9": "row 11, column 16, with white underlined text.",
"10da": "row 11, column 20, with plain white text.",
"105b": "row 11, column 20, with white underlined text.",
"10dc": "row 11, column 24, with plain white text.",
"105d": "row 11, column 24, with white underlined text.",
"105e": "row 11, column 28, with plain white text.",
"10df": "row 11, column 28, with white underlined text.",
"1340": "row 12, column 00, with plain white text.",
"13c1": "row 12, column 00, with white underlined text.",
"13c2": "row 12, column 00, with plain green text.",
"1343": "row 12, column 00, with green underlined text.",
"13c4": "row 12, column 00, with plain blue text.",
"1345": "row 12, column 00, with blue underlined text.",
"1346": "row 12, column 00, with plain cyan text.",
"13c7": "row 12, column 00, with cyan underlined text.",
"13c8": "row 12, column 00, with plain red text.",
"1349": "row 12, column 00, with red underlined text.",
"134a": "row 12, column 00, with plain yellow text.",
"13cb": "row 12, column 00, with yellow underlined text.",
"134c": "row 12, column 00, with plain magenta text.",
"13cd": "row 12, column 00, with magenta underlined text.",
"13ce": "row 12, column 00, with white italicized text.",
"134f": "row 12, column 00, with white underlined italicized text.",
"13d0": "row 12, column 00, with plain white text.",
"1351": "row 12, column 00, with white underlined text.",
"1352": "row 12, column 04, with plain white text.",
"13d3": "row 12, column 04, with white underlined text.",
"1354": "row 12, column 08, with plain white text.",
"13d5": "row 12, column 08, with white underlined text.",
"13d6": "row 12, column 12, with plain white text.",
"1357": "row 12, column 12, with white underlined text.",
"1358": "row 12, column 16, with plain white text.",
"13d9": "row 12, column 16, with white underlined text.",
"13da": "row 12, column 20, with plain white text.",
"135b": "row 12, column 20, with white underlined text.",
"13dc": "row 12, column 24, with plain white text.",
"135d": "row 12, column 24, with white underlined text.",
"135e": "row 12, column 28, with plain white text.",
"13df": "row 12, column 28, with white underlined text.",
"13e0": "row 13, column 00, with plain white text.",
"1361": "row 13, column 00, with white underlined text.",
"13462": "row 13, column 00, with plain green text.",
"13e3": "row 13, column 00, with green underlined text.",
"1364": "row 13, column 00, with plain blue text.",
"13e5": "row 13, column 00, with blue underlined text.",
"13e6": "row 13, column 00, with plain cyan text.",
"1367": "row 13, column 00, with cyan underlined text.",
"1368": "row 13, column 00, with plain red text.",
"13e9": "row 13, column 00, with red underlined text.",
"13ea": "row 13, column 00, with plain yellow text.",
"136b": "row 13, column 00, with yellow underlined text.",
"13ec": "row 13, column 00, with plain magenta text.",
"136d": "row 13, column 00, with magenta underlined text.",
"136e": "row 13, column 00, with white italicized text.",
"13ef": "row 13, column 00, with white underlined italicized text.",
"1370": "row 13, column 00, with plain white text.",
"13f1": "row 13, column 00, with white underlined text.",
"13f2": "row 13, column 04, with plain white text.",
"1373": "row 13, column 04, with white underlined text.",
"13f4": "row 13, column 08, with plain white text.",
"1375": "row 13, column 08, with white underlined text.",
"1376": "row 13, column 12, with plain white text.",
"13f7": "row 13, column 12, with white underlined text.",
"13f8": "row 13, column 16, with plain white text.",
"1379": "row 13, column 16, with white underlined text.",
"137a": "row 13, column 20, with plain white text.",
"13fb": "row 13, column 20, with white underlined text.",
"13fc": "row 13, column 24, with plain white text.",
"13fd": "row 13, column 24, with white underlined text.",
"13fe": "row 13, column 28, with plain white text.",
"137f": "row 13, column 28, with white underlined text.",
"9440": "row 14, column 00, with plain white text.",
"94c1": "row 14, column 00, with white underlined text.",
"94c2": "row 14, column 00, with plain green text.",
"9443": "row 14, column 00, with green underlined text.",
"94c4": "row 14, column 00, with plain blue text.",
"9445": "row 14, column 00, with blue underlined text.",
"9446": "row 14, column 00, with plain cyan text.",
"94c7": "row 14, column 00, with cyan underlined text.",
"94c8": "row 14, column 00, with plain red text.",
"9449": "row 14, column 00, with red underlined text.",
"944a": "row 14, column 00, with plain yellow text.",
"94cb": "row 14, column 00, with yellow underlined text.",
"944c": "row 14, column 00, with plain magenta text.",
"94cd": "row 14, column 00, with magenta underlined text.",
"94ce": "row 14, column 00, with white italicized text.",
"944f": "row 14, column 00, with white underlined italicized text.",
"94d0": "row 14, column 00, with plain white text.",
"9451": "row 14, column 00, with white underlined text.",
"9452": "row 14, column 04, with plain white text.",
"94d3": "row 14, column 04, with white underlined text.",
"9454": "row 14, column 08, with plain white text.",
"94d5": "row 14, column 08, with white underlined text.",
"94d6": "row 14, column 12, with plain white text.",
"9457": "row 14, column 12, with white underlined text.",
"9458": "row 14, column 16, with plain white text.",
"94d9": "row 14, column 16, with white underlined text.",
"94da": "row 14, column 20, with plain white text.",
"945b": "row 14, column 20, with white underlined text.",
"94dc": "row 14, column 24, with plain white text.",
"945d": "row 14, column 24, with white underlined text.",
"945e": "row 14, column 28, with plain white text.",
"94df": "row 14, column 28, with white underlined text.",
"94e0": "row 15, column 00, with plain white text.",
"9461": "row 15, column 00, with white underlined text.",
"9462": "row 15, column 00, with plain green text.",
"94e3": "row 15, column 00, with green underlined text.",
"9464": "row 15, column 00, with plain blue text.",
"94e5": "row 15, column 00, with blue underlined text.",
"94e6": "row 15, column 00, with plain cyan text.",
"9467": "row 15, column 00, with cyan underlined text.",
"9468": "row 15, column 00, with plain red text.",
"94e9": "row 15, column 00, with red underlined text.",
"94ea": "row 15, column 00, with plain yellow text.",
"946b": "row 15, column 00, with yellow underlined text.",
"94ec": "row 15, column 00, with plain magenta text.",
"946d": "row 15, column 00, with magenta underlined text.",
"946e": "row 15, column 00, with white italicized text.",
"94ef": "row 15, column 00, with white underlined italicized text.",
"9470": "row 15, column 00, with plain white text.",
"94f1": "row 15, column 00, with white underlined text.",
"94f2": "row 15, column 04, with plain white text.",
"9473": "row 15, column 04, with white underlined text.",
"94f4": "row 15, column 08, with plain white text.",
"9475": "row 15, column 08, with white underlined text.",
"9476": "row 15, column 12, with plain white text.",
"94f7": "row 15, column 12, with white underlined text.",
"94f8": "row 15, column 16, with plain white text.",
"9479": "row 15, column 16, with white underlined text.",
"947a": "row 15, column 20, with plain white text.",
"94fb": "row 15, column 20, with white underlined text.",
"94fc": "row 15, column 24, with plain white text.",
"94fd": "row 15, column 24, with white underlined text.",
"94fe": "row 15, column 28, with plain white text.",
"947f": "row 15, column 28, with white underlined text.",
"97a1": "Tab Offset 1 column",
"97a2": "Tab Offset 2 columns",
"9723": "Tab Offset 3 columns",
"94a1": "BackSpace",
"94a4": "Delete to End of Row",
"94ad": "Carriage Return",
"1020": "Background White",
"10a1": "Background Semi-Transparent White",
"10a2": "Background Green",
"1023": "Background Semi-Transparent Green",
"10a4": "Background Blue",
"1025": "Background Semi-Transparent Blue",
"1026": "Background Cyan",
"10a7": "Background Semi-Transparent Cyan",
"10a8": "Background Red",
"1029": "Background Semi-Transparent Red",
"102a": "Background Yellow",
"10ab": "Background Semi-Transparent Yellow",
"102c": "Background Magenta",
"10ad": "Background Semi-Transparent Magenta",
"10ae": "Background Black",
"102f": "Background Semi-Transparent Black",
"97ad": "Background Transparent",
"97a4": "Standard Character Set",
"9725": "Double-Size Character Set",
"9726": "First Private Character Set",
"97a7": "Second Private Character Set",
"97a8": "People`s Republic of China Character Set",
"9729": "Korean Standard Character Set",
"972a": "First Registered Character Set",
"9120": "White",
"91a1": "White Underline",
"91a2": "Green",
"9123": "Green Underline",
"91a4": "Blue",
"9125": "Blue Underline",
"9126": "Cyan",
"91a7": "Cyan Underline",
"91a8": "Red",
"9129": "Red Underline",
"912a": "Yellow",
"91ab": "Yellow Underline",
"912c": "Magenta",
"91ad": "Magenta Underline",
"97ae": "Black",
"972f": "Black Underline",
"91ae": "Italics",
"912f": "Italics Underline",
"94a8": "Flash ON",
"9423": "Alarm Off",
"94a2": "Alarm On"
}
def translate_scc(scc_content, brackets='[]'):
"""
Replaces hexadecimal words with their meaning
In order to make SCC files more human readable and easier to debug,
this function is used to replace command codes with their labels and
character bytes with their actual characters
:param scc_content: SCC captions to be translated
:type scc_content: str
:param brackets: Brackets to group the translated content of a command
:type brackets: str
:return: Translated SCC captions
:rtype: str
"""
opening_bracket, closing_bracket = brackets if brackets else ('', '')
scc_elements = set(scc_content.split())
for elem in scc_elements:
name = COMMAND_LABELS.get(elem)
# If a 2 byte command was not found, try retrieving 1 byte characters
if not name:
char1 = ALL_CHARACTERS.get(elem[:2])
char2 = ALL_CHARACTERS.get(elem[2:])
if char1 is not None and char2 is not None:
name = f"{char1}{char2}"
if name:
scc_content = scc_content.replace(
elem, f"{opening_bracket}{name}{closing_bracket}")
return scc_content
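# Hedged usage sketch (not part of the original module), assuming the large
# dictionary above is COMMAND_LABELS; both hex words below appear in it:
#
#     translate_scc("9120 94f2")
#     # -> "[White] [row 15, column 04, with plain white text.]"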
|
tox_helpers/run_integration_tests.py | sivchand/smart_open | 2,047 | 12783915 | <reponame>sivchand/smart_open
"""Runs integration tests."""
import os
import subprocess
os.environ['PYTEST_ADDOPTS'] = "--reruns 3 --reruns-delay 1"
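# The flags above come from the pytest-rerunfailures plugin (assumed to be
# installed in this environment): flaky network-bound tests are retried up to
# 3 times with a 1 second pause between attempts.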
subprocess.check_call(
[
'pytest',
'integration-tests/test_207.py',
'integration-tests/test_http.py',
]
)
if os.environ.get('AWS_ACCESS_KEY_ID') and os.environ.get('AWS_SECRET_ACCESS_KEY'):
subprocess.check_call(['pytest', '-v', 'integration-tests/test_s3_ported.py'])
|
lib/datasets/augmentation.py | ParikhKadam/HybridPose | 369 | 12783925 | import numpy as np
import cv2
import pdb
# https://github.com/zju3dv/clean-pvnet/blob/master/lib/datasets/augmentation.py
def debug_visualize(image, mask, pts2d, sym_cor, name_prefix='debug'):
from random import sample
cv2.imwrite('{}_image.png'.format(name_prefix), image * 255)
cv2.imwrite('{}_mask.png'.format(name_prefix), mask * 255)
img_pts = image.copy() * 255
for i in range(pts2d.shape[0]):
x = int(round(pts2d[i, 0]))
y = int(round(pts2d[i, 1]))
img_pts = cv2.circle(img_pts, (x, y), 2, (0, 0, 255), thickness=-1)
cv2.imwrite('{}_pts.png'.format(name_prefix), img_pts)
img_sym = image.copy() * 255
ys, xs = np.nonzero(mask)
for i_pt in sample([i for i in range(len(ys))], min(100, len(ys))):
y = int(round(ys[i_pt]))
x = int(round(xs[i_pt]))
x_cor, y_cor = sym_cor[y, x]
x_cor = int(round(x + x_cor))
y_cor = int(round(y + y_cor))
img_sym = cv2.line(img_sym, (x, y), (x_cor, y_cor), (0, 0, 255), 1)
cv2.imwrite('{}_sym.png'.format(name_prefix), img_sym)
def rotate_sym_cor(sym_cor, mask, R):
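    # Added note: sym_cor holds, for every foreground pixel, a 2D offset to its
    # symmetric correspondence. Both endpoints of each offset are rotated with R
    # and the new deltas are scattered back onto the rotated pixel locations.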
h, w = sym_cor.shape[:2]
ys, xs = np.nonzero(mask)
source = np.float32(np.stack([xs, ys], axis=-1))
delta = np.float32(sym_cor[ys, xs])
target = source + delta
last_col = np.ones((source.shape[0], 1), dtype=np.float32)
source = np.concatenate([source, last_col], axis=-1)
target = np.concatenate([target, last_col], axis=-1)
last_row = np.asarray([[0, 0, 1]], dtype=np.float32)
R = np.concatenate([R, last_row], axis=0).transpose()
source = np.matmul(source, R)[:, :2]
target = np.matmul(target, R)[:, :2]
source = np.uint32(np.round(source))
delta = target - source
# remove invalid indices
xs, ys = source[:, 0], source[:, 1]
valid = (xs > 0) & (xs < w) & (ys > 0) & (ys < h)
xs, ys, delta = xs[valid], ys[valid], delta[valid]
sym_cor = np.zeros_like(sym_cor)
sym_cor[ys, xs] = delta
return sym_cor
def rotate_instance(img, mask, hcoords, sym_cor, rot_ang_min, rot_ang_max):
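    # Added note: rotates the image, mask, keypoint coordinates (hcoords) and the
    # symmetry-correspondence field by a random angle in [rot_ang_min, rot_ang_max]
    # about the centroid of the foreground mask.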
h, w = img.shape[0], img.shape[1]
degree = np.random.uniform(rot_ang_min, rot_ang_max)
hs, ws = np.nonzero(mask)
R = cv2.getRotationMatrix2D((np.mean(ws), np.mean(hs)), degree, 1)
sym_cor = rotate_sym_cor(sym_cor, mask, R)
mask = cv2.warpAffine(mask, R, (w, h), flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
img = cv2.warpAffine(img, R, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
last_row = np.asarray([[0, 0, 1]], dtype=np.float32)
R = np.concatenate([R, last_row], axis=0).transpose()
last_col = np.ones((hcoords.shape[0], 1), dtype=np.float32)
hcoords = np.concatenate([hcoords, last_col], axis=1)
hcoords = np.float32(np.matmul(hcoords, R))
hcoords = hcoords[:, :2]
return img, mask, hcoords, sym_cor
def crop_resize_instance_v1(img, mask, hcoords, sym_cor, imheight, imwidth,
overlap_ratio=0.5, ratio_min=0.8, ratio_max=1.2):
    '''
    Crop a region of size [imheight*resize_ratio, imwidth*resize_ratio] that
    overlaps the foreground bounding box, with the overlap controlled by
    `overlap_ratio`.
    '''
hcoords_last_col = np.ones((hcoords.shape[0], 1), dtype=np.float32)
hcoords = np.concatenate([hcoords, hcoords_last_col], axis=1)
resize_ratio = np.random.uniform(ratio_min, ratio_max)
target_height = int(imheight * resize_ratio)
target_width = int(imwidth * resize_ratio)
img, mask, hcoords, sym_cor = crop_or_padding_to_fixed_size_instance(
img, mask, hcoords, sym_cor, target_height, target_width, overlap_ratio)
img = cv2.resize(img, (imwidth, imheight), interpolation=cv2.INTER_LINEAR)
mask = cv2.resize(mask, (imwidth, imheight), interpolation=cv2.INTER_NEAREST)
sym_cor = cv2.resize(sym_cor, (imwidth, imheight), interpolation=cv2.INTER_NEAREST)
sym_cor /= resize_ratio
hcoords[:, 0] = hcoords[:, 0] / resize_ratio
hcoords[:, 1] = hcoords[:, 1] / resize_ratio
hcoords = hcoords[:, :2]
return img, mask, hcoords, sym_cor
def crop_or_padding_to_fixed_size_instance(img, mask, hcoords, sym_cor, th, tw,
overlap_ratio=0.5):
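    # Added note: randomly picks a (th, tw) window whose overlap with the
    # foreground bounding box is controlled by `overlap_ratio`, shifts hcoords
    # into the crop frame, and zero-pads if the image is smaller than the
    # target size.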
h, w, _ = img.shape
hs, ws = np.nonzero(mask)
hmin, hmax = np.min(hs), np.max(hs)
wmin, wmax = np.min(ws), np.max(ws)
fh, fw = hmax - hmin, wmax - wmin
hpad, wpad = th >= h, tw >= w
    hrmax = int(min(hmin + overlap_ratio * fh, h - th))  # h must be greater than th (target height), otherwise hrmax < 0
hrmin = int(max(hmin + overlap_ratio * fh - th, 0))
    wrmax = int(min(wmin + overlap_ratio * fw, w - tw))  # w must be greater than tw (target width), otherwise wrmax < 0
wrmin = int(max(wmin + overlap_ratio * fw - tw, 0))
hbeg = 0 if (hpad or hrmin == hrmax) else np.random.randint(hrmin, hrmax)
hend = hbeg + th
    wbeg = 0 if (wpad or wrmin == wrmax) else np.random.randint(wrmin, wrmax)  # when padding, wend can exceed w; slicing past the end is safe in numpy
wend = wbeg + tw
img = img[hbeg:hend, wbeg:wend]
mask = mask[hbeg:hend, wbeg:wend]
sym_cor = sym_cor[hbeg:hend, wbeg:wend]
hcoords[:, 0] -= wbeg * hcoords[:, 2]
hcoords[:, 1] -= hbeg * hcoords[:, 2]
if hpad or wpad:
nh, nw, _ = img.shape
new_img = np.zeros([th, tw, 3], dtype=img.dtype)
new_mask = np.zeros([th, tw], dtype=mask.dtype)
new_sym_cor = np.zeros([th, tw, 2], dtype=sym_cor.dtype)
hbeg = 0 if not hpad else (th - h) // 2
wbeg = 0 if not wpad else (tw - w) // 2
new_img[hbeg:hbeg + nh, wbeg:wbeg + nw] = img
new_mask[hbeg:hbeg + nh, wbeg:wbeg + nw] = mask
new_sym_cor[hbeg:hbeg + nh, wbeg:wbeg + nw] = sym_cor
hcoords[:, 0] += wbeg * hcoords[:, 2]
hcoords[:, 1] += hbeg * hcoords[:, 2]
img, mask, sym_cor = new_img, new_mask, new_sym_cor
return img, mask, hcoords, sym_cor
def crop_or_padding_to_fixed_size(img, mask, sym_cor, th, tw):
h, w, _ = img.shape
hpad, wpad = th >= h, tw >= w
hbeg = 0 if hpad else np.random.randint(0, h - th)
    wbeg = 0 if wpad else np.random.randint(0, w - tw)  # when padding, wend can exceed w; slicing past the end is safe in numpy
hend = hbeg + th
wend = wbeg + tw
img = img[hbeg:hend, wbeg:wend]
mask = mask[hbeg:hend, wbeg:wend]
sym_cor = sym_cor[hbeg:hend, wbeg:wend]
if hpad or wpad:
nh, nw, _ = img.shape
new_img = np.zeros([th, tw, 3], dtype=img.dtype)
new_mask = np.zeros([th, tw], dtype=mask.dtype)
new_sym_cor = np.zeros([th, tw, 2], dtype=sym_cor.dtype)
hbeg = 0 if not hpad else (th - h) // 2
wbeg = 0 if not wpad else (tw - w) // 2
new_img[hbeg:hbeg + nh, wbeg:wbeg + nw] = img
new_mask[hbeg:hbeg + nh, wbeg:wbeg + nw] = mask
new_sym_cor[hbeg:hbeg + nh, wbeg:wbeg + nw] = sym_cor
img, mask, sym_cor = new_img, new_mask, new_sym_cor
return img, mask, sym_cor
|
recipes/nsimd/2.x/conanfile.py | rockandsalt/conan-center-index | 562 | 12783926 | import os
from conans import ConanFile, CMake, tools
required_conan_version = ">=1.33.0"
class NsimdConan(ConanFile):
name = "nsimd"
homepage = "https://github.com/agenium-scale/nsimd"
description = "Agenium Scale vectorization library for CPUs and GPUs"
topics = ("hpc", "neon", "cuda", "avx", "simd", "avx2", "sse2", "aarch64", "avx512", "sse42", "rocm", "sve", "neon128")
url = "https://github.com/conan-io/conan-center-index"
license = "MIT"
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
        # This is used only when building the library.
# Most functionality is header only.
"simd": [None, "cpu", "sse2", "sse42", "avx", "avx2", "avx512_knl", "avx512_skylake", "neon128", "aarch64", "sve", "sve128", "sve256", "sve512", "sve1024", "sve2048", "cuda", "rocm"]
}
default_options = {
"shared": False,
"fPIC": True,
"simd": None
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
# Most of the library is header only.
# cpp files do not use STL.
del self.settings.compiler.libcxx
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
if self.options.simd:
self._cmake.definitions["simd"] = self.options.simd
if self.settings.arch == "armv7hf":
self._cmake.definitions["NSIMD_ARM32_IS_ARMEL"] = False
self._cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"] = self.options.get_safe("fPIC", True)
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def _patch_sources(self):
cmakefile_path = os.path.join(self._source_subfolder, "CMakeLists.txt")
tools.replace_in_file(cmakefile_path,
" SHARED ",
" ")
tools.replace_in_file(cmakefile_path,
"RUNTIME DESTINATION lib",
"RUNTIME DESTINATION bin")
tools.replace_in_file(cmakefile_path,
"set_property(TARGET ${o} PROPERTY POSITION_INDEPENDENT_CODE ON)",
"")
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
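# Hedged usage sketch (not part of the recipe, reference and values are
# illustrative only): a local build for a specific SIMD extension might look like
#   conan create . nsimd/2.2@ -o nsimd:simd=avx2
# where the "simd" option name matches the recipe above.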
|
tests/functional/regressions/test_issue87.py | matt-koevort/tartiflette | 530 | 12783930 | import pytest
@pytest.mark.asyncio
@pytest.mark.ttftt_engine
@pytest.mark.parametrize(
"query,errors",
[
(
"""
subscription Sub {
newDog {
name
}
newHuman {
name
}
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 2, "column": 13},
{"line": 2, "column": 30},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
subscription Sub {
newDog {
name
}
__typename
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 2, "column": 13},
{"line": 2, "column": 30},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
fragment MultipleSubscriptionsFields on Subscription {
newDog {
name
}
newHuman {
name
}
}
subscription Sub {
...MultipleSubscriptionsFields
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 11, "column": 13},
{"line": 2, "column": 66},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
subscription Sub {
... on Subscription {
newDog {
name
}
newHuman {
name
}
}
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 2, "column": 13},
{"line": 3, "column": 35},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
fragment MultipleSubscriptionsFields on Subscription {
... on Subscription {
newDog {
name
}
newHuman {
name
}
}
}
subscription Sub {
...MultipleSubscriptionsFields
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 13, "column": 13},
{"line": 3, "column": 35},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
],
)
async def test_issue87(engine, query, errors):
assert await engine.execute(query) == {"data": None, "errors": errors}
|
fbchat/_events/_delta_type.py | JabLuszko/fbchat | 1,042 | 12783933 | <reponame>JabLuszko/fbchat
import attr
import datetime
from ._common import attrs_event, Event, UnknownEvent, ThreadEvent
from .. import _util, _threads, _models
from typing import Sequence, Optional
@attrs_event
class ColorSet(ThreadEvent):
"""Somebody set the color in a thread."""
#: The new color. Not limited to the ones in `ThreadABC.set_color`
color = attr.ib(type=str)
#: When the color was set
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
color = _threads.ThreadABC._parse_color(data["untypedData"]["theme_color"])
return cls(author=author, thread=thread, color=color, at=at)
@attrs_event
class EmojiSet(ThreadEvent):
"""Somebody set the emoji in a thread."""
#: The new emoji
emoji = attr.ib(type=str)
#: When the emoji was set
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
emoji = data["untypedData"]["thread_icon"]
return cls(author=author, thread=thread, emoji=emoji, at=at)
@attrs_event
class NicknameSet(ThreadEvent):
"""Somebody set the nickname of a person in a thread."""
#: The person whose nickname was set
subject = attr.ib(type=str)
#: The new nickname. If ``None``, the nickname was cleared
nickname = attr.ib(type=Optional[str])
#: When the nickname was set
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
subject = _threads.User(
session=session, id=data["untypedData"]["participant_id"]
)
nickname = data["untypedData"]["nickname"] or None # None if ""
return cls(
author=author, thread=thread, subject=subject, nickname=nickname, at=at
)
@attrs_event
class AdminsAdded(ThreadEvent):
"""Somebody added admins to a group."""
#: The people that were set as admins
added = attr.ib(type=Sequence["_threads.User"])
#: When the admins were added
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
subject = _threads.User(session=session, id=data["untypedData"]["TARGET_ID"])
return cls(author=author, thread=thread, added=[subject], at=at)
@attrs_event
class AdminsRemoved(ThreadEvent):
"""Somebody removed admins from a group."""
#: The people that were removed as admins
removed = attr.ib(type=Sequence["_threads.User"])
#: When the admins were removed
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
subject = _threads.User(session=session, id=data["untypedData"]["TARGET_ID"])
return cls(author=author, thread=thread, removed=[subject], at=at)
@attrs_event
class ApprovalModeSet(ThreadEvent):
"""Somebody changed the approval mode in a group."""
require_admin_approval = attr.ib(type=bool)
#: When the approval mode was set
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
raa = data["untypedData"]["APPROVAL_MODE"] == "1"
return cls(author=author, thread=thread, require_admin_approval=raa, at=at)
@attrs_event
class CallStarted(ThreadEvent):
"""Somebody started a call."""
#: When the call was started
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
return cls(author=author, thread=thread, at=at)
@attrs_event
class CallEnded(ThreadEvent):
"""Somebody ended a call."""
#: How long the call took
duration = attr.ib(type=datetime.timedelta)
#: When the call ended
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
duration = _util.seconds_to_timedelta(int(data["untypedData"]["call_duration"]))
return cls(author=author, thread=thread, duration=duration, at=at)
@attrs_event
class CallJoined(ThreadEvent):
"""Somebody joined a call."""
    #: When the call was joined
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
return cls(author=author, thread=thread, at=at)
@attrs_event
class PollCreated(ThreadEvent):
"""Somebody created a group poll."""
#: The new poll
poll = attr.ib(type="_models.Poll")
#: When the poll was created
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
poll_data = _util.parse_json(data["untypedData"]["question_json"])
poll = _models.Poll._from_graphql(session, poll_data)
return cls(author=author, thread=thread, poll=poll, at=at)
@attrs_event
class PollVoted(ThreadEvent):
"""Somebody voted in a group poll."""
#: The updated poll
poll = attr.ib(type="_models.Poll")
#: Ids of the voted options
added_ids = attr.ib(type=Sequence[str])
#: Ids of the un-voted options
removed_ids = attr.ib(type=Sequence[str])
#: When the poll was voted in
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
poll_data = _util.parse_json(data["untypedData"]["question_json"])
poll = _models.Poll._from_graphql(session, poll_data)
added_ids = _util.parse_json(data["untypedData"]["added_option_ids"])
removed_ids = _util.parse_json(data["untypedData"]["removed_option_ids"])
return cls(
author=author,
thread=thread,
poll=poll,
added_ids=[str(x) for x in added_ids],
removed_ids=[str(x) for x in removed_ids],
at=at,
)
@attrs_event
class PlanCreated(ThreadEvent):
"""Somebody created a plan in a group."""
#: The new plan
plan = attr.ib(type="_models.PlanData")
#: When the plan was created
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
plan = _models.PlanData._from_pull(session, data["untypedData"])
return cls(author=author, thread=thread, plan=plan, at=at)
@attrs_event
class PlanEnded(ThreadEvent):
"""A plan ended."""
#: The ended plan
plan = attr.ib(type="_models.PlanData")
#: When the plan ended
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
plan = _models.PlanData._from_pull(session, data["untypedData"])
return cls(author=author, thread=thread, plan=plan, at=at)
@attrs_event
class PlanEdited(ThreadEvent):
"""Somebody changed a plan in a group."""
#: The updated plan
plan = attr.ib(type="_models.PlanData")
#: When the plan was updated
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
plan = _models.PlanData._from_pull(session, data["untypedData"])
return cls(author=author, thread=thread, plan=plan, at=at)
@attrs_event
class PlanDeleted(ThreadEvent):
"""Somebody removed a plan in a group."""
#: The removed plan
plan = attr.ib(type="_models.PlanData")
#: When the plan was removed
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
plan = _models.PlanData._from_pull(session, data["untypedData"])
return cls(author=author, thread=thread, plan=plan, at=at)
@attrs_event
class PlanResponded(ThreadEvent):
"""Somebody responded to a plan in a group."""
#: The plan that was responded to
plan = attr.ib(type="_models.PlanData")
#: Whether the author will go to the plan or not
take_part = attr.ib(type=bool)
    #: When the plan was responded to
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
plan = _models.PlanData._from_pull(session, data["untypedData"])
take_part = data["untypedData"]["guest_status"] == "GOING"
return cls(author=author, thread=thread, plan=plan, take_part=take_part, at=at)
def parse_admin_message(session, data):
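    # Dispatch on the delta "type" (and, for some types, an inner event field)
    # to the matching event class; anything unrecognised falls through to the
    # UnknownEvent at the bottom.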
type_ = data["type"]
if type_ == "change_thread_theme":
return ColorSet._parse(session, data)
elif type_ == "change_thread_icon":
return EmojiSet._parse(session, data)
elif type_ == "change_thread_nickname":
return NicknameSet._parse(session, data)
elif type_ == "change_thread_admins":
event_type = data["untypedData"]["ADMIN_EVENT"]
if event_type == "add_admin":
return AdminsAdded._parse(session, data)
elif event_type == "remove_admin":
return AdminsRemoved._parse(session, data)
else:
pass
elif type_ == "change_thread_approval_mode":
return ApprovalModeSet._parse(session, data)
elif type_ == "instant_game_update":
pass # TODO: This
elif type_ == "messenger_call_log": # Previously "rtc_call_log"
event_type = data["untypedData"]["event"]
if event_type == "group_call_started":
return CallStarted._parse(session, data)
elif event_type in ["group_call_ended", "one_on_one_call_ended"]:
return CallEnded._parse(session, data)
else:
pass
elif type_ == "participant_joined_group_call":
return CallJoined._parse(session, data)
elif type_ == "group_poll":
event_type = data["untypedData"]["event_type"]
if event_type == "question_creation":
return PollCreated._parse(session, data)
elif event_type == "update_vote":
return PollVoted._parse(session, data)
else:
pass
elif type_ == "lightweight_event_create":
return PlanCreated._parse(session, data)
elif type_ == "lightweight_event_notify":
return PlanEnded._parse(session, data)
elif type_ == "lightweight_event_update":
return PlanEdited._parse(session, data)
elif type_ == "lightweight_event_delete":
return PlanDeleted._parse(session, data)
elif type_ == "lightweight_event_rsvp":
return PlanResponded._parse(session, data)
return UnknownEvent(source="Delta type", data=data)
|
distributed/protocol/tests/test_highlevelgraph.py | crusaderky/distributed | 1,358 | 12783934 | <gh_stars>1000+
import ast
import pytest
import dask
import dask.array as da
import dask.dataframe as dd
from distributed.diagnostics import SchedulerPlugin
from distributed.utils_test import gen_cluster
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
from numpy.testing import assert_array_equal
@gen_cluster(client=True)
async def test_combo_of_layer_types(c, s, a, b):
"""Check pack/unpack of a HLG that has everything!"""
def add(x, y, z, extra_arg):
return x + y + z + extra_arg
y = c.submit(lambda x: x, 2)
z = c.submit(lambda x: x, 3)
x = da.blockwise(
add,
"x",
da.zeros((3,), chunks=(1,)),
"x",
da.ones((3,), chunks=(1,)),
"x",
y,
None,
concatenate=False,
dtype=int,
extra_arg=z,
)
df = dd.from_pandas(pd.DataFrame({"a": np.arange(3)}), npartitions=3)
df = df.shuffle("a", shuffle="tasks")
df = df["a"].to_dask_array()
res = x.sum() + df.sum()
res = await c.compute(res, optimize_graph=False)
assert res == 21
@gen_cluster(client=True)
async def test_blockwise(c, s, a, b):
"""Check pack/unpack of blockwise layer"""
def add(x, y, z, extra_arg):
return x + y + z + extra_arg
y = c.submit(lambda x: x, 10)
z = c.submit(lambda x: x, 3)
x = da.blockwise(
add,
"x",
da.zeros((3,), chunks=(1,)),
"x",
da.ones((3,), chunks=(1,)),
"x",
y,
None,
concatenate=False,
dtype=int,
extra_arg=z,
)
res = await c.compute(x.sum(), optimize_graph=False)
assert res == 42
@gen_cluster(client=True)
async def test_shuffle(c, s, a, b):
"""Check pack/unpack of a shuffled dataframe"""
df = dd.from_pandas(
pd.DataFrame(
{"a": np.arange(10, dtype=int), "b": np.arange(10, 0, -1, dtype=float)}
),
npartitions=5,
)
df = df.shuffle("a", shuffle="tasks", max_branch=2)
df = df["a"] + df["b"]
res = await c.compute(df, optimize_graph=False)
assert res.dtypes == np.float64
assert (res == 10.0).all()
class ExampleAnnotationPlugin(SchedulerPlugin):
def __init__(self, priority_fn=None, qux="", resource="", retries=0):
self.priority_fn = priority_fn or (lambda k: 0)
self.qux = qux
self.resource = resource
self.retries = retries
self.priority_matches = 0
self.resource_matches = 0
self.retry_matches = 0
self.qux_matches = 0
def update_graph(self, scheduler, dsk=None, keys=None, restrictions=None, **kwargs):
annots = kwargs["annotations"]
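        # Annotations arrive keyed by annotation name and then by the stringified
        # task key, e.g. {"priority": {"('x', 0, 0)": 0, ...}} (illustrative),
        # which is why the keys are ast.literal_eval'ed below.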
if "priority" in annots:
self.priority_matches = sum(
int(self.priority_fn(ast.literal_eval(k)) == p)
for k, p in annots["priority"].items()
)
if "qux" in annots:
self.qux_matches = sum(int(self.qux == v) for v in annots["qux"].values())
if "custom_resource" in annots:
self.resource_matches = sum(
int(self.resource == v) for v in annots["custom_resource"].values()
)
if "retries" in annots:
self.retry_matches = sum(
int(self.retries == v) for v in annots["retries"].values()
)
@gen_cluster(client=True)
async def test_array_annotations(c, s, a, b):
def fn(k):
return k[1] * 5 + k[2]
qux = "baz"
resource = "widget"
plugin = ExampleAnnotationPlugin(priority_fn=fn, qux=qux, resource=resource)
s.add_plugin(plugin)
assert plugin in s.plugins.values()
with dask.annotate(priority=fn, qux=qux):
A = da.ones((10, 10), chunks=(2, 2))
with dask.annotate(custom_resource=resource):
B = A + 1
with dask.config.set(optimization__fuse__active=False):
result = await c.compute(B)
assert_array_equal(result, 2)
# There are annotation matches per array chunk (i.e. task)
assert plugin.qux_matches == A.npartitions
assert plugin.priority_matches == A.npartitions
assert plugin.resource_matches == B.npartitions
@gen_cluster(client=True)
async def test_dataframe_annotations(c, s, a, b):
retries = 5
plugin = ExampleAnnotationPlugin(retries=retries)
s.add_plugin(plugin)
assert plugin in s.plugins.values()
df = dd.from_pandas(
pd.DataFrame(
{"a": np.arange(10, dtype=int), "b": np.arange(10, 0, -1, dtype=float)}
),
npartitions=5,
)
df = df.shuffle("a", shuffle="tasks", max_branch=2)
acol = df["a"]
bcol = df["b"]
with dask.annotate(retries=retries):
df = acol + bcol
with dask.config.set(optimization__fuse__active=False):
rdf = await c.compute(df)
assert rdf.dtypes == np.float64
assert (rdf == 10.0).all()
# There is an annotation match per partition (i.e. task)
assert plugin.retry_matches == df.npartitions
|
dreamplace/ops/electric_potential/electric_overflow.py | Eternity666/DREAMPlace | 323 | 12783956 | ##
# @file electric_overflow.py
# @author <NAME>
# @date Aug 2018
#
import math
import numpy as np
import torch
from torch import nn
from torch.autograd import Function
from torch.nn import functional as F
import dreamplace.ops.electric_potential.electric_potential_cpp as electric_potential_cpp
import dreamplace.configure as configure
if configure.compile_configurations["CUDA_FOUND"] == "TRUE":
import dreamplace.ops.electric_potential.electric_potential_cuda as electric_potential_cuda
import pdb
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
class ElectricDensityMapFunction(Function):
"""
@brief compute density overflow.
    @param ctx pytorch API to store data for backward propagation
@param pos location of cells, x and then y
@param node_size_x_clamped stretched size, max(bin_size*sqrt2, node_size)
@param node_size_y_clamped stretched size, max(bin_size*sqrt2, node_size)
@param offset_x (stretched size - node_size) / 2
@param offset_y (stretched size - node_size) / 2
@param ratio original area / stretched area
@param initial_density_map density_map for fixed cells
@param target_density target density
@param xl left boundary
@param yl lower boundary
@param xh right boundary
@param yh upper boundary
@param bin_size_x bin width
    @param bin_size_y bin height
@param num_movable_nodes number of movable cells
@param num_filler_nodes number of filler cells
@param padding bin padding to boundary of placement region
    @param padding_mask 0/1 mask over bins, set to 1 for bins inside the padding region
@param num_bins_x number of bins in horizontal direction
@param num_bins_y number of bins in vertical direction
@param num_movable_impacted_bins_x number of impacted bins for any movable cell in x direction
@param num_movable_impacted_bins_y number of impacted bins for any movable cell in y direction
@param num_filler_impacted_bins_x number of impacted bins for any filler cell in x direction
@param num_filler_impacted_bins_y number of impacted bins for any filler cell in y direction
@param sorted_node_map the indices of the movable node map
"""
@staticmethod
def forward(
pos,
node_size_x_clamped,
node_size_y_clamped,
offset_x,
offset_y,
ratio,
bin_center_x,
bin_center_y,
initial_density_map,
target_density,
xl,
yl,
xh,
yh,
bin_size_x,
bin_size_y,
num_movable_nodes,
num_filler_nodes,
padding,
padding_mask, # same dimensions as density map, with padding regions to be 1
num_bins_x,
num_bins_y,
num_movable_impacted_bins_x,
num_movable_impacted_bins_y,
num_filler_impacted_bins_x,
num_filler_impacted_bins_y,
deterministic_flag,
sorted_node_map):
if pos.is_cuda:
output = electric_potential_cuda.density_map(
pos.view(pos.numel()), node_size_x_clamped,
node_size_y_clamped, offset_x, offset_y, ratio, bin_center_x,
bin_center_y, initial_density_map, target_density, xl, yl, xh,
yh, bin_size_x, bin_size_y, num_movable_nodes,
num_filler_nodes, padding, num_bins_x, num_bins_y,
num_movable_impacted_bins_x, num_movable_impacted_bins_y,
num_filler_impacted_bins_x, num_filler_impacted_bins_y,
deterministic_flag, sorted_node_map)
else:
output = electric_potential_cpp.density_map(
pos.view(pos.numel()), node_size_x_clamped,
node_size_y_clamped, offset_x, offset_y, ratio, bin_center_x,
bin_center_y, initial_density_map, target_density, xl, yl, xh,
yh, bin_size_x, bin_size_y, num_movable_nodes,
num_filler_nodes, padding, num_bins_x, num_bins_y,
num_movable_impacted_bins_x, num_movable_impacted_bins_y,
num_filler_impacted_bins_x, num_filler_impacted_bins_y,
deterministic_flag)
density_map = output.view([num_bins_x, num_bins_y])
# set padding density
if padding > 0:
density_map.masked_fill_(padding_mask,
target_density * bin_size_x * bin_size_y)
return density_map
class ElectricOverflow(nn.Module):
def __init__(
self,
node_size_x,
node_size_y,
bin_center_x,
bin_center_y,
target_density,
xl,
yl,
xh,
yh,
bin_size_x,
bin_size_y,
num_movable_nodes,
num_terminals,
num_filler_nodes,
padding,
deterministic_flag, # control whether to use deterministic routine
sorted_node_map,
movable_macro_mask=None):
super(ElectricOverflow, self).__init__()
self.node_size_x = node_size_x
self.node_size_y = node_size_y
self.bin_center_x = bin_center_x
self.bin_center_y = bin_center_y
self.target_density = target_density
self.xl = xl
self.yl = yl
self.xh = xh
self.yh = yh
self.bin_size_x = bin_size_x
self.bin_size_y = bin_size_y
self.num_movable_nodes = num_movable_nodes
self.num_terminals = num_terminals
self.num_filler_nodes = num_filler_nodes
self.padding = padding
self.sorted_node_map = sorted_node_map
self.movable_macro_mask = movable_macro_mask
self.deterministic_flag = deterministic_flag
self.reset()
def reset(self):
sqrt2 = math.sqrt(2)
# clamped means stretch a cell to bin size
# clamped = max(bin_size*sqrt2, node_size)
# offset means half of the stretch size
# ratio means the original area over the stretched area
self.node_size_x_clamped = self.node_size_x.clamp(min=self.bin_size_x *
sqrt2)
self.offset_x = (self.node_size_x - self.node_size_x_clamped).mul(0.5)
self.node_size_y_clamped = self.node_size_y.clamp(min=self.bin_size_y *
sqrt2)
self.offset_y = (self.node_size_y - self.node_size_y_clamped).mul(0.5)
node_areas = self.node_size_x * self.node_size_y
self.ratio = node_areas / (self.node_size_x_clamped *
self.node_size_y_clamped)
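        # Illustrative example (numbers are not from any benchmark): with
        # bin_size_x = 1.0, a 0.5-wide cell is stretched to sqrt(2) ~ 1.414,
        # offset_x = (0.5 - 1.414) / 2 ~ -0.457, and ratio rescales the density
        # so the stretched footprint still integrates to the original cell area.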
# detect movable macros and scale down the density to avoid halos
# the definition of movable macros should be different according to algorithms
self.num_movable_macros = 0
if self.target_density < 1 and self.movable_macro_mask is not None:
self.num_movable_macros = self.movable_macro_mask.sum().data.item()
self.ratio[:self.num_movable_nodes][
self.movable_macro_mask] = self.target_density
# compute maximum impacted bins
self.num_bins_x = int(math.ceil((self.xh - self.xl) / self.bin_size_x))
self.num_bins_y = int(math.ceil((self.yh - self.yl) / self.bin_size_y))
if self.num_movable_nodes:
self.num_movable_impacted_bins_x = int(
((self.node_size_x[:self.num_movable_nodes].max() +
2 * sqrt2 * self.bin_size_x) /
self.bin_size_x).ceil().clamp(max=self.num_bins_x))
self.num_movable_impacted_bins_y = int(
((self.node_size_y[:self.num_movable_nodes].max() +
2 * sqrt2 * self.bin_size_y) /
self.bin_size_y).ceil().clamp(max=self.num_bins_y))
else:
self.num_movable_impacted_bins_x = 0
self.num_movable_impacted_bins_y = 0
if self.num_filler_nodes:
self.num_filler_impacted_bins_x = (
(self.node_size_x[-self.num_filler_nodes:].max() +
2 * sqrt2 * self.bin_size_x) /
self.bin_size_x).ceil().clamp(max=self.num_bins_x)
self.num_filler_impacted_bins_y = (
(self.node_size_y[-self.num_filler_nodes:].max() +
2 * sqrt2 * self.bin_size_y) /
self.bin_size_y).ceil().clamp(max=self.num_bins_y)
else:
self.num_filler_impacted_bins_x = 0
self.num_filler_impacted_bins_y = 0
if self.padding > 0:
self.padding_mask = torch.ones(self.num_bins_x,
self.num_bins_y,
dtype=torch.uint8,
device=self.node_size_x.device)
self.padding_mask[self.padding:self.num_bins_x - self.padding,
self.padding:self.num_bins_y -
self.padding].fill_(0)
else:
self.padding_mask = torch.zeros(self.num_bins_x,
self.num_bins_y,
dtype=torch.uint8,
device=self.node_size_x.device)
# initial density_map due to fixed cells
self.initial_density_map = None
def compute_initial_density_map(self, pos):
if self.num_terminals == 0:
num_fixed_impacted_bins_x = 0
num_fixed_impacted_bins_y = 0
else:
max_size_x = self.node_size_x[self.num_movable_nodes:self.
num_movable_nodes +
self.num_terminals].max()
max_size_y = self.node_size_y[self.num_movable_nodes:self.
num_movable_nodes +
self.num_terminals].max()
num_fixed_impacted_bins_x = ((max_size_x + self.bin_size_x) /
self.bin_size_x).ceil().clamp(
max=self.num_bins_x)
num_fixed_impacted_bins_y = ((max_size_y + self.bin_size_y) /
self.bin_size_y).ceil().clamp(
max=self.num_bins_y)
if pos.is_cuda:
func = electric_potential_cuda.fixed_density_map
else:
func = electric_potential_cpp.fixed_density_map
self.initial_density_map = func(
pos, self.node_size_x, self.node_size_y, self.bin_center_x,
self.bin_center_y, self.xl, self.yl, self.xh, self.yh,
self.bin_size_x, self.bin_size_y, self.num_movable_nodes,
self.num_terminals, self.num_bins_x, self.num_bins_y,
num_fixed_impacted_bins_x, num_fixed_impacted_bins_y,
self.deterministic_flag)
# scale density of fixed macros
self.initial_density_map.mul_(self.target_density)
def forward(self, pos):
if self.initial_density_map is None:
self.compute_initial_density_map(pos)
density_map = ElectricDensityMapFunction.forward(
pos, self.node_size_x_clamped, self.node_size_y_clamped,
self.offset_x, self.offset_y, self.ratio, self.bin_center_x,
self.bin_center_y, self.initial_density_map, self.target_density,
self.xl, self.yl, self.xh, self.yh, self.bin_size_x,
self.bin_size_y, self.num_movable_nodes, self.num_filler_nodes,
self.padding, self.padding_mask, self.num_bins_x, self.num_bins_y,
self.num_movable_impacted_bins_x, self.num_movable_impacted_bins_y,
self.num_filler_impacted_bins_x, self.num_filler_impacted_bins_y,
self.deterministic_flag, self.sorted_node_map)
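        # The overflow below sums, over all bins, the density exceeding
        # target_density * bin_area; the second returned value is the peak bin
        # density normalised by bin_area (i.e. a utilisation ratio).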
bin_area = self.bin_size_x * self.bin_size_y
density_cost = (density_map -
self.target_density * bin_area).clamp_(min=0.0).sum().unsqueeze(0)
return density_cost, density_map.max().unsqueeze(0) / bin_area
def plot(plot_count, density_map, padding, name):
"""
density map contour and heat map
"""
density_map = density_map[padding:density_map.shape[0] - padding,
padding:density_map.shape[1] - padding]
print("max density = %g @ %s" %
(np.amax(density_map),
np.unravel_index(np.argmax(density_map), density_map.shape)))
print("mean density = %g" % (np.mean(density_map)))
fig = plt.figure()
ax = fig.gca(projection='3d')
x = np.arange(density_map.shape[0])
y = np.arange(density_map.shape[1])
x, y = np.meshgrid(x, y)
# looks like x and y should be swapped
ax.plot_surface(y, x, density_map, alpha=0.8)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('density')
# plt.tight_layout()
plt.savefig(name + ".3d.png")
plt.close()
# plt.clf()
#fig, ax = plt.subplots()
# ax.pcolor(density_map)
# Loop over data dimensions and create text annotations.
# for i in range(density_map.shape[0]):
# for j in range(density_map.shape[1]):
# text = ax.text(j, i, density_map[i, j],
# ha="center", va="center", color="w")
# fig.tight_layout()
#plt.savefig(name+".2d.%d.png" % (plot_count))
# plt.close()
|
tests/test_dataflow/multiwoz/conftest.py | luweishuang/task_oriented_dialogue_as_dataflow_synthesis | 257 | 12783974 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Any, Dict, List, Tuple
import pytest
from dataflow.multiwoz.trade_dst_utils import BeliefState
def convert_belief_dict_to_belief_state(belief_dict: Dict[str, str]) -> BeliefState:
belief_state: BeliefState = []
for slot_fullname, slot_value in sorted(belief_dict.items()):
belief_state.append({"slots": [[slot_fullname, slot_value]]})
return belief_state
def build_trade_dialogue(
dialogue_id: str, turns: List[Tuple[str, str, Dict[str, str]]]
) -> Dict[str, Any]:
trade_dialogue = {
"dialogue_idx": dialogue_id,
"dialogue": [
{
# Our mock dialogues here use 1-based turn indices.
# In real MultiWOZ/TRADE dialogues, turn index starts from 0.
"turn_idx": turn_idx + 1,
"system_transcript": agent_utt,
"transcript": user_utt,
"belief_state": convert_belief_dict_to_belief_state(belief_dict),
}
for turn_idx, (agent_utt, user_utt, belief_dict) in enumerate(turns)
],
}
return trade_dialogue
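# Hedged usage sketch (values are illustrative only):
#     build_trade_dialogue("d1", [("", "book a hotel", {"hotel-name": "none"})])
# returns a TRADE-style dict with a 1-based "turn_idx" and a belief_state of
# [{"slots": [["hotel-name", "none"]]}] for the single turn.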
@pytest.fixture
def trade_dialogue_1() -> Dict[str, Any]:
return build_trade_dialogue(
dialogue_id="dummy_1",
turns=[
# turn 1
# activate a domain without constraint, the plan should call "Find" with "EqualityConstraint"
            # we intentionally put only two "none" slots in the belief state to match the MultiWoZ annotation style
(
"",
"i want to book a hotel",
{"hotel-name": "none", "hotel-type": "none"},
),
# turn 2
# add constraints, the plan should call "Revise" with "EqualityConstraint"
(
"ok what type",
"guest house and cheap, probably hilton",
{
"hotel-name": "hilton",
"hotel-pricerange": "cheap",
"hotel-type": "guest house",
},
),
# turn 3
# drop a constraint (but the domain is still active), the plan should call "Revise" with "EqualityConstraint"
(
"no results",
"ok try another hotel",
{
"hotel-name": "none",
"hotel-pricerange": "cheap",
"hotel-type": "guest house",
},
),
# turn 4
# drop the domain
("failed", "ok never mind", {}),
# turn 5
# activate the domain again
("sure", "can you find a hotel in west", {"hotel-area": "west"}),
# turn 6
# activate a new domain and use a refer call
(
"how about this",
"ok can you find a restaurant in the same area",
{"hotel-area": "west", "restaurant-area": "west"},
),
# turn 7
# use a refer call to get a value from a dead domain
# the salience model should find the first valid refer value (skips "none")
(
"how about this",
"use the same price range as the hotel",
{
"hotel-area": "west",
"restaurant-area": "west",
"restaurant-pricerange": "cheap",
},
),
# turn 8
# do not change belief state
(
"ok",
"give me the address",
{
"hotel-area": "west",
"restaurant-area": "west",
"restaurant-pricerange": "cheap",
},
),
# turn 9
# a new domain
(
"ok",
"book a taxi now",
{
"hotel-area": "west",
"restaurant-area": "west",
"restaurant-pricerange": "cheap",
"taxi-departure": "none",
},
),
# turn 10
# do not change belief state (make sure the plan is "Revise" not "Find")
(
"ok",
"ok",
{
"hotel-area": "west",
"restaurant-area": "west",
"restaurant-pricerange": "cheap",
"taxi-departure": "none",
},
),
],
)
|
chapter5_operations/prediction_monitoring_pattern/src/configurations.py | sudabon/ml-system-in-actions | 133 | 12783983 | import os
from logging import getLogger
from src.constants import CONSTANTS, PLATFORM_ENUM
logger = getLogger(__name__)
class PlatformConfigurations:
platform = os.getenv("PLATFORM", PLATFORM_ENUM.DOCKER.value)
if not PLATFORM_ENUM.has_value(platform):
raise ValueError(f"PLATFORM must be one of {[v.value for v in PLATFORM_ENUM.__members__.values()]}")
class DBConfigurations:
mysql_username = os.getenv("MYSQL_USER")
mysql_password = os.getenv("MYSQL_PASSWORD")
mysql_port = int(os.getenv("MYSQL_PORT", 3306))
mysql_database = os.getenv("MYSQL_DATABASE", "sample_db")
mysql_server = os.getenv("MYSQL_SERVER")
sql_alchemy_database_url = (
f"mysql://{mysql_username}:{mysql_password}@{mysql_server}:{mysql_port}/{mysql_database}?charset=utf8"
)
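    # Resulting URL shape (illustrative values only):
    #   mysql://user:password@mysql-server:3306/sample_db?charset=utf8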
class APIConfigurations:
title = os.getenv("API_TITLE", "ServingPattern")
description = os.getenv("API_DESCRIPTION", "machine learning system serving patterns")
version = os.getenv("API_VERSION", "0.1")
class ModelConfigurations:
model_filepath = os.getenv("MODEL_FILEPATH")
label_filepath = os.getenv("LABEL_FILEPATH")
outlier_model_filepath = os.getenv("OUTLIER_MODEL_FILEPATH")
outlier_lower_threshold = float(os.getenv("OUTLIER_LOWER_THRESHOLD", 0.0))
logger.info(f"{PlatformConfigurations.__name__}: {PlatformConfigurations.__dict__}")
logger.info(f"{APIConfigurations.__name__}: {APIConfigurations.__dict__}")
logger.info(f"{ModelConfigurations.__name__}: {ModelConfigurations.__dict__}")
|
tests/messages_data/mime_emails/raw_email7.py | unqx/imap_tools | 344 | 12784029 | <gh_stars>100-1000
import datetime
from imap_tools import EmailAddress
DATA = dict(
subject='testing',
from_='<EMAIL>',
to=('<EMAIL>',),
cc=(),
bcc=(),
reply_to=(),
date=datetime.datetime(2005, 6, 6, 22, 21, 22, tzinfo=datetime.timezone(datetime.timedelta(0, 7200))),
date_str='Mon, 6 Jun 2005 22:21:22 +0200',
text='This is the first part.\r\n',
html='',
headers={'mime-version': ('1.0 (Apple Message framework v730)',), 'content-type': ('multipart/mixed; boundary=Apple-Mail-13-196941151',), 'message-id': ('<9169D984-4E0B-45EF-<EMAIL>-<EMAIL>>',), 'from': ('<EMAIL>',), 'subject': ('testing',), 'date': ('Mon, 6 Jun 2005 22:21:22 +0200',), 'to': ('<EMAIL>',)},
attachments=[
dict(
filename='test.rb',
content_id='',
content_disposition='attachment',
content_type='text/x-ruby-script',
payload=b'puts "testing, testing"\r\n',
),
dict(
filename='test.pdf',
content_id='',
content_disposition='inline',
content_type='application/pdf',
payload=b'blah blah blah',
),
dict(
filename='smime.p7s',
content_id='',
content_disposition='attachment',
content_type='application/pkcs7-signature',
payload=b"\x8d\xa9\xa2\xb1*\x86H\x86\xf7\r\x01\x07\x02\xa0\x800\x88\xda\x9a+1\x0b0\t\x06\x05+\x0e\x03\x02\x1a\x05\x000\x80\x06\t*\x86J6\xa6\x8a\xc1\x07\x01\x00\x00\xa0\x82\x05J0\x82\x05F0\x82\x04.\x8d\xa9\xa2\xb1\x02\x02\x04?\xbe\xbaD0\r\x06\t*\x88\xda\x9a+\r\x01\x01\x05\x05\x00011\x0b0\t\x06\x03U\x04\x06\x13\x02F6\xa6\x8a\xc0\n\x06\x03U\x04\n\x13\x03TDC1\x140\x12\x06\x8d\xa9\xa2\xb3\x13\x0bTDC OCES CH\xda\x9a+\r040229115901Z\x17\r06026\xa6\x8a\xc22901Z0\x81\x801\x0b0\t\x06\x03U\x04\x8d\xa9\xa2\xb0K1)0'\x06\x03U\x04\n\x13 H\xda\x9a+. organisatorisk tin6\xa6\x8a\xc4nin",
),
],
from_values=EmailAddress('', '<EMAIL>', '<EMAIL>'),
to_values=(EmailAddress('', '<EMAIL>', '<EMAIL>'),),
cc_values=(),
bcc_values=(),
reply_to_values=(),
) |
src/prefect/environments/execution/__init__.py | vnsn/prefect | 8,633 | 12784051 | """
Execution environments encapsulate the logic for where your Flow should execute in Prefect Cloud.
DEPRECATED: Environment based configuration is deprecated, please transition to
configuring `flow.run_config` instead of `flow.environment`. See
https://docs.prefect.io/orchestration/flow_config/overview.html for more info.
"""
from prefect.environments.execution.base import Environment, load_and_run_flow
from prefect.environments.execution.dask import DaskKubernetesEnvironment
from prefect.environments.execution.dask import DaskCloudProviderEnvironment
from prefect.environments.execution.fargate import FargateTaskEnvironment
from prefect.environments.execution.k8s import KubernetesJobEnvironment
from prefect.environments.execution.local import LocalEnvironment
|
common/test/test_signing_serializer.py | andkononykhin/plenum | 148 | 12784073 |
from collections import OrderedDict
import pytest
from common.serializers.serialization import serialize_msg_for_signing
def test_serialize_int():
assert b"1" == serialize_msg_for_signing(1)
def test_serialize_str():
assert b"aaa" == serialize_msg_for_signing("aaa")
def test_serialize_none():
assert b"" == serialize_msg_for_signing(None)
def test_serialize_simple_dict():
assert b"1:a|2:b" == serialize_msg_for_signing({1: 'a', 2: 'b'})
assert b"1:a|2:b" == serialize_msg_for_signing({"2": 'b', "1": 'a'})
def test_serialize_array():
assert b"1,5,3,4,2" == serialize_msg_for_signing([1, 5, 3, 4, 2])
def test_serialize_ordered_dict():
v1 = OrderedDict([('1', 'a'), ('2', 'b')])
v2 = OrderedDict([('2', 'b'), ('1', 'a')])
assert b"1:a|2:b" == serialize_msg_for_signing(v1)
assert b"1:a|2:b" == serialize_msg_for_signing(v2)
def test_serialize_dict_with_array():
assert b"1:a|2:b|3:1,2:k" == serialize_msg_for_signing({1: 'a', 2: 'b', 3: [1, {2: 'k'}]})
assert b"1:a|2:b|3:1,2:k" == serialize_msg_for_signing({'1': 'a', '2': 'b', '3': ['1', {'2': 'k'}]})
@pytest.mark.skip("An issue in Signing Serializer: https://jira.hyperledger.org/browse/INDY-1469")
def test_serialize_dicts_with_different_keys():
v1 = serialize_msg_for_signing(
{
1: 'a',
2: {
3: 'b',
4: {
5: {
6: 'c'
}
}
}
})
v2 = serialize_msg_for_signing(
{
1: 'a',
2: {
3: 'b',
},
4: {
5: {
6: 'c'
}
}
})
assert v1 == v2
@pytest.mark.skip("An issue in Signing Serializer: https://jira.hyperledger.org/browse/INDY-1469")
def test_serialize_complex_dict():
assert b'1:a|2:3:b|2:4:5:6:c' == serialize_msg_for_signing(
{
1: 'a',
2: {
3: 'b',
4: {
5: {
6: 'c'
}
}
}
})
assert b'1:a|2:3:b|2:4:5:6:c' == serialize_msg_for_signing(
{
'1': 'a',
'2': {
'3': 'b',
'4': {
'5': {
'6': 'c'
}
}
}
})
v = serialize_msg_for_signing(
{
'1': 'a',
'2': 'b',
'3': {
'4': 'c',
'5': 'd',
'6': {
'7': {
'8': 'e',
'9': 'f'
},
'10': {
'11': 'g',
'12': 'h'
}
},
'13': {
'13': {
'13': 'i',
}
}
}
})
assert b'1:a|2:b|3:4:c|3:5:d|3:6:7:8:e|3:6:7:9:f|3:6:10:11:g|3:6:10:12:h|3:13:13:13:i' == v
@pytest.mark.skip("An issue in Signing Serializer: https://jira.hyperledger.org/browse/INDY-1469")
def test_serialize_complex_ordered_dict():
assert b'1:a|2:3:b|4:c' == serialize_msg_for_signing(
OrderedDict([
('1', 'a'),
('2', OrderedDict([
('3', 'b'),
('4', 'c'),
]))
]))
assert b'1:a|2:3:b|4:c' == serialize_msg_for_signing(
OrderedDict([
('2', OrderedDict([
('4', 'c'),
('3', 'b'),
])),
('1', 'a'),
]))
|
src/UnloadAutoPartitions/genunload.py | rmcelroyF8/amazon-redshift-utils | 2,452 | 12784085 |
"""
GenUnload : Generate unload commands given a config file (config.ini) which has information about table, schema, partition column & sort columns
-------------------------------------------------------------------------------------------------------------------------------------------
-- <NAME> 05/23/2019
example:
python3 genunload.py
Readme.md has the requirements info.
"""
from __future__ import print_function
import os
import sys
import datetime
from datetime import timedelta
import boto3
import base64
import json
import argparse
import configparser
import pgpasslib
import pg8000
import re
ssl = True
__version__ = "1.0"
pg8000.paramstyle = "qmark"
def connect(host, port, db, dbuser, table, schema, column_list, partition_column, sort_keys, s3path, iamrole):
# get password from .pgpass or environment
try:
pg_pwd = pgpasslib.getpass(host,port,db,dbuser)
print(pg_pwd)
if pg_pwd:
            pwd = pg_pwd
except pgpasslib.FileNotFound as e:
pass
# Connect to the cluster
try:
if debug:
print('Connecting to Redshift: %s' % host)
        conn = pg8000.connect(database=db, user=dbuser, password=pwd, host=host, port=port, ssl=ssl)
conn.autocommit = True
except:
print('Redshift Connection Failed: exception %s' % sys.exc_info()[1])
raise
if debug:
print('Successfully connected to Redshift cluster')
# create a new cursor
cursor = conn.cursor()
check_table_exists(cursor, conn, table, schema)
full_column_list, partition_keys, partition_column_type = get_column_list_partition_keys(cursor, conn, table, schema, column_list, partition_column)
gen_unload(full_column_list, partition_keys, partition_column_type, schema, table, partition_column, sort_keys, s3path, iamrole)
if execute:
print("Executing unload commands !")
execute_unload(cursor, conn)
conn.commit()
if debug:
print("Done with the script !!")
def check_table_exists(cursor, conn, table, schema):
# check if table exists
if debug:
print('Check for table exists: %s' % table)
stmt = "SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema = '%s' AND table_name = '%s');" % (schema, table)
cursor.execute(stmt)
s = cursor.fetchone()
s = ' '.join(map(str, s))
if s == 'False':
print('Table does not exist: %s' % table)
exit(1)
else:
print('Table %s exists' % table)
def get_column_list_partition_keys(cursor, conn, table, schema, column_list, partition_column):
# get partition column data type
stmt = "select data_type from information_schema.columns where table_name = '%s' and table_schema = '%s' and column_name = '%s';" % (
table, schema, partition_column)
if debug:
print('Collecting data type information for partition column: %s' % partition_column)
cursor.execute(stmt)
partition_column_type = cursor.fetchone()
if partition_column_type is None:
print('Please check your partition column: %s' % partition_column)
exit(1)
partition_column_type = ' '.join(map(str, partition_column_type))
if any(re.findall(r'integer|numeric|decimal|bigint|real|double precision|smallint', partition_column_type,
re.IGNORECASE)):
partition_column_type = 'numeric'
elif any(re.findall(r'timestamp without time zone|date|character varying|character|timestamp with time zone|bool|boolean', partition_column_type, re.IGNORECASE)):
partition_column_type = 'alphanumeric'
# if column_list not set , then select all columns except for partition column
if not column_list:
stmt = "select column_name from information_schema.columns where table_name = '%s' and table_schema = '%s' order by ordinal_position;" % (table, schema)
if debug:
print('Collecting column list excluding partition column: %s' % partition_column)
cursor.execute(stmt)
column_list = cursor.fetchall()
full_column_list = [x[0] for x in column_list]
full_column_list.remove(partition_column)
full_column_list = ','.join(map(str, full_column_list))
else:
full_column_list = column_list
# get distinct partition keys using partition column
stmt = "select distinct %s from %s.%s;" % (partition_column, schema, table)
if debug:
print('Collecting distinct partition keys for partition column: %s [skipping NULL values]' % partition_column)
cursor.execute(stmt)
keys = cursor.fetchall()
partition_keys = [x[0] for x in keys]
print('Column list = %s' % full_column_list)
# print('Partition keys = %s' % partition_keys)
return full_column_list, partition_keys, partition_column_type
def gen_unload(full_column_list, partition_keys, partition_column_type, schema, table, partition_column, sort_keys, s3path, iamrole):
"""
:param full_column_list: list
:param partition_keys: list
:param partition_column_type: str
:param schema: str
:param table: str
:param partition_column: str
:param sort_keys: str
:param s3path: str
:param iamrole: str
:return: str
"""
s3path = s3path[:-1] if s3path.endswith('/') else s3path
column_list_str = full_column_list
if sort_keys:
sql = 'SELECT ' + column_list_str + ' FROM ' + schema + '.' + table + ' WHERE ' + partition_column + '=' + '<>' + ' ORDER BY ' + sort_keys
else:
sql = 'SELECT ' + column_list_str + ' FROM ' + schema + '.' + table + ' WHERE ' + partition_column + '=' + '<>'
part1 = 'UNLOAD ( ' + '\'' + sql + '\''
part3 = 'IAM_ROLE \'' + iamrole + '\' FORMAT PARQUET ALLOWOVERWRITE;'
unload_stmt = str()
for key in partition_keys:
if key is not None:
if partition_column_type == 'numeric':
temp = part1.replace('<>', str(key))
unload_stmt = unload_stmt + temp + ') TO ' + '\'' + s3path + '/' + partition_column + '=' + str(key) + '/\' ' + part3 + '\n'
elif partition_column_type == 'alphanumeric':
temp = part1.replace('<>', '\\\'' + str(key) + '\\\'')
unload_stmt = unload_stmt + temp + ') TO ' + '\'' + s3path + '/' + partition_column + '=' + str(key) + '/\' ' + part3 + '\n'
if debug:
print('Generating unload statements !')
with open('unload.sql', 'w') as file:
file.write(unload_stmt)
def execute_unload(cursor, conn):
with open(os.path.dirname(__file__) + 'unload.sql', 'r') as sql:
unload_commands = sql.read()
for s in unload_commands.split(";"):
stmt = s.strip()
if s is not None and stmt != "":
if debug is True:
print(stmt)
try:
cursor.execute(stmt)
except Exception as e:
if re.search(".*column.*does not exist*", str(e)) is not None:
print('Check the column list !')
raise
else:
print(e)
raise e
conn.commit()
print('Done with executing unload commands !')
def main():
config = configparser.ConfigParser()
config.read('config.ini')
parser = argparse.ArgumentParser()
global debug
global execute
host = config.get('cluster', 'identifier', fallback=None)
dbuser = config.get('cluster', 'dbuser', fallback='dbadmin')
db = config.get('cluster', 'database', fallback='dev')
port = int(config.get('cluster', 'port', fallback='5439'))
schema = config.get('cluster', 'schema', fallback='public')
table = config.get('cluster', 'table', fallback=None)
partition_column = config.get('cluster', 'partition_key', fallback=None)
sort_keys = config.get('cluster', 'sort_keys', fallback=None)
column_list = config.get('cluster', 'column_list', fallback='All')
debug = config.getboolean('cluster', 'debug', fallback=False)
execute = config.getboolean('cluster', 'execute', fallback=False)
s3path = config.get('s3', 'path', fallback=None)
iamrole = config.get('creds', 'iam_role', fallback=None)
if host is None or dbuser is None or db is None or schema is None or table is None or partition_column is None or sort_keys is None or column_list is None or s3path is None or iamrole is None:
parser.print_help()
exit()
connect(host, port, db, dbuser, table, schema, column_list, partition_column, sort_keys, s3path, iamrole)
if __name__ == "__main__":
main()
|
scripts/contractInteraction/config.py | omerzam/Sovryn-smart-contracts | 108 | 12784098 | from brownie import *
from brownie.network.contract import InterfaceContainer
import json
def loadConfig():
global contracts, acct
thisNetwork = network.show_active()
if thisNetwork == "development":
acct = accounts[0]
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "testnet":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "testnet-ws":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "rsk-testnet":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "rsk-mainnet":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/mainnet_contracts.json')
else:
raise Exception("Network not supported.")
contracts = json.load(configFile)
|
social_core/backends/fedora.py | shnaqawi/social-core | 745 | 12784099 |
"""
Fedora OpenId backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/fedora.html
"""
from .open_id import OpenIdAuth
class FedoraOpenId(OpenIdAuth):
name = 'fedora'
URL = 'https://id.fedoraproject.org'
USERNAME_KEY = 'nickname'
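# Illustrative note (added, not part of the original module): like other
# python-social-auth backends, this class is typically enabled by adding its
# dotted path, 'social_core.backends.fedora.FedoraOpenId', to the consuming
# project's authentication backends setting; exact setting names depend on the
# framework integration in use.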
|
pytorch_ares/third_party/free_adv_train/free_train.py | thu-ml/realsafe | 107 | 12784137 |
"""Trains a model, saving checkpoints and tensorboard summaries along
the way."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import shutil
from timeit import default_timer as timer
import tensorflow as tf
import numpy as np
import sys
from free_model import Model
import cifar10_input
import cifar100_input
import pdb
import config
def get_path_dir(data_dir, dataset, **_):
path = os.path.join(data_dir, dataset)
if os.path.islink(path):
path = os.readlink(path)
return path
def train(tf_seed, np_seed, train_steps, out_steps, summary_steps, checkpoint_steps, step_size_schedule,
weight_decay, momentum, train_batch_size, epsilon, replay_m, model_dir, dataset, **kwargs):
tf.set_random_seed(tf_seed)
np.random.seed(np_seed)
model_dir = model_dir + '%s_m%d_eps%.1f_b%d' % (dataset, replay_m, epsilon, train_batch_size) # TODO Replace with not defaults
# Setting up the data and the model
data_path = get_path_dir(dataset=dataset, **kwargs)
if dataset == 'cifar10':
raw_data = cifar10_input.CIFAR10Data(data_path)
else:
raw_data = cifar100_input.CIFAR100Data(data_path)
global_step = tf.contrib.framework.get_or_create_global_step()
model = Model(mode='train', dataset=dataset, train_batch_size=train_batch_size)
# Setting up the optimizer
boundaries = [int(sss[0]) for sss in step_size_schedule][1:]
values = [sss[1] for sss in step_size_schedule]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values)
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
# Optimizing computation
total_loss = model.mean_xent + weight_decay * model.weight_decay_loss
grads = optimizer.compute_gradients(total_loss)
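    # Note (added for clarity, not in the original source): "free" adversarial
    # training reuses a single backward pass twice. The gradient w.r.t. the
    # perturbation variable is used below to take a signed epsilon step clipped
    # to the L-inf ball, while the gradients w.r.t. the model weights are applied
    # by min_step. Because each mini-batch is replayed replay_m times in the
    # training loop, the adversarial examples come at no extra gradient cost.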
# Compute new image
pert_grad = [g for g, v in grads if 'perturbation' in v.name]
sign_pert_grad = tf.sign(pert_grad[0])
new_pert = model.pert + epsilon * sign_pert_grad
clip_new_pert = tf.clip_by_value(new_pert, -epsilon, epsilon)
assigned = tf.assign(model.pert, clip_new_pert)
# Train
no_pert_grad = [(tf.zeros_like(v), v) if 'perturbation' in v.name else (g, v) for g, v in grads]
with tf.control_dependencies([assigned]):
min_step = optimizer.apply_gradients(no_pert_grad, global_step=global_step)
tf.initialize_variables([model.pert]) # TODO: Removed from TF
# Setting up the Tensorboard and checkpoint outputs
if not os.path.exists(model_dir):
os.makedirs(model_dir)
saver = tf.train.Saver(max_to_keep=1)
tf.summary.scalar('accuracy', model.accuracy)
tf.summary.scalar('xent', model.xent / train_batch_size)
tf.summary.scalar('total loss', total_loss / train_batch_size)
merged_summaries = tf.summary.merge_all()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
print('\n\n********** free training for epsilon=%.1f using m_replay=%d **********\n\n' % (epsilon, replay_m))
print('important params >>> \n model dir: %s \n dataset: %s \n training batch size: %d \n' % (model_dir, dataset, train_batch_size))
if dataset == 'cifar100':
print('the ride for CIFAR100 is bumpy -- fasten your seatbelts! \n \
            you will probably see the training and validation accuracy fluctuating a lot early in training \n \
this is natural especially for large replay_m values because we see that mini-batch so many times.')
# initialize data augmentation
if dataset == 'cifar10':
data = cifar10_input.AugmentedCIFAR10Data(raw_data, sess, model)
else:
data = cifar100_input.AugmentedCIFAR100Data(raw_data, sess, model)
# Initialize the summary writer, global variables, and our time counter.
summary_writer = tf.summary.FileWriter(model_dir + '/train', sess.graph)
eval_summary_writer = tf.summary.FileWriter(model_dir + '/eval')
sess.run(tf.global_variables_initializer())
# Main training loop
for ii in range(train_steps):
if ii % replay_m == 0:
x_batch, y_batch = data.train_data.get_next_batch(train_batch_size, multiple_passes=True)
nat_dict = {model.x_input: x_batch, model.y_input: y_batch}
x_eval_batch, y_eval_batch = data.eval_data.get_next_batch(train_batch_size, multiple_passes=True)
eval_dict = {model.x_input: x_eval_batch, model.y_input: y_eval_batch}
# Output to stdout
if ii % summary_steps == 0:
train_acc, summary = sess.run([model.accuracy, merged_summaries], feed_dict=nat_dict)
summary_writer.add_summary(summary, global_step.eval(sess))
val_acc, summary = sess.run([model.accuracy, merged_summaries], feed_dict=eval_dict)
eval_summary_writer.add_summary(summary, global_step.eval(sess))
print('Step {}: ({})'.format(ii, datetime.now()))
print(' training nat accuracy {:.4}% -- validation nat accuracy {:.4}%'.format(train_acc * 100,
val_acc * 100))
sys.stdout.flush()
# Tensorboard summaries
elif ii % out_steps == 0:
nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)
print('Step {}: ({})'.format(ii, datetime.now()))
print(' training nat accuracy {:.4}%'.format(nat_acc * 100))
# Write a checkpoint
if (ii+1) % checkpoint_steps == 0:
saver.save(sess, os.path.join(model_dir, 'checkpoint'), global_step=global_step)
# Actual training step
sess.run(min_step, feed_dict=nat_dict)
if __name__ == '__main__':
args = config.get_args()
train(**vars(args))
|
ogb_lsc/pcq/conformer_utils.py | kawa-work/deepmind-research | 10,110 | 12784149 | # Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conformer utilities."""
import copy
from typing import List, Optional
from absl import logging
import numpy as np
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
import tensorflow.compat.v2 as tf
def generate_conformers(
molecule: Chem.rdchem.Mol,
max_num_conformers: int,
*,
random_seed: int = -1,
prune_rms_thresh: float = -1.0,
max_iter: int = -1,
fallback_to_random: bool = False,
) -> Chem.rdchem.Mol:
"""Generates conformers for a given molecule.
Args:
molecule: molecular representation of the compound.
max_num_conformers: maximum number of conformers to generate. If pruning is
done, the returned number of conformers is not guaranteed to match
max_num_conformers.
random_seed: random seed to use for conformer generation.
prune_rms_thresh: RMSD threshold which allows to prune conformers that are
too similar.
max_iter: Maximum number of iterations to perform when optimising MMFF force
field. If set to <= 0, energy optimisation is not performed.
fallback_to_random: if conformers cannot be obtained, use random coordinates
to initialise.
Returns:
Copy of a `molecule` with added hydrogens. The returned molecule contains
force field-optimised conformers. The number of conformers is guaranteed to
be <= max_num_conformers.
"""
mol = copy.deepcopy(molecule)
mol = Chem.AddHs(mol)
mol = _embed_conformers(
mol,
max_num_conformers,
random_seed,
prune_rms_thresh,
fallback_to_random,
use_random=False)
if max_iter > 0:
mol_with_conformers = _minimize_by_mmff(mol, max_iter)
if mol_with_conformers is None:
mol_with_conformers = _minimize_by_uff(mol, max_iter)
else:
mol_with_conformers = mol
# Aligns conformations in a molecule to each other using the first
# conformation as the reference.
AllChem.AlignMolConformers(mol_with_conformers)
# We remove hydrogens to keep the number of atoms consistent with the graph
# nodes.
mol_with_conformers = Chem.RemoveHs(mol_with_conformers)
return mol_with_conformers
def atom_to_feature_vector(
atom: rdkit.Chem.rdchem.Atom,
conformer: Optional[np.ndarray] = None,
) -> List[float]:
"""Converts rdkit atom object to feature list of indices.
Args:
atom: rdkit atom object.
conformer: Generated conformers. Returns -1 values if set to None.
Returns:
List containing positions (x, y, z) of each atom from the conformer.
"""
if conformer:
pos = conformer.GetAtomPosition(atom.GetIdx())
return [pos.x, pos.y, pos.z]
return [np.nan, np.nan, np.nan]
def compute_conformer(smile: str, max_iter: int = -1) -> np.ndarray:
"""Computes conformer.
Args:
smile: Smile string.
max_iter: Maximum number of iterations to perform when optimising MMFF force
field. If set to <= 0, energy optimisation is not performed.
Returns:
    A list with the (x, y, z) position of the atom taken from the conformer.
Raises:
RuntimeError: If unable to convert smile string to RDKit mol.
"""
mol = rdkit.Chem.MolFromSmiles(smile)
if not mol:
raise RuntimeError('Unable to convert smile to molecule: %s' % smile)
conformer_failed = False
try:
mol = generate_conformers(
mol,
max_num_conformers=1,
random_seed=45,
prune_rms_thresh=0.01,
max_iter=max_iter)
except IOError as e:
logging.exception('Failed to generate conformers for %s . IOError %s.',
smile, e)
conformer_failed = True
except ValueError:
logging.error('Failed to generate conformers for %s . ValueError', smile)
conformer_failed = True
except: # pylint: disable=bare-except
logging.error('Failed to generate conformers for %s.', smile)
conformer_failed = True
atom_features_list = []
conformer = None if conformer_failed else list(mol.GetConformers())[0]
for atom in mol.GetAtoms():
atom_features_list.append(atom_to_feature_vector(atom, conformer))
conformer_features = np.array(atom_features_list, dtype=np.float32)
return conformer_features
def get_random_rotation_matrix(include_mirror_symmetry: bool) -> tf.Tensor:
"""Returns a single random rotation matrix."""
rotation_matrix = _get_random_rotation_3d()
if include_mirror_symmetry:
random_mirror_symmetry = _get_random_mirror_symmetry()
rotation_matrix = tf.matmul(rotation_matrix, random_mirror_symmetry)
return rotation_matrix
def rotate(vectors: tf.Tensor, rotation_matrix: tf.Tensor) -> tf.Tensor:
"""Batch of vectors on a single rotation matrix."""
return tf.matmul(vectors, rotation_matrix)
def _embed_conformers(
molecule: Chem.rdchem.Mol,
max_num_conformers: int,
random_seed: int,
prune_rms_thresh: float,
fallback_to_random: bool,
*,
use_random: bool = False,
) -> Chem.rdchem.Mol:
"""Embeds conformers into a copy of a molecule.
If random coordinates allowed, tries not to use random coordinates at first,
and uses random only if fails.
Args:
molecule: molecular representation of the compound.
max_num_conformers: maximum number of conformers to generate. If pruning is
done, the returned number of conformers is not guaranteed to match
max_num_conformers.
random_seed: random seed to use for conformer generation.
prune_rms_thresh: RMSD threshold which allows to prune conformers that are
too similar.
fallback_to_random: if conformers cannot be obtained, use random coordinates
to initialise.
*:
use_random: Use random coordinates. Shouldn't be set by any caller except
this function itself.
Returns:
A copy of a molecule with embedded conformers.
Raises:
ValueError: if conformers cannot be obtained for a given molecule.
"""
mol = copy.deepcopy(molecule)
# Obtains parameters for conformer generation.
# In particular, ETKDG is experimental-torsion basic knowledge distance
# geometry, which allows to randomly generate an initial conformation that
# satisfies various geometric constraints such as lower and upper bounds on
# the distances between atoms.
params = AllChem.ETKDGv3()
params.randomSeed = random_seed
params.pruneRmsThresh = prune_rms_thresh
params.numThreads = -1
params.useRandomCoords = use_random
conf_ids = AllChem.EmbedMultipleConfs(mol, max_num_conformers, params)
if not conf_ids:
if not fallback_to_random or use_random:
raise ValueError('Cant get conformers')
return _embed_conformers(
mol,
max_num_conformers,
random_seed,
prune_rms_thresh,
fallback_to_random,
use_random=True)
return mol
def _minimize_by_mmff(
molecule: Chem.rdchem.Mol,
max_iter: int,
) -> Optional[Chem.rdchem.Mol]:
"""Minimizes forcefield for conformers using MMFF algorithm.
Args:
molecule: a datastructure containing conformers.
max_iter: number of maximum iterations to use when optimising force field.
Returns:
A copy of a `molecule` containing optimised conformers; or None if MMFF
cannot be performed.
"""
molecule_props = AllChem.MMFFGetMoleculeProperties(molecule)
if molecule_props is None:
return None
mol = copy.deepcopy(molecule)
for conf_id in range(mol.GetNumConformers()):
ff = AllChem.MMFFGetMoleculeForceField(
mol, molecule_props, confId=conf_id, ignoreInterfragInteractions=False)
ff.Initialize()
# minimises a conformer within a mol in place.
ff.Minimize(max_iter)
return mol
def _minimize_by_uff(
molecule: Chem.rdchem.Mol,
max_iter: int,
) -> Chem.rdchem.Mol:
"""Minimizes forcefield for conformers using UFF algorithm.
Args:
molecule: a datastructure containing conformers.
max_iter: number of maximum iterations to use when optimising force field.
Returns:
A copy of a `molecule` containing optimised conformers.
"""
mol = copy.deepcopy(molecule)
conf_ids = range(mol.GetNumConformers())
for conf_id in conf_ids:
ff = AllChem.UFFGetMoleculeForceField(mol, confId=conf_id)
ff.Initialize()
# minimises a conformer within a mol in place.
ff.Minimize(max_iter)
return mol
def _get_symmetry_rotation_matrix(sign: tf.Tensor) -> tf.Tensor:
"""Returns the 2d/3d matrix for mirror symmetry."""
zero = tf.zeros_like(sign)
one = tf.ones_like(sign)
# pylint: disable=bad-whitespace,bad-continuation
rot = [sign, zero, zero,
zero, one, zero,
zero, zero, one]
# pylint: enable=bad-whitespace,bad-continuation
shape = (3, 3)
rot = tf.stack(rot, axis=-1)
rot = tf.reshape(rot, shape)
return rot
def _quaternion_to_rotation_matrix(quaternion: tf.Tensor) -> tf.Tensor:
"""Converts a batch of quaternions to a batch of rotation matrices."""
q0 = quaternion[0]
q1 = quaternion[1]
q2 = quaternion[2]
q3 = quaternion[3]
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
matrix = tf.stack([r00, r01, r02,
r10, r11, r12,
r20, r21, r22], axis=-1)
return tf.reshape(matrix, [3, 3])
def _get_random_rotation_3d() -> tf.Tensor:
random_quaternions = tf.random.normal(
shape=[4], dtype=tf.float32)
random_quaternions /= tf.linalg.norm(
random_quaternions, axis=-1, keepdims=True)
return _quaternion_to_rotation_matrix(random_quaternions)
def _get_random_mirror_symmetry() -> tf.Tensor:
random_0_1 = tf.random.uniform(
shape=(), minval=0, maxval=2, dtype=tf.int32)
random_signs = tf.cast((2 * random_0_1) - 1, tf.float32)
return _get_symmetry_rotation_matrix(random_signs)
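# Illustrative usage (added for clarity, not part of the original module):
#
#   positions = compute_conformer('CCO', max_iter=200)  # (num_atoms, 3) float32,
#                                                        # NaNs if generation fails
#   rotation = get_random_rotation_matrix(include_mirror_symmetry=True)
#   rotated = rotate(tf.constant(positions), rotation)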
|
fkie_node_manager/src/fkie_node_manager/nmd_client/__init__.py | JOiiNT-LAB/multimaster_fkie | 194 | 12784152 |
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, Fraunhofer FKIE/CMS, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import QObject, Signal
import fkie_node_manager_daemon.remote as remote
from .file_channel import FileChannel
from .launch_channel import LaunchChannel
from .monitor_channel import MonitorChannel
from .screen_channel import ScreenChannel
from .settings_channel import SettingsChannel
from .version_channel import VersionChannel
class NmdClient(QObject):
error = Signal(str, str, str, Exception)
'''
:ivar str,str,str,Exception error: error is a signal, which is emitted on errors {method, url, path, Exception}.
'''
def __init__(self):
QObject.__init__(self)
self._channels = []
self.file = FileChannel()
self.file.error.connect(self.on_error)
self._channels.append(self.file)
self.launch = LaunchChannel()
self.launch.error.connect(self.on_error)
self._channels.append(self.launch)
self.monitor = MonitorChannel()
self.monitor.error.connect(self.on_error)
self._channels.append(self.monitor)
self.screen = ScreenChannel()
self.screen.error.connect(self.on_error)
self._channels.append(self.screen)
self.settings = SettingsChannel()
self.settings.error.connect(self.on_error)
self._channels.append(self.settings)
self.version = VersionChannel()
self.version.error.connect(self.on_error)
self._channels.append(self.version)
def stop(self):
print("clear grpc channels...")
for channel in self._channels:
channel.stop()
remote.clear_channels()
print("clear grpc channels...ok")
self.clear_cache()
del self._channels[:]
def clear_cache(self, grpc_path=''):
for channel in self._channels:
channel.clear_cache(grpc_path)
def on_error(self, method, url, path, exception):
self.error.emit(method, url, path, exception)
|
utils/wfuzzbasicauthbrute/wfuzz/framework/core/myexception.py | ismailbozkurt/kubebot | 171 | 12784184 |
class FuzzException(Exception):
FATAL, SIGCANCEL = range(2)
def __init__(self, etype, msg):
self.etype = etype
self.msg = msg
Exception.__init__(self, msg)
|
tests/core/trio/test_trio_endpoint_compat_with_asyncio.py | gsalgado/lahja | 400 | 12784192 |
import asyncio
import multiprocessing
import pytest
from lahja.asyncio import AsyncioEndpoint
from lahja.common import BaseEvent, ConnectionConfig
class EventTest(BaseEvent):
def __init__(self, value):
self.value = value
def run_asyncio(coro, *args):
loop = asyncio.get_event_loop()
loop.run_until_complete(coro(*args))
loop.close()
async def _do_asyncio_client_endpoint(name, ipc_path):
config = ConnectionConfig(name, ipc_path)
async with AsyncioEndpoint(name + "client").run() as client:
await client.connect_to_endpoints(config)
assert client.is_connected_to(name)
await client.wait_until_endpoint_subscribed_to(config.name, EventTest)
event = EventTest("test")
await client.broadcast(event)
@pytest.mark.trio
async def test_trio_endpoint_serving_asyncio_endpoint(
endpoint_server, endpoint_server_config
):
name = endpoint_server_config.name
path = endpoint_server_config.path
proc = multiprocessing.Process(
target=run_asyncio, args=(_do_asyncio_client_endpoint, name, path)
)
proc.start()
result = await endpoint_server.wait_for(EventTest)
assert isinstance(result, EventTest)
assert result.value == "test"
proc.join()
|
lib/WindowParent.py | aganders3/python-0.9.1 | 116 | 12784203 | # A 'WindowParent' is the only module that uses real stdwin functionality.
# It is the root of the tree.
# It should have exactly one child when realized.
import stdwin
from stdwinevents import *
from TransParent import ManageOneChild
Error = 'WindowParent.Error' # Exception
class WindowParent() = ManageOneChild():
#
def create(self, (title, size)):
self.title = title
self.size = size # (width, height)
self._reset()
return self
#
def _reset(self):
self.child = 0
self.win = 0
self.itimer = 0
self.do_mouse = 0
self.do_timer = 0
#
def destroy(self):
if self.child: self.child.destroy()
self._reset()
#
def need_mouse(self, child): self.do_mouse = 1
def no_mouse(self, child): self.do_mouse = 0
#
def need_timer(self, child): self.do_timer = 1
def no_timer(self, child): self.do_timer = 0
#
def realize(self):
if self.win:
raise Error, 'realize(): called twice'
if not self.child:
raise Error, 'realize(): no child'
size = self.child.minsize(self.beginmeasuring())
self.size = max(self.size[0], size[0]), \
max(self.size[1], size[1])
#stdwin.setdefwinsize(self.size)
# XXX Compensate stdwin bug:
stdwin.setdefwinsize(self.size[0]+4, self.size[1]+2)
self.win = stdwin.open(self.title)
if self.itimer:
self.win.settimer(self.itimer)
bounds = (0, 0), self.win.getwinsize()
self.child.setbounds(bounds)
#
def beginmeasuring(self):
# Return something with which a child can measure text
if self.win:
return self.win.begindrawing()
else:
return stdwin
#
def begindrawing(self):
if self.win:
return self.win.begindrawing()
else:
raise Error, 'begindrawing(): not realized yet'
#
def change(self, area):
if self.win:
self.win.change(area)
#
def scroll(self, args):
if self.win:
self.win.scroll(args)
#
def settimer(self, itimer):
if self.win:
self.win.settimer(itimer)
else:
self.itimer = itimer
#
# Only call dispatch if we have a child
#
def dispatch(self, (type, win, detail)):
if win <> self.win:
return
elif type = WE_DRAW:
d = self.win.begindrawing()
self.child.draw(d, detail)
elif type = WE_MOUSE_DOWN:
if self.do_mouse: self.child.mouse_down(detail)
elif type = WE_MOUSE_MOVE:
if self.do_mouse: self.child.mouse_move(detail)
elif type = WE_MOUSE_UP:
if self.do_mouse: self.child.mouse_up(detail)
elif type = WE_TIMER:
if self.do_timer: self.child.timer()
elif type = WE_SIZE:
self.win.change((0, 0), (10000, 10000)) # XXX
bounds = (0, 0), self.win.getwinsize()
self.child.setbounds(bounds)
#
|
ansible/roles/lib_gcloud/build/src/gcloud_compute_projectinfo.py | fahlmant/openshift-tools | 164 | 12784209 | # pylint: skip-file
class GcloudComputeProjectInfoError(Exception):
'''exception class for projectinfo'''
pass
# pylint: disable=too-many-instance-attributes
class GcloudComputeProjectInfo(GcloudCLI):
''' Class to wrap the gcloud compute projectinfo command'''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
metadata=None,
metadata_from_file=None,
remove_keys=None,
verbose=False):
''' Constructor for gcloud resource '''
super(GcloudComputeProjectInfo, self).__init__()
self._metadata = metadata
self.metadata_from_file = metadata_from_file
self.remove_keys = remove_keys
self._existing_metadata = None
self.verbose = verbose
@property
def metadata(self):
'''property for existing metadata'''
return self._metadata
@property
def existing_metadata(self):
'''property for existing metadata'''
if self._existing_metadata == None:
self._existing_metadata = []
metadata = self.list_metadata()
metadata = metadata['results']['commonInstanceMetadata']
if metadata.has_key('items'):
self._existing_metadata = metadata['items']
return self._existing_metadata
def list_metadata(self):
'''return metatadata'''
results = self._list_metadata('project-info')
if results['returncode'] == 0:
results['results'] = yaml.load(results['results'])
return results
def exists(self):
''' return whether the metadata that we are removing exists '''
# currently we aren't opening up files for comparison so always return False
if self.metadata_from_file:
return False
for key, val in self.metadata.items():
for data in self.existing_metadata:
if key == 'sshKeys' and data['key'] == key:
ssh_keys = {}
# get all the users and their public keys out of the project
for user_pub_key in data['value'].strip().split('\n'):
col_index = user_pub_key.find(':')
user = user_pub_key[:col_index]
pub_key = user_pub_key[col_index+1:]
ssh_keys[user] = pub_key
# compare the users that were passed in to see if we need to update
for inc_user, inc_pub_key in val.items():
if not ssh_keys.has_key(inc_user) or ssh_keys[inc_user] != inc_pub_key:
return False
# matched all ssh keys
break
elif data['key'] == str(key) and str(data['value']) == str(val):
break
else:
return False
return True
def keys_exist(self):
''' return whether the keys exist in the metadata'''
for key in self.remove_keys:
for mdata in self.existing_metadata:
if key == mdata['key']:
break
else:
# NOT FOUND
return False
return True
def needs_update(self):
''' return whether an we need to update '''
# compare incoming values with metadata returned
# for each key in user supplied check against returned data
return not self.exists()
def delete_metadata(self, remove_all=False):
''' attempt to remove metadata '''
return self._delete_metadata(self.remove_keys, remove_all=remove_all)
def create_metadata(self):
'''create an metadata'''
results = None
if self.metadata and self.metadata.has_key('sshKeys'):
# create a file and pass it to create
ssh_strings = ["%s:%s" % (user, pub_key) for user, pub_key in self.metadata['sshKeys'].items()]
ssh_keys = {'sshKeys': Utils.create_file('ssh_keys', '\n'.join(ssh_strings), 'raw')}
results = self._create_metadata('project-info', self.metadata, ssh_keys)
# remove them and continue
del self.metadata['sshKeys']
if len(self.metadata.keys()) == 0:
return results
new_results = self._create_metadata('project-info', self.metadata, self.metadata_from_file)
if results:
return [results, new_results]
return new_results
|
examples/optimizers/swarm/create_ffoa.py | anukaal/opytimizer | 528 | 12784220 | from opytimizer.optimizers.swarm import FFOA
# Creates an FFOA optimizer
o = FFOA()
|
pyorient/ogm/vertex.py | spy7/pyorient | 142 | 12784239 |
from .element import GraphElement
from .broker import VertexBroker
class Vertex(GraphElement):
Broker = VertexBroker
# TODO
# Edge information is carried in vertexes retrieved from database,
# as OrientBinaryObject. Can likely optimise these traversals
# when we know how to parse these.
def outE(self, *edge_classes):
g = self._graph
return g.outE(self._id, *edge_classes) if g else None
def inE(self, *edge_classes):
g = self._graph
return g.inE(self._id, *edge_classes) if g else None
def bothE(self, *edge_classes):
g = self._graph
return g.bothE(self._id, *edge_classes) if g else None
def out(self, *edge_classes):
g = self._graph
return g.out(self._id, *edge_classes) if g else None
def in_(self, *edge_classes):
g = self._graph
return g.in_(self._id, *edge_classes) if g else None
def both(self, *edge_classes):
g = self._graph
return g.both(self._id, *edge_classes) if g else None
def __call__(self, edge_or_broker):
"""Provides syntactic sugar for creating edges."""
if hasattr(edge_or_broker, 'broker'):
edge_or_broker = edge_or_broker.broker.element_cls
elif hasattr(edge_or_broker, 'element_cls'):
edge_or_broker = edge_or_broker.element_cls
if edge_or_broker.decl_type == 1:
return VertexVector(self, edge_or_broker.objects)
class VertexVector(object):
def __init__(self, origin, edge_broker, **kwargs):
self.origin = origin
self.edge_broker = edge_broker
self.kwargs = kwargs
def __gt__(self, target):
"""Syntactic sugar for creating an edge.
:param target: If a batch variable, return a command for creating an
edge to this vertex. Otherwise, create the edge.
"""
if hasattr(target, '_id'):
if target._id[0] == '$':
return self.edge_broker.create_command(
self.origin, target, **self.kwargs)
else:
return self.edge_broker.create(
self.origin, target, **self.kwargs)
return self
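# Illustrative note (added, not part of the original module): the __call__/__gt__
# sugar above lets an edge be created between two saved vertices as
#
#   from_vertex(SomeEdge) > to_vertex
#
# where SomeEdge is a declared edge class (or its broker); the names here are
# hypothetical. If to_vertex is a batch variable (its _id starts with '$'), a
# creation command is returned instead of the edge being created immediately.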
|
external_push/admin.py | fossabot/fermentrack | 114 | 12784243 |
from django.contrib import admin
from external_push.models import GenericPushTarget, BrewersFriendPushTarget, BrewfatherPushTarget, ThingSpeakPushTarget, GrainfatherPushTarget
@admin.register(GenericPushTarget)
class GenericPushTargetAdmin(admin.ModelAdmin):
list_display = ('name', 'status', 'target_host')
@admin.register(BrewersFriendPushTarget)
class BrewersFriendPushTargetAdmin(admin.ModelAdmin):
list_display = ('gravity_sensor_to_push', 'status', 'push_frequency')
@admin.register(BrewfatherPushTarget)
class BrewfatherPushTargetAdmin(admin.ModelAdmin):
list_display = ('gravity_sensor_to_push', 'status', 'push_frequency')
@admin.register(ThingSpeakPushTarget)
class ThingSpeakPushTargetAdmin(admin.ModelAdmin):
list_display = ('name', 'status')
@admin.register(GrainfatherPushTarget)
class GrainfatherPushTargetAdmin(admin.ModelAdmin):
list_display = ('gravity_sensor_to_push', 'status', 'push_frequency')
|
scripts/UtilitiesConvertCharacter.py | CrackerCat/pwndra | 524 | 12784255 | # Convert an operand to characters
#@author b0bb
#@category Pwn
#@keybinding shift r
#@menupath Analysis.Pwn.Utilities.Convert to Char
#@toolbar
import ghidra.app.cmd.equate.SetEquateCmd as SetEquateCmd
import ghidra.program.util.OperandFieldLocation as OperandFieldLocation
import ghidra.program.model.lang.OperandType as OperandType
def run():
if type(currentLocation) is not OperandFieldLocation:
return
addr = currentLocation.getAddress()
inst = currentProgram.getListing().getInstructionAt(addr)
opin = currentLocation.getOperandIndex()
if inst.getOperandType(opin) == OperandType.SCALAR:
string = ''
scalar = inst.getScalar(opin)
bvalue = scalar.byteArrayValue()
if not currentProgram.getLanguage().isBigEndian():
bvalue.reverse()
for value in bvalue:
if value < 0x20 or value > 0x7e:
string += '\\x%02x' % value
else:
string += chr(value)
cmd = SetEquateCmd('"%s"' % string, addr, opin, scalar.getValue())
state.getTool().execute(cmd, currentProgram)
run() |
rllab/envs/mujoco/hill/terrain.py | RussellM2020/maml_gps | 1,838 | 12784266 | from scipy.stats import multivariate_normal
from scipy.signal import convolve2d
import matplotlib
try:
matplotlib.pyplot.figure()
matplotlib.pyplot.close()
except Exception:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
# the colormap should assign light colors to low values
TERRAIN_CMAP = 'Greens'
DEFAULT_PATH = '/tmp/mujoco_terrains'
STEP = 0.1
def generate_hills(width, height, nhills):
'''
@param width float, terrain width
@param height float, terrain height
    @param nhills int, #hills to gen. #hills actually generated is sqrt(nhills)^2
'''
# setup coordinate grid
xmin, xmax = -width/2.0, width/2.0
ymin, ymax = -height/2.0, height/2.0
x, y = np.mgrid[xmin:xmax:STEP, ymin:ymax:STEP]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
# generate hilltops
xm, ym = np.mgrid[xmin:xmax:width/np.sqrt(nhills), ymin:ymax:height/np.sqrt(nhills)]
mu = np.c_[xm.flat, ym.flat]
sigma = float(width*height)/(nhills*8)
for i in range(mu.shape[0]):
mu[i] = multivariate_normal.rvs(mean=mu[i], cov=sigma)
# generate hills
sigma = sigma + sigma*np.random.rand(mu.shape[0])
rvs = [ multivariate_normal(mu[i,:], cov=sigma[i]) for i in range(mu.shape[0]) ]
hfield = np.max([ rv.pdf(pos) for rv in rvs ], axis=0)
return x, y, hfield
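# Illustrative usage (added for clarity, not part of the original module), using
# the helpers defined below in this file:
#
#   x, y, hfield = generate_hills(width=10.0, height=10.0, nhills=16)
#   save_heightfield(x, y, hfield, 'hills.png')
#   save_texture(x, y, hfield, 'hills_texture.png')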
def clear_patch(hfield, box):
''' Clears a patch shaped like box, assuming robot is placed in center of hfield
@param box: rllab.spaces.Box-like
'''
if box.flat_dim > 2:
raise ValueError("Provide 2dim box")
# clear patch
h_center = int(0.5 * hfield.shape[0])
w_center = int(0.5 * hfield.shape[1])
fromrow, torow = w_center + int(box.low[0]/STEP), w_center + int(box.high[0] / STEP)
fromcol, tocol = h_center + int(box.low[1]/STEP), h_center + int(box.high[1] / STEP)
hfield[fromrow:torow, fromcol:tocol] = 0.0
# convolve to smoothen edges somewhat, in case hills were cut off
K = np.ones((10,10)) / 100.0
s = convolve2d(hfield[fromrow-9:torow+9, fromcol-9:tocol+9], K, mode='same', boundary='symm')
hfield[fromrow-9:torow+9, fromcol-9:tocol+9] = s
return hfield
def _checkpath(path_):
if path_ is None:
path_ = DEFAULT_PATH
if not os.path.exists(path_):
os.makedirs(path_)
return path_
def save_heightfield(x, y, hfield, fname, path=None):
'''
@param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure the path + fname match the <file> attribute
of the <asset> element in the env XML where the height field is defined
'''
path = _checkpath(path)
plt.figure()
plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP) # terrain_cmap is necessary to make sure tops get light color
plt.savefig(os.path.join(path, fname), bbox_inches='tight')
plt.close()
def save_texture(x, y, hfield, fname, path=None):
'''
@param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure this matches the <texturedir> of the
<compiler> element in the env XML
'''
path = _checkpath(path)
plt.figure()
plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
# for some reason plt.grid does not work here, so generate gridlines manually
for i in np.arange(xmin,xmax,0.5):
plt.plot([i,i], [ymin,ymax], 'k', linewidth=0.1)
for i in np.arange(ymin,ymax,0.5):
plt.plot([xmin,xmax],[i,i], 'k', linewidth=0.1)
plt.savefig(os.path.join(path, fname), bbox_inches='tight')
plt.close() |
FaceTime/frida/replay.py | googleprojectzero/Street-Party | 226 | 12784273 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
import frida
import sys
import os
vid_index=0
aud_index = 0
def on_message(message, data):
global vid_index
global aud_index
print(message)
session = frida.attach("avconferenced")
code = open('replay.js', 'r').read()
script = session.create_script(code);
script.on("message", on_message)
script.load()
print("Press Ctrl-C to quit")
sys.stdin.read()
|
test/rql_test/drivers/driver_test.py | zadcha/rethinkdb | 21,684 | 12784280 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from driver import bag, compare, err, err_regex, partial, uuid
class PythonTestDriverTest(unittest.TestCase):
def compare(self, expected, result, options=None):
self.assertTrue(compare(expected, result, options=options))
def compareFalse(self, expected, result, options=None):
self.assertFalse(compare(expected, result, options=options))
def test_string(self):
# simple
self.compare('a', 'a')
self.compare('á', 'á')
self.compare('something longer\nwith two lines', 'something longer\nwith two lines')
self.compareFalse('a', 'b')
self.compareFalse('a', 1)
self.compareFalse('a', [])
self.compareFalse('a', None)
self.compareFalse('a', ['a'])
self.compareFalse('a', {'a': 1})
def test_array(self):
# simple pass
self.compare([1, 2, 3], [1, 2, 3])
# out of order
self.compareFalse([1, 2, 3], [1, 3, 2])
        # totally mismatched lists
self.compareFalse([1, 2, 3], [3, 4, 5])
# missing items
self.compareFalse([1, 2, 3], [1, 2])
self.compareFalse([1, 2, 3], [1, 3])
# extra items
self.compareFalse([1, 2, 3], [1, 2, 3, 4])
# empty array
self.compare([], [])
self.compareFalse([1], [])
self.compareFalse([], [1])
self.compareFalse([], None)
# strings
self.compare(['a', 'b'], ['a', 'b'])
self.compareFalse(['a', 'c'], ['a', 'b'])
# multiple of a single value
self.compare([1, 2, 2, 3, 3, 3], [1, 2, 2, 3, 3, 3])
self.compareFalse([1, 2, 2, 3, 3, 3], [1, 2, 3])
self.compareFalse([1, 2, 3], [1, 2, 2, 3, 3, 3])
def test_array_partial(self):
'''note that these are all in-order'''
# simple
self.compare(partial([1]), [1, 2, 3])
self.compare(partial([2]), [1, 2, 3])
self.compare(partial([3]), [1, 2, 3])
self.compare(partial([1, 2]), [1, 2, 3])
self.compare(partial([1, 3]), [1, 2, 3])
self.compare(partial([1, 2, 3]), [1, 2, 3])
self.compareFalse(partial([4]), [1, 2, 3])
# ordered
self.compareFalse(partial([3, 2, 1], ordered=True), [1, 2, 3])
self.compareFalse(partial([1, 3, 2], ordered=True), [1, 2, 3])
# empty array
self.compare(partial([]), [1, 2, 3])
# multiple of a single items
self.compare(partial([1, 2, 2]), [1, 2, 2, 3, 3, 3])
self.compareFalse(partial([1, 2, 2, 2]), [1, 2, 2, 3, 3, 3])
def test_array_unordered(self):
# simple
self.compare(bag([1, 2]), [1, 2])
self.compare(bag([2, 1]), [1, 2])
self.compareFalse(bag([1, 2]), [1, 2, 3])
self.compareFalse(bag([1, 3]), [1, 2, 3])
self.compareFalse(bag([3, 1]), [1, 2, 3])
# empty array
self.compare(bag([]), [])
def test_dict(self):
# simple
self.compare({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'b': 2, 'c': 3})
self.compare({'a': 1, 'b': 2, 'c': 3}, {'c': 3, 'a': 1, 'b': 2})
self.compareFalse({'a': 1, 'b': 2, 'c': 3}, {'a': 1})
self.compareFalse({'a': 1}, {'a': 1, 'b': 2, 'c': 3})
# empty
self.compare({}, {})
self.compareFalse({}, {'a': 1})
self.compareFalse({'a': 1}, {})
def test_dict_partial(self):
# simple
self.compare(partial({'a': 1}), {'a': 1})
self.compare(partial({'a': 1}), {'a': 1, 'b': 2})
self.compareFalse(partial({'a': 2}), {'a': 1, 'b': 2})
self.compareFalse(partial({'c': 1}), {'a': 1, 'b': 2})
self.compareFalse(partial({'a': 1, 'b': 2}), {'b': 2})
# empty
self.compare(partial({}), {})
self.compare(partial({}), {'a': 1})
self.compareFalse(partial({'a': 1}), {})
def test_compare_dict_in_array(self):
# simple
self.compare([{'a': 1}], [{'a': 1}])
self.compare([{'a': 1, 'b': 2}], [{'a': 1, 'b': 2}])
self.compare([{'a': 1}, {'b': 2}], [{'a': 1}, {'b': 2}])
self.compareFalse([{'a': 1}], [{'a': 1, 'b': 2}])
self.compareFalse([{'a': 2, 'b': 2}], [{'a': 1, 'b': 2}])
self.compareFalse([{'a': 2, 'c': 3}], [{'a': 1, 'b': 2}])
self.compareFalse([{'a': 2, 'c': 3}], [{'a': 1}])
self.compareFalse([{'a': 1}, {'b': 2}], [{'a': 1, 'b': 2}])
# order
self.compareFalse([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
# partial
self.compare(partial([{}]), [{'a': 1, 'b': 2}])
self.compare(partial([{}]), [{'a': 1, 'b': 2}])
self.compare(partial([{'a': 1}]), [{'a': 1, 'b': 2}])
self.compare(partial([{'a': 1, 'b': 2}]), [{'a': 1, 'b': 2}])
self.compare(partial([{'a': 1}, {'b': 2}]), [{'a': 1}, {'b': 2}, {'c': 3}])
self.compareFalse(partial([{'a': 2}]), [{'a': 1, 'b': 2}])
self.compareFalse(partial([{'a': 1, 'b': 2}]), [{'a': 1}])
# partial order
self.compareFalse(partial([{'a': 1}, {'b': 2}], ordered=True), [{'b': 2}, {'a': 1}])
# partial unordered
self.compare(partial([{'a': 1}, {'b': 2}]), [{'b': 2}, {'a': 1}])
self.compare(partial([{'a': 1}, {'b': 2}], ordered=False), [{'b': 2}, {'a': 1}])
def test_compare_partial_items_in_array(self):
self.compare([{'a': 1, 'b': 1}, partial({'a': 2})], [{'a': 1, 'b': 1}, {'a': 2, 'b': 2}])
def test_compare_array_in_dict(self):
pass
def test_exception(self):
# class only
self.compare(KeyError, KeyError())
self.compare(KeyError(), KeyError())
self.compare(err('KeyError'), KeyError())
self.compare(err(KeyError), KeyError())
self.compareFalse(KeyError, NameError())
self.compareFalse(KeyError(), NameError())
self.compareFalse(err('KeyError'), NameError())
self.compareFalse(err(KeyError), NameError())
# subclass
self.compare(LookupError, KeyError())
self.compare(LookupError(), KeyError())
self.compare(err('LookupError'), KeyError())
self.compare(err(LookupError), KeyError())
self.compareFalse(KeyError, LookupError())
self.compareFalse(KeyError(), LookupError())
self.compareFalse(err('KeyError'), LookupError())
self.compareFalse(err(KeyError), LookupError())
# message
self.compare(err(KeyError), KeyError('alpha'))
self.compare(err(KeyError, 'alpha'), KeyError('alpha'))
self.compareFalse(err(KeyError, 'alpha'), KeyError('beta'))
# regex message
self.compare(err(KeyError), KeyError('alpha'))
# regex message with debug/assertion text
self.compare(err_regex(KeyError, 'alpha'), KeyError('alpha'))
self.compare(err_regex(KeyError, 'alp'), KeyError('alpha'))
self.compare(err_regex(KeyError, '.*pha'), KeyError('alpha'))
self.compareFalse(err_regex(KeyError, 'beta'), KeyError('alpha'))
# ToDo: frames (when/if we support them)
def test_compare_uuid(self):
# simple
self.compare(uuid(), '4e9e5bc2-9b11-4143-9aa1-75c10e7a193a')
self.compareFalse(uuid(), '4')
self.compareFalse(uuid(), '*')
self.compareFalse(uuid(), None)
def test_numbers(self):
# simple
self.compare(1, 1)
self.compare(1, 1.0)
self.compare(1.0, 1)
self.compare(1.0, 1.0)
self.compareFalse(1, 2)
self.compareFalse(1, 2.0)
self.compareFalse(1.0, 2)
self.compareFalse(1.0, 2.0)
# precision
precision = {'precision': 0.5}
self.compare(1, 1.4, precision)
self.compare(1.0, 1.4, precision)
self.compareFalse(1, 2, precision)
self.compareFalse(1, 1.6, precision)
self.compareFalse(1.0, 2, precision)
self.compareFalse(1.0, 1.6, precision)
if __name__ == '__main__':
unittest.main()
|
tests/unit/test_order.py | Aspire1Inspire2/td-ameritrade-python-api | 610 | 12784290 |
import unittest
import td.enums as td_enums
from unittest import TestCase
from configparser import ConfigParser
from td.orders import Order
from td.orders import OrderLeg
from td.client import TDClient
from td.stream import TDStreamerClient
class TDSession(TestCase):
"""Will perform a unit test for the TD session."""
def setUp(self) -> None:
"""Set up the Robot."""
# Grab configuration values.
config = ConfigParser()
config.read('config/config.ini')
CLIENT_ID = config.get('main', 'CLIENT_ID')
REDIRECT_URI = config.get('main', 'REDIRECT_URI')
JSON_PATH = config.get('main', 'JSON_PATH')
ACCOUNT_NUMBER = config.get('main', 'ACCOUNT_NUMBER')
# Initalize the session.
self.td_session = TDClient(
client_id=CLIENT_ID,
redirect_uri=REDIRECT_URI,
credentials_path=JSON_PATH,
account_number=ACCOUNT_NUMBER
)
self.td_order = Order()
self.td_order_leg = OrderLeg()
def test_creates_instance_of_session(self):
"""Create an instance and make sure it's a robot."""
self.assertIsInstance(self.td_session, TDClient)
self.assertIsInstance(self.td_order, Order)
self.assertIsInstance(self.td_order_leg, OrderLeg)
def test_define_simple_order(self):
"""Test creating a simple order."""
# Add the Order session.
self.td_order.order_session(
session=td_enums.ORDER_SESSION.NORMAL
)
# Add the Order duration.
self.td_order.order_duration(
duration=td_enums.DURATION.GOOD_TILL_CANCEL
)
# Add the Order Leg Instruction.
self.td_order_leg.order_leg_instruction(
instruction=td_enums.ORDER_INSTRUCTIONS.SELL
)
# Add the Order Leg price.
self.td_order_leg.order_leg_price(
price=112.50
)
# Add the Order Leg quantity.
self.td_order_leg.order_leg_quantity(
quantity=10
)
# Add the Order Leg Asset.
self.td_order_leg.order_leg_asset(
asset_type=td_enums.ORDER_ASSET_TYPE.EQUITY,
symbol='MSFT'
)
# Add the Order Leg.
self.td_order.add_order_leg(
order_leg=self.td_order_leg
)
correct_dict = {
"session": "NORMAL",
"duration": "GOOD_TILL_CANCEL",
"orderLegCollection": [
{
"instruction": "SELL",
"price": 112.5, "quantity": 10,
"instrument": {
"assetType":
"EQUITY",
"symbol": "MSFT"
}
}
]
}
self.assertDictEqual(correct_dict, self.td_order._grab_order())
def tearDown(self):
"""Clean Up."""
self.td_session = None
self.td_order = None
self.td_order_leg = None
if __name__ == '__main__':
unittest.main()
|
core/polyaxon/polyboard/processors/logs_processor.py | admariner/polyaxon | 3,200 | 12784338 |
# !/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from polyaxon.polyboard.logging.handler import PolyaxonHandler
EXCLUDE_DEFAULT_LOGGERS = ("polyaxon.client", "polyaxon.cli")
def setup_logging(add_logs, exclude=EXCLUDE_DEFAULT_LOGGERS):
plx_logger = logging.getLogger()
plx_logger.setLevel(logging.INFO)
if logging.StreamHandler not in map(type, plx_logger.handlers):
plx_logger.addHandler(logging.StreamHandler())
plx_logger.propagate = False
if PolyaxonHandler in map(type, plx_logger.handlers):
for handler in plx_logger.handlers:
if isinstance(handler, PolyaxonHandler):
handler.set_add_logs(add_logs=add_logs)
else:
handler = PolyaxonHandler(add_logs=add_logs)
plx_logger.addHandler(handler)
for logger_name in exclude:
plx_logger = logging.getLogger(logger_name)
if logging.StreamHandler not in map(type, plx_logger.handlers):
plx_logger.addHandler(logging.StreamHandler())
plx_logger.propagate = False
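# A minimal usage sketch: any callable that accepts the handler's log payload can act
# as ``add_logs``; the no-op lambda below is an assumption for illustration only and
# is not part of the Polyaxon API.
if __name__ == "__main__":
    setup_logging(add_logs=lambda *args, **kwargs: None)
    logging.getLogger(__name__).info("log records now flow through PolyaxonHandler")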
|
unstrip.py | pzread/unstrip | 103 | 12784395 |
import sys
import sqlite3
import msgpack
from scan import *
from mark import *
if __name__ == '__main__':
conn = sqlite3.connect('fin.db')
try:
conn.execute('CREATE TABLE flowfin (label text primary key,len int,fin blob,hash text);')
conn.execute('CREATE INDEX index_flowfin_len ON flowfin (len);')
conn.execute('CREATE INDEX index_flowfin_hash ON flowfin (hash);')
except sqlite3.OperationalError:
pass
if 'gendb' in sys.argv:
gen_db(conn)
else:
filepath = sys.argv[-1]
exe = EXE(filepath,filepath)
mark_list = []
call_loc = set()
start_pc = exe.elf.header['e_entry']
call_loc = exe.ScanBlock(exe.GetSection(start_pc))
main_pc = None
cur = conn.cursor()
cur.execute('SELECT * FROM flowfin WHERE label=?;',
('libc-start.o # __libc_start_main',))
finent = cur.fetchone()
if finent != None:
finb = msgpack.unpackb(finent[2])
for pos,loc in call_loc:
fina = exe.FuncFin(loc,set())
if CmpFin(fina,finb) == 0:
ins,_ = Disasm(pos[0],pos[1] - 7)
main_pc = ins.operands[1].value.imm
break
if main_pc != None:
mark_list.append((exe.GetSection(main_pc),'main'))
call_loc.update(exe.ScanBlock(exe.GetSection(main_pc)))
for pos,loc in call_loc:
fina = exe.FuncFin(loc,set())
find_name = None
for row in conn.execute('SELECT * FROM flowfin WHERE len<=?;',
(len(fina),)):
finb = msgpack.unpackb(row[2])
dis = CmpFin(fina,finb)
if dis == 0:
find_name = row[0]
break
if find_name == None:
find_name = '<unknown>'
else:
mark_list.append((loc,find_name.split(' # ')[1]))
print('%016lx - %s'%(loc[0].base + loc[1],find_name))
mark(exe,mark_list)
|
src/meltano/core/job/finder.py | siilats/meltano | 122 | 12784416 |
"""Defines JobFinder."""
from datetime import datetime, timedelta
from .job import HEARTBEAT_VALID_MINUTES, HEARTBEATLESS_JOB_VALID_HOURS, Job, State
class JobFinder:
"""
Query builder for the `Job` model for a certain `elt_uri`.
"""
def __init__(self, job_id: str):
self.job_id = job_id
def latest(self, session):
return (
session.query(Job)
.filter(Job.job_id == self.job_id)
.order_by(Job.started_at.desc())
.first()
)
def successful(self, session):
return session.query(Job).filter(
(Job.job_id == self.job_id)
& (Job.state == State.SUCCESS)
& Job.ended_at.isnot(None)
)
def running(self, session):
"""Find jobs in the running state."""
return session.query(Job).filter(
(Job.job_id == self.job_id) & (Job.state == State.RUNNING)
)
def latest_success(self, session):
return self.successful(session).order_by(Job.ended_at.desc()).first()
def latest_running(self, session):
"""Find the most recent job in the running state, if any."""
return self.running(session).order_by(Job.started_at.desc()).first()
def with_payload(self, session, flags=0, since=None):
query = (
session.query(Job)
.filter(
(Job.job_id == self.job_id)
& (Job.payload_flags != 0)
& (Job.payload_flags.op("&")(flags) == flags)
& Job.ended_at.isnot(None)
)
.order_by(Job.ended_at.asc())
)
if since:
query = query.filter(Job.ended_at > since)
return query
def latest_with_payload(self, session, **kwargs):
return (
self.with_payload(session, **kwargs)
.order_by(None) # Reset ascending order
.order_by(Job.ended_at.desc())
.first()
)
@classmethod
def all_stale(cls, session):
"""Return all stale jobs."""
now = datetime.utcnow()
last_valid_heartbeat_at = now - timedelta(minutes=HEARTBEAT_VALID_MINUTES)
last_valid_started_at = now - timedelta(hours=HEARTBEATLESS_JOB_VALID_HOURS)
return session.query(Job).filter(
(Job.state == State.RUNNING)
& (
(
Job.last_heartbeat_at.isnot(None)
& (Job.last_heartbeat_at < last_valid_heartbeat_at)
)
| (
Job.last_heartbeat_at.is_(None)
& (Job.started_at < last_valid_started_at)
)
)
)
def stale(self, session):
"""Return stale jobs with the instance's job ID."""
return self.all_stale(session).filter(Job.job_id == self.job_id)
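# A minimal usage sketch, assuming Job is a SQLAlchemy declarative model whose tables can
# be created on a throwaway in-memory engine; the job id string is a placeholder.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine("sqlite://")
    Job.metadata.create_all(engine)  # assumption: Job exposes declarative metadata
    session = sessionmaker(bind=engine)()
    finder = JobFinder("tap-example-to-target-example")
    print(finder.latest(session))             # None on an empty database
    print(finder.all_stale(session).count())  # 0: no running jobs yet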
|
tasks/admin.py | housepig7/ops | 394 | 12784427 |
from django.contrib import admin
# Register your models here.
from .models import history,toolsscript
admin.site.register(history)
admin.site.register(toolsscript) |
sdk/search/azure-search-documents/tests/test_index_documents_batch.py | rsdoherty/azure-sdk-for-python | 2,728 | 12784432 |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
from azure.search.documents.models import IndexAction
from azure.search.documents import IndexDocumentsBatch
METHOD_NAMES = [
"add_upload_actions",
"add_delete_actions",
"add_merge_actions",
"add_merge_or_upload_actions",
]
METHOD_MAP = dict(zip(METHOD_NAMES, ["upload", "delete", "merge", "mergeOrUpload"]))
class TestIndexDocumentsBatch(object):
def test_init(self):
batch = IndexDocumentsBatch()
assert batch.actions == []
def test_repr(self):
batch = IndexDocumentsBatch()
assert repr(batch) == "<IndexDocumentsBatch [0 actions]>"
batch._actions = [1, 2, 3]
assert repr(batch) == "<IndexDocumentsBatch [3 actions]>"
# a strict length test here would require constructing an actions list
# with a length of ~10**24, so settle for this simple sanity check on
# an extreme case.
        batch._actions = list(range(2000))
assert len(repr(batch)) <= 1024
def test_actions_returns_list_copy(self):
batch = IndexDocumentsBatch()
batch.actions.extend([1, 2, 3])
assert type(batch.actions) is list
assert batch.actions == []
assert batch.actions is not batch._actions
@pytest.mark.parametrize("method_name", METHOD_NAMES)
def test_add_method(self, method_name):
batch = IndexDocumentsBatch()
method = getattr(batch, method_name)
method("doc1")
assert len(batch.actions) == 1
method("doc2", "doc3")
assert len(batch.actions) == 3
method(["doc4", "doc5"])
assert len(batch.actions) == 5
method(("doc6", "doc7"))
assert len(batch.actions) == 7
assert all(
action.action_type == METHOD_MAP[method_name] for action in batch.actions
)
assert all(type(action) == IndexAction for action in batch.actions)
expected = ["doc{}".format(i) for i in range(1, 8)]
assert [action.additional_properties for action in batch.actions] == expected
|
src/ostorlab/agent/message/proto/v2/report/status_pb2.py | bbhunter/ostorlab | 113 | 12784475 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v2/report/status.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='v2/report/status.proto',
package='',
serialized_pb=_b(
'\n\x16v2/report/status.proto\"i\n\x06status\x12\x0f\n\x07scan_id\x18\x01 \x02(\x05\x12%\n\nattributes\x18\x02 \x03(\x0b\x32\x11.status.attribute\x1a\'\n\tattribute\x12\x0b\n\x03key\x18\x01 \x02(\t\x12\r\n\x05value\x18\x02 \x02(\t')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_STATUS_ATTRIBUTE = _descriptor.Descriptor(
name='attribute',
full_name='status.attribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='status.attribute.key', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='status.attribute.value', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=131,
)
_STATUS = _descriptor.Descriptor(
name='status',
full_name='status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scan_id', full_name='status.scan_id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attributes', full_name='status.attributes', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_STATUS_ATTRIBUTE, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=26,
serialized_end=131,
)
_STATUS_ATTRIBUTE.containing_type = _STATUS
_STATUS.fields_by_name['attributes'].message_type = _STATUS_ATTRIBUTE
DESCRIPTOR.message_types_by_name['status'] = _STATUS
status = _reflection.GeneratedProtocolMessageType('status', (_message.Message,), dict(
attribute=_reflection.GeneratedProtocolMessageType('attribute', (_message.Message,), dict(
DESCRIPTOR=_STATUS_ATTRIBUTE,
__module__='v2.report.status_pb2'
# @@protoc_insertion_point(class_scope:status.attribute)
))
,
DESCRIPTOR=_STATUS,
__module__='v2.report.status_pb2'
# @@protoc_insertion_point(class_scope:status)
))
_sym_db.RegisterMessage(status)
_sym_db.RegisterMessage(status.attribute)
# @@protoc_insertion_point(module_scope)
|
memory/memory_object.py | nbanmp/seninja | 109 | 12784504 |
from ..expr import BV, BVArray, Bool, ITE
class MemoryObj(object):
def __init__(self, name, bits=64, bvarray=None):
self.bvarray = BVArray(
"MEMOBJ_" + name, bits, 8
) if bvarray is None else bvarray
self.name = name
self.bits = bits
def __str__(self):
return "<MemoryObj{bits} {name}>".format(
bits=self.bits,
name=self.name
)
def __repr__(self):
return self.__str__()
def load(self, index: BV):
return self.bvarray.Select(index)
def store(self, index: BV, value: BV, condition: Bool = None):
if condition is None:
self.bvarray.Store(index, value)
else:
# this can be inefficient
self.bvarray.ConditionalStore(index, value, condition)
def copy(self):
return MemoryObj(self.name, self.bits, self.bvarray.copy())
def merge(self, other, merge_condition: Bool):
self.bvarray = self.bvarray.merge(other.bvarray, merge_condition)
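# A usage sketch in comments only, since BV construction lives in the package's expr
# module and concrete constructor calls here would be assumptions:
#
#     mem = MemoryObj("heap", bits=64)        # symbolic byte array indexed by 64-bit BVs
#     mem.store(index=addr_bv, value=byte_bv)
#     byte = mem.load(addr_bv)                # returns the stored 8-bit expression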
|
tests/frequentist/test_bounds.py | danielsaaf/confidence | 107 | 12784518 | import pytest
import time
import numpy as np
from spotify_confidence.analysis.frequentist.confidence_computers.z_test_computer import sequential_bounds
@pytest.mark.skip(reason="Skipping because this test is very slow")
def test_many_days():
"""
This input (based on a real experiment) is very long, which can cause slow calculation
"""
t = [
0.0016169976338740648,
0.0057857955498163615,
0.012200379088315757,
0.020199591701142824,
0.02956441064038571,
0.04047102718841871,
0.052929825413405296,
0.06580092295219643,
0.07878439818310792,
0.09148496950057272,
0.1028893343050959,
0.1128434997940756,
0.12298934256730025,
0.13280979910049193,
0.14267997977787195,
0.15281963941289514,
0.16293176212095561,
0.17198778455162406,
0.17996747917082068,
0.18786110540725684,
0.1955669737257397,
0.20335013690301407,
0.21277055903588274,
0.22148328777708232,
0.2295912740670489,
0.23640586948077766,
0.2431234831038822,
0.24987292468428604,
0.2568336065927525,
0.2649271880853427,
0.27282722271091664,
0.2799894816822785,
0.2862801096305317,
0.2925685639072496,
0.2988294699944579,
0.3051314956400879,
0.3118994077972684,
0.31887303037202536,
0.32523581745772245,
0.3307398353487736,
0.33616198578702633,
0.34151324975562525,
0.3478405485563082,
0.3546238566149848,
0.36130761502236336,
0.36751189302418574,
0.3730571543616735,
0.37865278180851814,
0.38428987795273567,
0.3900127609160433,
0.3964718089893684,
0.40306122104207753,
0.40914555292031984,
0.41449831480764515,
0.4198849769608837,
0.4256404199470336,
0.4315384355133149,
0.43801594290086987,
0.4444516211895538,
0.45034373518130405,
0.4556807858158224,
0.4610488197166289,
0.46633036852044285,
0.4717294082126311,
0.47769497653470894,
0.48369759863580825,
0.4892945325380834,
0.49431792124380325,
0.49935417177798586,
0.5043009639028166,
0.5093262559789482,
0.5149098888134348,
0.5205835093969735,
0.5261172491490695,
0.5310141031413226,
0.5359027242118537,
0.540068909216935,
0.5451620919252675,
0.5506752550043325,
0.5562355968920056,
0.5614758121490083,
0.5660462437469214,
0.5706616804819072,
0.5750453002157994,
0.5795939049979849,
0.5861802311128667,
0.5913273051077091,
0.5958976691303413,
0.6001503392324151,
0.6042404457337608,
0.6082963816680697,
0.6124734913435614,
0.6174918231657613,
0.6223867287374153,
0.6268875352709179,
0.6308341907134806,
0.6348490070893678,
0.6388763812049537,
0.6430405276890614,
0.6476616520101889,
0.6525750168960728,
0.6570689758011117,
0.6610427627189518,
0.6649727383296814,
0.6689671694958335,
0.673019050913289,
0.6776959248411508,
0.6825336054124376,
0.6869984168463193,
0.6908780826604262,
0.6949984065748767,
0.6991746490342636,
0.7033415661048878,
0.7082721626873987,
0.7131064081819068,
0.7176506656210218,
0.7216193168175142,
0.7256178250256133,
0.7296113326629264,
0.733677461202103,
0.7383860054116087,
0.7431864069529378,
0.7475115177561259,
0.7513220765829758,
0.7551652404828552,
0.7591154774153049,
0.7635879699061145,
0.76888963361854,
0.7740750002725536,
0.7788235152607059,
0.7829338267710377,
0.7870690059847372,
0.7912444713283939,
0.7954864645360872,
0.8002680350991415,
0.8051864906561857,
0.8097254772233912,
0.8137210008565843,
0.8175460095309978,
0.8214444612731922,
0.8256005212486867,
0.8302889054993935,
0.8351108860804202,
0.839542135124793,
0.8433705788759852,
0.8472835029908369,
0.8513248314019267,
0.8556693700983707,
0.8606610209471658,
0.865499591259651,
0.8699232042972833,
0.8737653545679493,
0.8776996212090155,
0.8816179062961511,
0.8856027192473231,
0.8900849425785808,
0.8947120585746139,
0.8993599427069738,
0.9035026227768521,
0.9075820073336299,
0.9115699850604569,
0.9158137239629064,
0.9207252417911126,
0.925749689176233,
0.9303560370359392,
0.9343408161994707,
0.9384800274049299,
0.9426168396879175,
0.9475247422385961,
0.9523909621035122,
0.9573336433987555,
0.9618665256655873,
0.9657568345864344,
0.9697355995499667,
0.973736889607129,
0.9778353641807583,
0.9828378833872299,
0.987703190985854,
0.9921586319807856,
0.9960384779956415,
1.0,
]
start_time = time.time()
results = sequential_bounds(np.array(t), alpha=0.003333333, sides=2)
my_bounds = results.bounds
expected = np.array(
[
5.75400023,
8.0,
5.14701605,
4.91478643,
4.80691346,
4.69004328,
4.57921075,
4.49683943,
4.44452939,
4.38899083,
4.35683792,
4.33289847,
4.301461,
4.27383028,
4.24513591,
4.21444005,
4.18809224,
4.17037988,
4.15702106,
4.13796352,
4.12345883,
4.10808648,
4.07898394,
4.06169498,
4.04985422,
4.04453139,
4.03288177,
4.02205301,
4.00664024,
3.98770613,
3.97358123,
3.96589571,
3.95946059,
3.94995533,
3.94128534,
3.93114789,
3.91870273,
3.90749163,
3.90064315,
3.8958719,
3.88847126,
3.88184277,
3.86841705,
3.85642932,
3.84721152,
3.84099201,
3.83689676,
3.8295672,
3.82234648,
3.81501541,
3.80286989,
3.79370807,
3.78728177,
3.78449351,
3.77865864,
3.76988501,
3.76230126,
3.75251025,
3.74474277,
3.73953663,
3.73534961,
3.72974059,
3.72466752,
3.71785112,
3.70903202,
3.70176221,
3.6976847,
3.6944938,
3.68996741,
3.68449851,
3.67888767,
3.67142884,
3.66522708,
3.65968721,
3.65649679,
3.65207508,
3.65156885,
3.643952,
3.63644572,
3.63029181,
3.62665696,
3.62527741,
3.62117738,
3.61789837,
3.6128686,
3.59904477,
3.5976517,
3.59678297,
3.59434356,
3.59116304,
3.58814574,
3.5835558,
3.57659985,
3.5726481,
3.56990393,
3.56879169,
3.56501955,
3.56127173,
3.55720436,
3.55194666,
3.54597713,
3.5436994,
3.54287161,
3.53974477,
3.53649679,
3.53314876,
3.52700997,
3.52175088,
3.51873367,
3.51846468,
3.51401711,
3.5106822,
3.50742162,
3.50113309,
3.49658758,
3.49376264,
3.49238249,
3.48979047,
3.48725107,
3.48341163,
3.47810608,
3.47381485,
3.47184685,
3.47110719,
3.46801712,
3.46472076,
3.45913659,
3.45209404,
3.4484684,
3.44587153,
3.44472549,
3.44242755,
3.43895355,
3.43549018,
3.43080058,
3.42621252,
3.42437516,
3.42371762,
3.42122891,
3.41861765,
3.41451447,
3.40936002,
3.4051931,
3.40307035,
3.40295986,
3.40052495,
3.39688763,
3.39279348,
3.38725208,
3.38421998,
3.38214471,
3.38133324,
3.37908335,
3.37689107,
3.37364203,
3.36937673,
3.36593888,
3.36250238,
3.36109704,
3.35878324,
3.35666501,
3.35305866,
3.34754255,
3.34364255,
3.34157534,
3.34085629,
3.33864193,
3.33563376,
3.33016843,
3.32687574,
3.32338656,
3.32166421,
3.32107266,
3.31861916,
3.31615129,
3.31334059,
3.30792367,
3.30479742,
3.30339238,
3.30296421,
3.30041534,
]
)
assert np.allclose(my_bounds, expected)
    # if the calculation with max_nints takes longer than 15 seconds, something is most likely broken
assert (time.time() - start_time) < 15
# Run a second time but with initial state from last run.
start_time = time.time()
results = sequential_bounds(np.array(t), alpha=0.003333333, sides=2, state=results.state)
my_bounds = results.bounds
assert np.allclose(my_bounds, expected)
    # with the precomputed state reused, the second calculation should be essentially instantaneous
print(f"Time passed second round: {time.time() - start_time}")
assert (time.time() - start_time) < 0.01
@pytest.mark.skip(reason="Skipping because this test is very slow")
def test_many_days_fast_and_no_crash():
"""
    This is based on experiment 1735 on 26.11.2020. The calculation of the corresponding bounds takes many minutes
    without the performance tweak. Therefore, this test only checks for the absence of crashes and for time constraints, but
    does not compare against the baseline without the performance tweak. There is a Jupyter notebook making that comparison.
"""
t = [
0.011404679673257933,
0.02292450819418779,
0.0356455988484443,
0.04835740420885424,
0.05971666577058213,
0.06976017458481187,
0.07984165086754545,
0.09002459314412276,
0.10026356929804565,
0.11129746744100509,
0.1222487922920801,
0.13250332796555583,
0.1418309168157694,
0.15072692856918676,
0.15940425274581055,
0.16819162796171988,
0.17766544268380677,
0.18725283769713902,
0.19600162922594835,
0.20386600701959812,
0.21159934032678884,
0.21916233120704773,
0.22688560894714668,
0.23509036348536208,
0.24366994698965522,
0.2515994198750076,
0.25875219123481424,
0.2659624389836802,
0.2731790169781248,
0.28051081384508175,
0.28822790138928306,
0.2962915558739476,
0.3037246366701631,
0.31063411372423433,
0.31767205835063517,
0.32464032826076655,
0.3318100596369355,
0.3397812253123048,
0.3476375502493003,
0.3550356746451523,
0.3616457394863339,
0.3683042335071859,
0.375005792804928,
0.38175551518794676,
0.3891222824602354,
0.39652683513644266,
0.40347332732118724,
0.4098512458112366,
0.4163205187081655,
0.42263992444151655,
0.42899148558161226,
0.43464157988476515,
0.43858871208254674,
0.44192382717460427,
0.44482627278235426,
0.4474605932759375,
0.44957511937869815,
0.4509048070694502,
0.45222422911858906,
0.45333747002744257,
0.45426598540713137,
0.4551955091445229,
0.45605329943533507,
0.456895460181754,
0.4578387508027823,
0.45881449093488524,
0.45965707183034693,
0.4603621239391219,
0.4610501740166303,
0.46173166976907054,
0.4624475477181825,
0.4632872155802805,
0.4641010162663083,
0.46481571779810027,
0.4654194019478082,
0.4660207332628762,
0.4666458170038323,
0.4672646265190821,
0.46791675385342846,
0.4685898046101078,
0.46918687841487516,
0.46969451649339183,
0.47019581032136176,
0.4706811945055765,
0.47116992587716583,
0.47170379526092326,
0.47227291514937425,
0.4727852448922026,
0.47322669549150526,
0.4736554715946826,
0.47408022827201673,
0.47450655350577753,
0.4749737592414058,
0.47545756086422586,
0.4759381553493523,
0.47630259262910407,
0.4766609657576709,
0.47699441004302984,
0.4773518028238301,
0.477775327063972,
0.4781977729215707,
0.47856485714029223,
0.47888037506649034,
0.47919262983512245,
0.47949520717080135,
0.47980748994936967,
0.4801789017032324,
0.4805627078538587,
0.48090167009664675,
0.4811904245288165,
0.48149113920373887,
0.4817901452725537,
0.4820966860142033,
0.48243977972257923,
0.4827841618880198,
0.48309197708176604,
0.4833586316742829,
0.4836129058750043,
0.4838654994795544,
0.4841171547512422,
0.48439948090305657,
0.48470691796266424,
0.4849764575786085,
0.4852081697757299,
0.48545255646897667,
0.4856974893559792,
0.48595208567096676,
0.48624575584693763,
0.4865416528128355,
0.4867930840050338,
0.4870117575768593,
0.4872274340855126,
0.4874240218226533,
0.4876215198827202,
0.4878617751103791,
0.488108108494191,
0.48831807097586183,
0.4884937072807334,
0.48866595438332605,
0.488852192449045,
0.48903411698459087,
0.4892522303576926,
0.4894829201921431,
0.4896802221826566,
0.4898457609055321,
0.49001188783706756,
0.4901847091433521,
0.4903469286887892,
0.4905345812562857,
0.49073597269748276,
0.49091467609036693,
0.4910691508884479,
0.4912115954189357,
0.49135658885361677,
0.49150574176382184,
0.49167835299558493,
0.49186735004001847,
0.49203167033066975,
0.49216849886895175,
0.4923075682021289,
0.4924506289512129,
0.49259525825672346,
0.49276396210238826,
0.49294465420074185,
0.4931019580023778,
0.49330306934421303,
0.4935200763248353,
0.49373208353184794,
0.4939721566949216,
0.4942334053697541,
0.4944958444668745,
0.4947262121870588,
0.49492469059489225,
0.4951192336066912,
0.495294323717807,
0.4954780829041733,
0.4956838158854796,
0.49592192835302007,
0.49614550366367866,
0.49633301618149417,
0.49652995404283723,
0.4967104500716375,
0.4969174855149766,
0.49712443692850716,
0.4973541744251272,
0.49756258235533957,
0.49772464784612763,
0.4978989396740621,
0.4980669292663541,
0.4982378038820735,
0.49843929335804726,
0.4986487236509305,
0.49883442952786183,
0.49899118713574214,
0.49915640374435144,
0.49932506557511197,
]
alpha = 0.0033333333333333335
sides = 2
start_time = time.time()
my_bounds = sequential_bounds(np.array(t), alpha=alpha, sides=sides).bounds
expected = np.array(
[
5.0536015,
4.819334,
4.70702194,
4.60970036,
4.55329219,
4.5118919,
4.465161,
4.42168832,
4.37932413,
4.33343066,
4.29780246,
4.26550766,
4.2476601,
4.22343408,
4.20455427,
4.1834642,
4.15580542,
4.13352266,
4.1170148,
4.10326736,
4.08845795,
4.07496919,
4.05959646,
4.0417501,
4.02262887,
4.01056674,
4.00192679,
3.98996708,
3.97709149,
3.96442225,
3.95010566,
3.93456306,
3.92603865,
3.91801377,
3.90630556,
3.8975012,
3.88641115,
3.87143326,
3.85966246,
3.85112482,
3.84569926,
3.83714224,
3.82719647,
3.81910741,
3.80682977,
3.79652758,
3.78889289,
3.78428912,
3.77646938,
3.76966463,
3.76150223,
3.75820905,
3.76088934,
3.76171382,
3.76141619,
3.76079216,
3.76237742,
3.76725034,
3.76769877,
3.7690107,
3.7710916,
3.77168583,
3.76813708,
3.7705804,
3.76669411,
3.76711572,
3.76808636,
3.76962133,
3.76680748,
3.76844159,
3.76552364,
3.76210975,
3.76321355,
3.76471956,
3.76227721,
3.76424368,
3.76172169,
3.75923,
3.76099518,
3.75829319,
3.76028082,
3.75824824,
3.7562443,
3.76013739,
3.75818674,
3.7560594,
3.75379557,
3.75757852,
3.75582548,
3.75412511,
3.75244297,
3.75075688,
3.74891172,
3.75280489,
3.75090966,
3.7494744,
3.74806463,
3.75254602,
3.75114099,
3.74947802,
3.74782149,
3.74638383,
3.75092969,
3.74970739,
3.7485241,
3.74730404,
3.74585452,
3.74435839,
3.74303855,
3.74191532,
3.74074663,
3.73958567,
3.74415751,
3.74282592,
3.74149075,
3.74029857,
3.73926672,
3.73828357,
3.73730769,
3.7363362,
3.7352472,
3.73406243,
3.74020438,
3.7393112,
3.73836986,
3.73742713,
3.73644796,
3.73531947,
3.73418345,
3.73321896,
3.73238074,
3.73155456,
3.73080198,
3.73004637,
3.7291278,
3.72818669,
3.7273851,
3.72671496,
3.72605809,
3.72534827,
3.72465527,
3.72382494,
3.72294733,
3.73077145,
3.73014101,
3.72950865,
3.72885115,
3.7282343,
3.72752112,
3.72675617,
3.7260778,
3.7254917,
3.72495149,
3.72440186,
3.72383671,
3.723183,
3.72246763,
3.72184599,
3.7213286,
3.72080295,
3.72026245,
3.71971626,
3.71907946,
3.71839777,
3.71780463,
3.71704671,
3.7162294,
3.71543144,
3.71452847,
3.72065881,
3.71967136,
3.71880523,
3.71805949,
3.71732896,
3.71667185,
3.71598258,
3.71521135,
3.71431933,
3.71348235,
3.71278081,
3.71204444,
3.71136994,
3.7105967,
3.70982427,
3.70896735,
3.71527887,
3.71467395,
3.71402372,
3.71339733,
3.71276051,
3.71201001,
3.71123041,
3.71053954,
3.70995666,
3.70934263,
3.70871611,
]
)
assert np.allclose(my_bounds, expected)
# if the calculation with max_nints takes longer than 30 seconds, something is most likely broken
assert (time.time() - start_time) < 30
|
m2-modified/ims/common/agentless-system-crawler/tests/functional/test_functional_plugins.py | CCI-MOC/ABMI | 108 | 12784538 |
import shutil
import tempfile
import unittest
import docker
import requests.exceptions
from plugins.systems.cpu_container_crawler import CpuContainerCrawler
from plugins.systems.cpu_host_crawler import CpuHostCrawler
from plugins.systems.memory_container_crawler import MemoryContainerCrawler
from plugins.systems.memory_host_crawler import MemoryHostCrawler
from plugins.systems.os_container_crawler import OSContainerCrawler
from plugins.systems.process_container_crawler import ProcessContainerCrawler
# Tests the FeaturesCrawler class
# Throws an AssertionError if any test fails
# Tests conducted with a single container running.
class HostAndContainerPluginsFunctionalTests(unittest.TestCase):
image_name = 'alpine:latest'
def setUp(self):
self.docker = docker.Client(
base_url='unix://var/run/docker.sock', version='auto')
try:
if len(self.docker.containers()) != 0:
raise Exception(
"Sorry, this test requires a machine with no docker"
"containers running.")
except requests.exceptions.ConnectionError:
print ("Error connecting to docker daemon, are you in the docker"
"group? You need to be in the docker group.")
self.docker.pull(repository='alpine', tag='latest')
self.container = self.docker.create_container(
image=self.image_name, command='/bin/sleep 60')
self.tempd = tempfile.mkdtemp(prefix='crawlertest.')
self.docker.start(container=self.container['Id'])
def tearDown(self):
self.docker.stop(container=self.container['Id'])
self.docker.remove_container(container=self.container['Id'])
shutil.rmtree(self.tempd)
def test_crawl_invm_cpu(self):
fc = CpuHostCrawler()
cores = len(list(fc.crawl()))
assert cores > 0
def test_crawl_invm_mem(self):
fc = MemoryHostCrawler()
cores = len(list(fc.crawl()))
assert cores > 0
def test_crawl_outcontainer_cpu(self):
fc = CpuContainerCrawler()
for key, feature, t in fc.crawl(self.container['Id']):
print key, feature
cores = len(list(fc.crawl(self.container['Id'])))
assert cores > 0
def test_crawl_outcontainer_os(self):
fc = OSContainerCrawler()
assert len(list(fc.crawl(self.container['Id']))) == 1
def test_crawl_outcontainer_processes(self):
fc = ProcessContainerCrawler()
# sleep + crawler
assert len(list(fc.crawl(self.container['Id']))) == 2
def test_crawl_outcontainer_processes_mmapfiles(self):
fc = ProcessContainerCrawler()
output = "%s" % list(fc.crawl(self.container['Id'], get_mmap_files='True'))
assert '/bin/busybox' in output
def test_crawl_outcontainer_mem(self):
fc = MemoryContainerCrawler()
output = "%s" % list(fc.crawl(self.container['Id']))
assert 'memory_used' in output
if __name__ == '__main__':
unittest.main()
|
src/std/rfc4566.py | ojimary/titus | 108 | 12784562 |
# Copyright (c) 2007, <NAME>. All rights reserved. See LICENSING for details.
# @implements RFC4566 (SDP)
import socket, time
class attrs(object):
    '''A generic class that allows uniform access to attributes and items,
    and returns None for an invalid attribute instead of raising an exception.'''
def __init__(self, **kwargs):
for n,v in kwargs.items(): self[n] = v
# attribute access: use container if not found
def __getattr__(self, name): return self.__getitem__(name)
# container access: use key in __dict__
def __getitem__(self, name): return self.__dict__.get(name, None)
def __setitem__(self, name, value): self.__dict__[name] = value
def __contains__(self, name): return name in self.__dict__
#def __repr__(self): return repr(self.__dict__)
# @implements RFC4566 P3L3-P3L21
class SDP(attrs):
    '''An SDP packet with dynamic properties.
    The header names can be accessed as attributes or items.
    Accessing an unavailable header gives None instead of raising an exception.
'''
# header names that can appear multiple times.
_multiple = 'tramb'
def __init__(self, value=None):
if value:
self._parse(value)
# @implements RFC4566 P11L1-P12L10
class originator(attrs):
'''Represents a o= line with attributes username (str), sessionid (long),
version (long), nettype (str), addrtype (str), address (str).'''
def __init__(self, value=None):
if value:
self.username, self.sessionid, self.version, self.nettype, self.addrtype, self.address = value.split(' ')
self.sessionid = int(self.sessionid)
self.version = int(self.version)
else:
hostname = socket.gethostname()
self.username, self.sessionid, self.version, self.nettype, self.addrtype, self.address = \
'-', int(time.time()), int(time.time()), 'IN', 'IP4', (hostname.find('.')>0 and hostname or socket.gethostbyname(hostname))
def __repr__(self):
return ' '.join(map(lambda x: str(x), [self.username, self.sessionid, self.version, self.nettype, self.addrtype, self.address]))
# @implements RFC4566 P14L7-P16L9
class connection(attrs):
'''Represents a c= line with attributes nettype (str), addrtype (str), address (str)
and optionally ttl (int) and count (int).'''
def __init__(self, value=None, **kwargs):
if value:
self.nettype, self.addrtype, rest = value.split(' ')
rest = rest.split('/')
if len(rest) == 1: self.address = rest[0]
elif len(rest) == 2: self.address, self.ttl = rest[0], int(rest[1])
else: self.address, self.ttl, self.count = rest[0], int(rest[1]), int(rest[2])
elif 'address' in kwargs:
self.address = kwargs.get('address')
self.nettype = kwargs.get('nettype', 'IN')
self.addrtype = kwargs.get('addrtype', 'IP4')
if 'ttl' in kwargs: self.ttl = int(kwargs.get('ttl'))
if 'count' in kwargs: self.count = int(kwargs.get('count'))
def __repr__(self):
return self.nettype + ' ' + self.addrtype + ' ' + self.address + ('/' + str(self.ttl) if self.ttl else '') + ('/' + str(self.count) if self.count else '')
# @implements RFC4566 P22L17-P24L33
class media(attrs):
'''Represents a m= line and all subsequent lines until next m= or end.
It has attributes such as media (str), port (int), proto (str), fmt (list).'''
def __init__(self, value=None, **kwargs):
if value:
self.media, self.port, self.proto, rest = value.split(' ', 3)
self.port = int(self.port)
self.fmt = []
for f in rest.split(' '):
a = attrs()
try: a.pt = int(f) # if payload type is numeric
except: a.pt = f
self.fmt.append(a)
elif 'media' in kwargs:
self.media = kwargs.get('media')
self.port = int(kwargs.get('port', 0))
self.proto = kwargs.get('proto', 'RTP/AVP')
self.fmt = kwargs.get('fmt', [])
def __repr__(self):
result = self.media + ' ' + str(self.port) + ' ' + self.proto + ' ' + ' '.join(map(lambda x: str(x.pt), self.fmt))
for k in filter(lambda x: x in self, 'icbka'): # order is important
if k not in SDP._multiple: # single header
result += '\r\n' + k + '=' + str(self[k])
else:
for v in self[k]:
result += '\r\n' + k + '=' + str(v)
for f in self.fmt:
if f.name:
result += '\r\n' + 'a=rtpmap:' + str(f.pt) + ' ' + f.name + '/' + str(f.rate) + (f.params and ('/'+f.params) or '')
return result
def dup(self): # use this method instead of SDP.media(str(m)) to duplicate m. Otherwise, fmt will be incomplete
result = SDP.media(media=self.media, port=self.port, proto=self.proto, fmt=map(lambda f: attrs(pt=f.pt, name=f.name, rate=f.rate, params=f.params), self.fmt))
for k in filter(lambda x: x in self, 'icbka'):
result[k] = self[k][:] if isinstance(self[k], list) else self[k]
return result
# @implements RFC4566 P8L17-P10L5
def _parse(self, text):
g = True # whether we are in global line or per media line?
for line in text.replace('\r\n', '\n').split('\n'):
k, sep, v = line.partition('=')
if k == 'o': v = SDP.originator(v)
elif k == 'c': v = SDP.connection(v)
elif k == 'm': v = SDP.media(v)
if k == 'm': # new m= line
if not self['m']:
self['m'] = []
self['m'].append(v)
obj = self['m'][-1]
elif self['m']: # not in global
obj = self['m'][-1]
# @implements RFC4566 P25L41-P27L7
if k == 'a' and v.startswith('rtpmap:'):
pt, rest = v[7:].split(' ', 1)
name, sep, rest = rest.partition('/')
rate, sep, params = rest.partition('/')
for f in filter(lambda x: str(x.pt) == str(pt), obj.fmt):
f.name = name; f.rate = int(rate); f.params = params or None
else:
obj[k] = (k in SDP._multiple and ((k in obj) and (obj[k]+[v]) or [v])) or v
else: # global
obj = self
obj[k] = ((k in SDP._multiple) and ((k in obj) and (obj[k]+[v]) or [v])) or v
def __repr__(self):
result = ''
for k in filter(lambda x: x in self, 'vosiuepcbtam'): # order is important
if k not in SDP._multiple: # single header
result += k + '=' + str(self[k]) + '\r\n'
else:
for v in self[k]:
result += k + '=' + str(v) + '\r\n'
return result
#--------------------------- Testing --------------------------------------
# @implements RFC4566 P10L7-P10L21
def testSDP():
s = '''v=0\r
o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5\r
s=SDP Seminar\r
i=A Seminar on the session description protocol\r
u=http://www.example.com/seminars/sdp.pdf\r
e=<EMAIL> (<NAME>)\r
c=IN IP4 172.16.31.10/127\r
t=2873397496 2873404696\r
a=recvonly\r
m=audio 49170 RTP/AVP 0\r
m=video 51372 RTP/AVP 99\r
a=rtpmap:99 h263-1998/90000\r
'''
sdp = SDP(s)
assert str(sdp) == s
if __name__ == '__main__':
import doctest
doctest.testmod()
testSDP()
|
alipay/aop/api/response/AlipayFundTransAacollectBatchQueryResponse.py | snowxmas/alipay-sdk-python-all | 213 | 12784568 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.BatchDetailInfo import BatchDetailInfo
from alipay.aop.api.domain.BatchDetailInfo import BatchDetailInfo
from alipay.aop.api.domain.BatchDetailInfo import BatchDetailInfo
class AlipayFundTransAacollectBatchQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayFundTransAacollectBatchQueryResponse, self).__init__()
self._batch_memo = None
self._batch_no = None
self._batch_status = None
self._biz_type = None
self._create_date = None
self._creater_user_id = None
self._detail_list = None
self._ext_param = None
self._paid_detail_list = None
self._pay_amount_single = None
self._pay_amount_total = None
self._pay_amount_total_for_receiver = None
self._real_items_total = None
self._show_items_total = None
self._success_amount_total = None
self._time_out_value = None
self._unpaid_detail_list = None
@property
def batch_memo(self):
return self._batch_memo
@batch_memo.setter
def batch_memo(self, value):
self._batch_memo = value
@property
def batch_no(self):
return self._batch_no
@batch_no.setter
def batch_no(self, value):
self._batch_no = value
@property
def batch_status(self):
return self._batch_status
@batch_status.setter
def batch_status(self, value):
self._batch_status = value
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def create_date(self):
return self._create_date
@create_date.setter
def create_date(self, value):
self._create_date = value
@property
def creater_user_id(self):
return self._creater_user_id
@creater_user_id.setter
def creater_user_id(self, value):
self._creater_user_id = value
@property
def detail_list(self):
return self._detail_list
@detail_list.setter
def detail_list(self, value):
if isinstance(value, list):
self._detail_list = list()
for i in value:
if isinstance(i, BatchDetailInfo):
self._detail_list.append(i)
else:
self._detail_list.append(BatchDetailInfo.from_alipay_dict(i))
@property
def ext_param(self):
return self._ext_param
@ext_param.setter
def ext_param(self, value):
self._ext_param = value
@property
def paid_detail_list(self):
return self._paid_detail_list
@paid_detail_list.setter
def paid_detail_list(self, value):
if isinstance(value, list):
self._paid_detail_list = list()
for i in value:
if isinstance(i, BatchDetailInfo):
self._paid_detail_list.append(i)
else:
self._paid_detail_list.append(BatchDetailInfo.from_alipay_dict(i))
@property
def pay_amount_single(self):
return self._pay_amount_single
@pay_amount_single.setter
def pay_amount_single(self, value):
self._pay_amount_single = value
@property
def pay_amount_total(self):
return self._pay_amount_total
@pay_amount_total.setter
def pay_amount_total(self, value):
self._pay_amount_total = value
@property
def pay_amount_total_for_receiver(self):
return self._pay_amount_total_for_receiver
@pay_amount_total_for_receiver.setter
def pay_amount_total_for_receiver(self, value):
self._pay_amount_total_for_receiver = value
@property
def real_items_total(self):
return self._real_items_total
@real_items_total.setter
def real_items_total(self, value):
self._real_items_total = value
@property
def show_items_total(self):
return self._show_items_total
@show_items_total.setter
def show_items_total(self, value):
self._show_items_total = value
@property
def success_amount_total(self):
return self._success_amount_total
@success_amount_total.setter
def success_amount_total(self, value):
self._success_amount_total = value
@property
def time_out_value(self):
return self._time_out_value
@time_out_value.setter
def time_out_value(self, value):
self._time_out_value = value
@property
def unpaid_detail_list(self):
return self._unpaid_detail_list
@unpaid_detail_list.setter
def unpaid_detail_list(self, value):
if isinstance(value, list):
self._unpaid_detail_list = list()
for i in value:
if isinstance(i, BatchDetailInfo):
self._unpaid_detail_list.append(i)
else:
self._unpaid_detail_list.append(BatchDetailInfo.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayFundTransAacollectBatchQueryResponse, self).parse_response_content(response_content)
if 'batch_memo' in response:
self.batch_memo = response['batch_memo']
if 'batch_no' in response:
self.batch_no = response['batch_no']
if 'batch_status' in response:
self.batch_status = response['batch_status']
if 'biz_type' in response:
self.biz_type = response['biz_type']
if 'create_date' in response:
self.create_date = response['create_date']
if 'creater_user_id' in response:
self.creater_user_id = response['creater_user_id']
if 'detail_list' in response:
self.detail_list = response['detail_list']
if 'ext_param' in response:
self.ext_param = response['ext_param']
if 'paid_detail_list' in response:
self.paid_detail_list = response['paid_detail_list']
if 'pay_amount_single' in response:
self.pay_amount_single = response['pay_amount_single']
if 'pay_amount_total' in response:
self.pay_amount_total = response['pay_amount_total']
if 'pay_amount_total_for_receiver' in response:
self.pay_amount_total_for_receiver = response['pay_amount_total_for_receiver']
if 'real_items_total' in response:
self.real_items_total = response['real_items_total']
if 'show_items_total' in response:
self.show_items_total = response['show_items_total']
if 'success_amount_total' in response:
self.success_amount_total = response['success_amount_total']
if 'time_out_value' in response:
self.time_out_value = response['time_out_value']
if 'unpaid_detail_list' in response:
self.unpaid_detail_list = response['unpaid_detail_list']
|
asymmetric_cryptography/asymmetric.py | elishahyousaf/Awesome-Python-Scripts | 1,026 | 12784572 | from Crypto import Random
from Crypto.PublicKey import RSA
import base64
def generate_keys(modulus_length=256*4):
privatekey = RSA.generate(modulus_length, Random.new().read)
publickey = privatekey.publickey()
return privatekey, publickey
def encryptit(message , publickey):
encrypted_msg = publickey.encrypt(message, 32)[0]
encoded_encrypted_msg = base64.b64encode(encrypted_msg)
return encoded_encrypted_msg
def decryptit(message, privatekey):
decoded_encrypted_msg = base64.b64decode(message)
decoded_decrypted_msg = privatekey.decrypt(decoded_encrypted_msg)
return decoded_decrypted_msg
if __name__ == '__main__':
message = "This is a awesome message!"
privatekey , publickey = generate_keys()
encrypted_msg = encryptit(message.encode("utf-8"), publickey)
decrypted_msg = decryptit(encrypted_msg, privatekey)
print(f'{privatekey.exportKey()} - ({len(privatekey.exportKey())})')
print(f'{publickey.exportKey()} - ({len(publickey.exportKey())})')
print(f'Original: {message} - ({len(message)})')
print(f'Encrypted: {encrypted_msg} - ({len(encrypted_msg)})')
print(f'Decrypted: {decrypted_msg} - ({len(decrypted_msg)})') |
airflow/providers/trino/transfers/gcs_to_trino.py | npodewitz/airflow | 8,092 | 12784619 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage to Trino operator."""
import csv
import json
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Iterable, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.trino.hooks.trino import TrinoHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSToTrinoOperator(BaseOperator):
"""
Loads a csv file from Google Cloud Storage into a Trino table.
Assumptions:
1. CSV file should not have headers
2. Trino table with requisite columns is already created
3. Optionally, a separate JSON file with headers can be provided
:param source_bucket: Source GCS bucket that contains the csv
:param source_object: csv file including the path
:param trino_table: trino table to upload the data
:param trino_conn_id: destination trino connection
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud and
interact with the Google Cloud Storage service.
:param schema_fields: The names of the columns to fill in the table. If schema_fields is
        provided, any path provided in the schema object will be ignored.
:param schema_object: JSON file with schema fields
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
template_fields: Sequence[str] = (
'source_bucket',
'source_object',
'trino_table',
)
def __init__(
self,
*,
source_bucket: str,
source_object: str,
trino_table: str,
trino_conn_id: str = "trino_default",
gcp_conn_id: str = "google_cloud_default",
schema_fields: Optional[Iterable[str]] = None,
schema_object: Optional[str] = None,
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.trino_table = trino_table
self.trino_conn_id = trino_conn_id
self.gcp_conn_id = gcp_conn_id
self.schema_fields = schema_fields
self.schema_object = schema_object
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> None:
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
trino_hook = TrinoHook(trino_conn_id=self.trino_conn_id)
with NamedTemporaryFile("w+") as temp_file:
self.log.info("Downloading data from %s", self.source_object)
gcs_hook.download(
bucket_name=self.source_bucket,
object_name=self.source_object,
filename=temp_file.name,
)
data = csv.reader(temp_file)
rows = (tuple(row) for row in data)
self.log.info("Inserting data into %s", self.trino_table)
if self.schema_fields:
trino_hook.insert_rows(table=self.trino_table, rows=rows, target_fields=self.schema_fields)
elif self.schema_object:
blob = gcs_hook.download(
bucket_name=self.source_bucket,
object_name=self.schema_object,
)
schema_fields = json.loads(blob.decode("utf-8"))
trino_hook.insert_rows(table=self.trino_table, rows=rows, target_fields=schema_fields)
else:
trino_hook.insert_rows(table=self.trino_table, rows=rows)
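# A minimal DAG wiring sketch for the operator above; the bucket, object, and table names
# are placeholders, and the __main__ guard keeps the example from running on import.
if __name__ == "__main__":
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_gcs_to_trino", start_date=datetime(2022, 1, 1), schedule_interval=None) as dag:
        GCSToTrinoOperator(
            task_id="gcs_to_trino",
            source_bucket="example-bucket",
            source_object="data/example.csv",
            trino_table="example_schema.example_table",
        )
    print(dag.task_ids)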
|
gtsfm/two_view_estimator.py | swershrimpy/gtsfm | 122 | 12784624 |
"""Estimator which operates on a pair of images to compute relative pose and verified indices.
Authors: <NAME>, <NAME>
"""
import logging
from typing import Dict, Optional, Tuple
import dask
import numpy as np
from dask.delayed import Delayed
from gtsam import Cal3Bundler, Pose3, Rot3, Unit3
import gtsfm.utils.geometry_comparisons as comp_utils
import gtsfm.utils.logger as logger_utils
import gtsfm.utils.metrics as metric_utils
from gtsfm.common.keypoints import Keypoints
from gtsfm.common.two_view_estimation_report import TwoViewEstimationReport
from gtsfm.frontend.inlier_support_processor import InlierSupportProcessor
from gtsfm.frontend.matcher.matcher_base import MatcherBase
from gtsfm.frontend.verifier.verifier_base import VerifierBase
from gtsfm.evaluation.metrics import GtsfmMetric, GtsfmMetricsGroup
logger = logger_utils.get_logger()
mpl_logger = logging.getLogger("matplotlib")
mpl_logger.setLevel(logging.WARNING)
pil_logger = logging.getLogger("PIL")
pil_logger.setLevel(logging.INFO)
EPSILON = 1e-6
class TwoViewEstimator:
"""Wrapper for running two-view relative pose estimation on image pairs in the dataset."""
def __init__(
self,
matcher: MatcherBase,
verifier: VerifierBase,
inlier_support_processor: InlierSupportProcessor,
eval_threshold_px: float,
) -> None:
"""Initializes the two-view estimator from matcher and verifier.
Args:
matcher: matcher to use.
verifier: verifier to use.
inlier_support_processor: post-processor that uses information about RANSAC support to filter out pairs.
eval_threshold_px: distance threshold for marking a correspondence pair as inlier during evaluation
(not during estimation).
"""
self._matcher = matcher
self._verifier = verifier
self.processor = inlier_support_processor
self._corr_metric_dist_threshold = eval_threshold_px
def get_corr_metric_dist_threshold(self) -> float:
"""Getter for the distance threshold used in the metric for correct correspondences."""
return self._corr_metric_dist_threshold
def create_computation_graph(
self,
keypoints_i1_graph: Delayed,
keypoints_i2_graph: Delayed,
descriptors_i1_graph: Delayed,
descriptors_i2_graph: Delayed,
camera_intrinsics_i1_graph: Delayed,
camera_intrinsics_i2_graph: Delayed,
im_shape_i1_graph: Delayed,
im_shape_i2_graph: Delayed,
i2Ti1_expected_graph: Optional[Delayed] = None,
    ) -> Tuple[Delayed, Delayed, Delayed, Optional[Delayed], Optional[Delayed]]:
"""Create delayed tasks for matching and verification.
Args:
keypoints_i1_graph: keypoints for image i1.
keypoints_i2_graph: keypoints for image i2.
descriptors_i1_graph: corr. descriptors for image i1.
descriptors_i2_graph: corr. descriptors for image i2.
camera_intrinsics_i1_graph: intrinsics for camera i1.
camera_intrinsics_i2_graph: intrinsics for camera i2.
im_shape_i1_graph: image shape for image i1.
im_shape_i2_graph: image shape for image i2.
i2Ti1_expected_graph (optional): ground truth relative pose, used for evaluation if available. Defaults to
None.
Returns:
Computed relative rotation wrapped as Delayed.
Computed relative translation direction wrapped as Delayed.
Indices of verified correspondences wrapped as Delayed.
Two view report w/ verifier metrics wrapped as Delayed.
Two view report w/ post-processor metrics wrapped as Delayed.
"""
# graph for matching to obtain putative correspondences
corr_idxs_graph = self._matcher.create_computation_graph(
keypoints_i1_graph,
keypoints_i2_graph,
descriptors_i1_graph,
descriptors_i2_graph,
im_shape_i1_graph,
im_shape_i2_graph,
)
# verification on putative correspondences to obtain relative pose
# and verified correspondences
# TODO: name this verified_correspondence_idxs (add note: everything here is delayed)
(i2Ri1_graph, i2Ui1_graph, v_corr_idxs_graph, inlier_ratio_est_model) = self._verifier.create_computation_graph(
keypoints_i1_graph,
keypoints_i2_graph,
corr_idxs_graph,
camera_intrinsics_i1_graph,
camera_intrinsics_i2_graph,
)
# if we have the expected GT data, evaluate the computed relative pose
if i2Ti1_expected_graph is not None:
R_error_deg, U_error_deg = dask.delayed(compute_relative_pose_metrics, nout=2)(
i2Ri1_graph, i2Ui1_graph, i2Ti1_expected_graph
)
num_inliers_gt_model, inlier_ratio_gt_model, v_corr_idxs_inlier_mask_gt = dask.delayed(
compute_correspondence_metrics, nout=3
)(
keypoints_i1_graph,
keypoints_i2_graph,
v_corr_idxs_graph,
camera_intrinsics_i1_graph,
camera_intrinsics_i2_graph,
i2Ti1_expected_graph,
self._corr_metric_dist_threshold,
)
else:
R_error_deg, U_error_deg = None, None
num_inliers_gt_model, inlier_ratio_gt_model = None, None
v_corr_idxs_inlier_mask_gt = None
two_view_report_graph = dask.delayed(generate_two_view_report)(
inlier_ratio_est_model,
R_error_deg,
U_error_deg,
num_inliers_gt_model,
inlier_ratio_gt_model,
v_corr_idxs_inlier_mask_gt,
v_corr_idxs_graph,
)
# Note: We name the output as _pp, as it represents a post-processed quantity.
(
i2Ri1_pp_graph,
i2Ui1_pp_graph,
v_corr_idxs_pp_graph,
two_view_report_pp_graph,
) = self.processor.create_computation_graph(
i2Ri1_graph, i2Ui1_graph, v_corr_idxs_graph, two_view_report_graph
)
# We provide both, as we will create reports for both.
return (i2Ri1_pp_graph, i2Ui1_pp_graph, v_corr_idxs_pp_graph, two_view_report_graph, two_view_report_pp_graph)
def generate_two_view_report(
inlier_ratio_est_model: float,
R_error_deg: float,
U_error_deg: float,
num_inliers_gt_model: int,
inlier_ratio_gt_model: float,
v_corr_idxs_inlier_mask_gt: np.ndarray,
v_corr_idxs: np.ndarray,
) -> TwoViewEstimationReport:
"""Wrapper around class constructor for Dask."""
two_view_report = TwoViewEstimationReport(
inlier_ratio_est_model=inlier_ratio_est_model,
num_inliers_est_model=v_corr_idxs.shape[0],
num_inliers_gt_model=num_inliers_gt_model,
inlier_ratio_gt_model=inlier_ratio_gt_model,
v_corr_idxs_inlier_mask_gt=v_corr_idxs_inlier_mask_gt,
v_corr_idxs=v_corr_idxs,
R_error_deg=R_error_deg,
U_error_deg=U_error_deg,
)
return two_view_report
def compute_correspondence_metrics(
keypoints_i1: Keypoints,
keypoints_i2: Keypoints,
corr_idxs_i1i2: np.ndarray,
intrinsics_i1: Cal3Bundler,
intrinsics_i2: Cal3Bundler,
i2Ti1: Pose3,
epipolar_distance_threshold: float,
) -> Tuple[int, float, Optional[np.ndarray]]:
"""Compute the metrics for the generated verified correspondence.
Args:
keypoints_i1: detected keypoints in image i1.
keypoints_i2: detected keypoints in image i2.
corr_idxs_i1i2: indices of correspondences.
intrinsics_i1: intrinsics for i1.
intrinsics_i2: intrinsics for i2.
i2Ti1: relative pose.
epipolar_distance_threshold: max epipolar distance to qualify as a correct match.
Returns:
Number of inlier correspondences to ground truth epipolar geometry, i.e. #correct correspondences.
Inlier Ratio, i.e. ratio of correspondences which are correct w.r.t. given relative pose.
Mask of which verified correspondences are classified as correct under Sampson error
(using GT epipolar geometry).
"""
if corr_idxs_i1i2.size == 0:
return 0, float("Nan"), None
v_corr_idxs_inlier_mask_gt = metric_utils.count_correct_correspondences(
keypoints_i1.extract_indices(corr_idxs_i1i2[:, 0]),
keypoints_i2.extract_indices(corr_idxs_i1i2[:, 1]),
intrinsics_i1,
intrinsics_i2,
i2Ti1,
epipolar_distance_threshold,
)
num_inliers_gt_model = np.count_nonzero(v_corr_idxs_inlier_mask_gt)
inlier_ratio_gt_model = num_inliers_gt_model / corr_idxs_i1i2.shape[0]
return num_inliers_gt_model, inlier_ratio_gt_model, v_corr_idxs_inlier_mask_gt
def compute_relative_pose_metrics(
i2Ri1_computed: Optional[Rot3], i2Ui1_computed: Optional[Unit3], i2Ti1_expected: Pose3
) -> Tuple[Optional[float], Optional[float]]:
"""Compute the metrics on relative camera pose.
Args:
i2Ri1_computed: computed relative rotation.
i2Ui1_computed: computed relative translation direction.
i2Ti1_expected: expected relative pose.
Returns:
Rotation error, in degrees
Unit translation error, in degrees
"""
R_error_deg = comp_utils.compute_relative_rotation_angle(i2Ri1_computed, i2Ti1_expected.rotation())
U_error_deg = comp_utils.compute_relative_unit_translation_angle(
i2Ui1_computed, Unit3(i2Ti1_expected.translation())
)
return (R_error_deg, U_error_deg)
def aggregate_frontend_metrics(
two_view_reports_dict: Dict[Tuple[int, int], Optional[TwoViewEstimationReport]],
angular_err_threshold_deg: float,
metric_group_name: str,
) -> GtsfmMetricsGroup:
"""Aggregate the front-end metrics to log summary statistics.
We define "pose error" as the maximum of the angular errors in rotation and translation, per:
SuperGlue, CVPR 2020: https://arxiv.org/pdf/1911.11763.pdf
Learning to find good correspondences. CVPR 2018:
OA-Net, ICCV 2019:
NG-RANSAC, ICCV 2019:
Args:
        two_view_reports_dict: report containing front-end metrics for each image pair.
angular_err_threshold_deg: threshold for classifying angular error metrics as success.
metric_group_name: name we will assign to the GtsfmMetricGroup returned by this fn.
"""
num_image_pairs = len(two_view_reports_dict.keys())
# all rotational errors in degrees
rot3_angular_errors = []
trans_angular_errors = []
inlier_ratio_gt_model_all_pairs = []
inlier_ratio_est_model_all_pairs = []
num_inliers_gt_model_all_pairs = []
num_inliers_est_model_all_pairs = []
# populate the distributions
for report in two_view_reports_dict.values():
if report is None:
continue
rot3_angular_errors.append(report.R_error_deg)
trans_angular_errors.append(report.U_error_deg)
inlier_ratio_gt_model_all_pairs.append(report.inlier_ratio_gt_model)
inlier_ratio_est_model_all_pairs.append(report.inlier_ratio_est_model)
num_inliers_gt_model_all_pairs.append(report.num_inliers_gt_model)
num_inliers_est_model_all_pairs.append(report.num_inliers_est_model)
rot3_angular_errors = np.array(rot3_angular_errors, dtype=float)
trans_angular_errors = np.array(trans_angular_errors, dtype=float)
# count number of rot3 errors which are not None. Should be same in rot3/unit3
num_valid_image_pairs = np.count_nonzero(~np.isnan(rot3_angular_errors))
# compute pose errors by picking the max error from rot3 and unit3 errors
pose_errors = np.maximum(rot3_angular_errors, trans_angular_errors)
# check errors against the threshold
success_count_rot3 = np.sum(rot3_angular_errors < angular_err_threshold_deg)
success_count_unit3 = np.sum(trans_angular_errors < angular_err_threshold_deg)
success_count_pose = np.sum(pose_errors < angular_err_threshold_deg)
# count image pair entries where inlier ratio w.r.t. GT model == 1.
all_correct = np.count_nonzero(
[report.inlier_ratio_gt_model == 1.0 for report in two_view_reports_dict.values() if report is not None]
)
logger.debug(
"[Two view optimizer] [Summary] Rotation success: %d/%d/%d",
success_count_rot3,
num_valid_image_pairs,
num_image_pairs,
)
logger.debug(
"[Two view optimizer] [Summary] Translation success: %d/%d/%d",
success_count_unit3,
num_valid_image_pairs,
num_image_pairs,
)
logger.debug(
"[Two view optimizer] [Summary] Pose success: %d/%d/%d",
success_count_pose,
num_valid_image_pairs,
num_image_pairs,
)
logger.debug(
"[Two view optimizer] [Summary] # Image pairs with 100%% inlier ratio:: %d/%d", all_correct, num_image_pairs
)
# TODO(akshay-krishnan): Move angular_err_threshold_deg and num_total_image_pairs to metadata.
frontend_metrics = GtsfmMetricsGroup(
metric_group_name,
[
GtsfmMetric("angular_err_threshold_deg", angular_err_threshold_deg),
GtsfmMetric("num_total_image_pairs", int(num_image_pairs)),
GtsfmMetric("num_valid_image_pairs", int(num_valid_image_pairs)),
GtsfmMetric("rotation_success_count", int(success_count_rot3)),
GtsfmMetric("translation_success_count", int(success_count_unit3)),
GtsfmMetric("pose_success_count", int(success_count_pose)),
GtsfmMetric("num_all_inlier_correspondences_wrt_gt_model", int(all_correct)),
GtsfmMetric("rot3_angular_errors_deg", rot3_angular_errors),
GtsfmMetric("trans_angular_errors_deg", trans_angular_errors),
GtsfmMetric("pose_errors_deg", pose_errors),
GtsfmMetric("inlier_ratio_wrt_gt_model", inlier_ratio_gt_model_all_pairs),
GtsfmMetric("inlier_ratio_wrt_est_model", inlier_ratio_est_model_all_pairs),
GtsfmMetric("num_inliers_est_model", num_inliers_est_model_all_pairs),
GtsfmMetric("num_inliers_gt_model", num_inliers_gt_model_all_pairs),
],
)
return frontend_metrics
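

# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It shows how the
# relative-pose metric helper above can be exercised on toy gtsam objects;
# the pose values are arbitrary and only assume a standard gtsam installation
# in which Point3 is a plain numpy array.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    from gtsam import Pose3, Rot3, Unit3

    # Toy ground truth: identity rotation, unit translation along x.
    i2Ti1_expected = Pose3(Rot3(), np.array([1.0, 0.0, 0.0]))
    # Pretend the front-end recovered the pose exactly, so both errors are ~0.
    i2Ri1_computed = i2Ti1_expected.rotation()
    i2Ui1_computed = Unit3(i2Ti1_expected.translation())

    rot_err_deg, trans_err_deg = compute_relative_pose_metrics(
        i2Ri1_computed, i2Ui1_computed, i2Ti1_expected
    )
    print(f"rotation error (deg): {rot_err_deg}, translation error (deg): {trans_err_deg}")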
|
filters/tests/test_mixins.py | jof/drf-url-filters | 176 | 12784644 | <filename>filters/tests/test_mixins.py
import unittest
from filters.mixins import FiltersMixin
class MyTest(unittest.TestCase):
def test(self):
self.assertEqual(4, 4)
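
# Editor's note: a rough sketch (not part of the original test) of how
# FiltersMixin is typically mixed into a DRF viewset. It is kept as comments
# because it needs a configured Django project; the attribute names below
# follow the project's README from memory and may differ.
#
# class PlayersViewSet(FiltersMixin, viewsets.ModelViewSet):
#     queryset = Player.objects.all()
#     serializer_class = PlayerSerializer
#     filter_mappings = {
#         "id": "id",
#         "name": "name__icontains",
#     }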
|
tower_cli/cli/action.py | kedark3/tower-cli | 363 | 12784652 | <filename>tower_cli/cli/action.py<gh_stars>100-1000
# Copyright 2017, Ansible by Red Hat
# <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from click.formatting import join_options
from tower_cli.conf import SETTINGS_PARMS
class ActionSubcommand(click.Command):
"""A Command subclass that adds support for the concept that invocation
without arguments assumes `--help`.
This code is adapted by taking code from click.MultiCommand and placing
it here, to get just the --help functionality and nothing else.
"""
def __init__(self, name=None, no_args_is_help=True, **kwargs):
self.no_args_is_help = no_args_is_help
super(ActionSubcommand, self).__init__(name=name, **kwargs)
def parse_args(self, ctx, args):
"""Parse arguments sent to this command.
The code for this method is taken from MultiCommand:
https://github.com/mitsuhiko/click/blob/master/click/core.py
It is Copyright (c) 2014 by <NAME>.
See the license:
https://github.com/mitsuhiko/click/blob/master/LICENSE
"""
if not args and self.no_args_is_help and not ctx.resilient_parsing:
click.echo(ctx.get_help())
ctx.exit()
return super(ActionSubcommand, self).parse_args(ctx, args)
def format_options(self, ctx, formatter):
"""Monkey-patch click's format_options method to support option categorization.
"""
field_opts = []
global_opts = []
local_opts = []
other_opts = []
for param in self.params:
if param.name in SETTINGS_PARMS:
opts = global_opts
elif getattr(param, 'help', None) and param.help.startswith('[FIELD]'):
opts = field_opts
param.help = param.help[len('[FIELD]'):]
else:
opts = local_opts
rv = param.get_help_record(ctx)
if rv is None:
continue
else:
opts.append(rv)
if self.add_help_option:
help_options = self.get_help_option_names(ctx)
if help_options:
other_opts.append([join_options(help_options)[0], 'Show this message and exit.'])
if field_opts:
with formatter.section('Field Options'):
formatter.write_dl(field_opts)
if local_opts:
with formatter.section('Local Options'):
formatter.write_dl(local_opts)
if global_opts:
with formatter.section('Global Options'):
formatter.write_dl(global_opts)
if other_opts:
with formatter.section('Other Options'):
formatter.write_dl(other_opts)
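

# ---------------------------------------------------------------------------
# Editor's sketch (not part of tower-cli): a minimal command that opts into
# ActionSubcommand, so invoking it with no arguments prints help, and a
# "[FIELD]"-prefixed option is grouped under "Field Options". Only public
# click APIs are used; the command and option here are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    @click.command(cls=ActionSubcommand)
    @click.option("--name", help="[FIELD]Name of the resource.")
    def create(name):
        """Create a resource."""
        click.echo("created %s" % name)

    create()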
|
codegen_sources/test_generation/test_runners/python_test_runner.py | AlexShypula/CodeGen | 241 | 12784664 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import subprocess
import sys
import uuid
from pathlib import Path, PosixPath
from subprocess import Popen
from .evosuite_test_runners import (
EvosuiteTestRunner,
TestRuntimeError,
CompilationError,
InvalidTest,
clean_firejail,
FIREJAIL_PROFILE,
)
from ...model.src.utils import (
TREE_SITTER_ROOT,
limit_virtual_memory,
MAX_VIRTUAL_MEMORY,
)
from ...preprocessing.lang_processors.lang_processor import LangProcessor
sys.path.append(str(Path(__file__).parents[3]))
print("adding to path", str(Path(__file__).parents[3]))
python_processor = LangProcessor.processors["python"](root_folder=TREE_SITTER_ROOT)
class PythonTestRunner(EvosuiteTestRunner):
def __init__(
self,
tmp_folder=Path(
Path.home().joinpath("data/CodeGen/automatic_tests/tmp_tests_folder/python")
),
timeout=15,
):
super().__init__(tmp_folder=tmp_folder, timeout=timeout)
def _run_tests(
self,
function: str,
test: str,
tmp_path: PosixPath,
classname: str = None,
scaffolding: str = None,
):
assert (
scaffolding is None
), f"Scaffolding should be None for python tests, was {scaffolding}"
if "#TOFILL" not in test:
raise InvalidTest("Missing #TOFILL")
try:
f_name = python_processor.get_function_name(function)
except (ValueError, IndexError):
raise CompilationError("No function definition")
function = python_processor.detokenize_code(
function.replace(f" {f_name.strip()} ", " f_filled ")
)
filled_test = test.replace("#TOFILL", function)
test_path = self.write_test(filled_test, classname, tmp_path)
assert test_path.is_file()
test_cmd = f"{limit_virtual_memory(MAX_VIRTUAL_MEMORY)}; firejail --profile={FIREJAIL_PROFILE} python {test_path}"
test_proc = Popen(
test_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
executable="/bin/bash",
preexec_fn=os.setsid,
)
return test_proc, tmp_path
def _eval_proc_state(self, out, err):
stderr = err.decode("utf-8", errors="replace")
stderr = clean_firejail(stderr)
res_line = stderr.splitlines()
if len(res_line) <= 2 or not (
res_line[-1].startswith("OK") or res_line[-1].startswith("FAILED")
):
raise TestRuntimeError(stderr)
assert res_line[-3].startswith("Ran ")
number_of_tests = int(res_line[-3].replace("Ran ", "").split(" ")[0])
res_line = res_line[-1]
if res_line.startswith("OK"):
return "success", number_of_tests, 0
else:
assert res_line.startswith("FAILED (errors=") or res_line.startswith(
"FAILED (failures="
)
number_failures = int(res_line.split("=")[-1].replace(")", ""))
return "failure", number_of_tests, number_failures
@staticmethod
def write_test(test, classname, out_folder):
if classname is None:
classname = "a"
test_path = out_folder.joinpath(f"python_test_{classname}.py")
with open(test_path, "w", encoding="utf-8") as o:
o.write(test)
return test_path
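

# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). Running the full
# test pipeline needs firejail and the repository's tree-sitter setup, so only
# the write_test helper is exercised here; the test body and class name are
# placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tempfile

    tmp_dir = Path(tempfile.mkdtemp())
    test_stub = "import unittest\n\ndef f_filled(x):\n    return x\n\n#TOFILL\n"
    out_path = PythonTestRunner.write_test(test_stub, "demo", tmp_dir)
    print("wrote test stub to", out_path)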
|
bin/generate_sitemap.py | gaybro8777/CiteSeerX | 108 | 12784671 | #!/usr/bin/python
# Script to generate sitemaps
# <NAME>
# Requires mysql-python
import MySQLdb
import argparse
import logging
import os
import sys
import subprocess
from config import db
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("sitemapdir")
args = parser.parse_args()
try:
sitemapdir = args.sitemapdir
except:
logging.error("sitemap dir not set. run python generate_sitemap.py -h")
sys.exit(0)
# clear sitemapdir if it is there already
if os.path.exists(sitemapdir):
    subprocess.call('rm -rfv {}/*'.format(sitemapdir), shell=True)  # use the shell so the glob expands
else:
os.makedirs(sitemapdir)
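# The sitemaps.org protocol allows at most 50,000 URLs per sitemap file, so
# stay just under that limit before rolling over to a new file.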
MAX_PER_FILE = 49999
db = MySQLdb.connect(host=db["dbhost"], user=db["dbuser"], passwd=db["dbpass"], db="citeseerx")
cur = db.cursor()
i = 0
file = 1
header = '<?xml version="1.0" encoding="UTF-8"?>\n<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n'
cur.execute("SELECT id FROM papers WHERE public = 1")
for row in cur.fetchall():
if i == 0:
f = open(os.path.join(sitemapdir,"sitemap%d.xml" % file), 'w+')
f.write(header)
    f.write('<url>\n\t<loc>http://citeseerx.ist.psu.edu/viewdoc/download?doi=%s&amp;rep=rep1&amp;type=pdf</loc>\n</url>\n' % row[0])
i = i + 1
if i == MAX_PER_FILE:
file = file + 1
i = 0
f.write('</urlset>')
f.close()
logging.info("sitemap generated: {}".format(f.name))
if not f.closed:
f.write('</urlset>')
f.close()
logging.info("sitemap generated: {}".format(f.name))
f = open(os.path.join(sitemapdir,'sitemap_index.xml'), 'w+')
f.write('<?xml version="1.0" encoding="UTF-8"?>\n<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n')
for i in range(1, file+1):
f.write('<sitemap>\n\t<loc>http://citeseerx.ist.psu.edu/sitemap%d.xml</loc>\n</sitemap>\n' % i)
f.write('</sitemapindex>')
f.close()
logging.info("sitemap index file: {}".format(f.name))
|
OpenDataCatalog/contest/views.py | runonthespot/Open-Data-Catalog | 105 | 12784676 | from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.template.loader import render_to_string
from django.core.mail import send_mail, mail_managers, EmailMessage
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from OpenDataCatalog.contest.models import *
from datetime import datetime
def get_entries(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest, is_visible=True)
if not request.GET.__contains__('sort'):
entries = entries.order_by('-vote_count')
return render_to_response('contest/entries.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_entries_table(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest)
if not request.GET.__contains__('sort'):
entries = entries.order_by('-vote_count')
return render_to_response('contest/entry_table.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_winners(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest, is_visible=True).order_by('-vote_count')
return render_to_response('contest/winners.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_rules(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
return render_to_response('contest/rules.html', {'contest': contest}, context_instance=RequestContext(request))
def get_entry(request, entry_id):
entry = Entry.objects.get(pk=entry_id)
return render_to_response('contest/entry.html', {'contest': entry.contest, 'entry': entry}, context_instance=RequestContext(request))
#@login_required
def add_entry(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
if request.method == 'POST':
form = EntryForm(request.POST)
form.contest = contest_id
if form.is_valid():
data = {
#"submitter": request.user.username,
"submit_date": datetime.now(),
"org_name": form.cleaned_data.get("org_name"),
"org_url": form.cleaned_data.get("org_url"),
"contact_person": form.cleaned_data.get("contact_person"),
"contact_phone": form.cleaned_data.get("contact_phone"),
"contact_email": form.cleaned_data.get("contact_email"),
"data_set": form.cleaned_data.get("data_set"),
"data_use": form.cleaned_data.get("data_use"),
"data_mission": form.cleaned_data.get("data_mission")
}
subject = 'OpenDataPhilly - Contest Submission'
user_email = form.cleaned_data.get("contact_email")
text_content = render_to_string('contest/submit_email.txt', data)
text_content_copy = render_to_string('contest/submit_email_copy.txt', data)
mail_managers(subject, text_content)
msg = EmailMessage(subject, text_content_copy, to=[user_email])
msg.send()
return render_to_response('contest/thanks.html', {'contest': contest}, context_instance=RequestContext(request))
else:
form = EntryForm()
return render_to_response('contest/submit_entry.html', {'contest': contest, 'form': form}, context_instance=RequestContext(request))
@login_required
def add_vote(request, entry_id):
entry = Entry.objects.get(pk=entry_id)
contest = entry.contest
user = User.objects.get(username=request.user)
if contest.user_can_vote(user):
new_vote = Vote(user=user, entry=entry)
new_vote.save()
entry.vote_count = entry.vote_set.count()
entry.save()
next_vote_date = contest.get_next_vote_date(user)
if next_vote_date > contest.end_date:
messages.success(request, '<div style="font-weight:bold;">Your vote has been recorded.</div>Thank you for your vote! You will not be able to vote again before the end of the contest. <br><br>Please encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
messages.success(request, '<div style="font-weight:bold;">Your vote has been recorded.</div>You may vote once per week, so come back and visit us again on ' + next_vote_date.strftime('%A, %b %d %Y, %I:%M%p') + '. <br><br>Until then, encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
next_vote_date = contest.get_next_vote_date(user)
if next_vote_date > contest.end_date:
messages.error(request, '<div style="font-weight:bold;">You have already voted.</div>You will not be able to vote again before the end of the contest. <br><br>Please encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
messages.error(request, '<div style="font-weight:bold;">You have already voted.</div>You may vote once per week, so come back and visit us again on ' + next_vote_date.strftime('%A, %b %d %Y, %I:%M%p') + '. <br><br>Until then, encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
return redirect('/contest/?sort=vote_count')
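
# Editor's sketch (not part of the original app): one plausible way these views
# could be wired into a Django 1.x urlconf; the URL patterns below are
# assumptions, not the project's actual routing.
#
# from django.conf.urls import url
# from OpenDataCatalog.contest import views
#
# urlpatterns = [
#     url(r'^contest/$', views.get_entries),
#     url(r'^contest/rules/$', views.get_rules),
#     url(r'^contest/entry/(?P<entry_id>\d+)/$', views.get_entry),
#     url(r'^contest/entry/(?P<entry_id>\d+)/vote/$', views.add_vote),
# ]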
|
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotBooleanPart_t.py | htlcnn/ironpython-stubs | 182 | 12784741 | <filename>release/stubs.min/Tekla/Structures/ModelInternal_parts/dotBooleanPart_t.py
class dotBooleanPart_t(object):
# no doc
Boolean=None
OperativePart=None
Type=None
|
netdev/vendors/cisco/cisco_asa.py | maliciousgroup/netdev | 199 | 12784744 | <reponame>maliciousgroup/netdev
"""Subclass specific to Cisco ASA"""
import re
from netdev.logger import logger
from netdev.vendors.ios_like import IOSLikeDevice
class CiscoASA(IOSLikeDevice):
"""Class for working with Cisco ASA"""
def __init__(self, *args, **kwargs):
"""
Initialize class for asynchronous working with network devices
:param str host: device hostname or ip address for connection
:param str username: username for logging to device
        :param str password: password for logging to device
:param str secret: secret password for privilege mode
:param int port: ssh port for connection. Default is 22
:param str device_type: network device type
:param known_hosts: file with known hosts. Default is None (no policy). With () it will use default file
:param str local_addr: local address for binding source of tcp connection
:param client_keys: path for client keys. Default in None. With () it will use default file in OS
:param str passphrase: password for encrypted client keys
:param float timeout: timeout in second for getting information from channel
:param loop: asyncio loop object
"""
super().__init__(*args, **kwargs)
self._multiple_mode = False
_disable_paging_command = "terminal pager 0"
@property
def multiple_mode(self):
""" Returning Bool True if ASA in multiple mode"""
return self._multiple_mode
async def connect(self):
"""
Async Connection method
Using 5 functions:
* _establish_connection() for connecting to device
* _set_base_prompt() for finding and setting device prompt
        * enable_mode() for getting privilege exec mode
* _disable_paging() for non interact output in commands
* _check_multiple_mode() for checking multiple mode in ASA
"""
logger.info("Host {}: trying to connect to the device".format(self._host))
await self._establish_connection()
await self._set_base_prompt()
await self.enable_mode()
await self._disable_paging()
await self._check_multiple_mode()
logger.info("Host {}: Has connected to the device".format(self._host))
async def _set_base_prompt(self):
"""
Setting two important vars for ASA
base_prompt - textual prompt in CLI (usually hostname)
        base_pattern - regexp for finding the end of a command. It's a platform-specific parameter.
        For ASA devices base_pattern is "prompt([\/\w]+)?(\(.*?\))?[#|>]"
"""
logger.info("Host {}: Setting base prompt".format(self._host))
prompt = await self._find_prompt()
# Cut off prompt from "prompt/context/other" if it exists
# If not we get all prompt
prompt = prompt[:-1].split("/")
prompt = prompt[0]
self._base_prompt = prompt
delimiters = map(re.escape, type(self)._delimiter_list)
delimiters = r"|".join(delimiters)
base_prompt = re.escape(self._base_prompt[:12])
pattern = type(self)._pattern
self._base_pattern = pattern.format(prompt=base_prompt, delimiters=delimiters)
logger.debug("Host {}: Base Prompt: {}".format(self._host, self._base_prompt))
logger.debug("Host {}: Base Pattern: {}".format(self._host, self._base_pattern))
return self._base_prompt
async def _check_multiple_mode(self):
"""Check mode multiple. If mode is multiple we adding info about contexts"""
logger.info("Host {}:Checking multiple mode".format(self._host))
out = await self.send_command("show mode")
if "multiple" in out:
self._multiple_mode = True
logger.debug(
"Host {}: Multiple mode: {}".format(self._host, self._multiple_mode)
)
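

# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). The host and
# credentials are placeholders; netdev normally hands out devices through its
# create() factory, but direct instantiation keeps this example self-contained.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import asyncio

    async def _demo():
        asa = CiscoASA(host="192.0.2.10", username="admin", password="secret")
        await asa.connect()
        output = await asa.send_command("show version")
        print(output)
        await asa.disconnect()

    asyncio.run(_demo())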
|
examples/basic_observer.py | ddunmire/python-bleson | 103 | 12784767 | <reponame>ddunmire/python-bleson
#!/usr/bin/env python3
import sys
from time import sleep
from bleson import get_provider, Observer
# Get the wait time from the first script argument or default it to 10 seconds
WAIT_TIME = int(sys.argv[1]) if len(sys.argv)>1 else 10
def on_advertisement(advertisement):
print(advertisement)
adapter = get_provider().get_adapter()
observer = Observer(adapter)
observer.on_advertising_data = on_advertisement
observer.start()
sleep(WAIT_TIME)
observer.stop()
|
usaspending_api/download/tests/unit/test_zip_file.py | g4brielvs/usaspending-api | 217 | 12784771 | <filename>usaspending_api/download/tests/unit/test_zip_file.py
import os
import zipfile
from tempfile import NamedTemporaryFile
from usaspending_api.download.filestreaming.zip_file import append_files_to_zip_file
def test_append_files_to_zip_file():
with NamedTemporaryFile() as zip_file:
with NamedTemporaryFile() as include_file_1:
with NamedTemporaryFile() as include_file_2:
include_file_1.write(b"this is a test")
include_file_1.flush()
include_file_2.write(b"this is also a test")
include_file_2.flush()
append_files_to_zip_file([include_file_1.name, include_file_2.name], zip_file.name)
with zipfile.ZipFile(zip_file.name, "r") as zf:
assert [z.filename for z in zf.filelist] == [
os.path.basename(include_file_1.name),
os.path.basename(include_file_2.name),
]
|
google/colab/_import_magics.py | figufema/TesteClone | 1,521 | 12784861 | <filename>google/colab/_import_magics.py
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow magics to be declared without forcing an import.
This module allows us to declare a magic will be available while delaying the
import of the associated package. The primary purpose is to avoid importing too
many packages at startup, as it complicates package installation for users.
Note that importing the original module will *replace* these registrations, as
magics are still being registered in their original modules.
In addition, the IPython getdoc() function allows us to lazily request help on
a magic -- again, requesting help on a specific magic will import the module
where that magic resides.
For general Python objects or functions, this might be dangerous -- however,
magics are special, in that they're not represented by a Python object, so
there's no danger that overwriting the name -> function mapping will cause
trouble later on. The only user-visible aspect is that the source reference in
the help will update from this module to the actual importing module after the
first use.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# IPython requires get_ipython to be available as a local variable wherever you
# want to register a magic, in an attempt to prevent you from registering
# magics before the IPython magic machinery is loaded. So we need to directly
# import the symbol, instead of just the module.
from IPython import get_ipython
from IPython.core import magic
def _load_extension(module):
get_ipython().extension_manager.load_extension(module)
def _get_extension_magic(name, module, magic_type, magic_module_loader):
magic_module_loader(module)
m = get_ipython().magics_manager.magics[magic_type][name]
if m.__module__ == __name__:
raise ValueError('No %s magic named "%s" found in "%s"' %
(magic_type, name, module))
return m
def _declare_line_magic(name, module, magic_module_loader):
"""Declare a line magic called name in module."""
# If the module or extension has already been imported, don't overwrite the
# existing definition.
if module in sys.modules or module in get_ipython().extension_manager.loaded:
return
def impl(line, **kwargs):
return _get_extension_magic(name, module, 'line',
magic_module_loader)(line, **kwargs)
# pylint: disable=g-long-lambda
impl.getdoc = lambda: _get_extension_magic(name, module, 'line',
magic_module_loader).__doc__
magic.register_line_magic(name)(impl)
def _declare_cell_magic(name, module, magic_module_loader):
"""Declare a cell magic called name in module."""
# If the module or extension has already been imported, don't overwrite the
# existing definition.
if module in sys.modules or module in get_ipython().extension_manager.loaded:
return
def impl(line, cell, **kwargs):
return _get_extension_magic(name, module, 'cell',
magic_module_loader)(line, cell, **kwargs)
# pylint: disable=g-long-lambda
impl.getdoc = lambda: _get_extension_magic(name, module, 'cell',
magic_module_loader).__doc__
magic.register_cell_magic(name)(impl)
def _declare_colabx_magics():
if get_ipython():
_declare_cell_magic('bigquery', 'google.cloud.bigquery', _load_extension)
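
# Editor's sketch (not part of the original module): another lazily registered
# magic could be declared the same way; the magic name and module below are
# hypothetical, and the snippet is kept as a comment so nothing extra is
# registered at import time.
#
# def _declare_more_magics():
#     if get_ipython():
#         _declare_line_magic('mymagic', 'mypackage.magics', _load_extension)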
|
examples/composite_keys/testdata.py | NeolithEra/Flask-AppBuilder | 3,862 | 12784877 | import logging
from app import db
from app.models import Inventory, Datacenter, Rack, Item
import random
import string
from datetime import datetime
log = logging.getLogger(__name__)
DC_RACK_MAX = 20
ITEM_MAX = 1000
cities = ["Lisbon", "Porto", "Madrid", "Barcelona", "Frankfurt", "London"]
models = ["Server MX", "Server MY", "Server DL380", "Server x440", "Server x460"]
datacenters = list()
def get_random_name(names_list, size=1):
return names_list[random.randrange(0, len(names_list))]
def serial_generator(size=6, chars=string.ascii_uppercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
for city in cities:
datacenter = Datacenter()
datacenter.name = "DC %s" % city
datacenter.address = city
datacenters.append(datacenter)
db.session.add(datacenter)
log.info(datacenter)
try:
db.session.commit()
for num in range(1, DC_RACK_MAX):
rack = Rack()
rack.num = num
rack.datacenter = datacenter
db.session.add(rack)
except Exception as e:
log.error("Creating Datacenter: %s", e)
db.session.rollback()
for i in range(1, ITEM_MAX):
item = Item()
item.serial_number = serial_generator()
item.model = get_random_name(models)
db.session.add(item)
log.info(item)
try:
db.session.commit()
except Exception as e:
log.error("Creating Item: %s", e)
db.session.rollback()
|
fhir/resources/DSTU2/substance.py | cstoltze/fhir.resources | 144 | 12784890 | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/DSTU2/substance.html
Release: DSTU2
Version: 1.0.2
Revision: 7202
"""
from typing import List as ListType
from pydantic import Field
from . import domainresource, fhirtypes
from .backboneelement import BackboneElement
class Substance(domainresource.DomainResource):
"""A homogeneous material with a definite composition
A homogeneous material with a definite composition.
"""
resource_type = Field("Substance", const=True)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="List of Unique identifier (represented as 'dict' in JSON)",
description="Unique identifier for the substance",
element_property=True,
)
category: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="category",
title="List of Type `CodeableConcept` (represented as `dict` in JSON).",
description="What class/type of substance this is",
element_property=True,
)
code: fhirtypes.CodeableConceptType = Field(
None,
alias="code",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="What substance this is",
element_property=True,
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Type `String` (represented as `dict` in JSON)",
description="Textual description of the substance, comments",
element_property=True,
)
instance: ListType[fhirtypes.SubstanceInstanceType] = Field(
None,
alias="instance",
title="List of Type `SubstanceInstance` (represented as `dict` in JSON).",
description="If this describes a specific package/container of the substance",
element_property=True,
)
ingredient: ListType[fhirtypes.SubstanceIngredientType] = Field(
None,
alias="ingredient",
title="List of Type `SubstanceIngredient` (represented as `dict` in JSON).",
description="Composition information about the substance",
element_property=True,
)
class SubstanceInstance(BackboneElement):
"""If this describes a specific package/container of the substance
If this describes a specific package/container of the substance.
"""
resource_type = Field("SubstanceInstance", const=True)
identifier: fhirtypes.IdentifierType = Field(
None,
alias="identifier",
title="Identifier of the package/container",
description=(
"Identifier associated with the package/container"
" (usually a label affixed directly)"
),
element_property=True,
)
expiry: fhirtypes.DateTime = Field(
None,
alias="expiry",
title="When no longer valid to use",
description=(
"When the substance is no longer valid to use. "
"For some substances, a single arbitrary date is used for expiry."
),
element_property=True,
)
quantity: fhirtypes.QuantityType = Field(
None,
alias="quantity",
title=(
"Type `Quantity` referencing `SimpleQuantity` (represented as `dict` in "
"JSON)."
),
description="Amount of substance in the package",
element_property=True,
)
class SubstanceIngredient(BackboneElement):
"""Composition information about the substance
A substance can be composed of other substances.
"""
resource_type = Field("SubstanceIngredient", const=True)
quantity: fhirtypes.RatioType = Field(
None,
alias="quantity",
title="Type `Ratio` (represented as `dict` in JSON).",
description="Optional amount (concentration)",
element_property=True,
)
substance: fhirtypes.ReferenceType = Field(
None,
alias="substance",
title=(
"`Reference` items referencing `Substance` (represented as `dict` in"
" JSON)"
),
description="A component of the substance",
enum_reference_types=["Substance"],
element_property=True,
)
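

# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): building a minimal
# Substance resource from plain dicts, which pydantic coerces into the typed
# fields above. The SNOMED coding is illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    substance = Substance(
        code={
            "coding": [
                {"system": "http://snomed.info/sct", "code": "88480006", "display": "Potassium"}
            ]
        },
        description="Potassium chloride stock solution",
    )
    print(substance.json())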
|
fabric_bolt/projects/migrations/0003_auto_20150911_1911.py | jooni22/fabric-bolt | 219 | 12784920 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hosts', '0002_sshconfig'),
('projects', '0002_auto_20140912_1509'),
]
operations = [
migrations.AddField(
model_name='configuration',
name='value_ssh_key',
field=models.ForeignKey(verbose_name=b'Value', blank=True, to='hosts.SSHConfig', null=True),
),
migrations.AlterField(
model_name='configuration',
name='data_type',
field=models.CharField(default=b'string', max_length=10, null=True, blank=True, choices=[(b'boolean', b'Boolean'), (b'number', b'Number'), (b'string', b'String'), (b'ssk_key', b'SSH Key')]),
),
]
|
examples/text_scroll.py | paddywwoof/python-sense-hat | 104 | 12784941 | <reponame>paddywwoof/python-sense-hat<gh_stars>100-1000
#!/usr/bin/python
from sense_hat import SenseHat
sense = SenseHat()
sense.set_rotation(180)
red = (255, 0, 0)
sense.show_message("One small step for Pi!", text_colour=red)
|