max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
sympy/polys/tests/test_polymatrix.py | iamabhishek0/sympy | 603 | 12675281 | <gh_stars>100-1000
from sympy.matrices.dense import Matrix
from sympy.polys.polymatrix import PolyMatrix
from sympy.polys import Poly
from sympy import S, ZZ, QQ, EX
from sympy.abc import x
def test_polymatrix():
pm1 = PolyMatrix([[Poly(x**2, x), Poly(-x, x)], [Poly(x**3, x), Poly(-1 + x, x)]])
v1 = PolyMatrix([[1, 0], [-1, 0]], ring='ZZ[x]')
m1 = Matrix([[1, 0], [-1, 0]], ring='ZZ[x]')
A = PolyMatrix([[Poly(x**2 + x, x), Poly(0, x)], \
[Poly(x**3 - x + 1, x), Poly(0, x)]])
B = PolyMatrix([[Poly(x**2, x), Poly(-x, x)], [Poly(-x**2, x), Poly(x, x)]])
assert A.ring == ZZ[x]
assert isinstance(pm1*v1, PolyMatrix)
assert pm1*v1 == A
assert pm1*m1 == A
assert v1*pm1 == B
pm2 = PolyMatrix([[Poly(x**2, x, domain='QQ'), Poly(0, x, domain='QQ'), Poly(-x**2, x, domain='QQ'), \
Poly(x**3, x, domain='QQ'), Poly(0, x, domain='QQ'), Poly(-x**3, x, domain='QQ')]])
assert pm2.ring == QQ[x]
v2 = PolyMatrix([1, 0, 0, 0, 0, 0], ring='ZZ[x]')
m2 = Matrix([1, 0, 0, 0, 0, 0], ring='ZZ[x]')
C = PolyMatrix([[Poly(x**2, x, domain='QQ')]])
assert pm2*v2 == C
assert pm2*m2 == C
pm3 = PolyMatrix([[Poly(x**2, x), S.One]], ring='ZZ[x]')
v3 = S.Half*pm3
assert v3 == PolyMatrix([[Poly(S.Half*x**2, x, domain='QQ'), S.Half]], ring='EX')
assert pm3*S.Half == v3
assert v3.ring == EX
pm4 = PolyMatrix([[Poly(x**2, x, domain='ZZ'), Poly(-x**2, x, domain='ZZ')]])
v4 = Matrix([1, -1], ring='ZZ[x]')
assert pm4*v4 == PolyMatrix([[Poly(2*x**2, x, domain='ZZ')]])
assert len(PolyMatrix()) == 0
assert PolyMatrix([1, 0, 0, 1])/(-1) == PolyMatrix([-1, 0, 0, -1])
|
server/intrinsic/algorithm/grosse2009/poisson.py | paulu/opensurfaces | 137 | 12675293 | import numpy as np
import pyamg
import scipy.sparse
def get_gradients(I):
"""Get the vertical (derivative-row) and horizontal (derivative-column) gradients
of an image."""
I_y = np.zeros(I.shape)
I_y[1:, :, ...] = I[1:, :, ...] - I[:-1, :, ...]
I_x = np.zeros(I.shape)
I_x[:, 1:, ...] = I[:, 1:, ...] - I[:, :-1, ...]
return I_y, I_x
def solve(t_y, t_x, mask, t_y_weights=None, t_x_weights=None):
"""Solve for the image which best matches the target vertical gradients
t_y and horizontal gradients t_x, e.g. the one which minimizes sum of squares
of the residual
sum of (I[i,j] - I[i-1,j] - t_y[i,j])**2 + (I[i,j] - I[i,j-1] - t_x[i,j])**2
Only considers the target gradients lying entirely within the mask.
The first row of t_y and the first column of t_x are ignored. Optionally,
you may pass in an array with the weights corresponding to each target
gradient. The solution is unique up to a constant added to each of
the pixels. """
if t_y_weights is None:
t_y_weights = np.ones(t_y.shape)
if t_x_weights is None:
t_x_weights = np.ones(t_x.shape)
M, N = mask.shape
numbers = get_numbers(mask)
A = get_A(mask, t_y_weights, t_x_weights)
b = get_b(t_y, t_x, mask, t_y_weights, t_x_weights)
solver = pyamg.ruge_stuben_solver(A)
x = solver.solve(b)
I = np.zeros(mask.shape)
for i in range(M):
for j in range(N):
I[i,j] = x[numbers[i,j]]
return I
def solve_L1(t_y, t_x, mask):
"""Same as solve(), except using an L1 penalty rather than least squares."""
EPSILON = 0.0001
# We minimize the L1-norm of the residual
#
# sum of |r_i|
# r = Ax - b
#
# by alternately minimizing the variational upper bound
#
# |r_i| <= a_i * r_i**2 + 1 / (4 * a_i)
#
# with respect to x and a. When r is fixed, this bound is tight for a = 1 / (2 * |r|).
# When a is fixed, we optimize for x by solving a weighted least-squares problem.
I = solve(t_y, t_x, mask)
for i in range(20):
I_y, I_x = get_gradients(I)
t_y_err = mask * np.abs(I_y - t_y)
t_x_err = mask * np.abs(I_x - t_x)
t_y_weights = 1. / (2. * np.clip(t_y_err, EPSILON, np.infty))
t_x_weights = 1. / (2. * np.clip(t_x_err, EPSILON, np.infty))
try:
I = solve(t_y, t_x, mask, t_y_weights, t_x_weights)
except Exception:
# Occasionally the solver fails when the weights get very large
# or small. In that case, we just return the previous iteration's
# estimate, which is hopefully close enough.
return I
return I
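# --- Editor's illustrative sketch (not part of the original module) ---------
# A minimal usage example for solve(): reconstruct a toy image from its own
# gradients. The mask covers the whole image, and the result is only defined
# up to an additive constant, so the comparison removes the mean. This assumes
# pyamg is installed, which the module already requires.
def _example_gradient_reconstruction():
    rng = np.random.RandomState(0)
    img = rng.rand(8, 8)
    mask = np.ones(img.shape, dtype=bool)
    g_y, g_x = get_gradients(img)
    recon = solve(g_y, g_x, mask)
    # Should print a small number (limited by the AMG solver tolerance)
    print(np.abs((recon - recon.mean()) - (img - img.mean())).max())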
###################### Stuff below here not very readable ##########################
def get_numbers(mask):
M, N = mask.shape
numbers = np.zeros(mask.shape, dtype=int)
count = 0
for i in range(M):
for j in range(N):
if mask[i,j]:
numbers[i, j] = count
count += 1
return numbers
def get_b(t_y, t_x, mask, t_y_weights, t_x_weights):
M, N = mask.shape
t_y = t_y[1:, :]
t_y_weights = t_y_weights[1:, :]
t_x = t_x[:, 1:]
t_x_weights = t_x_weights[:, 1:]
numbers = get_numbers(mask)
K = np.max(numbers) + 1
b = np.zeros(K)
# horizontal derivatives
for i in range(M):
for j in range(N-1):
if mask[i,j] and mask[i,j+1]:
n1 = numbers[i,j]
n2 = numbers[i,j+1]
# row (i,j): -x_{i,j+1} + x_{i,j} + t
b[n1] -= t_x[i,j] * t_x_weights[i,j]
# row (i, j+1): x_{i,j+1} - x_{i,j} - t
b[n2] += t_x[i,j] * t_x_weights[i,j]
# vertical derivatives
for i in range(M-1):
for j in range(N):
if mask[i,j] and mask[i+1,j]:
n1 = numbers[i,j]
n2 = numbers[i+1,j]
# row (i,j): -x_{i+1,j} + x_{i,j} + t
b[n1] -= t_y[i,j] * t_y_weights[i,j]
# row (i+1, j): x_{i+1,j} - x_{i,j} - t
b[n2] += t_y[i,j] * t_y_weights[i,j]
return b
def get_A(mask, t_y_weights, t_x_weights):
M, N = mask.shape
numbers = get_numbers(mask)
K = np.max(numbers) + 1
t_y_weights = t_y_weights[1:, :]
t_x_weights = t_x_weights[:, 1:]
# horizontal derivatives
count = 0
for i in range(M):
for j in range(N-1):
if mask[i,j] and mask[i,j+1]:
count += 1
data = np.zeros(4*count)
row = np.zeros(4*count)
col = np.zeros(4*count)
count = 0
for i in range(M):
for j in range(N-1):
if mask[i,j] and mask[i,j+1]:
n1 = numbers[i,j]
n2 = numbers[i,j+1]
# row (i,j): -x_{i,j+1} + x_{i,j} + t
row[4*count] = n1
col[4*count] = n2
data[4*count] = -t_x_weights[i, j]
row[4*count+1] = n1
col[4*count+1] = n1
data[4*count+1] = t_x_weights[i, j]
# row (i, j+1): x_{i,j+1} - x_{i,j} - t
row[4*count+2] = n2
col[4*count+2] = n2
data[4*count+2] = t_x_weights[i, j]
row[4*count+3] = n2
col[4*count+3] = n1
data[4*count+3] = -t_x_weights[i, j]
count += 1
data1 = data
row1 = row
col1 = col
# vertical derivatives
count = 0
for i in range(M-1):
for j in range(N):
if mask[i,j] and mask[i+1,j]:
count += 1
data = np.zeros(4*count)
row = np.zeros(4*count)
col = np.zeros(4*count)
count = 0
for i in range(M-1):
for j in range(N):
if mask[i,j] and mask[i+1,j]:
n1 = numbers[i,j]
n2 = numbers[i+1,j]
# row (i,j): -x_{i+1,j} + x_{i,j} + t
row[4*count] = n1
col[4*count] = n2
data[4*count] = -t_y_weights[i, j]
row[4*count+1] = n1
col[4*count+1] = n1
data[4*count+1] = t_y_weights[i, j]
# row (i+1, j): x_{i+1,j} - x_{i,j} - t
row[4*count+2] = n2
col[4*count+2] = n2
data[4*count+2] = t_y_weights[i, j]
row[4*count+3] = n2
col[4*count+3] = n1
data[4*count+3] = -t_y_weights[i, j]
count += 1
data2 = data
row2 = row
col2 = col
data = np.concatenate([data1, data2])
row = np.concatenate([row1, row2])
col = np.concatenate([col1, col2])
return scipy.sparse.coo_matrix((data, (row, col)), shape=(K, K))
|
qiskit/test/mock/backends/paris/fake_paris.py | Roshan-Thomas/qiskit-terra | 1,599 | 12675299 | <reponame>Roshan-Thomas/qiskit-terra<filename>qiskit/test/mock/backends/paris/fake_paris.py<gh_stars>1000+
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Fake Paris device (27 qubit).
"""
import os
from qiskit.test.mock import fake_pulse_backend
class FakeParis(fake_pulse_backend.FakePulseBackend):
"""A fake Paris backend.
06 17
↕ ↕
00 ↔ 01 ↔ 04 ↔ 07 ↔ 10 ↔ 12 ↔ 15 ↔ 18 ↔ 21 ↔ 23
↕ ↕ ↕
02 13 24
↕ ↕ ↕
03 ↔ 05 ↔ 08 ↔ 11 ↔ 14 ↔ 16 ↔ 19 ↔ 22 ↔ 25 ↔ 26
↕ ↕
09 20
"""
dirname = os.path.dirname(__file__)
conf_filename = "conf_paris.json"
props_filename = "props_paris.json"
defs_filename = "defs_paris.json"
backend_name = "fake_paris"
class FakeLegacyParis(fake_pulse_backend.FakePulseLegacyBackend):
"""A fake Paris backend.
06 17
↕ ↕
00 ↔ 01 ↔ 04 ↔ 07 ↔ 10 ↔ 12 ↔ 15 ↔ 18 ↔ 21 ↔ 23
↕ ↕ ↕
02 13 24
↕ ↕ ↕
03 ↔ 05 ↔ 08 ↔ 11 ↔ 14 ↔ 16 ↔ 19 ↔ 22 ↔ 25 ↔ 26
↕ ↕
09 20
"""
dirname = os.path.dirname(__file__)
conf_filename = "conf_paris.json"
props_filename = "props_paris.json"
defs_filename = "defs_paris.json"
backend_name = "fake_paris"
|
test/test_3gpp_channel_channel_coefficients.py | NVlabs/sionna | 163 | 12675336 | #
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
try:
import sionna
except ImportError as e:
import sys
sys.path.append("../")
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
gpu_num = 0 # Number of the GPU to be used
try:
tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
print('Only GPU number', gpu_num, 'used.')
tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
except RuntimeError as e:
print(e)
import unittest
import numpy as np
import sionna
from channel_test_utils import *
class TestChannelCoefficientsGenerator(unittest.TestCase):
r"""Test the computation of channel coefficients"""
# Batch size used to check the LSP distribution
BATCH_SIZE = 32
# Carrier frequency
CARRIER_FREQUENCY = 3.5e9 # Hz
# Maximum allowed deviation for calculation (relative error)
MAX_ERR = 1e-2
# Height of UTs
H_UT = 1.5
# Height of BSs
H_BS = 10.0
# Number of BS
NB_BS = 3
# Number of UT
NB_UT = 10
# Number of channel time samples
NUM_SAMPLES = 64
# Sampling frequency
SAMPLING_FREQUENCY = 20e6
def setUp(self):
# Forcing the seed to make the tests deterministic
tf.random.set_seed(42)
fc = TestChannelCoefficientsGenerator.CARRIER_FREQUENCY
# UT and BS arrays have no impact on LSP
# However, these are needed to instantiate the model
self.tx_array = sionna.channel.tr38901.PanelArray(num_rows_per_panel=2,
num_cols_per_panel=2,
polarization='dual',
polarization_type='VH',
antenna_pattern='38.901',
carrier_frequency=fc,
dtype=tf.complex128)
self.rx_array = sionna.channel.tr38901.PanelArray(num_rows_per_panel=1,
num_cols_per_panel=1,
polarization='dual',
polarization_type='VH',
antenna_pattern='38.901',
carrier_frequency=fc,
dtype=tf.complex128)
self.ccg = sionna.channel.tr38901.ChannelCoefficientsGenerator(
fc,
tx_array=self.tx_array,
rx_array=self.rx_array,
subclustering=True,
dtype=tf.complex128)
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
nb_ut = TestChannelCoefficientsGenerator.NB_UT
nb_bs = TestChannelCoefficientsGenerator.NB_BS
h_ut = TestChannelCoefficientsGenerator.H_UT
h_bs = TestChannelCoefficientsGenerator.H_BS
rx_orientations = tf.random.uniform([batch_size, nb_ut, 3], 0.0,
2*np.pi, dtype=tf.float64)
tx_orientations = tf.random.uniform([batch_size, nb_bs, 3], 0.0,
2*np.pi, dtype=tf.float64)
ut_velocities = tf.random.uniform([batch_size, nb_ut, 3], 0.0, 5.0,
dtype=tf.float64)
scenario = sionna.channel.tr38901.RMaScenario(fc, self.rx_array,
self.tx_array,
"downlink",
dtype=tf.complex128)
ut_loc = generate_random_loc(batch_size, nb_ut, (100,2000),
(100,2000), (h_ut, h_ut), dtype=tf.float64)
bs_loc = generate_random_loc(batch_size, nb_bs, (0,100),
(0,100), (h_bs, h_bs),
dtype=tf.float64)
in_state = generate_random_bool(batch_size, nb_ut, 0.5)
scenario.set_topology(ut_loc, bs_loc, rx_orientations,
tx_orientations, ut_velocities, in_state)
self.scenario = scenario
topology = sionna.channel.tr38901.Topology(
velocities=ut_velocities,
moving_end='rx',
los_aoa=scenario.los_aoa,
los_aod=scenario.los_aod,
los_zoa=scenario.los_zoa,
los_zod=scenario.los_zod,
los=scenario.los,
distance_3d=scenario.distance_3d,
tx_orientations=tx_orientations,
rx_orientations=rx_orientations)
self.topology = topology
lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)
ray_sampler = sionna.channel.tr38901.RaysGenerator(scenario)
lsp_sampler.topology_updated_callback()
ray_sampler.topology_updated_callback()
lsp = lsp_sampler()
self.rays = ray_sampler(lsp)
self.lsp = lsp
num_time_samples = TestChannelCoefficientsGenerator.NUM_SAMPLES
sampling_frequency = TestChannelCoefficientsGenerator.SAMPLING_FREQUENCY
c_ds = scenario.get_param("cDS")*1e-9
_, _, phi, sample_times = self.ccg(num_time_samples,
sampling_frequency, lsp.k_factor, self.rays, topology, c_ds,
debug=True)
self.phi = phi.numpy()
self.sample_times = sample_times.numpy()
self.c_ds = c_ds
def max_rel_err(self, r, x):
"""Compute the maximum relative error, ``r`` being the reference value,
``x`` an esimate of ``r``."""
err = np.abs(r-x)
rel_err = np.where(np.abs(r) > 0.0, np.divide(err,np.abs(r)+1e-6), err)
return np.max(rel_err)
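# Editor's note: the reference below builds the spherical unit vector
# r_hat(theta, phi) = [sin(theta)cos(phi), sin(theta)sin(phi), cos(theta)]^T
# used throughout the coordinate-system definitions of 3GPP TR 38.901, Sec. 7.1.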
def unit_sphere_vector_ref(self, theta, phi):
"""Reference implementation: Unit to sphere vector"""
uvec = np.stack([np.sin(theta)*np.cos(phi),
np.sin(theta)*np.sin(phi), np.cos(theta)],
axis=-1)
uvec = np.expand_dims(uvec, axis=-1)
return uvec
def test_unit_sphere_vector(self):
"""Test 3GPP channel coefficient calculation: Unit sphere vector"""
#
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
theta = tf.random.normal(shape=[batch_size]).numpy()
phi = tf.random.normal(shape=[batch_size]).numpy()
uvec_ref = self.unit_sphere_vector_ref(theta, phi)
uvec = self.ccg._unit_sphere_vector(theta, phi).numpy()
max_err = self.max_rel_err(uvec_ref, uvec)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
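# Editor's note: the reference below writes out, entry by entry, the composite
# rotation R = R_Z(alpha) @ R_Y(beta) @ R_X(gamma) for orientation angles
# (alpha, beta, gamma) = (bearing, downtilt, slant); cf. eq. (7.1-4) of
# 3GPP TR 38.901.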
def forward_rotation_matrix_ref(self, orientations):
"""Reference implementation: Forward rotation matrix"""
a, b, c = orientations[...,0], orientations[...,1], orientations[...,2]
#
R = np.zeros(list(a.shape) + [3,3])
#
R[...,0,0] = np.cos(a)*np.cos(b)
R[...,1,0] = np.sin(a)*np.cos(b)
R[...,2,0] = -np.sin(b)
#
R[...,0,1] = np.cos(a)*np.sin(b)*np.sin(c) - np.sin(a)*np.cos(c)
R[...,1,1] = np.sin(a)*np.sin(b)*np.sin(c) + np.cos(a)*np.cos(c)
R[...,2,1] = np.cos(b)*np.sin(c)
#
R[...,0,2] = np.cos(a)*np.sin(b)*np.cos(c) + np.sin(a)*np.sin(c)
R[...,1,2] = np.sin(a)*np.sin(b)*np.cos(c) - np.cos(a)*np.sin(c)
R[...,2,2] = np.cos(b)*np.cos(c)
#
return R
def test_forward_rotation_matrix(self):
"""Test 3GPP channel coefficient calculation: Forward rotation matrix"""
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
orientation = tf.random.normal(shape=[batch_size,3]).numpy()
R_ref = self.forward_rotation_matrix_ref(orientation)
R = self.ccg._forward_rotation_matrix(orientation).numpy()
max_err = self.max_rel_err(R_ref, R)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def reverse_rotation_matrix_ref(self, orientations):
"""Reference implementation: Reverse rotation matrix"""
R = self.forward_rotation_matrix_ref(orientations)
dim_ind = np.arange(len(R.shape))
dim_ind = np.concatenate([dim_ind[:-2], [dim_ind[-1]], [dim_ind[-2]]],
axis=0)
R_inv = np.transpose(R, dim_ind)
return R_inv
def test_reverse_rotation_matrix(self):
"""Test 3GPP channel coefficient calculation: Reverse rotation matrix"""
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
orientation = tf.random.normal(shape=[batch_size,3]).numpy()
R_ref = self.reverse_rotation_matrix_ref(orientation)
R = self.ccg._reverse_rotation_matrix(orientation).numpy()
max_err = self.max_rel_err(R_ref, R)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def gcs_to_lcs_ref(self, orientations, theta, phi):
"""Reference implementation: GCS to LCS angles"""
rho = self.unit_sphere_vector_ref(theta, phi)
Rinv = self.reverse_rotation_matrix_ref(orientations)
rho_prime = Rinv@rho
x = np.array([1,0,0])
x = np.expand_dims(x, axis=-1)
x = np.broadcast_to(x, rho_prime.shape)
y = np.array([0,1,0])
y = np.expand_dims(y, axis=-1)
y = np.broadcast_to(y, rho_prime.shape)
z = np.array([0,0,1])
z = np.expand_dims(z, axis=-1)
z = np.broadcast_to(z, rho_prime.shape)
theta_prime = np.sum(rho_prime*z, axis=-2)
theta_prime = np.clip(theta_prime, -1., 1.)
theta_prime = np.arccos(theta_prime)
phi_prime = np.angle(np.sum(rho_prime*x, axis=-2)\
+ 1j*np.sum(rho_prime*y, axis=-2))
theta_prime = np.squeeze(theta_prime, axis=-1)
phi_prime = np.squeeze(phi_prime, axis=-1)
return (theta_prime, phi_prime)
def test_gcs_to_lcs(self):
"""Test 3GPP channel coefficient calculation: GCS to LCS"""
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
orientation = tf.random.normal(shape=[batch_size,3]).numpy()
theta = tf.random.normal(shape=[batch_size]).numpy()
phi = tf.random.normal(shape=[batch_size]).numpy()
theta_prime_ref, phi_prime_ref = self.gcs_to_lcs_ref(orientation, theta,
phi)
theta_prime, phi_prime = self.ccg._gcs_to_lcs(
tf.cast(orientation, tf.float64),
tf.cast(theta, tf.float64),
tf.cast(phi, tf.float64))
theta_prime = theta_prime.numpy()
phi_prime = phi_prime.numpy()
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
max_err = self.max_rel_err(theta_prime_ref, theta_prime)
self.assertLessEqual(max_err, err_tol)
max_err = self.max_rel_err(phi_prime_ref, phi_prime)
self.assertLessEqual(max_err, err_tol)
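# Editor's note: psi computed below is the polarization rotation angle used to
# map an antenna field pattern from the local to the global coordinate system;
# cf. eq. (7.1-15) of 3GPP TR 38.901.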
def compute_psi_ref(self, orientations, theta, phi):
"""Reference implementation: Compute psi angle"""
a = orientations[...,0]
b = orientations[...,1]
c = orientations[...,2]
real = np.sin(c)*np.cos(theta)*np.sin(phi-a)\
+ np.cos(c)*(np.cos(b)*np.sin(theta)\
-np.sin(b)*np.cos(theta)*np.cos(phi-a))
imag = np.sin(c)*np.cos(phi-a) + np.sin(b)*np.cos(c)*np.sin(phi-a)
return np.angle(real+1j*imag)
def l2g_response_ref(self, F_prime, orientations, theta, phi):
"""Reference implementation: L2G response"""
psi = self.compute_psi_ref(orientations, theta, phi)
mat = np.zeros(list(np.shape(psi)) + [2,2])
mat[...,0,0] = np.cos(psi)
mat[...,0,1] = -np.sin(psi)
mat[...,1,0] = np.sin(psi)
mat[...,1,1] = np.cos(psi)
F = [email protected]_dims(F_prime, axis=-1)
return F
def test_l2g_response(self):
"""Test 3GPP channel coefficient calculation: L2G antenna response"""
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
orientation = tf.random.normal(shape=[batch_size,3]).numpy()
theta = tf.random.normal(shape=[batch_size]).numpy()
phi = tf.random.normal(shape=[batch_size]).numpy()
F_prime = tf.random.normal(shape=[batch_size,2]).numpy()
F_ref = self.l2g_response_ref(F_prime, orientation, theta, phi)
F = self.ccg._l2g_response( tf.cast(F_prime, tf.float64),
tf.cast(orientation,tf.float64),
tf.cast(theta, tf.float64),
tf.cast(phi, tf.float64)).numpy()
max_err = self.max_rel_err(F_ref, F)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def rot_pos_ref(self, orientations, positions):
R = self.forward_rotation_matrix_ref(orientations)
pos_r = R@positions
return pos_r
def rot_pos(self, orientations, positions):
"""Reference implementation: Rotate according to an orientation"""
R = self.forward_rotation_matrix_ref(orientations)
pos_r = R@positions
return pos_r
def test_rot_pos(self):
"""Test 3GPP channel coefficient calculation: Rotate position according
to orientation"""
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
orientations = tf.random.normal(shape=[batch_size,3]).numpy()
positions = tf.random.normal(shape=[batch_size,3, 1]).numpy()
pos_r_ref = self.rot_pos_ref(orientations, positions)
pos_r = self.ccg._rot_pos( tf.cast(orientations, tf.float64),
tf.cast(positions, tf.float64)).numpy()
max_err = self.max_rel_err(pos_r_ref, pos_r)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def step_11_get_tx_antenna_positions_ref(self, topology):
"""Reference implementation: Positions of the TX array elements"""
tx_orientations = topology.tx_orientations.numpy()
# Antenna locations in LCS and reshape for broadcasting
ant_loc_lcs = self.tx_array.ant_pos.numpy()
ant_loc_lcs = np.expand_dims(np.expand_dims(
np.expand_dims(ant_loc_lcs, axis=0), axis=1), axis=-1)
# Antenna loc in GCS relative to BS location
tx_orientations = np.expand_dims(tx_orientations, axis=2)
ant_loc_gcs = np.squeeze(self.rot_pos_ref(tx_orientations, ant_loc_lcs),
axis=-1)
return ant_loc_gcs
def test_step_11_get_tx_antenna_positions(self):
"""Test 3GPP channel coefficient calculation: Positions of the TX array
elements"""
tx_ant_pos_ref= self.step_11_get_tx_antenna_positions_ref(self.topology)
tx_ant_pos = self.ccg._step_11_get_tx_antenna_positions(self.topology)
tx_ant_pos = tx_ant_pos.numpy()
max_err = self.max_rel_err(tx_ant_pos_ref, tx_ant_pos)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def step_11_get_rx_antenna_positions_ref(self, topology):
"""Reference implementation: Positions of the RX array elements"""
rx_orientations = topology.rx_orientations.numpy()
# Antenna locations in LCS and reshape for broadcasting
ant_loc_lcs = self.rx_array.ant_pos.numpy()
ant_loc_lcs = np.expand_dims(np.expand_dims(
np.expand_dims(ant_loc_lcs, axis=0), axis=1), axis=-1)
# Antenna loc in GCS relative to UT location
rx_orientations = np.expand_dims(rx_orientations, axis=2)
ant_loc_gcs = np.squeeze(self.rot_pos_ref(rx_orientations, ant_loc_lcs),
axis=-1)
return ant_loc_gcs
def test_step_11_get_rx_antenna_positions(self):
"""Test 3GPP channel coefficient calculation: Positions of the RX array
elements"""
rx_ant_pos_ref= self.step_11_get_rx_antenna_positions_ref(self.topology)
rx_ant_pos = self.ccg._step_11_get_rx_antenna_positions(self.topology)
rx_ant_pos = rx_ant_pos.numpy()
max_err = self.max_rel_err(rx_ant_pos_ref, rx_ant_pos)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def step_11_phase_matrix_ref(self, Phi, kappa):
"""Reference implementation: Phase matrix"""
xpr_scaling = np.sqrt(1./kappa)
H_phase = np.zeros(list(Phi.shape[:-1]) + [2,2])\
+1j*np.zeros(list(Phi.shape[:-1]) + [2,2])
H_phase[...,0,0] = np.exp(1j*Phi[...,0])
H_phase[...,0,1] = xpr_scaling*np.exp(1j*Phi[...,1])
H_phase[...,1,0] = xpr_scaling*np.exp(1j*Phi[...,2])
H_phase[...,1,1] = np.exp(1j*Phi[...,3])
return H_phase
def test_step_11_phase_matrix(self):
"""Test 3GPP channel coefficient calculation:
Phase matrix calculation"""
H_phase_ref = self.step_11_phase_matrix_ref(self.phi, self.rays.xpr)
H_phase = self.ccg._step_11_phase_matrix(self.phi, self.rays).numpy()
max_err = self.max_rel_err(H_phase_ref, H_phase)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def step_11_field_matrix_ref(self, topology, aoa, aod, zoa, zod, H_phase):
"""Reference implementation: Field matrix"""
tx_orientations = topology.tx_orientations.numpy()
rx_orientations = topology.rx_orientations.numpy()
# Convert departure angles to LCS
tx_orientations = np.expand_dims(np.expand_dims(
np.expand_dims(tx_orientations, axis=2), axis=2), axis=2)
zod_prime, aod_prime = self.gcs_to_lcs_ref(tx_orientations, zod, aod)
# Convert arrival angles to LCS
rx_orientations = np.expand_dims(np.expand_dims(
np.expand_dims(rx_orientations, axis=1), axis=3), axis=3)
zoa_prime, aoa_prime = self.gcs_to_lcs_ref(rx_orientations, zoa, aoa)
# Compute the TX antenna response in LCS and map it to GCS
F_tx_prime_pol1_1, F_tx_prime_pol1_2 = self.tx_array.ant_pol1.field(
tf.constant(zod_prime,tf.float64), tf.constant(aod_prime,tf.float64))
F_tx_prime_pol1_1 = F_tx_prime_pol1_1.numpy()
F_tx_prime_pol1_2 = F_tx_prime_pol1_2.numpy()
F_tx_prime_pol1 = np.stack([F_tx_prime_pol1_1, F_tx_prime_pol1_2],
axis=-1)
F_tx_pol1 = self.l2g_response_ref(F_tx_prime_pol1, tx_orientations,
zod, aod)
# Dual polarization case for TX
if (self.tx_array.polarization == 'dual'):
F_tx_prime_pol2_1, F_tx_prime_pol2_2 = self.tx_array.ant_pol2.field(
tf.constant(zod_prime, tf.float64),
tf.constant(aod_prime, tf.float64))
F_tx_prime_pol2_1 = F_tx_prime_pol2_1.numpy()
F_tx_prime_pol2_2 = F_tx_prime_pol2_2.numpy()
F_tx_prime_pol2 = np.stack([F_tx_prime_pol2_1, F_tx_prime_pol2_2],
axis=-1)
F_tx_pol2 = self.l2g_response_ref(F_tx_prime_pol2, tx_orientations,
zod, aod)
# Compute the RX antenna response in LCS and map it to GCS
F_rx_prime_pol1_1, F_rx_prime_pol1_2 = self.rx_array.ant_pol1.field(
tf.constant(zoa_prime, tf.float64),
tf.constant(aoa_prime, tf.float64))
F_rx_prime_pol1_1 = F_rx_prime_pol1_1.numpy()
F_rx_prime_pol1_2 = F_rx_prime_pol1_2.numpy()
F_rx_prime_pol1 = np.stack([F_rx_prime_pol1_1, F_rx_prime_pol1_2],
axis=-1)
F_rx_pol1 = self.l2g_response_ref(F_rx_prime_pol1, rx_orientations,
zoa, aoa)
# Dual polarization case for RX
if (self.rx_array.polarization == 'dual'):
F_rx_prime_pol2_1, F_rx_prime_pol2_2 = self.rx_array.ant_pol2.field(
tf.constant(zoa_prime, tf.float64),
tf.constant(aoa_prime, tf.float64))
F_rx_prime_pol2_1 = F_rx_prime_pol2_1.numpy()
F_rx_prime_pol2_2 = F_rx_prime_pol2_2.numpy()
F_rx_prime_pol2 = np.stack([F_rx_prime_pol2_1, F_rx_prime_pol2_2],
axis=-1)
F_rx_pol2 = self.l2g_response_ref(F_rx_prime_pol2, rx_orientations,
zoa, aoa)
# Compute the product between the phase matrix and the TX antenna field.
F_tx_pol1 = H_phase@F_tx_pol1
if (self.tx_array.polarization == 'dual'):
F_tx_pol2 = H_phase@F_tx_pol2
# TX: Scattering the antenna response
# Single polarization case is easy, as one only needs to repeat the same
# antenna response for all elements
F_tx_pol1 = np.expand_dims(np.squeeze(F_tx_pol1, axis=-1), axis=-2)
if (self.tx_array.polarization == 'single'):
F_tx = np.tile(F_tx_pol1, [1,1,1,1,1, self.tx_array.num_ant,1])
# Dual-polarization requires scattering the responses at the right
# place
else:
F_tx_pol2 = np.expand_dims(np.squeeze(F_tx_pol2, axis=-1), axis=-2)
F_tx = np.zeros(F_tx_pol1.shape) + 1j*np.zeros(F_tx_pol1.shape)
F_tx = np.tile(F_tx, [1,1,1,1,1, self.tx_array.num_ant,1])
F_tx[:,:,:,:,:,self.tx_array.ant_ind_pol1,:] = F_tx_pol1
F_tx[:,:,:,:,:,self.tx_array.ant_ind_pol2,:] = F_tx_pol2
# RX: Scattering the antenna response
# Single polarization case is easy, as one only needs to repeat the same
# antenna response for all elements
F_rx_pol1 = np.expand_dims(np.squeeze(F_rx_pol1, axis=-1), axis=-2)
if (self.rx_array.polarization == 'single'):
F_rx = np.tile(F_rx_pol1, [1,1,1,1,1,self.rx_array.num_ant,1])
# Dual-polarization requires scattering the responses at the right
# place
else:
F_rx_pol2 = np.expand_dims(np.squeeze(F_rx_pol2, axis=-1), axis=-2)
F_rx = np.zeros(F_rx_pol1.shape) + 1j*np.zeros(F_rx_pol1.shape)
F_rx = np.tile(F_rx, [1,1,1,1,1,self.rx_array.num_ant,1])
F_rx[:,:,:,:,:,self.rx_array.ant_ind_pol1,:] = F_rx_pol1
F_rx[:,:,:,:,:,self.rx_array.ant_ind_pol2,:] = F_rx_pol2
# Computing H_field
F_tx = np.expand_dims(F_tx, axis=-3)
F_rx = np.expand_dims(F_rx, axis=-2)
H_field = np.sum(F_tx*F_rx, axis=-1)
return H_field
def test_step_11_field_matrix(self):
"""Test 3GPP channel coefficient calculation:
Field matrix calculation"""
H_phase = self.step_11_phase_matrix_ref(self.phi, self.rays.xpr)
H_field_ref = self.step_11_field_matrix_ref(self.topology,
self.rays.aoa,
self.rays.aod,
self.rays.zoa,
self.rays.zod,
H_phase)
H_field = self.ccg._step_11_field_matrix(self.topology,
tf.constant(self.rays.aoa, tf.float64),
tf.constant(self.rays.aod, tf.float64),
tf.constant(self.rays.zoa, tf.float64),
tf.constant(self.rays.zod, tf.float64),
tf.constant(H_phase, tf.complex128)).numpy()
max_err = self.max_rel_err(H_field_ref, H_field)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def step_11_array_offsets_ref(self, aoa, aod, zoa, zod, topology):
"""Reference implementation: Array offset matrix"""
# Arrival spherical unit vector
r_hat_rx = np.squeeze(self.unit_sphere_vector_ref(zoa, aoa), axis=-1)
r_hat_rx = np.expand_dims(r_hat_rx, axis=-2)
# Departure spherical unit vector
r_hat_tx = np.squeeze(self.unit_sphere_vector_ref(zod, aod), axis=-1)
r_hat_tx = np.expand_dims(r_hat_tx, axis=-2)
# TX location vector
d_bar_tx = self.step_11_get_tx_antenna_positions_ref(topology)
d_bar_tx = np.expand_dims(np.expand_dims(
np.expand_dims(d_bar_tx, axis=2), axis=3), axis=4)
# RX location vector
d_bar_rx = self.step_11_get_rx_antenna_positions_ref(topology)
d_bar_rx = np.expand_dims(np.expand_dims(
np.expand_dims(d_bar_rx, axis=1), axis=3), axis=4)
lambda_0 = self.scenario.lambda_0.numpy()
# TX offset matrix
tx_offset = np.sum(r_hat_tx*d_bar_tx, axis=-1)
rx_offset = np.sum(r_hat_rx*d_bar_rx, axis=-1)
tx_offset = np.expand_dims(tx_offset, -2)
rx_offset = np.expand_dims(rx_offset, -1)
antenna_offset = np.exp(1j*2*np.pi*(tx_offset+rx_offset)/lambda_0)
return antenna_offset
def test_step_11_array_offsets(self):
"""Test 3GPP channel coefficient calculation: Array offset matrix"""
H_array_ref = self.step_11_array_offsets_ref(self.rays.aoa,
self.rays.aod,
self.rays.zoa,
self.rays.zod,
self.topology)
H_array = self.ccg._step_11_array_offsets(self.topology,
tf.constant(self.rays.aoa, tf.float64),
tf.constant(self.rays.aod, tf.float64),
tf.constant(self.rays.zoa, tf.float64),
tf.constant(self.rays.zod, tf.float64)).numpy()
max_err = self.max_rel_err(H_array_ref, H_array)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def step_11_doppler_matrix_ref(self, topology, aoa, zoa, t):
"""Reference implementation: Array offset matrix"""
velocities = topology.velocities.numpy()
lambda_0 = self.scenario.lambda_0.numpy()
# Arrival spherical unit vector
r_hat_rx = np.squeeze(self.unit_sphere_vector_ref(zoa, aoa), axis=-1)
# Velocity vec
if topology.moving_end == "tx":
velocities = np.expand_dims(velocities, axis=2)
elif topology.moving_end == 'rx':
velocities = np.expand_dims(velocities, axis=1)
velocities = np.expand_dims(np.expand_dims(velocities, axis=3), axis=4)
# Doppler matrix
exponent = np.sum(r_hat_rx*velocities, axis=-1, keepdims=True)
exponent = exponent/lambda_0
exponent = 2*np.pi*exponent*t
H_doppler = np.exp(1j*exponent)
return H_doppler
def test_step_11_doppler_matrix(self):
"""Test 3GPP channel coefficient calculation: Doppler matrix"""
H_doppler_ref = self.step_11_doppler_matrix_ref(self.topology,
self.rays.aoa,
self.rays.zoa,
self.sample_times)
H_doppler = self.ccg._step_11_doppler_matrix(self.topology,
tf.constant(self.rays.aoa, tf.float64),
tf.constant(self.rays.zoa, tf.float64),
tf.constant(self.sample_times, tf.float64)).numpy()
max_err = self.max_rel_err(H_doppler_ref, H_doppler)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def step_11_nlos_ref(self, phi, aoa, aod, zoa, zod, kappa, powers, t,
topology):
"""Reference implemenrtation: Compute the channel matrix of the NLoS
component"""
H_phase = self.step_11_phase_matrix_ref(phi, kappa)
H_field = self.step_11_field_matrix_ref(topology, aoa, aod, zoa, zod,
H_phase)
H_array = self.step_11_array_offsets_ref(aoa, aod, zoa, zod, topology)
H_doppler = self.step_11_doppler_matrix_ref(topology, aoa, zoa, t)
H_field = np.expand_dims(H_field, axis=-1)
H_array = np.expand_dims(H_array, axis=-1)
H_doppler = np.expand_dims(np.expand_dims(H_doppler, axis=-2), axis=-3)
H_full = H_field*H_array*H_doppler
power_scaling = np.sqrt(powers/aoa.shape[4])
power_scaling = np.expand_dims(np.expand_dims(np.expand_dims(
np.expand_dims(power_scaling, axis=4), axis=5), axis=6), axis=7)
H_full = H_full*power_scaling
return H_full
def test_step_11_nlos_ref(self):
"""Test 3GPP channel coefficient calculation: Doppler matrix"""
H_full_ref = self.step_11_nlos_ref( self.phi,
self.rays.aoa,
self.rays.aod,
self.rays.zoa,
self.rays.zod,
self.rays.xpr,
self.rays.powers,
self.sample_times,
self.topology)
H_full = self.ccg._step_11_nlos(tf.constant(self.phi, tf.float64),
self.topology,
self.rays,
tf.constant(self.sample_times, tf.float64)).numpy()
max_err = self.max_rel_err(H_full_ref, H_full)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def step_11_reduce_nlos_ref(self, H_full, powers, delays, c_DS):
"""Reference implementation: Compute the channel matrix of the NLoS
component 2"""
# Sorting clusters in descending order
cluster_ordered = np.flip(np.argsort(powers, axis=3), axis=3)
delays_ordered = np.take_along_axis(delays, cluster_ordered, axis=3)
H_full_ordered = tf.gather(H_full, cluster_ordered, axis=3,
batch_dims=3).numpy()
## Weak clusters (all except first two)
delays_weak = delays_ordered[:,:,:,2:]
H_full_weak = np.sum(H_full_ordered[:,:,:,2:,:,:,:], axis=4)
## Strong clusters (first two)
# Each strong cluster is split into 3 sub-clusters
# Subcluster delays
strong_delays = delays_ordered[:,:,:,:2]
strong_delays = np.expand_dims(strong_delays, -1)
delays_expansion = np.array([[[[[0.0, 1.28, 2.56]]]]])
c_DS = np.expand_dims(np.expand_dims(c_DS.numpy(), axis=-1), axis=-1)
strong_delays = strong_delays + delays_expansion*c_DS
strong_delays = np.reshape(strong_delays,
list(strong_delays.shape[:-2]) + [-1])
# Subcluster coefficient
H_full_strong = H_full_ordered[:,:,:,:2,:,:,:]
H_full_subcl_1 = np.sum(np.take(H_full_strong, [0,1,2,3,4,5,6,7,18,19],
axis=4), axis=4)
H_full_subcl_2 = np.sum(np.take(H_full_strong, [8,9,10,11,16,17],
axis=4), axis=4)
H_full_subcl_3 = np.sum(np.take(H_full_strong, [12,13,14,15],
axis=4), axis=4)
H_full_strong_subcl = np.stack([H_full_subcl_1,H_full_subcl_2,
H_full_subcl_3], axis=3)
H_full_strong_subcl = np.transpose(H_full_strong_subcl,
[0,1,2,4,3,5,6,7])
H_full_strong_subcl = np.reshape(H_full_strong_subcl,
np.concatenate([H_full_strong_subcl.shape[:3], [-1],
H_full_strong_subcl.shape[5:]], axis=0))
## Putting together strong and weak clusters
H_nlos = np.concatenate([H_full_strong_subcl, H_full_weak], axis=3)
delays_nlos = np.concatenate([strong_delays, delays_weak], axis=3)
## Sorting
delays_sorted_ind = np.argsort(delays_nlos, axis=3)
delays_nlos = np.take_along_axis(delays_nlos, delays_sorted_ind, axis=3)
H_nlos = tf.gather(H_nlos, delays_sorted_ind,
axis=3, batch_dims=3).numpy()
return (H_nlos, delays_nlos)
def test_step_11_reduce_nlos(self):
"""Test 3GPP channel coefficient calculation: NLoS channel matrix
computation"""
H_full_ref = self.step_11_nlos_ref( self.phi,
self.rays.aoa,
self.rays.aod,
self.rays.zoa,
self.rays.zod,
self.rays.xpr,
self.rays.powers,
self.sample_times,
self.topology)
H_nlos_ref, delays_nlos_ref = self.step_11_reduce_nlos_ref(
H_full_ref,
self.rays.powers.numpy(),
self.rays.delays.numpy(),
self.c_ds)
H_nlos, delays_nlos = self.ccg._step_11_reduce_nlos(
tf.constant(H_full_ref, tf.complex128), self.rays, self.c_ds)
H_nlos = H_nlos.numpy()
delays_nlos = delays_nlos.numpy()
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
max_err = self.max_rel_err(H_nlos_ref, H_nlos)
self.assertLessEqual(max_err, err_tol)
max_err = self.max_rel_err(delays_nlos_ref, delays_nlos)
self.assertLessEqual(max_err, err_tol)
def step_11_los_ref(self, t, topology):
"""Reference implementation: Compute the channel matrix of the NLoS
component 2"""
# LoS departure and arrival angles
los_aoa = np.expand_dims(np.expand_dims(topology.los_aoa.numpy(),
axis=3), axis=4)
los_zoa = np.expand_dims(np.expand_dims(topology.los_zoa.numpy(),
axis=3), axis=4)
los_aod = np.expand_dims(np.expand_dims(topology.los_aod.numpy(),
axis=3), axis=4)
los_zod = np.expand_dims(np.expand_dims(topology.los_zod.numpy(),
axis=3), axis=4)
# Field matrix
H_phase = np.reshape(np.array([[1.,0.],
[0.,-1.]]), [1,1,1,1,1,2,2])
H_field = self.step_11_field_matrix_ref(topology, los_aoa, los_aod,
los_zoa, los_zod, H_phase)
# Array offset matrix
H_array = self.step_11_array_offsets_ref(los_aoa, los_aod, los_zoa,
los_zod, topology)
# Doppler matrix
H_doppler = self.step_11_doppler_matrix_ref(topology, los_aoa,
los_zoa, t)
# Phase shift due to propagation delay
d3D = topology.distance_3d.numpy()
lambda_0 = self.scenario.lambda_0.numpy()
H_delay = np.exp(1j*2*np.pi*d3D/lambda_0)
# Combining all to compute channel coefficient
H_field = np.expand_dims(np.squeeze(H_field, axis=4), axis=-1)
H_array = np.expand_dims(np.squeeze(H_array, axis=4), axis=-1)
H_doppler = np.expand_dims(H_doppler, axis=4)
H_delay = np.expand_dims(np.expand_dims(np.expand_dims(
np.expand_dims(H_delay, axis=3), axis=4), axis=5), axis=6)
H_los = H_field*H_array*H_doppler*H_delay
return H_los
def test_step11_los(self):
"""Test 3GPP channel coefficient calculation: LoS channel matrix"""
H_los_ref = self.step_11_los_ref(self.sample_times, self.topology)
H_los = self.ccg._step_11_los(self.topology, self.sample_times)
H_los = H_los.numpy()
max_err = self.max_rel_err(H_los_ref, H_los)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def step_11_ref(self, phi, k_factor, aoa, aod, zoa, zod, kappa, powers,
delays, t, topology, c_ds):
"""Reference implementation: Step 11"""
## NLoS
H_full = self.step_11_nlos_ref(phi, aoa, aod, zoa, zod, kappa, powers,
t, topology)
H_nlos, delays_nlos = self.step_11_reduce_nlos_ref(H_full, powers,
delays, c_ds)
## LoS
H_los = self.step_11_los_ref(t, topology)
k_factor = np.reshape(k_factor, list(k_factor.shape) + [1,1,1,1])
los_scaling = np.sqrt(k_factor/(k_factor+1.))
nlos_scaling = np.sqrt(1./(k_factor+1.))
H_los_nlos = nlos_scaling*H_nlos
H_los_los = los_scaling*H_los
H_los_los = H_los_los + H_los_nlos[:,:,:,:1,...]
H_los = np.concatenate([H_los_los, H_los_nlos[:,:,:,1:,...]], axis=3)
## Setting up the CIR according to the link configuration
los_status = topology.los.numpy()
los_status = np.reshape(los_status, list(los_status.shape) + [1,1,1,1])
H = np.where(los_status, H_los, H_nlos)
return H, delays_nlos
def test_step_11(self):
"""Test 3GPP channel coefficient calculation: Step 11"""
H, delays_nlos = self.ccg._step_11(tf.constant(self.phi, tf.float64),
self.topology,
self.lsp.k_factor,
self.rays,
tf.constant(self.sample_times,
tf.float64),
self.c_ds)
H = H.numpy()
delays_nlos = delays_nlos.numpy()
H_ref, delays_nlos_ref = self.step_11_ref(self.phi,
self.lsp.k_factor.numpy(),
self.rays.aoa.numpy(),
self.rays.aod.numpy(),
self.rays.zoa.numpy(),
self.rays.zod.numpy(),
self.rays.xpr.numpy(),
self.rays.powers.numpy(),
self.rays.delays.numpy(),
self.sample_times,
self.topology,
self.c_ds)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
max_err = self.max_rel_err(H_ref, H)
self.assertLessEqual(max_err, err_tol)
max_err = self.max_rel_err(delays_nlos_ref, delays_nlos)
self.assertLessEqual(max_err, err_tol)
|
packages/syft/src/syft/core/node/vm/__init__.py | vishalbelsare/PySyft | 8,428 | 12675340 | # stdlib
from typing import Dict # noqa: F401
# relative
from ..common.node_service.node_service import NodeService # noqa: F401
from .client import VirtualMachineClient
from .vm import VirtualMachine
message_service_mapping: Dict[str, NodeService] = {}
__all__ = [
"NodeService",
"VirtualMachineClient",
"VirtualMachine",
"message_service_mapping",
]
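# Editor's illustrative sketch (not part of the original module), assuming the
# PySyft 0.6-era API exposed by this package: a VirtualMachine is an in-process
# node and VirtualMachineClient is the handle used to interact with it, e.g.
#
#     vm = VirtualMachine(name="alice")
#     client = vm.get_root_client()   # a VirtualMachineClient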
|
youtube_dl/extractor/beampro.py | hackarada/youtube-dl | 3,001 | 12675350 | <filename>youtube_dl/extractor/beampro.py
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
clean_html,
compat_str,
float_or_none,
int_or_none,
parse_iso8601,
try_get,
urljoin,
)
class BeamProBaseIE(InfoExtractor):
_API_BASE = 'https://mixer.com/api/v1'
_RATINGS = {'family': 0, 'teen': 13, '18+': 18}
def _extract_channel_info(self, chan):
user_id = chan.get('userId') or try_get(chan, lambda x: x['user']['id'])
return {
'uploader': chan.get('token') or try_get(
chan, lambda x: x['user']['username'], compat_str),
'uploader_id': compat_str(user_id) if user_id else None,
'age_limit': self._RATINGS.get(chan.get('audience')),
}
class BeamProLiveIE(BeamProBaseIE):
IE_NAME = 'Mixer:live'
_VALID_URL = r'https?://(?:\w+\.)?(?:beam\.pro|mixer\.com)/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'http://mixer.com/niterhayven',
'info_dict': {
'id': '261562',
'ext': 'mp4',
'title': 'Introducing The Witcher 3 // The Grind Starts Now!',
'description': 'md5:0b161ac080f15fe05d18a07adb44a74d',
'thumbnail': r're:https://.*\.jpg$',
'timestamp': 1483477281,
'upload_date': '20170103',
'uploader': 'niterhayven',
'uploader_id': '373396',
'age_limit': 18,
'is_live': True,
'view_count': int,
},
'skip': 'niterhayven is offline',
'params': {
'skip_download': True,
},
}
_MANIFEST_URL_TEMPLATE = '%s/channels/%%s/manifest.%%s' % BeamProBaseIE._API_BASE
@classmethod
def suitable(cls, url):
return False if BeamProVodIE.suitable(url) else super(BeamProLiveIE, cls).suitable(url)
def _real_extract(self, url):
channel_name = self._match_id(url)
chan = self._download_json(
'%s/channels/%s' % (self._API_BASE, channel_name), channel_name)
if chan.get('online') is False:
raise ExtractorError(
'{0} is offline'.format(channel_name), expected=True)
channel_id = chan['id']
def manifest_url(kind):
return self._MANIFEST_URL_TEMPLATE % (channel_id, kind)
formats = self._extract_m3u8_formats(
manifest_url('m3u8'), channel_name, ext='mp4', m3u8_id='hls',
fatal=False)
formats.extend(self._extract_smil_formats(
manifest_url('smil'), channel_name, fatal=False))
self._sort_formats(formats)
info = {
'id': compat_str(chan.get('id') or channel_name),
'title': self._live_title(chan.get('name') or channel_name),
'description': clean_html(chan.get('description')),
'thumbnail': try_get(
chan, lambda x: x['thumbnail']['url'], compat_str),
'timestamp': parse_iso8601(chan.get('updatedAt')),
'is_live': True,
'view_count': int_or_none(chan.get('viewersTotal')),
'formats': formats,
}
info.update(self._extract_channel_info(chan))
return info
class BeamProVodIE(BeamProBaseIE):
IE_NAME = 'Mixer:vod'
_VALID_URL = r'https?://(?:\w+\.)?(?:beam\.pro|mixer\.com)/[^/?#&]+\?.*?\bvod=(?P<id>[^?#&]+)'
_TESTS = [{
'url': 'https://mixer.com/willow8714?vod=2259830',
'md5': 'b2431e6e8347dc92ebafb565d368b76b',
'info_dict': {
'id': '2259830',
'ext': 'mp4',
'title': 'willow8714\'s Channel',
'duration': 6828.15,
'thumbnail': r're:https://.*source\.png$',
'timestamp': 1494046474,
'upload_date': '20170506',
'uploader': 'willow8714',
'uploader_id': '6085379',
'age_limit': 13,
'view_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://mixer.com/streamer?vod=IxFno1rqC0S_XJ1a2yGgNw',
'only_matching': True,
}, {
'url': 'https://mixer.com/streamer?vod=Rh3LY0VAqkGpEQUe2pN-ig',
'only_matching': True,
}]
@staticmethod
def _extract_format(vod, vod_type):
if not vod.get('baseUrl'):
return []
if vod_type == 'hls':
filename, protocol = 'manifest.m3u8', 'm3u8_native'
elif vod_type == 'raw':
filename, protocol = 'source.mp4', 'https'
else:
assert False
data = vod.get('data') if isinstance(vod.get('data'), dict) else {}
format_id = [vod_type]
if isinstance(data.get('Height'), compat_str):
format_id.append('%sp' % data['Height'])
return [{
'url': urljoin(vod['baseUrl'], filename),
'format_id': '-'.join(format_id),
'ext': 'mp4',
'protocol': protocol,
'width': int_or_none(data.get('Width')),
'height': int_or_none(data.get('Height')),
'fps': int_or_none(data.get('Fps')),
'tbr': int_or_none(data.get('Bitrate'), 1000),
}]
def _real_extract(self, url):
vod_id = self._match_id(url)
vod_info = self._download_json(
'%s/recordings/%s' % (self._API_BASE, vod_id), vod_id)
state = vod_info.get('state')
if state != 'AVAILABLE':
raise ExtractorError(
'VOD %s is not available (state: %s)' % (vod_id, state),
expected=True)
formats = []
thumbnail_url = None
for vod in vod_info['vods']:
vod_type = vod.get('format')
if vod_type in ('hls', 'raw'):
formats.extend(self._extract_format(vod, vod_type))
elif vod_type == 'thumbnail':
thumbnail_url = urljoin(vod.get('baseUrl'), 'source.png')
self._sort_formats(formats)
info = {
'id': vod_id,
'title': vod_info.get('name') or vod_id,
'duration': float_or_none(vod_info.get('duration')),
'thumbnail': thumbnail_url,
'timestamp': parse_iso8601(vod_info.get('createdAt')),
'view_count': int_or_none(vod_info.get('viewsTotal')),
'formats': formats,
}
info.update(self._extract_channel_info(vod_info.get('channel') or {}))
return info
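# Editor's illustrative sketch (not part of the original module): extractors
# are not called directly; YoutubeDL picks the matching IE via _VALID_URL and
# invokes _real_extract(). A minimal, hedged example (Mixer itself has since
# shut down, so the request would fail today):
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
#         info = ydl.extract_info('https://mixer.com/willow8714?vod=2259830',
#                                 download=False)
#         print(info.get('title'), info.get('duration'))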
|
applications/admin.py | crydotsnake/djangogirls | 446 | 12675354 | from adminsortable2.admin import SortableAdminMixin
from django.contrib import admin
from django.shortcuts import redirect, render
from django.urls import reverse, path
from django.utils.html import format_html
from core.models import Event
from .models import Answer, Application, Email, Form, Question
class FormAdmin(admin.ModelAdmin):
list_display = (
'text_header', 'event',
'open_from', 'open_until', 'number_of_applications',
'get_submissions_url'
)
def get_queryset(self, request):
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(event__team__in=[request.user])
def get_form(self, request, obj=None, **kwargs):
form = super(FormAdmin, self).get_form(request, obj, **kwargs)
if not request.user.is_superuser:
event = Event.objects.filter(team__in=[request.user])
form.base_fields['event'].queryset = event
return form
def get_urls(self):
urls = super().get_urls()
my_urls = [
path('submissions/', self.admin_site.admin_view(self.view_submissions)),
]
return my_urls + urls
def view_submissions(self, request):
forms = self.get_queryset(request)
if forms.count() == 1:
# There is only one form, redirect to applications list straight away
form = forms.get()
return redirect('applications:applications', form.event.page_url)
return render(request, 'admin/applications/form/view_submissions.html', {
'forms': forms
})
def get_submissions_url(self, obj):
return format_html(
'<a href="{}" target="_blank">See all submitted applications</a>',
reverse('applications:applications', args=[obj.event.page_url]))
get_submissions_url.short_description = "Applications"
class FormFilter(admin.SimpleListFilter):
title = "Form"
parameter_name = "form"
def lookups(self, request, queryset):
qs = Form.objects.all()
if not request.user.is_superuser:
qs = qs.filter(event__team__in=[request.user])
return map(lambda x: (x.id, str(x)), qs)
def queryset(self, request, queryset):
if self.value():
return queryset.filter(form=self.value())
return queryset
class QuestionAdmin(SortableAdminMixin, admin.ModelAdmin):
list_display = ('form', 'title', 'question_type', 'is_required', 'order')
list_filter = (FormFilter,)
def get_queryset(self, request):
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(form__event__team__in=[request.user])
def get_form(self, request, obj=None, **kwargs):
form = super().get_form(request, obj, **kwargs)
if not request.user.is_superuser:
form_objs = Form.objects.filter(event__team__in=[request.user])
form.base_fields['form'].queryset = form_objs
return form
class AnswerInlineAdmin(admin.TabularInline):
model = Answer
can_delete = False
extra = 0
readonly_fields = ('question', 'answer')
class ApplicationAdmin(admin.ModelAdmin):
list_display = ('number', 'form', 'newsletter_optin', 'email', 'created')
list_filter = ('form', 'newsletter_optin')
inlines = [AnswerInlineAdmin]
class AnswerAdmin(admin.ModelAdmin):
list_display = ('application', 'question', 'answer')
raw_id_fields = ('question', 'application')
class EmailAdmin(admin.ModelAdmin):
list_display = ('form', 'author', 'subject', 'recipients_group', 'created', 'sent')
admin.site.register(Form, FormAdmin)
admin.site.register(Question, QuestionAdmin)
admin.site.register(Application, ApplicationAdmin)
admin.site.register(Answer, AnswerAdmin)
admin.site.register(Email, EmailAdmin)
|
igibson/utils/muvr_utils.py | suresh-guttikonda/iGibson | 360 | 12675366 | <reponame>suresh-guttikonda/iGibson
""" Utility classes and functions needed for the multi-user VR experience. """
import copy
import time
from collections import defaultdict
from time import sleep
import numpy as np
from PodSixNet.Channel import Channel
from PodSixNet.Connection import ConnectionListener, connection
from PodSixNet.Server import Server
from igibson.render.mesh_renderer.mesh_renderer_cpu import Instance, InstanceGroup
from igibson.utils.vr_utils import VrData
# An FPS cap is needed to ensure that the client and server don't fall too far out of sync
# 30 is a good cap that matches average VR speed and guarantees that the server frame data queue does not become backlogged
MUVR_FPS_CAP = 30.0
# Classes used in MUVR demos
class IGVRClient(ConnectionListener):
"""MUVR client that uses server's frame data to render and generates VR data for the server to consume."""
def __init__(self, host, port):
self.Connect((host, port))
self.frame_data = {}
self.frame_start = 0
self.vr_offset = [0, 0, 0]
def register_data(self, sim, client_agent):
self.s = sim
self.renderer = sim.renderer
self.client_agent = client_agent
self.vr_device = "{}_controller".format(self.s.vr_settings.movement_controller)
self.devices = ["left_controller", "right_controller", "hmd"]
def ingest_frame_data(self):
self.frame_start = time.time()
if not self.frame_data:
return
# Deep copy frame data so it doesn't get overwritten by a random async callback
self.latest_frame_data = copy.deepcopy(self.frame_data)
for instance in self.renderer.get_instances():
data = self.latest_frame_data[instance.pybullet_uuid]
if isinstance(instance, Instance):
trans = np.array(data[0])
rot = np.array(data[1])
instance.pose_trans = trans
instance.pose_rot = rot
elif isinstance(instance, InstanceGroup):
poses_trans = []
poses_rot = []
data_trans = data[0]
data_rot = data[1]
num_links = len(data_trans)
for i in range(num_links):
next_trans = np.array(data_trans[i])
next_rot = np.array(data_rot[i])
poses_trans.append(np.ascontiguousarray(next_trans))
poses_rot.append(np.ascontiguousarray(next_rot))
instance.poses_trans = poses_trans
instance.poses_rot = poses_rot
def client_step(self):
self.s.viewer.update()
if self.s.can_access_vr_context:
self.s.poll_vr_events()
# Sets the VR starting position if one has been specified by the user
self.s.perform_vr_start_pos_move()
# Update VR offset so updated value can be used in server
self.client_agent.update_frame_offset()
def gen_vr_data(self):
if not self.s.can_access_vr_context:
self.vr_data = []
else:
# Store all data in a dictionary to be sent to the server
vr_data_dict = defaultdict(list)
for device in self.devices:
device_data = []
is_valid, trans, rot = self.s.get_data_for_vr_device(device)
device_data.extend([is_valid, trans.tolist(), rot.tolist()])
device_data.extend(self.s.get_device_coordinate_system(device))
if device in ["left_controller", "right_controller"]:
device_data.extend(self.s.get_button_data_for_controller(device))
vr_data_dict[device] = device_data
vr_data_dict["eye_data"] = self.s.get_eye_tracking_data()
# We need to get VR events instead of polling here, otherwise the previously events will be erased
vr_data_dict["event_data"] = self.s.get_vr_events()
vr_data_dict["vr_pos"] = self.s.get_vr_pos().tolist()
vr_data_dict["vr_offset"] = [float(self.vr_offset[0]), float(self.vr_offset[1]), float(self.vr_offset[2])]
vr_data_dict["vr_settings"] = [
self.s.vr_settings.eye_tracking,
self.s.vr_settings.touchpad_movement,
self.s.vr_settings.movement_controller,
self.s.vr_settings.relative_movement_device,
self.s.vr_settings.movement_speed,
]
self.vr_data = dict(vr_data_dict)
def send_vr_data(self):
if self.vr_data:
self.Send({"action": "vr_data", "vr_data": self.vr_data})
def Network_frame_data(self, data):
# Store frame data until it is needed during rendering
# This avoids the overhead of updating the renderer every single time this function is called
self.frame_data = data["frame_data"]
def Refresh(self):
# Receive data from connection's queue
self.Pump()
# Push data out to the network
connection.Pump()
# Keep client at FPS cap if it is running too fast
frame_dur = time.time() - self.frame_start
time_until_min_dur = (1 / MUVR_FPS_CAP) - frame_dur
if time_until_min_dur > 0:
sleep(time_until_min_dur)
class IGVRChannel(Channel):
"""Server's representation of the IGVRClient."""
def __init__(self, *args, **kwargs):
Channel.__init__(self, *args, **kwargs)
self.vr_data = {}
def Close(self):
print(self, "Client disconnected")
def Network_vr_data(self, data):
# Store vr data until it is needed for physics simulation
# This avoids the overhead of updating the physics simulation every time this function is called
self.vr_data = data["vr_data"]
def send_frame_data(self, frame_data):
self.Send({"action": "frame_data", "frame_data": frame_data})
class IGVRServer(Server):
"""MUVR server that sends frame data and ingests vr data each frame."""
channelClass = IGVRChannel
def __init__(self, *args, **kwargs):
Server.__init__(self, *args, **kwargs)
self.client = None
self.latest_vr_data = None
self.frame_start = 0
def Connected(self, channel, addr):
# print("Someone connected to the server!")
self.client = channel
def register_data(self, sim, client_agent):
self.s = sim
self.renderer = sim.renderer
self.client_agent = client_agent
def client_connected(self):
return self.client is not None
def ingest_vr_data(self):
self.frame_start = time.time()
if not self.client:
return
if not self.client.vr_data:
return
if not self.latest_vr_data:
self.latest_vr_data = VrData(self.s.vr_settings)
# Make a copy of channel's most recent VR data, so it doesn't get mutated if new requests arrive
self.latest_vr_data.refresh_muvr_data(copy.deepcopy(self.client.vr_data))
def gen_frame_data(self):
# Frame data is stored as a dictionary mapping pybullet uuid to pose/rot data
self.frame_data = {}
# It is assumed that the client renderer will have loaded instances in the same order as the server
for instance in self.renderer.get_instances():
# Loop through all instances and get pos and rot data
# We convert numpy arrays into lists so they can be serialized and sent over the network
# Lists can also be easily reconstructed back into numpy arrays on the client side
if isinstance(instance, Instance):
pose = instance.pose_trans.tolist()
rot = instance.pose_rot.tolist()
self.frame_data[instance.pybullet_uuid] = [pose, rot]
elif isinstance(instance, InstanceGroup):
poses = []
rots = []
for pose in instance.poses_trans:
poses.append(pose.tolist())
for rot in instance.poses_rot:
rots.append(rot.tolist())
self.frame_data[instance.pybullet_uuid] = [poses, rots]
def send_frame_data(self):
if self.client:
self.client.send_frame_data(self.frame_data)
def Refresh(self):
self.Pump()
# Keep server at FPS cap if it is running too fast
frame_dur = time.time() - self.frame_start
time_until_min_dur = (1 / MUVR_FPS_CAP) - frame_dur
if time_until_min_dur > 0:
sleep(time_until_min_dur)
# Test functions/classes used for debugging network issues
def gen_test_packet(sender="server", size=3000):
"""
Generates a simple test packet, containing a decent amount of data,
as well as the timestamp of generation and the sender.
"""
# Packet containing 'size' floats
data = [0.0 if i % 2 == 0 else 1.0 for i in range(size)]
timestamp = "{}".format(time.time())
packet = {"data": data, "timestamp": timestamp, "sender": sender}
return packet
class IGVRTestClient(ConnectionListener):
"""Test client to debug connections."""
def __init__(self, host, port):
self.Connect((host, port))
def set_packet_size(self, size):
self.packet_size = size
def gen_packet(self):
self.packet = gen_test_packet(sender="client", size=self.packet_size)
def send_packet(self):
self.Send({"action": "client_packet", "packet": self.packet})
def Network_server_packet(self, data):
self.server_packet = data["packet"]
print("----- Packet received from {} -----".format(self.server_packet["sender"]))
packet_tstamp = float(self.server_packet["timestamp"])
print("Packet Timestamp: {}".format(packet_tstamp))
curr_time = time.time()
print("Current Timestamp: {}".format(curr_time))
print("Delta (+ is delay): {}\n".format(curr_time - packet_tstamp))
def Refresh(self):
# Receive data from connection's queue
self.Pump()
# Push data out to the network
connection.Pump()
class IGVRTestChannel(Channel):
"""Server's representation of the IGVRTestClient."""
def __init__(self, *args, **kwargs):
Channel.__init__(self, *args, **kwargs)
def Close(self):
print(self, "Client disconnected")
def Network_client_packet(self, data):
self.client_packet = data["packet"]
print("----- Packet received from {} -----".format(self.client_packet["sender"]))
packet_tstamp = float(self.client_packet["timestamp"])
print("Packet Timestamp: {}".format(packet_tstamp))
curr_time = time.time()
print("Current Timestamp: {}".format(curr_time))
print("Delta (+ is delay): {}\n".format(curr_time - packet_tstamp))
def send_packet(self, packet):
self.Send({"action": "server_packet", "packet": packet})
class IGVRTestServer(Server):
"""Test MUVR server."""
channelClass = IGVRTestChannel
def __init__(self, *args, **kwargs):
Server.__init__(self, *args, **kwargs)
self.client = None
def Connected(self, channel, addr):
print("Someone connected to the server!")
self.client = channel
def client_connected(self):
return self.client is not None
def set_packet_size(self, size):
self.packet_size = size
def gen_packet(self):
self.packet = gen_test_packet(sender="server", size=self.packet_size)
def send_packet(self):
if self.client:
self.client.send_packet(self.packet)
def Refresh(self):
self.Pump()
|
pogom/pgoapi/protos/POGOProtos/Networking/Responses/ReleasePokemonResponse_pb2.py | tier4fusion/pogom-updated | 463 | 12675375 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/ReleasePokemonResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/ReleasePokemonResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n<POGOProtos/Networking/Responses/ReleasePokemonResponse.proto\x12\x1fPOGOProtos.Networking.Responses\"\xdd\x01\n\x16ReleasePokemonResponse\x12N\n\x06result\x18\x01 \x01(\x0e\x32>.POGOProtos.Networking.Responses.ReleasePokemonResponse.Result\x12\x15\n\rcandy_awarded\x18\x02 \x01(\x05\"\\\n\x06Result\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x14\n\x10POKEMON_DEPLOYED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\x18\n\x14\x45RROR_POKEMON_IS_EGG\x10\x04\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_RELEASEPOKEMONRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='POGOProtos.Networking.Responses.ReleasePokemonResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSET', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POKEMON_DEPLOYED', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAILED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_POKEMON_IS_EGG', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=227,
serialized_end=319,
)
_sym_db.RegisterEnumDescriptor(_RELEASEPOKEMONRESPONSE_RESULT)
_RELEASEPOKEMONRESPONSE = _descriptor.Descriptor(
name='ReleasePokemonResponse',
full_name='POGOProtos.Networking.Responses.ReleasePokemonResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='POGOProtos.Networking.Responses.ReleasePokemonResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='candy_awarded', full_name='POGOProtos.Networking.Responses.ReleasePokemonResponse.candy_awarded', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_RELEASEPOKEMONRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=319,
)
_RELEASEPOKEMONRESPONSE.fields_by_name['result'].enum_type = _RELEASEPOKEMONRESPONSE_RESULT
_RELEASEPOKEMONRESPONSE_RESULT.containing_type = _RELEASEPOKEMONRESPONSE
DESCRIPTOR.message_types_by_name['ReleasePokemonResponse'] = _RELEASEPOKEMONRESPONSE
ReleasePokemonResponse = _reflection.GeneratedProtocolMessageType('ReleasePokemonResponse', (_message.Message,), dict(
DESCRIPTOR = _RELEASEPOKEMONRESPONSE,
__module__ = 'POGOProtos.Networking.Responses.ReleasePokemonResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.ReleasePokemonResponse)
))
_sym_db.RegisterMessage(ReleasePokemonResponse)
# @@protoc_insertion_point(module_scope)
|
Algo and DSA/LeetCode-Solutions-master/Python/last-substring-in-lexicographical-order.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12675379 | # Time: O(n)
# Space: O(1)
class Solution(object):
def lastSubstring(self, s):
"""
:type s: str
:rtype: str
"""
left, right, l = 0, 1, 0
while right+l < len(s):
if s[left+l] == s[right+l]:
l += 1
continue
if s[left+l] > s[right+l]:
right += l+1
else:
left = max(right, left+l+1)
right = left+1
l = 0
return s[left:]
# Time: O(n)
# Space: O(n)
import collections
class Solution2(object):
def lastSubstring(self, s):
"""
:type s: str
:rtype: str
"""
count = collections.defaultdict(list)
for i in xrange(len(s)):
count[s[i]].append(i)
max_c = max(count.iterkeys())
starts = {}
for i in count[max_c]:
starts[i] = i+1
while len(starts)-1 > 0:
lookup = set()
next_count = collections.defaultdict(list)
for start, end in starts.iteritems():
if end == len(s): # finished
lookup.add(start)
continue
next_count[s[end]].append(start)
if end in starts: # overlapped
lookup.add(end)
next_starts = {}
max_c = max(next_count.iterkeys())
for start in next_count[max_c]:
if start not in lookup:
next_starts[start] = starts[start]+1
starts = next_starts
return s[next(starts.iterkeys()):]
|
pyethapp/eth_service.py | josephites/pyethapp | 1,519 | 12675390 | <reponame>josephites/pyethapp
# -*- coding: utf8 -*-
from __future__ import absolute_import
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
import copy
import time
import statistics
from collections import deque
import gevent
import gevent.lock
from gevent.queue import Queue
from gevent.event import AsyncResult
import rlp
from devp2p.protocol import BaseProtocol
from devp2p.service import WiredService
from ethereum.block import Block
from ethereum.meta import make_head_candidate
from ethereum.pow.chain import Chain
from ethereum.pow.consensus import initialize, check_pow
from ethereum.config import Env
from ethereum.genesis_helpers import mk_genesis_data
from ethereum import config as ethereum_config
from ethereum.messages import apply_transaction, validate_transaction
from ethereum.transaction_queue import TransactionQueue
from ethereum.experimental.refcount_db import RefcountDB
from ethereum.slogging import get_logger
from ethereum.exceptions import InvalidTransaction, InvalidNonce, \
InsufficientBalance, InsufficientStartGas, VerificationFailed
from ethereum.transactions import Transaction
from ethereum.utils import (
encode_hex,
to_string,
)
from .synchronizer import Synchronizer
from . import eth_protocol
from pyethapp import sentry
from pyethapp.dao import is_dao_challenge, build_dao_header
log = get_logger('eth.chainservice')
class DuplicatesFilter(object):
def __init__(self, max_items=128):
self.max_items = max_items
self.filter = list()
def update(self, data):
"returns True if unknown"
if data not in self.filter:
self.filter.append(data)
if len(self.filter) > self.max_items:
self.filter.pop(0)
return True
else:
self.filter.append(self.filter.pop(0))
return False
def __contains__(self, v):
return v in self.filter
class DAOChallenger(object):
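    """Ask a newly connected peer for the DAO fork block header and verify that
    its hash and extra_data match the configured values before syncing from it."""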
request_timeout = 8.
def __init__(self, chainservice, proto):
self.chainservice = chainservice
self.config = chainservice.config['eth']['block']
self.proto = proto
self.deferred = None
gevent.spawn(self.run)
def run(self):
self.deferred = AsyncResult()
self.proto.send_getblockheaders(self.config['DAO_FORK_BLKNUM'], 1, 0)
try:
dao_headers = self.deferred.get(block=True, timeout=self.request_timeout)
log.debug("received DAO challenge answer", proto=self.proto, answer=dao_headers)
result = len(dao_headers) == 1 and \
dao_headers[0].hash == self.config['DAO_FORK_BLKHASH'] and \
dao_headers[0].extra_data == self.config['DAO_FORK_BLKEXTRA']
self.chainservice.on_dao_challenge_answer(self.proto, result)
except gevent.Timeout:
log.debug('challenge dao timed out', proto=self.proto)
self.chainservice.on_dao_challenge_answer(self.proto, False)
def receive_blockheaders(self, proto, blockheaders):
log.debug('blockheaders received', proto=proto, num=len(blockheaders))
if proto != self.proto:
return
self.deferred.set(blockheaders)
class ChainService(WiredService):
"""
Manages the chain and requests to it.
"""
# required by BaseService
name = 'chain'
default_config = dict(
eth=dict(network_id=0, genesis='', pruning=-1),
block=ethereum_config.default_config
)
# required by WiredService
wire_protocol = eth_protocol.ETHProtocol # create for each peer
# initialized after configure:
chain = None
genesis = None
synchronizer = None
config = None
block_queue_size = 1024
processed_gas = 0
processed_elapsed = 0
process_time_queue_period = 5
def __init__(self, app):
self.config = app.config
sce = self.config['eth']
if int(sce['pruning']) >= 0:
self.db = RefcountDB(app.services.db)
if "I am not pruning" in self.db.db:
raise RuntimeError(
"The database in '{}' was initialized as non-pruning. "
"Can not enable pruning now.".format(self.config['data_dir']))
self.db.ttl = int(sce['pruning'])
self.db.db.put("I am pruning", "1")
else:
self.db = app.services.db
if "I am pruning" in self.db:
raise RuntimeError(
"The database in '{}' was initialized as pruning. "
"Can not disable pruning now".format(self.config['data_dir']))
self.db.put("I am not pruning", "1")
if 'network_id' in self.db:
db_network_id = self.db.get(b'network_id')
if db_network_id != to_string(sce['network_id']):
raise RuntimeError(
"The database in '{}' was initialized with network id {} and can not be used "
"when connecting to network id {}. Please choose a different data directory.".format(
self.config['data_dir'], db_network_id, sce['network_id']
)
)
else:
self.db.put(b'network_id', to_string(sce['network_id']))
self.db.commit()
assert self.db is not None
super(ChainService, self).__init__(app)
log.info('initializing chain')
coinbase = app.services.accounts.coinbase
env = Env(self.db, sce['block'])
genesis_data = sce.get('genesis_data', {})
if not genesis_data:
genesis_data = mk_genesis_data(env)
self.chain = Chain(
env=env, genesis=genesis_data, coinbase=coinbase,
new_head_cb=self._on_new_head)
header = self.chain.state.prev_headers[0]
log.info('chain at', number=header.number)
if 'genesis_hash' in sce:
assert sce['genesis_hash'] == self.chain.genesis.hex_hash, \
"Genesis hash mismatch.\n Expected: %s\n Got: %s" % (
sce['genesis_hash'], self.chain.genesis.hex_hash)
self.dao_challenges = dict()
self.synchronizer = Synchronizer(self, force_sync=None)
self.block_queue = Queue(maxsize=self.block_queue_size)
# When the transaction_queue is modified, we must set
# self._head_candidate_needs_updating to True in order to force the
# head candidate to be updated.
self.transaction_queue = TransactionQueue()
self._head_candidate_needs_updating = True
# Initialize a new head candidate.
_ = self.head_candidate
self.min_gasprice = 20 * 10**9 # TODO: better be an option to validator service?
self.add_blocks_lock = False
self.add_transaction_lock = gevent.lock.Semaphore()
self.broadcast_filter = DuplicatesFilter()
self.on_new_head_cbs = []
self.newblock_processing_times = deque(maxlen=1000)
gevent.spawn_later(self.process_time_queue_period, self.process_time_queue)
@property
def is_syncing(self):
return self.synchronizer.synctask is not None
@property
def is_mining(self):
if 'pow' in self.app.services:
return self.app.services.pow.active
if 'validator' in self.app.services:
return self.app.services.validator.active
return False
def process_time_queue(self):
try:
self.chain.process_time_queue()
except Exception as e:
log.info(str(e))
finally:
gevent.spawn_later(self.process_time_queue_period, self.process_time_queue)
# TODO: Move to pyethereum
def get_receipts(self, block):
# Receipts are no longer stored in the database, so need to generate
# them on the fly here.
temp_state = self.chain.mk_poststate_of_blockhash(block.header.prevhash)
initialize(temp_state, block)
for tx in block.transactions:
apply_transaction(temp_state, tx)
return temp_state.receipts
def _on_new_head(self, block):
log.debug('new head cbs', num=len(self.on_new_head_cbs))
self.transaction_queue = self.transaction_queue.diff(
block.transactions)
self._head_candidate_needs_updating = True
for cb in self.on_new_head_cbs:
cb(block)
@property
def head_candidate(self):
if self._head_candidate_needs_updating:
self._head_candidate_needs_updating = False
# Make a copy of self.transaction_queue because
# make_head_candidate modifies it.
txqueue = copy.deepcopy(self.transaction_queue)
self._head_candidate, self._head_candidate_state = make_head_candidate(
self.chain, txqueue, timestamp=int(time.time()))
return self._head_candidate
def add_transaction(self, tx, origin=None, force_broadcast=False, force=False):
if self.is_syncing:
if force_broadcast:
assert origin is None # only allowed for local txs
log.debug('force broadcasting unvalidated tx')
self.broadcast_transaction(tx, origin=origin)
return # we can not evaluate the tx based on outdated state
log.debug('add_transaction', locked=(not self.add_transaction_lock.locked()), tx=tx)
assert isinstance(tx, Transaction)
assert origin is None or isinstance(origin, BaseProtocol)
if tx.hash in self.broadcast_filter:
log.debug('discarding known tx') # discard early
return
# validate transaction
try:
# Transaction validation for broadcasting. Transaction is validated
# against the current head candidate.
validate_transaction(self._head_candidate_state, tx)
log.debug('valid tx, broadcasting')
self.broadcast_transaction(tx, origin=origin) # asap
except InvalidTransaction as e:
log.debug('invalid tx', error=e)
return
if origin is not None: # not locally added via jsonrpc
if not self.is_mining or self.is_syncing:
log.debug('discarding tx', syncing=self.is_syncing, mining=self.is_mining)
return
if tx.gasprice >= self.min_gasprice:
self.add_transaction_lock.acquire()
self.transaction_queue.add_transaction(tx, force=force)
self._head_candidate_needs_updating = True
self.add_transaction_lock.release()
else:
log.info("too low gasprice, ignore", tx=encode_hex(tx.hash)[:8], gasprice=tx.gasprice)
def check_header(self, header):
return check_pow(self.chain.state, header)
def add_block(self, t_block, proto):
"adds a block to the block_queue and spawns _add_block if not running"
self.block_queue.put((t_block, proto)) # blocks if full
if not self.add_blocks_lock:
self.add_blocks_lock = True # need to lock here (ctx switch is later)
gevent.spawn(self._add_blocks)
def add_mined_block(self, block):
log.debug('adding mined block', block=block)
assert isinstance(block, Block)
if self.chain.add_block(block):
log.debug('added', block=block, ts=time.time())
assert block == self.chain.head
self.transaction_queue = self.transaction_queue.diff(block.transactions)
self._head_candidate_needs_updating = True
self.broadcast_newblock(block, chain_difficulty=self.chain.get_score(block))
return True
log.debug('failed to add', block=block, ts=time.time())
return False
def knows_block(self, block_hash):
"if block is in chain or in queue"
if self.chain.has_blockhash(block_hash):
return True
# check if queued or processed
for i in range(len(self.block_queue.queue)):
if block_hash == self.block_queue.queue[i][0].header.hash:
return True
return False
def _add_blocks(self):
log.debug('add_blocks', qsize=self.block_queue.qsize(),
add_tx_lock=self.add_transaction_lock.locked())
assert self.add_blocks_lock is True
self.add_transaction_lock.acquire()
try:
while not self.block_queue.empty():
# sleep at the beginning because continue keywords will skip bottom
gevent.sleep(0.001)
t_block, proto = self.block_queue.peek() # peek: knows_block while processing
if self.chain.has_blockhash(t_block.header.hash):
log.warn('known block', block=t_block)
self.block_queue.get()
continue
if not self.chain.has_blockhash(t_block.header.prevhash):
log.warn('missing parent', block=t_block, head=self.chain.head)
self.block_queue.get()
continue
try: # deserialize
st = time.time()
block = t_block.to_block()
elapsed = time.time() - st
log.debug('deserialized', elapsed='%.4fs' % elapsed, ts=time.time(),
gas_used=block.gas_used, gpsec=self.gpsec(block.gas_used, elapsed))
except InvalidTransaction as e:
log.warn('invalid transaction', block=t_block, error=e, FIXME='ban node')
errtype = \
'InvalidNonce' if isinstance(e, InvalidNonce) else \
'NotEnoughCash' if isinstance(e, InsufficientBalance) else \
'OutOfGasBase' if isinstance(e, InsufficientStartGas) else \
'other_transaction_error'
sentry.warn_invalid(t_block, errtype)
self.block_queue.get()
continue
except VerificationFailed as e:
log.warn('verification failed', error=e, FIXME='ban node')
sentry.warn_invalid(t_block, 'other_block_error')
self.block_queue.get()
continue
# All checks passed
log.debug('adding', block=block, ts=time.time())
if self.chain.add_block(block):
now = time.time()
log.info('added', block=block, txs=block.transaction_count,
gas_used=block.gas_used)
if t_block.newblock_timestamp:
total = now - t_block.newblock_timestamp
self.newblock_processing_times.append(total)
avg = statistics.mean(self.newblock_processing_times)
med = statistics.median(self.newblock_processing_times)
max_ = max(self.newblock_processing_times)
min_ = min(self.newblock_processing_times)
log.info('processing time', last=total, avg=avg, max=max_, min=min_,
median=med)
if self.is_mining:
self.transaction_queue = self.transaction_queue.diff(block.transactions)
else:
log.warn('could not add', block=block)
self.block_queue.get() # remove block from queue (we peeked only)
finally:
self.add_blocks_lock = False
self.add_transaction_lock.release()
def gpsec(self, gas_spent=0, elapsed=0):
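        # Running average of gas processed per second over all blocks applied so far.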
if gas_spent:
self.processed_gas += gas_spent
self.processed_elapsed += elapsed
return int(old_div(self.processed_gas, (0.001 + self.processed_elapsed)))
def broadcast_newblock(self, block, chain_difficulty=None, origin=None):
if not chain_difficulty:
assert self.chain.has_blockhash(block.hash)
chain_difficulty = self.chain.get_score(block)
assert isinstance(block, (eth_protocol.TransientBlock, Block))
if self.broadcast_filter.update(block.header.hash):
log.debug('broadcasting newblock', origin=origin)
bcast = self.app.services.peermanager.broadcast
bcast(eth_protocol.ETHProtocol, 'newblock', args=(block, chain_difficulty),
exclude_peers=[origin.peer] if origin else [])
else:
log.debug('already broadcasted block')
def broadcast_transaction(self, tx, origin=None):
assert isinstance(tx, Transaction)
if self.broadcast_filter.update(tx.hash):
log.debug('broadcasting tx', origin=origin)
bcast = self.app.services.peermanager.broadcast
bcast(eth_protocol.ETHProtocol, 'transactions', args=(tx,),
exclude_peers=[origin.peer] if origin else [])
else:
log.debug('already broadcasted tx')
def query_headers(self, hash_mode, max_hashes, skip, reverse, origin_hash=None, number=None):
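        """Collect up to max_hashes block headers, walking the chain either by
        hash (hash_mode) or by block number, skipping `skip` blocks between
        consecutive headers and honoring the `reverse` traversal direction."""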
headers = []
unknown = False
while not unknown and len(headers) < max_hashes:
if hash_mode:
if not origin_hash:
break
block = self.chain.get_block(origin_hash)
if not block:
break
# If reached genesis, stop
if block.number == 0:
break
origin = block.header
else:
# If reached genesis, stop
if number is None or number == 0:
break
block = self.chain.get_block_by_number(number)
if block is None:
break
origin = block.header
headers.append(origin)
if hash_mode: # hash traversal
if reverse:
for i in range(skip+1):
try:
block = self.chain.get_block(origin_hash)
if block:
origin_hash = block.prevhash
else:
unknown = True
break
except KeyError:
unknown = True
break
else:
blockhash = self.chain.get_blockhash_by_number(origin.number + skip + 1)
try:
# block = self.chain.get_block(blockhash)
if block and self.chain.get_blockhashes_from_hash(blockhash, skip+1)[skip] == origin_hash:
origin_hash = blockhash
else:
unknown = True
except KeyError:
unknown = True
else: # number traversal
if reverse:
if number >= (skip + 1):
number -= (skip + 1)
else:
unknown = True
else:
number += (skip + 1)
return headers
# wire protocol receivers ###########
def on_wire_protocol_start(self, proto):
log.debug('----------------------------------')
log.debug('on_wire_protocol_start', proto=proto)
assert isinstance(proto, self.wire_protocol)
# register callbacks
proto.receive_status_callbacks.append(self.on_receive_status)
proto.receive_newblockhashes_callbacks.append(self.on_newblockhashes)
proto.receive_transactions_callbacks.append(self.on_receive_transactions)
proto.receive_getblockheaders_callbacks.append(self.on_receive_getblockheaders)
proto.receive_blockheaders_callbacks.append(self.on_receive_blockheaders)
proto.receive_getblockbodies_callbacks.append(self.on_receive_getblockbodies)
proto.receive_blockbodies_callbacks.append(self.on_receive_blockbodies)
proto.receive_newblock_callbacks.append(self.on_receive_newblock)
# send status
head = self.chain.head
proto.send_status(chain_difficulty=self.chain.get_score(head), chain_head_hash=head.hash,
genesis_hash=self.chain.genesis.hash)
def on_wire_protocol_stop(self, proto):
assert isinstance(proto, self.wire_protocol)
log.debug('----------------------------------')
log.debug('on_wire_protocol_stop', proto=proto)
def on_receive_status(self, proto, eth_version, network_id, chain_difficulty, chain_head_hash,
genesis_hash):
log.debug('----------------------------------')
log.debug('status received', proto=proto, eth_version=eth_version)
if eth_version != proto.version:
if ('eth', proto.version) in proto.peer.remote_capabilities:
# if remote peer is capable of our version, keep the connection
# even the peer tried a different version
pass
else:
log.debug("no capable protocol to use, disconnect",
proto=proto, eth_version=eth_version)
proto.send_disconnect(proto.disconnect.reason.useless_peer)
return
if network_id != self.config['eth'].get('network_id', proto.network_id):
log.debug("invalid network id", remote_network_id=network_id,
expected_network_id=self.config['eth'].get('network_id', proto.network_id))
raise eth_protocol.ETHProtocolError('wrong network_id')
# check genesis
if genesis_hash != self.chain.genesis.hash:
log.warn("invalid genesis hash", remote_id=proto, genesis=encode_hex(genesis_hash))
raise eth_protocol.ETHProtocolError('wrong genesis block')
# initiate DAO challenge
self.dao_challenges[proto] = (DAOChallenger(self, proto), chain_head_hash, chain_difficulty)
def on_dao_challenge_answer(self, proto, result):
if result:
log.debug("DAO challenge passed")
_, chain_head_hash, chain_difficulty = self.dao_challenges[proto]
# request chain
self.synchronizer.receive_status(proto, chain_head_hash, chain_difficulty)
# send transactions
transactions = self.transaction_queue.peek()
if transactions:
log.debug("sending transactions", remote_id=proto)
proto.send_transactions(*transactions)
else:
log.debug("peer failed to answer DAO challenge, stop.", proto=proto)
if proto.peer:
proto.peer.stop()
del self.dao_challenges[proto]
# transactions
def on_receive_transactions(self, proto, transactions):
"receives rlp.decoded serialized"
log.debug('----------------------------------')
log.debug('remote_transactions_received', count=len(transactions), remote_id=proto)
for tx in transactions:
self.add_transaction(tx, origin=proto)
# blockhashes ###########
def on_newblockhashes(self, proto, newblockhashes):
"""
msg sent out if not the full block is propagated
chances are high, that we get the newblock, though.
"""
log.debug('----------------------------------')
log.debug("recv newblockhashes", num=len(newblockhashes), remote_id=proto)
assert len(newblockhashes) <= 256
self.synchronizer.receive_newblockhashes(proto, newblockhashes)
def on_receive_getblockheaders(self, proto, hash_or_number, block, amount, skip, reverse):
hash_mode = 1 if hash_or_number[0] else 0
block_id = encode_hex(hash_or_number[0]) if hash_mode else hash_or_number[1]
log.debug('----------------------------------')
log.debug("handle_getblockheaders", amount=amount, block=block_id)
headers = []
max_hashes = min(amount, self.wire_protocol.max_getblockheaders_count)
if hash_mode:
origin_hash = hash_or_number[0]
else:
if is_dao_challenge(self.config['eth']['block'], hash_or_number[1], amount, skip):
log.debug("sending: answer DAO challenge")
headers.append(build_dao_header(self.config['eth']['block']))
proto.send_blockheaders(*headers)
return
try:
origin_hash = self.chain.get_blockhash_by_number(hash_or_number[1])
except KeyError:
origin_hash = b''
if not origin_hash or not self.chain.has_blockhash(origin_hash):
log.debug('unknown block: {}'.format(encode_hex(origin_hash)))
proto.send_blockheaders(*[])
return
headers = self.query_headers(
hash_mode,
max_hashes,
skip,
reverse,
origin_hash=origin_hash,
number=block_id,
)
log.debug("sending: found blockheaders", count=len(headers))
proto.send_blockheaders(*headers)
def on_receive_blockheaders(self, proto, blockheaders):
log.debug('----------------------------------')
if blockheaders:
log.debug("on_receive_blockheaders", count=len(blockheaders), remote_id=proto,
first=encode_hex(blockheaders[0].hash), last=encode_hex(blockheaders[-1].hash))
else:
log.debug("recv 0 remote block headers, signifying genesis block")
if proto in self.dao_challenges:
self.dao_challenges[proto][0].receive_blockheaders(proto, blockheaders)
else:
self.synchronizer.receive_blockheaders(proto, blockheaders)
# blocks ################
def on_receive_getblockbodies(self, proto, blockhashes):
log.debug('----------------------------------')
log.debug("on_receive_getblockbodies", count=len(blockhashes))
found = []
for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
try:
found.append(self.chain.get_block(bh))
except KeyError:
log.debug("unknown block requested", block_hash=encode_hex(bh))
if found:
log.debug("found", count=len(found))
proto.send_blockbodies(*found)
def on_receive_blockbodies(self, proto, bodies):
log.debug('----------------------------------')
log.debug("recv block bodies", count=len(bodies), remote_id=proto)
if bodies:
self.synchronizer.receive_blockbodies(proto, bodies)
def on_receive_newblock(self, proto, block, chain_difficulty):
log.debug('----------------------------------')
log.debug("recv newblock", block=block, remote_id=proto)
self.synchronizer.receive_newblock(proto, block, chain_difficulty)
|
examples/basic/tube.py | hadivafaii/vedo | 836 | 12675407 | <gh_stars>100-1000
"""Use array to vary radius and color
of a line represented as a tube"""
from vedo import *
import numpy as np
settings.defaultFont = 'Quikhand'
ln = [[sin(x), cos(x), x / 2] for x in np.arange(0, 9, 0.1)]
N = len(ln)
############################### a simple tube along ln
t1 = Tube(ln, c="blue", r=0.08)
############################### vary radius
rads = [0.3*(cos(6.0*ir/N))**2+0.1 for ir in range(N)]
t2 = Tube(ln, r=rads, c="tomato", res=24)
############################### vary color
cols = [i for i in range(N)]
cols = makeBands(cols, 5) # make color bins
t3 = Tube(ln, r=rads, c=cols, res=24)
show(t1, __doc__, at=0, N=3, axes=dict(textScale=4), viewup="z")
show(t2, at=1)
show(t3, at=2, interactive=1).close()
|
Lib/idlelib/idle_test/test_macosx.py | oleksandr-pavlyk/cpython | 52,316 | 12675421 | "Test macosx, coverage 45% on Windows."
from idlelib import macosx
import unittest
from test.support import requires
import tkinter as tk
import unittest.mock as mock
from idlelib.filelist import FileList
mactypes = {'carbon', 'cocoa', 'xquartz'}
nontypes = {'other'}
alltypes = mactypes | nontypes
def setUpModule():
global orig_tktype
orig_tktype = macosx._tk_type
def tearDownModule():
macosx._tk_type = orig_tktype
class InitTktypeTest(unittest.TestCase):
"Test _init_tk_type."
@classmethod
def setUpClass(cls):
requires('gui')
cls.root = tk.Tk()
cls.root.withdraw()
cls.orig_platform = macosx.platform
@classmethod
def tearDownClass(cls):
cls.root.update_idletasks()
cls.root.destroy()
del cls.root
macosx.platform = cls.orig_platform
def test_init_sets_tktype(self):
"Test that _init_tk_type sets _tk_type according to platform."
for platform, types in ('darwin', alltypes), ('other', nontypes):
with self.subTest(platform=platform):
macosx.platform = platform
macosx._tk_type = None
macosx._init_tk_type()
self.assertIn(macosx._tk_type, types)
class IsTypeTkTest(unittest.TestCase):
"Test each of the four isTypeTk predecates."
isfuncs = ((macosx.isAquaTk, ('carbon', 'cocoa')),
(macosx.isCarbonTk, ('carbon')),
(macosx.isCocoaTk, ('cocoa')),
(macosx.isXQuartz, ('xquartz')),
)
@mock.patch('idlelib.macosx._init_tk_type')
def test_is_calls_init(self, mockinit):
"Test that each isTypeTk calls _init_tk_type when _tk_type is None."
macosx._tk_type = None
for func, whentrue in self.isfuncs:
with self.subTest(func=func):
func()
self.assertTrue(mockinit.called)
mockinit.reset_mock()
def test_isfuncs(self):
"Test that each isTypeTk return correct bool."
for func, whentrue in self.isfuncs:
for tktype in alltypes:
with self.subTest(func=func, whentrue=whentrue, tktype=tktype):
macosx._tk_type = tktype
(self.assertTrue if tktype in whentrue else self.assertFalse)\
(func())
class SetupTest(unittest.TestCase):
"Test setupApp."
@classmethod
def setUpClass(cls):
requires('gui')
cls.root = tk.Tk()
cls.root.withdraw()
def cmd(tkpath, func):
assert isinstance(tkpath, str)
assert isinstance(func, type(cmd))
cls.root.createcommand = cmd
@classmethod
def tearDownClass(cls):
cls.root.update_idletasks()
cls.root.destroy()
del cls.root
@mock.patch('idlelib.macosx.overrideRootMenu') #27312
def test_setupapp(self, overrideRootMenu):
"Call setupApp with each possible graphics type."
root = self.root
flist = FileList(root)
for tktype in alltypes:
with self.subTest(tktype=tktype):
macosx._tk_type = tktype
macosx.setupApp(root, flist)
if tktype in ('carbon', 'cocoa'):
self.assertTrue(overrideRootMenu.called)
overrideRootMenu.reset_mock()
if __name__ == '__main__':
unittest.main(verbosity=2)
|
lstm_chem/data_loader.py | Janson-L/fch-drug-discovery | 400 | 12675424 | <gh_stars>100-1000
import json
import os
import numpy as np
from tqdm import tqdm
from tensorflow.keras.utils import Sequence
from lstm_chem.utils.smiles_tokenizer import SmilesTokenizer
class DataLoader(Sequence):
def __init__(self, config, data_type='train'):
self.config = config
self.data_type = data_type
assert self.data_type in ['train', 'valid', 'finetune']
self.max_len = 0
if self.data_type == 'train':
self.smiles = self._load(self.config.data_filename)
elif self.data_type == 'finetune':
self.smiles = self._load(self.config.finetune_data_filename)
else:
pass
self.st = SmilesTokenizer()
self.one_hot_dict = self.st.one_hot_dict
self.tokenized_smiles = self._tokenize(self.smiles)
if self.data_type in ['train', 'valid']:
self.idx = np.arange(len(self.tokenized_smiles))
self.valid_size = int(
np.ceil(
len(self.tokenized_smiles) * self.config.validation_split))
np.random.seed(self.config.seed)
np.random.shuffle(self.idx)
def _set_data(self):
if self.data_type == 'train':
ret = [
self.tokenized_smiles[self.idx[i]]
for i in self.idx[self.valid_size:]
]
elif self.data_type == 'valid':
ret = [
self.tokenized_smiles[self.idx[i]]
for i in self.idx[:self.valid_size]
]
else:
ret = self.tokenized_smiles
return ret
def _load(self, data_filename):
length = self.config.data_length
print('loading SMILES...')
with open(data_filename) as f:
smiles = [s.rstrip() for s in f]
if length != 0:
smiles = smiles[:length]
print('done.')
return smiles
def _tokenize(self, smiles):
assert isinstance(smiles, list)
print('tokenizing SMILES...')
tokenized_smiles = [self.st.tokenize(smi) for smi in tqdm(smiles)]
if self.data_type == 'train':
for tokenized_smi in tokenized_smiles:
length = len(tokenized_smi)
if self.max_len < length:
self.max_len = length
self.config.train_smi_max_len = self.max_len
print('done.')
return tokenized_smiles
def __len__(self):
target_tokenized_smiles = self._set_data()
if self.data_type in ['train', 'valid']:
ret = int(
np.ceil(
len(target_tokenized_smiles) /
float(self.config.batch_size)))
else:
ret = int(
np.ceil(
len(target_tokenized_smiles) /
float(self.config.finetune_batch_size)))
return ret
def __getitem__(self, idx):
target_tokenized_smiles = self._set_data()
if self.data_type in ['train', 'valid']:
data = target_tokenized_smiles[idx *
self.config.batch_size:(idx + 1) *
self.config.batch_size]
else:
data = target_tokenized_smiles[idx *
self.config.finetune_batch_size:
(idx + 1) *
self.config.finetune_batch_size]
data = self._padding(data)
self.X, self.y = [], []
for tp_smi in data:
X = [self.one_hot_dict[symbol] for symbol in tp_smi[:-1]]
self.X.append(X)
y = [self.one_hot_dict[symbol] for symbol in tp_smi[1:]]
self.y.append(y)
self.X = np.array(self.X, dtype=np.float32)
self.y = np.array(self.y, dtype=np.float32)
return self.X, self.y
def _pad(self, tokenized_smi):
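        # 'G' and 'E' are assumed to be the tokenizer's start/end-of-sequence
        # symbols and 'A' its padding symbol (based on the SmilesTokenizer
        # one-hot vocabulary); every sequence is padded out to max_len.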
return ['G'] + tokenized_smi + ['E'] + [
'A' for _ in range(self.max_len - len(tokenized_smi))
]
def _padding(self, data):
padded_smiles = [self._pad(t_smi) for t_smi in data]
return padded_smiles
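# Illustrative usage sketch (hypothetical, assuming `config` exposes the attributes
# referenced above such as data_filename, batch_size, validation_split, seed, data_length):
#   train_loader = DataLoader(config, data_type='train')
#   X, y = train_loader[0]  # one-hot encoded inputs and next-token targets for batch 0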
|
tensortrade/agents/dqn_agent.py | highburygambit/tensortrade | 3,081 | 12675426 | <reponame>highburygambit/tensortrade
# Copyright 2020 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import tensorflow as tf
from collections import namedtuple
from tensortrade.agents import Agent, ReplayMemory
from datetime import datetime
DQNTransition = namedtuple('DQNTransition', ['state', 'action', 'reward', 'next_state', 'done'])
class DQNAgent(Agent):
"""
References:
===========
- https://towardsdatascience.com/deep-reinforcement-learning-build-a-deep-q-network-dqn-to-play-cartpole-with-tensorflow-2-and-gym-8e105744b998
- https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html#dqn-algorithm
"""
def __init__(self,
env: 'TradingEnv',
policy_network: tf.keras.Model = None):
self.env = env
self.n_actions = env.action_space.n
self.observation_shape = env.observation_space.shape
self.policy_network = policy_network or self._build_policy_network()
self.target_network = tf.keras.models.clone_model(self.policy_network)
self.target_network.trainable = False
self.env.agent_id = self.id
def _build_policy_network(self):
network = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=self.observation_shape),
tf.keras.layers.Conv1D(filters=64, kernel_size=6, padding="same", activation="tanh"),
tf.keras.layers.MaxPooling1D(pool_size=2),
tf.keras.layers.Conv1D(filters=32, kernel_size=3, padding="same", activation="tanh"),
tf.keras.layers.MaxPooling1D(pool_size=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(self.n_actions, activation="sigmoid"),
tf.keras.layers.Dense(self.n_actions, activation="softmax")
])
return network
def restore(self, path: str, **kwargs):
self.policy_network = tf.keras.models.load_model(path)
self.target_network = tf.keras.models.clone_model(self.policy_network)
self.target_network.trainable = False
def save(self, path: str, **kwargs):
        episode: int = kwargs.get('episode', None)
        if episode:
            # Include the episode number so per-episode checkpoints do not overwrite one another.
            filename = "policy_network__" + self.id[:7] + "__" + str(episode) + "__" + datetime.now().strftime("%Y%m%d_%H%M%S") + ".hdf5"
        else:
            filename = "policy_network__" + self.id[:7] + "__" + datetime.now().strftime("%Y%m%d_%H%M%S") + ".hdf5"
self.policy_network.save(path + filename)
def get_action(self, state: np.ndarray, **kwargs) -> int:
threshold: float = kwargs.get('threshold', 0)
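        # Epsilon-greedy selection: with probability `threshold` pick a random
        # action, otherwise act greedily w.r.t. the policy network.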
rand = random.random()
if rand < threshold:
return np.random.choice(self.n_actions)
else:
return np.argmax(self.policy_network(np.expand_dims(state, 0)))
def _apply_gradient_descent(self, memory: ReplayMemory, batch_size: int, learning_rate: float, discount_factor: float):
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
loss = tf.keras.losses.Huber()
transitions = memory.sample(batch_size)
batch = DQNTransition(*zip(*transitions))
state_batch = tf.convert_to_tensor(batch.state)
action_batch = tf.convert_to_tensor(batch.action)
reward_batch = tf.convert_to_tensor(batch.reward, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(batch.next_state)
done_batch = tf.convert_to_tensor(batch.done)
with tf.GradientTape() as tape:
state_action_values = tf.math.reduce_sum(
self.policy_network(state_batch) * tf.one_hot(action_batch, self.n_actions),
axis=1
)
next_state_values = tf.where(
done_batch,
tf.zeros(batch_size),
tf.math.reduce_max(self.target_network(next_state_batch), axis=1)
)
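            # Bellman target: r + discount_factor * max_a' Q_target(s', a'),
            # with terminal transitions contributing no future value.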
expected_state_action_values = reward_batch + (discount_factor * next_state_values)
loss_value = loss(expected_state_action_values, state_action_values)
variables = self.policy_network.trainable_variables
gradients = tape.gradient(loss_value, variables)
optimizer.apply_gradients(zip(gradients, variables))
def train(self,
n_steps: int = None,
n_episodes: int = None,
save_every: int = None,
save_path: str = None,
callback: callable = None,
**kwargs) -> float:
batch_size: int = kwargs.get('batch_size', 128)
discount_factor: float = kwargs.get('discount_factor', 0.9999)
learning_rate: float = kwargs.get('learning_rate', 0.0001)
eps_start: float = kwargs.get('eps_start', 0.9)
eps_end: float = kwargs.get('eps_end', 0.05)
eps_decay_steps: int = kwargs.get('eps_decay_steps', 200)
update_target_every: int = kwargs.get('update_target_every', 1000)
memory_capacity: int = kwargs.get('memory_capacity', 1000)
        render_interval: int = kwargs.get('render_interval', 50)  # in steps, None for episode-end rendering only
memory = ReplayMemory(memory_capacity, transition_type=DQNTransition)
episode = 0
total_steps_done = 0
total_reward = 0
stop_training = False
if n_steps and not n_episodes:
n_episodes = np.iinfo(np.int32).max
print('==== AGENT ID: {} ===='.format(self.id))
while episode < n_episodes and not stop_training:
state = self.env.reset()
done = False
steps_done = 0
while not done:
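                # Exploration rate decays exponentially from eps_start toward
                # eps_end as total_steps_done grows.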
threshold = eps_end + (eps_start - eps_end) * np.exp(-total_steps_done / eps_decay_steps)
action = self.get_action(state, threshold=threshold)
next_state, reward, done, _ = self.env.step(action)
memory.push(state, action, reward, next_state, done)
state = next_state
total_reward += reward
steps_done += 1
total_steps_done += 1
if len(memory) < batch_size:
continue
self._apply_gradient_descent(memory, batch_size, learning_rate, discount_factor)
if n_steps and steps_done >= n_steps:
done = True
if render_interval is not None and steps_done % render_interval == 0:
self.env.render(
episode=episode,
max_episodes=n_episodes,
max_steps=n_steps
)
if steps_done % update_target_every == 0:
self.target_network = tf.keras.models.clone_model(self.policy_network)
self.target_network.trainable = False
is_checkpoint = save_every and episode % save_every == 0
if save_path and (is_checkpoint or episode == n_episodes - 1):
self.save(save_path, episode=episode)
if not render_interval or steps_done < n_steps:
self.env.render(
episode=episode,
max_episodes=n_episodes,
max_steps=n_steps
                )  # render the final state at episode end if it was not rendered earlier
self.env.save()
episode += 1
mean_reward = total_reward / steps_done
return mean_reward
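# Illustrative usage sketch (hypothetical, assuming `env` is an already configured TradingEnv):
#   agent = DQNAgent(env)
#   mean_reward = agent.train(n_episodes=5, save_path="agents/", render_interval=None)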
|
pyannote/audio/train/generator.py | avramandrei/pyannote-audio | 1,543 | 12675430 | <reponame>avramandrei/pyannote-audio
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2019-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
"""
TODO
"""
from abc import ABCMeta, abstractmethod
from typing import Iterator
from pyannote.audio.features.base import FeatureExtraction
from pyannote.database import Protocol
from pyannote.database import Subset
import warnings
import numpy as np
import pescador
class BatchGenerator(metaclass=ABCMeta):
"""Batch generator base class
Parameters
----------
feature_extraction : `FeatureExtraction`
protocol : `Protocol`
pyannote.database protocol used by the generator.
subset : {'train', 'development', 'test'}, optional
Subset used by the generator. Defaults to 'train'.
"""
@abstractmethod
def __init__(
self,
feature_extraction: FeatureExtraction,
protocol: Protocol,
subset: Subset = "train",
**kwargs,
):
pass
@property
@abstractmethod
def specifications(self) -> dict:
"""Generator specifications
Returns
-------
specifications : `dict`
Dictionary describing generator specifications.
"""
pass
@property
@abstractmethod
def batches_per_epoch(self) -> int:
"""Number of batches per epoch
Returns
-------
n_batches : `int`
Number of batches to make an epoch.
"""
pass
@abstractmethod
def samples(self) -> Iterator:
pass
def __call__(self) -> Iterator:
batches = pescador.maps.buffer_stream(
self.samples(), self.batch_size, partial=False, axis=None
)
while True:
next_batch = next(batches)
# HACK in some rare cases, .samples() yields samples
# HACK with different length leading to batch being of
# HACK type "object". for now, we simply discard those
# HACK buggy batches.
# TODO fix the problem upstream in .samples()
if any(batch.dtype == np.object_ for batch in next_batch.values()):
msg = f"Skipping malformed batch."
warnings.warn(msg)
continue
yield next_batch
|
tests/utils/test_exception.py | freefrag/mlflow | 1,825 | 12675471 | <filename>tests/utils/test_exception.py<gh_stars>1000+
from mlflow.exceptions import ExecutionException
def test_execution_exception_string_repr():
exc = ExecutionException("Uh oh")
assert str(exc) == "Uh oh"
|
tests/unit/test_model.py | l00ptr/temboard | 294 | 12675477 | <reponame>l00ptr/temboard<gh_stars>100-1000
import pytest
def test_check_connectivity_ok(mocker):
from temboardui.model import check_connectivity
engine = mocker.Mock(name='engine')
check_connectivity(engine)
assert engine.connect().close.called is True
def test_check_connectivity_sleep(mocker):
sleep = mocker.patch('temboardui.model.sleep')
from temboardui.model import check_connectivity
engine = mocker.Mock(name='engine')
engine.connect.side_effect = [Exception(), mocker.Mock(name='connection')]
check_connectivity(engine)
assert sleep.called is True
def test_check_connectivity_fail(mocker):
sleep = mocker.patch('temboardui.model.sleep')
from temboardui.model import check_connectivity
engine = mocker.Mock(name='engine')
engine.connect.side_effect = Exception()
with pytest.raises(Exception):
check_connectivity(engine)
assert sleep.called is True
def test_configure(mocker):
mod = 'temboardui.model'
Session = mocker.patch(mod + '.Session')
check = mocker.patch(mod + '.check_connectivity')
from temboardui.model import configure
configure(dsn='sqlite://') # LOL
assert Session.configure.called is True
assert check.called is True
check.side_effect = Exception()
with pytest.raises(SystemExit):
config = dict(host='h', port=5432, user='u', password='X', dbname='db')
configure(dsn=config)
|
pygtkweb/demos/027-fixed.py | takipsizad/pyjs | 739 | 12675504 | <gh_stars>100-1000
#!/usr/bin/env python
# example fixed.py
import pygtk
pygtk.require('2.0')
import gtk
class FixedExample:
# This callback method moves the button to a new position
# in the Fixed container.
def move_button(self, widget):
self.x = (self.x+30)%300
self.y = (self.y+50)%300
self.fixed.move(widget, self.x, self.y)
def __init__(self):
self.x = 50
self.y = 50
# Create a new window
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_title("Fixed Container")
# Here we connect the "destroy" event to a signal handler
window.connect("destroy", lambda w: gtk.main_quit())
# Sets the border width of the window.
window.set_border_width(10)
# Create a Fixed Container
self.fixed = gtk.Fixed()
window.add(self.fixed)
self.fixed.show()
for i in range(1, 4):
# Creates a new button with the label "Press me"
button = gtk.Button("Press me")
# When the button receives the "clicked" signal, it will call the
# method move_button().
button.connect("clicked", self.move_button)
# This packs the button into the fixed containers window.
self.fixed.put(button, i*50, i*50)
# The final step is to display this newly created widget.
button.show()
# Display the window
window.show()
def main():
# Enter the event loop
gtk.main()
return 0
if __name__ == "__main__":
FixedExample()
main()
|
examples/misc/djangotasks/todo/models.py | takipsizad/pyjs | 739 | 12675511 | <reponame>takipsizad/pyjs
from django.db import models
class Todo(models.Model):
task = models.CharField(max_length=30)
def __unicode__(self):
return unicode(self.task)
# Create your models here.
|
tests/runtime/test_nutterfixure_fullroundtriptests.py | cganta/dbtest | 130 | 12675525 | <reponame>cganta/dbtest<filename>tests/runtime/test_nutterfixure_fullroundtriptests.py
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
"""
import sys
import pytest
from runtime.nutterfixture import NutterFixture, tag
from common.testresult import TestResult
from tests.runtime.testnutterfixturebuilder import TestNutterFixtureBuilder
def test__execute_tests__two_valid_cases__returns_test_results_with_2_passed_test_results():
# Arrange
test_name_1 = "fred"
test_name_2 = "hank"
test_fixture = TestNutterFixtureBuilder() \
.with_name("MyClass") \
.with_before(test_name_1) \
.with_before(test_name_2) \
.with_run(test_name_1) \
.with_run(test_name_2) \
.with_assertion(test_name_1) \
.with_assertion(test_name_2) \
.with_after(test_name_1) \
.with_after(test_name_2) \
.build()
expected_result1 = TestResult(test_name_1, True, 1, [])
expected_result2 = TestResult(test_name_2, True, 1, [])
# Act
result = test_fixture().execute_tests().test_results
# Assert
assert len(result.results) == 2
assert __item_in_list_equalto(result.results, expected_result1)
assert __item_in_list_equalto(result.results, expected_result2)
def test__execute_tests__one_valid_one_invalid__returns_correct_test_results():
# Arrange
test_name_1 = "shouldpass"
test_name_2 = "shouldfail"
fail_func = AssertionHelper().assertion_fails
test_fixture = TestNutterFixtureBuilder() \
.with_name("MyClass") \
.with_before(test_name_1) \
.with_before(test_name_2) \
.with_run(test_name_1) \
.with_run(test_name_2) \
.with_assertion(test_name_1) \
.with_assertion(test_name_2, fail_func) \
.with_after(test_name_1) \
.with_after(test_name_2) \
.build()
expected_result1 = TestResult(test_name_1, True, 1, [])
expected_result2 = TestResult(test_name_2, False, 1, [], AssertionError("assert 1 == 2"))
# Act
result = test_fixture().execute_tests().test_results
# Assert
assert len(result.results) == 2
assert __item_in_list_equalto(result.results, expected_result1)
assert __item_in_list_equalto(result.results, expected_result2)
def test__execute_tests__one_run_throws__returns_one_failed_testresult():
# Arrange
test_name_1 = "shouldthrow"
fail_func = AssertionHelper().function_throws
test_fixture = TestNutterFixtureBuilder() \
.with_name("MyClass") \
.with_before(test_name_1) \
.with_run(test_name_1, fail_func) \
.with_assertion(test_name_1) \
.with_after(test_name_1) \
.build()
expected_result1 = TestResult(test_name_1, False, 1, [], ValueError())
# Act
result = test_fixture().execute_tests().test_results
# Assert
assert len(result.results) == 1
assert __item_in_list_equalto(result.results, expected_result1)
def test__execute_tests__one_has_tags_one_does_not__returns_tags_in_testresult():
# Arrange
class Wrapper(NutterFixture):
tag_list = ["taga", "tagb"]
@tag(tag_list)
def run_test_name(self):
lambda: 1 == 1
test_name_1 = "test_name"
test_name_2 = "test_name2"
test_fixture = TestNutterFixtureBuilder() \
.with_name(test_name_1) \
.with_run(test_name_1, Wrapper.run_test_name) \
.with_assertion(test_name_1) \
.with_after(test_name_1) \
.with_name(test_name_2) \
.with_run(test_name_2) \
.with_assertion(test_name_2) \
.with_after(test_name_2) \
.build()
# Act
result = test_fixture().execute_tests().test_results
# Assert
assert len(result.results) == 2
for res in result.results:
if res.test_name == test_name_1:
assert ("taga" in res.tags) == True
assert ("tagb" in res.tags) == True
if res.test_name == test_name_2:
assert len(res.tags) == 0
def test__execute_tests__one_test_case_with_all_methods__all_methods_called(mocker):
# Arrange
test_name_1 = "test"
test_fixture = TestNutterFixtureBuilder() \
.with_name("MyClass") \
.with_before_all() \
.with_before(test_name_1) \
.with_run(test_name_1) \
.with_assertion(test_name_1) \
.with_after(test_name_1) \
.with_after_all() \
.build()
mocker.patch.object(test_fixture, 'before_all')
mocker.patch.object(test_fixture, 'before_test')
mocker.patch.object(test_fixture, 'run_test')
mocker.patch.object(test_fixture, 'assertion_test')
mocker.patch.object(test_fixture, 'after_test')
mocker.patch.object(test_fixture, 'after_all')
# Act
result = test_fixture().execute_tests()
# Assert
test_fixture.before_all.assert_called_once_with()
test_fixture.before_test.assert_called_once_with()
test_fixture.run_test.assert_called_once_with()
test_fixture.assertion_test.assert_called_once_with()
test_fixture.after_test.assert_called_once_with()
test_fixture.after_all.assert_called_once_with()
def test__execute_tests__one_beforeall_2_assertions__all_methods_called(mocker):
# Arrange
test_name_1 = "test"
test_name_2 = "test2"
test_fixture = TestNutterFixtureBuilder() \
.with_name("MyClass") \
.with_before_all() \
.with_assertion(test_name_1) \
.with_assertion(test_name_2) \
.build()
mocker.patch.object(test_fixture, 'before_all')
mocker.patch.object(test_fixture, 'assertion_test')
mocker.patch.object(test_fixture, 'assertion_test2')
# Act
result = test_fixture().execute_tests()
# Assert
test_fixture.before_all.assert_called_once_with()
test_fixture.assertion_test.assert_called_once_with()
test_fixture.assertion_test.assert_called_once_with()
def __item_in_list_equalto(list, expected_item):
for item in list:
if (item == expected_item):
return True
return False
class AssertionHelper():
def assertion_fails(self):
assert 1 == 2
def function_throws(self):
raise ValueError()
|
llvm/tools/opt-viewer/optpmap.py | medismailben/llvm-project | 4,812 | 12675532 | import sys
import multiprocessing
_current = None
_total = None
def _init(current, total):
global _current
global _total
_current = current
_total = total
def _wrapped_func(func_and_args):
func, argument, should_print_progress, filter_ = func_and_args
if should_print_progress:
with _current.get_lock():
_current.value += 1
sys.stdout.write('\r\t{} of {}'.format(_current.value, _total.value))
sys.stdout.flush()
return func(argument, filter_)
def pmap(func, iterable, processes, should_print_progress, filter_=None, *args, **kwargs):
"""
A parallel map function that reports on its progress.
Applies `func` to every item of `iterable` and return a list of the
results. If `processes` is greater than one, a process pool is used to run
the functions in parallel. `should_print_progress` is a boolean value that
indicates whether a string 'N of M' should be printed to indicate how many
of the functions have finished being run.
"""
global _current
global _total
_current = multiprocessing.Value('i', 0)
_total = multiprocessing.Value('i', len(iterable))
func_and_args = [(func, arg, should_print_progress, filter_) for arg in iterable]
if processes == 1:
result = list(map(_wrapped_func, func_and_args, *args, **kwargs))
else:
pool = multiprocessing.Pool(initializer=_init,
initargs=(_current, _total,),
processes=processes)
result = pool.map(_wrapped_func, func_and_args, *args, **kwargs)
pool.close()
pool.join()
if should_print_progress:
sys.stdout.write('\r')
return result
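# Illustrative usage sketch (hypothetical, assuming a picklable module-level function
# `work(item, filter_)`):
#   results = pmap(work, items, processes=4, should_print_progress=True)
# This maps `work` over `items` in a 4-process pool while printing a running 'N of M' counter.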
|
third_party/google-endpoints/oauth2client/devshell.py | tingshao/catapult | 2,151 | 12675544 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utitilies for Google Developer Shell environment."""
import json
import os
import socket
from oauth2client._helpers import _to_bytes
from oauth2client import client
DEVSHELL_ENV = 'DEVSHELL_CLIENT_PORT'
class Error(Exception):
"""Errors for this module."""
pass
class CommunicationError(Error):
"""Errors for communication with the Developer Shell server."""
class NoDevshellServer(Error):
"""Error when no Developer Shell server can be contacted."""
# The request for credential information to the Developer Shell client socket
# is always an empty PBLite-formatted JSON object, so just define it as a
# constant.
CREDENTIAL_INFO_REQUEST_JSON = '[]'
class CredentialInfoResponse(object):
"""Credential information response from Developer Shell server.
The credential information response from Developer Shell socket is a
PBLite-formatted JSON array with fields encoded by their index in the
array:
* Index 0 - user email
* Index 1 - default project ID. None if the project context is not known.
* Index 2 - OAuth2 access token. None if there is no valid auth context.
"""
def __init__(self, json_string):
"""Initialize the response data from JSON PBLite array."""
pbl = json.loads(json_string)
if not isinstance(pbl, list):
raise ValueError('Not a list: ' + str(pbl))
pbl_len = len(pbl)
self.user_email = pbl[0] if pbl_len > 0 else None
self.project_id = pbl[1] if pbl_len > 1 else None
self.access_token = pbl[2] if pbl_len > 2 else None
def _SendRecv():
"""Communicate with the Developer Shell server socket."""
port = int(os.getenv(DEVSHELL_ENV, 0))
if port == 0:
raise NoDevshellServer()
sock = socket.socket()
sock.connect(('localhost', port))
data = CREDENTIAL_INFO_REQUEST_JSON
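    # Wire format: '<payload length>\n<payload>', sent as UTF-8 bytes.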
msg = '%s\n%s' % (len(data), data)
sock.sendall(_to_bytes(msg, encoding='utf-8'))
header = sock.recv(6).decode()
if '\n' not in header:
raise CommunicationError('saw no newline in the first 6 bytes')
len_str, json_str = header.split('\n', 1)
to_read = int(len_str) - len(json_str)
if to_read > 0:
json_str += sock.recv(to_read, socket.MSG_WAITALL).decode()
return CredentialInfoResponse(json_str)
class DevshellCredentials(client.GoogleCredentials):
"""Credentials object for Google Developer Shell environment.
This object will allow a Google Developer Shell session to identify its
user to Google and other OAuth 2.0 servers that can verify assertions. It
can be used for the purpose of accessing data stored under the user
account.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens.
"""
def __init__(self, user_agent=None):
super(DevshellCredentials, self).__init__(
None, # access_token, initialized below
None, # client_id
None, # client_secret
None, # refresh_token
None, # token_expiry
None, # token_uri
user_agent)
self._refresh(None)
def _refresh(self, http_request):
self.devshell_response = _SendRecv()
self.access_token = self.devshell_response.access_token
@property
def user_email(self):
return self.devshell_response.user_email
@property
def project_id(self):
return self.devshell_response.project_id
@classmethod
def from_json(cls, json_data):
raise NotImplementedError(
'Cannot load Developer Shell credentials from JSON.')
@property
def serialization_data(self):
raise NotImplementedError(
'Cannot serialize Developer Shell credentials.')
|
xautodl/procedures/optimizers.py | D-X-Y/NAS-Projects | 378 | 12675559 | #####################################################
# Copyright (c) <NAME> [GitHub D-X-Y], 2019.01 #
#####################################################
import math, torch
import torch.nn as nn
from bisect import bisect_right
from torch.optim import Optimizer
class _LRScheduler(object):
def __init__(self, optimizer, warmup_epochs, epochs):
if not isinstance(optimizer, Optimizer):
raise TypeError("{:} is not an Optimizer".format(type(optimizer).__name__))
self.optimizer = optimizer
for group in optimizer.param_groups:
group.setdefault("initial_lr", group["lr"])
self.base_lrs = list(
map(lambda group: group["initial_lr"], optimizer.param_groups)
)
self.max_epochs = epochs
self.warmup_epochs = warmup_epochs
self.current_epoch = 0
self.current_iter = 0
def extra_repr(self):
return ""
def __repr__(self):
return "{name}(warmup={warmup_epochs}, max-epoch={max_epochs}, current::epoch={current_epoch}, iter={current_iter:.2f}".format(
name=self.__class__.__name__, **self.__dict__
) + ", {:})".format(
self.extra_repr()
)
def state_dict(self):
return {
key: value for key, value in self.__dict__.items() if key != "optimizer"
}
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
def get_lr(self):
raise NotImplementedError
def get_min_info(self):
lrs = self.get_lr()
return "#LR=[{:.6f}~{:.6f}] epoch={:03d}, iter={:4.2f}#".format(
min(lrs), max(lrs), self.current_epoch, self.current_iter
)
def get_min_lr(self):
return min(self.get_lr())
def update(self, cur_epoch, cur_iter):
if cur_epoch is not None:
assert (
isinstance(cur_epoch, int) and cur_epoch >= 0
), "invalid cur-epoch : {:}".format(cur_epoch)
self.current_epoch = cur_epoch
if cur_iter is not None:
assert (
isinstance(cur_iter, float) and cur_iter >= 0
), "invalid cur-iter : {:}".format(cur_iter)
self.current_iter = cur_iter
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group["lr"] = lr
class CosineAnnealingLR(_LRScheduler):
def __init__(self, optimizer, warmup_epochs, epochs, T_max, eta_min):
self.T_max = T_max
self.eta_min = eta_min
super(CosineAnnealingLR, self).__init__(optimizer, warmup_epochs, epochs)
def extra_repr(self):
return "type={:}, T-max={:}, eta-min={:}".format(
"cosine", self.T_max, self.eta_min
)
def get_lr(self):
lrs = []
for base_lr in self.base_lrs:
if (
self.current_epoch >= self.warmup_epochs
and self.current_epoch < self.max_epochs
):
last_epoch = self.current_epoch - self.warmup_epochs
# if last_epoch < self.T_max:
# if last_epoch < self.max_epochs:
lr = (
self.eta_min
+ (base_lr - self.eta_min)
* (1 + math.cos(math.pi * last_epoch / self.T_max))
/ 2
)
# else:
# lr = self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * (self.T_max-1.0) / self.T_max)) / 2
elif self.current_epoch >= self.max_epochs:
lr = self.eta_min
else:
lr = (
self.current_epoch / self.warmup_epochs
+ self.current_iter / self.warmup_epochs
) * base_lr
lrs.append(lr)
return lrs
class MultiStepLR(_LRScheduler):
def __init__(self, optimizer, warmup_epochs, epochs, milestones, gammas):
assert len(milestones) == len(gammas), "invalid {:} vs {:}".format(
len(milestones), len(gammas)
)
self.milestones = milestones
self.gammas = gammas
super(MultiStepLR, self).__init__(optimizer, warmup_epochs, epochs)
def extra_repr(self):
return "type={:}, milestones={:}, gammas={:}, base-lrs={:}".format(
"multistep", self.milestones, self.gammas, self.base_lrs
)
def get_lr(self):
lrs = []
for base_lr in self.base_lrs:
if self.current_epoch >= self.warmup_epochs:
last_epoch = self.current_epoch - self.warmup_epochs
idx = bisect_right(self.milestones, last_epoch)
lr = base_lr
for x in self.gammas[:idx]:
lr *= x
else:
lr = (
self.current_epoch / self.warmup_epochs
+ self.current_iter / self.warmup_epochs
) * base_lr
lrs.append(lr)
return lrs
class ExponentialLR(_LRScheduler):
def __init__(self, optimizer, warmup_epochs, epochs, gamma):
self.gamma = gamma
super(ExponentialLR, self).__init__(optimizer, warmup_epochs, epochs)
def extra_repr(self):
return "type={:}, gamma={:}, base-lrs={:}".format(
"exponential", self.gamma, self.base_lrs
)
def get_lr(self):
lrs = []
for base_lr in self.base_lrs:
if self.current_epoch >= self.warmup_epochs:
last_epoch = self.current_epoch - self.warmup_epochs
assert last_epoch >= 0, "invalid last_epoch : {:}".format(last_epoch)
lr = base_lr * (self.gamma**last_epoch)
else:
lr = (
self.current_epoch / self.warmup_epochs
+ self.current_iter / self.warmup_epochs
) * base_lr
lrs.append(lr)
return lrs
class LinearLR(_LRScheduler):
def __init__(self, optimizer, warmup_epochs, epochs, max_LR, min_LR):
self.max_LR = max_LR
self.min_LR = min_LR
super(LinearLR, self).__init__(optimizer, warmup_epochs, epochs)
def extra_repr(self):
return "type={:}, max_LR={:}, min_LR={:}, base-lrs={:}".format(
"LinearLR", self.max_LR, self.min_LR, self.base_lrs
)
def get_lr(self):
lrs = []
for base_lr in self.base_lrs:
if self.current_epoch >= self.warmup_epochs:
last_epoch = self.current_epoch - self.warmup_epochs
assert last_epoch >= 0, "invalid last_epoch : {:}".format(last_epoch)
ratio = (
(self.max_LR - self.min_LR)
* last_epoch
/ self.max_epochs
/ self.max_LR
)
lr = base_lr * (1 - ratio)
else:
lr = (
self.current_epoch / self.warmup_epochs
+ self.current_iter / self.warmup_epochs
) * base_lr
lrs.append(lr)
return lrs
class CrossEntropyLabelSmooth(nn.Module):
def __init__(self, num_classes, epsilon):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (-targets * log_probs).mean(0).sum()
return loss
def get_optim_scheduler(parameters, config):
assert (
hasattr(config, "optim")
and hasattr(config, "scheduler")
and hasattr(config, "criterion")
), "config must have optim / scheduler / criterion keys instead of {:}".format(
config
)
if config.optim == "SGD":
optim = torch.optim.SGD(
parameters,
config.LR,
momentum=config.momentum,
weight_decay=config.decay,
nesterov=config.nesterov,
)
elif config.optim == "RMSprop":
optim = torch.optim.RMSprop(
parameters, config.LR, momentum=config.momentum, weight_decay=config.decay
)
else:
raise ValueError("invalid optim : {:}".format(config.optim))
if config.scheduler == "cos":
T_max = getattr(config, "T_max", config.epochs)
scheduler = CosineAnnealingLR(
optim, config.warmup, config.epochs, T_max, config.eta_min
)
elif config.scheduler == "multistep":
scheduler = MultiStepLR(
optim, config.warmup, config.epochs, config.milestones, config.gammas
)
elif config.scheduler == "exponential":
scheduler = ExponentialLR(optim, config.warmup, config.epochs, config.gamma)
elif config.scheduler == "linear":
scheduler = LinearLR(
optim, config.warmup, config.epochs, config.LR, config.LR_min
)
else:
raise ValueError("invalid scheduler : {:}".format(config.scheduler))
if config.criterion == "Softmax":
criterion = torch.nn.CrossEntropyLoss()
elif config.criterion == "SmoothSoftmax":
criterion = CrossEntropyLabelSmooth(config.class_num, config.label_smooth)
else:
raise ValueError("invalid criterion : {:}".format(config.criterion))
return optim, scheduler, criterion
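# A minimal usage sketch for the schedulers above, assuming a toy linear model;
# the epoch/iteration counts are arbitrary and only illustrate the update() API.
if __name__ == "__main__":
    _model = nn.Linear(8, 2)
    _optim = torch.optim.SGD(_model.parameters(), lr=0.1, momentum=0.9)
    _sched = CosineAnnealingLR(_optim, warmup_epochs=5, epochs=100, T_max=100, eta_min=0.0)
    for _epoch in range(100):
        for _step in range(10):
            _sched.update(_epoch, _step / 10.0)  # cur_iter is the fractional progress within the epoch
            # forward / backward / _optim.step() would go here
    print(_sched, "min lr:", _sched.get_min_lr())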
|
Computer Vision/motion_detection.py | sattwik21/Hacktoberfest2021-1 | 215 | 12675566 # Store the current frame as soon as the video starts and keep that NumPy array fixed:
# it is never updated inside the while loop because it serves as the static background.
# When motion occurs the moving region is shown; the static background itself is not displayed.
import cv2
import time
first_frame=None
video=cv2.VideoCapture(0)
while True:
    check, frame = video.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # GAUSSIAN BLUR METHOD : removes noise and smooths the frame, which increases accuracy.
    # GAUSSIAN KERNEL: parameter controlling the blurriness
    gray = cv2.GaussianBlur(gray, (21, 21), 0)  # 0 = STANDARD DEVIATION (computed by OpenCV from the kernel size)
if first_frame is None:
first_frame = gray
continue
# ABSOLUTE DIFFERENCE:
    del_frame = cv2.absdiff(first_frame, gray)
    # THRESHOLD METHOD:
    thres_frame = cv2.threshold(del_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thres_frame = cv2.dilate(thres_frame, None, iterations=2)
    # CONTOUR METHOD : we want to store the contours in a tuple
    # RETRIEVE EXTERNAL METHOD : to draw only the external contours of the object
    # APPROXIMATION METHOD : CHAIN_APPROX_SIMPLE
    (cnts, __) = cv2.findContours(thres_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 1000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
cv2.imshow("motion", gray)
cv2.imshow("compare", del_frame)
cv2.imshow("threshold" ,thres_frame )
print(gray)
if cv2.waitKey(1)==ord('q'): # press the "q" key and quit the video
break
video.release()
cv2.destroyAllWindows() |
fuzzers/symcc_aflplusplus/fuzzer.py | che30122/fuzzbench | 800 | 12675576 | <reponame>che30122/fuzzbench
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Uses the SymCC-AFL hybrid from SymCC. '''
import os
import time
import shutil
import threading
import subprocess
from fuzzers import utils
from fuzzers.afl import fuzzer as afl_fuzzer
from fuzzers.aflplusplus import fuzzer as aflplusplus_fuzzer
def get_symcc_build_dir(target_directory):
"""Return path to uninstrumented target directory."""
return os.path.join(target_directory, 'uninstrumented')
def build():
"""Build an AFL version and SymCC version of the benchmark"""
print("Step 1: Building with AFL")
build_directory = os.environ['OUT']
# Save the environment for use in SymCC
new_env = os.environ.copy()
# First build with AFL.
src = os.getenv('SRC')
work = os.getenv('WORK')
with utils.restore_directory(src), utils.restore_directory(work):
# Restore SRC to its initial state so we can build again without any
# trouble. For some OSS-Fuzz projects, build_benchmark cannot be run
# twice in the same directory without this.
aflplusplus_fuzzer.build("tracepc")
print("Step 2: Completed AFL build")
# Copy over AFL artifacts needed by SymCC.
shutil.copy("/afl/afl-fuzz", build_directory)
shutil.copy("/afl/afl-showmap", build_directory)
# Build the SymCC-instrumented target.
print("Step 3: Building the benchmark with SymCC")
symcc_build_dir = get_symcc_build_dir(os.environ['OUT'])
os.mkdir(symcc_build_dir)
# Set flags to ensure compilation with SymCC.
new_env['CC'] = "/symcc/build/symcc"
new_env['CXX'] = "/symcc/build/sym++"
new_env['CXXFLAGS'] = new_env['CXXFLAGS'].replace("-stlib=libc++", "")
new_env['FUZZER_LIB'] = '/libfuzzer-harness.o'
new_env['OUT'] = symcc_build_dir
new_env['CXXFLAGS'] += " -fno-sanitize=all "
new_env['CFLAGS'] += " -fno-sanitize=all "
# Setting this environment variable instructs SymCC to use the
# libcxx library compiled with SymCC instrumentation.
new_env['SYMCC_LIBCXX_PATH'] = "/libcxx_native_build"
# Instructs SymCC to consider no symbolic inputs at runtime. This is needed
# if, for example, some tests are run during compilation of the benchmark.
new_env['SYMCC_NO_SYMBOLIC_INPUT'] = "1"
# Build benchmark.
utils.build_benchmark(env=new_env)
# Copy over symcc artifacts and symbolic libc++.
shutil.copy(
"/symcc/build//SymRuntime-prefix/src/SymRuntime-build/libSymRuntime.so",
symcc_build_dir)
shutil.copy("/usr/lib/libz3.so", os.path.join(symcc_build_dir, "libz3.so"))
shutil.copy("/libcxx_native_build/lib/libc++.so.1", symcc_build_dir)
shutil.copy("/libcxx_native_build/lib/libc++abi.so.1", symcc_build_dir)
shutil.copy("/rust/bin/symcc_fuzzing_helper", symcc_build_dir)
def launch_afl_thread(input_corpus, output_corpus, target_binary,
additional_flags):
""" Simple wrapper for running AFL. """
afl_thread = threading.Thread(target=afl_fuzzer.run_afl_fuzz,
args=(input_corpus, output_corpus,
target_binary, additional_flags))
afl_thread.start()
return afl_thread
def fuzz(input_corpus, output_corpus, target_binary):
"""
Launches a master and a secondary instance of AFL, as well as
the symcc helper.
"""
target_binary_dir = os.path.dirname(target_binary)
symcc_workdir = get_symcc_build_dir(target_binary_dir)
target_binary_name = os.path.basename(target_binary)
symcc_target_binary = os.path.join(symcc_workdir, target_binary_name)
os.environ['AFL_DISABLE_TRIM'] = "1"
# Start a master and secondary instance of AFL.
# We need both because of the way SymCC works.
print('[run_fuzzer] Running AFL for SymCC')
afl_fuzzer.prepare_fuzz_environment(input_corpus)
launch_afl_thread(input_corpus, output_corpus, target_binary, ["-S", "afl"])
time.sleep(5)
launch_afl_thread(input_corpus, output_corpus, target_binary,
["-S", "afl-secondary"])
time.sleep(5)
# Start an instance of SymCC.
# We need to ensure it uses the symbolic version of libc++.
print("Starting the SymCC helper")
new_environ = os.environ.copy()
new_environ['LD_LIBRARY_PATH'] = symcc_workdir
cmd = [
os.path.join(symcc_workdir,
"symcc_fuzzing_helper"), "-o", output_corpus, "-a",
"afl-secondary", "-n", "symcc", "-m", "--", symcc_target_binary, "@@"
]
subprocess.Popen(cmd, env=new_environ)
|
src/sort.py | lady0528/Face-Track-Detect-Extract | 505 | 12675602 | <filename>src/sort.py<gh_stars>100-1000
"""
As implemented in https://github.com/abewley/sort but with some modifications
"""
from __future__ import print_function
import lib.utils as utils
import numpy as np
from src.data_association import associate_detections_to_trackers
from src.kalman_tracker import KalmanBoxTracker
logger = utils.Logger("MOT")
class Sort:
def __init__(self, max_age=1, min_hits=3):
"""
Sets key parameters for SORT
"""
self.max_age = max_age
self.min_hits = min_hits
self.trackers = []
self.frame_count = 0
def update(self, dets, img_size, root_dic, addtional_attribute_list, predict_num):
"""
Params:
dets - a numpy array of detections in the format [[x,y,w,h,score],[x,y,w,h,score],...]
Requires: this method must be called once for each frame even with empty detections.
        Returns a similar array, where the last column is the object ID.
        NOTE: as in practical realtime MOT, the detector doesn't run on every single frame
"""
self.frame_count += 1
# get predicted locations from existing trackers.
trks = np.zeros((len(self.trackers), 5))
to_del = []
ret = []
for t, trk in enumerate(trks):
            pos = self.trackers[t].predict()  # Kalman predict; very fast (<1 ms)
trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
if np.any(np.isnan(pos)):
to_del.append(t)
trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
for t in reversed(to_del):
self.trackers.pop(t)
        if len(dets) > 0:
matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks)
# update matched trackers with assigned detections
for t, trk in enumerate(self.trackers):
if t not in unmatched_trks:
d = matched[np.where(matched[:, 1] == t)[0], 0]
trk.update(dets[d, :][0])
trk.face_addtional_attribute.append(addtional_attribute_list[d[0]])
# create and initialise new trackers for unmatched detections
for i in unmatched_dets:
trk = KalmanBoxTracker(dets[i, :])
trk.face_addtional_attribute.append(addtional_attribute_list[i])
logger.info("new Tracker: {0}".format(trk.id + 1))
self.trackers.append(trk)
i = len(self.trackers)
for trk in reversed(self.trackers):
            if len(dets) == 0:
trk.update([])
d = trk.get_state()
if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
                ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1))  # +1 as MOT benchmark requires positive IDs
i -= 1
# remove dead tracklet
if trk.time_since_update >= self.max_age or trk.predict_num >= predict_num or d[2] < 0 or d[3] < 0 or d[0] > img_size[1] or d[1] > img_size[0]:
if len(trk.face_addtional_attribute) >= 5:
utils.save_to_file(root_dic, trk)
logger.info('remove tracker: {0}'.format(trk.id + 1))
self.trackers.pop(i)
if len(ret) > 0:
return np.concatenate(ret)
return np.empty((0, 5))
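# A minimal usage sketch, assuming a single frame of fake detections; the image
# size, output directory and per-detection attribute list are made-up values.
if __name__ == "__main__":
    mot_tracker = Sort(max_age=10, min_hits=3)
    fake_dets = np.array([[100.0, 120.0, 180.0, 210.0, 0.98]])
    tracks = mot_tracker.update(fake_dets, img_size=(480, 640), root_dic="/tmp/faces",
                                addtional_attribute_list=[None], predict_num=10)
    print(tracks)  # one row per confirmed track: bounding box followed by the track id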
|
third_party/blink/renderer/bindings/scripts/web_idl/namespace.py | chromium/chromium | 14,668 | 12675606 | <reponame>chromium/chromium<filename>third_party/blink/renderer/bindings/scripts/web_idl/namespace.py
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
from .attribute import Attribute
from .code_generator_info import CodeGeneratorInfo
from .composition_parts import WithCodeGeneratorInfo
from .composition_parts import WithComponent
from .composition_parts import WithDebugInfo
from .composition_parts import WithExposure
from .composition_parts import WithExtendedAttributes
from .constant import Constant
from .exposure import Exposure
from .ir_map import IRMap
from .make_copy import make_copy
from .operation import Operation
from .operation import OperationGroup
from .user_defined_type import UserDefinedType
class Namespace(UserDefinedType, WithExtendedAttributes, WithCodeGeneratorInfo,
WithExposure, WithComponent, WithDebugInfo):
"""https://webidl.spec.whatwg.org/#idl-namespaces"""
class IR(IRMap.IR, WithExtendedAttributes, WithCodeGeneratorInfo,
WithExposure, WithComponent, WithDebugInfo):
def __init__(self,
identifier,
is_partial,
attributes=None,
constants=None,
operations=None,
extended_attributes=None,
component=None,
debug_info=None):
assert isinstance(is_partial, bool)
assert attributes is None or isinstance(attributes, (list, tuple))
assert constants is None or isinstance(constants, (list, tuple))
assert operations is None or isinstance(operations, (list, tuple))
attributes = attributes or []
constants = constants or []
operations = operations or []
assert all(
isinstance(attribute, Attribute.IR) and attribute.is_readonly
and attribute.is_static for attribute in attributes)
assert all(
isinstance(constant, Constant.IR) for constant in constants)
assert all(
isinstance(operation, Operation.IR) and operation.identifier
and operation.is_static for operation in operations)
kind = (IRMap.IR.Kind.PARTIAL_NAMESPACE
if is_partial else IRMap.IR.Kind.NAMESPACE)
IRMap.IR.__init__(self, identifier=identifier, kind=kind)
WithExtendedAttributes.__init__(self, extended_attributes)
WithCodeGeneratorInfo.__init__(self)
WithExposure.__init__(self)
WithComponent.__init__(self, component)
WithDebugInfo.__init__(self, debug_info)
self.is_partial = is_partial
self.is_mixin = False
self.attributes = list(attributes)
self.constants = list(constants)
self.constructors = []
self.constructor_groups = []
self.named_constructors = []
self.named_constructor_groups = []
self.operations = list(operations)
self.operation_groups = []
def iter_all_members(self):
list_of_members = [
self.attributes,
self.constants,
self.operations,
]
return itertools.chain(*list_of_members)
def iter_all_overload_groups(self):
return iter(self.operation_groups)
def __init__(self, ir):
assert isinstance(ir, Namespace.IR)
assert not ir.is_partial
ir = make_copy(ir)
UserDefinedType.__init__(self, ir.identifier)
WithExtendedAttributes.__init__(self, ir, readonly=True)
WithCodeGeneratorInfo.__init__(self, ir, readonly=True)
WithExposure.__init__(self, ir, readonly=True)
WithComponent.__init__(self, ir, readonly=True)
WithDebugInfo.__init__(self, ir)
self._attributes = tuple([
Attribute(attribute_ir, owner=self)
for attribute_ir in ir.attributes
])
self._constants = tuple([
Constant(constant_ir, owner=self) for constant_ir in ir.constants
])
self._operations = tuple([
Operation(operation_ir, owner=self)
for operation_ir in ir.operations
])
self._operation_groups = tuple([
OperationGroup(operation_group_ir,
list(
filter(
lambda x: x.identifier == operation_group_ir
.identifier, self._operations)),
owner=self)
for operation_group_ir in ir.operation_groups
])
@property
def inherited(self):
"""Returns the inherited namespace or None."""
return None
@property
def deriveds(self):
"""Returns the list of the derived namespaces."""
return ()
@property
def attributes(self):
"""Returns attributes."""
return self._attributes
@property
def constants(self):
"""Returns constants."""
return self._constants
@property
def constructors(self):
"""Returns constructors."""
return ()
@property
def constructor_groups(self):
"""Returns groups of constructors."""
return ()
@property
def named_constructors(self):
"""Returns named constructors."""
return ()
@property
def named_constructor_groups(self):
"""Returns groups of overloaded named constructors."""
return ()
@property
def operations(self):
"""Returns operations."""
return self._operations
@property
def operation_groups(self):
"""Returns a list of OperationGroups."""
return self._operation_groups
@property
def exposed_constructs(self):
"""Returns exposed constructs."""
return ()
# UserDefinedType overrides
@property
def is_namespace(self):
return True
|
tracking/argmax_tracker.py | mhd-medfa/SiamR-CNN | 195 | 12675615 | import cv2
import random
import numpy as np
from got10k.trackers import Tracker
from config import config as cfg, finalize_configs
from tensorpack import PredictConfig, get_model_loader, OfflinePredictor, logger
from train import ResNetFPNModel
from common import CustomResize, box_to_point8, point8_to_box
class PrecomputingReferenceTracker(Tracker):
def __init__(self, name, need_network=True, need_img=True, model="best"):
super().__init__(name=name, is_deterministic=True)
self._resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE)
self._prev_box = None
self._ff_gt_feats = None
self._need_network = need_network
self._need_img = need_img
self._rotated_bbox = None
if need_network:
logger.set_logger_dir("/tmp/test_log_/" + str(random.randint(0, 10000)), 'd')
if model == "best":
load = "train_log/hard_mining3/model-1360500"
elif model == "nohardexamples":
load = "train_log/condrcnn_all_2gpu_lrreduce2/model-1200500"
elif model == "newrpn":
load = "train_log/newrpn1/model"
elif model =="resnet50_nohardexamples":
load = "train_log/condrcnn_all_resnet50/model-1200500"
cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
elif model =="resnet50":
load = "train_log/hard_mining3_resnet50/model-1360500"
cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
elif model == "gotonly":
load = "train_log/hard_mining3_onlygot/model-1361000"
elif model.startswith("checkpoint:"):
load = model.replace("checkpoint:", "")
else:
assert False, ("unknown model", model)
from dataset import DetectionDataset
# init tensorpack model
# cfg.freeze(False)
DetectionDataset() # initialize the config with information from our dataset
cfg.EXTRACT_GT_FEATURES = True
cfg.MODE_TRACK = False
extract_model = ResNetFPNModel()
extract_ff_feats_cfg = PredictConfig(
model=extract_model,
session_init=get_model_loader(load),
input_names=['image', 'roi_boxes'],
output_names=['rpn/feature'])
finalize_configs(is_training=False)
self._extract_func = OfflinePredictor(extract_ff_feats_cfg)
cfg.EXTRACT_GT_FEATURES = False
cfg.MODE_TRACK = True
cfg.USE_PRECOMPUTED_REF_FEATURES = True
self._pred_func = self._make_pred_func(load)
def _resize_image_together_with_boxes(self, img, *list_of_box_or_boxes):
resized_img, params = self._resizer.augment_return_params(img)
res_boxes = []
for box_or_boxes in list_of_box_or_boxes:
expand = len(box_or_boxes.shape) == 1
if expand:
boxes = box_or_boxes[np.newaxis]
else:
boxes = box_or_boxes
points = box_to_point8(boxes)
points = self._resizer.augment_coords(points, params)
resized_boxes = point8_to_box(points)
if expand:
resized_boxes = np.squeeze(resized_boxes, axis=0)
res_boxes.append(resized_boxes)
if len(res_boxes) == 1:
res_boxes = res_boxes[0]
return resized_img, res_boxes
def _make_pred_func(self, load):
from train import ResNetFPNTrackModel
pred_model = ResNetFPNTrackModel()
predcfg = PredictConfig(
model=pred_model,
session_init=get_model_loader(load),
input_names=pred_model.get_inference_tensor_names()[0],
output_names=pred_model.get_inference_tensor_names()[1])
return OfflinePredictor(predcfg)
def init(self, image, box):
ref_img = np.array(image)[..., ::-1]
if ref_img is None:
raise ValueError("failed to load img" + image.filename)
box[2] += box[0]
box[3] += box[1]
ref_bbox = box
self._prev_box = box
if self._need_network:
resized_ref_img, resized_ref_box = self._resize_image_together_with_boxes(ref_img, ref_bbox)
feats, = self._extract_func(resized_ref_img, resized_ref_box[np.newaxis])
self._ff_gt_feats = feats[0]
def update(self, image, use_confidences=False):
if self._need_img:
target_img = np.array(image)[..., ::-1]
if target_img is None:
raise ValueError("failed to load img" + str(target_img))
else:
target_img = None
new_box, score = self._update(target_img)
if new_box is not None:
self._prev_box = new_box
ret_box = self._prev_box.copy()
ret_box[2] -= ret_box[0]
ret_box[3] -= ret_box[1]
if self._rotated_bbox is not None:
ret_box = self._rotated_bbox
if use_confidences:
return ret_box, score
else:
return ret_box
class ArgmaxTracker(PrecomputingReferenceTracker):
def __init__(self):
super().__init__("ArgmaxTracker")
def _update(self, img):
from eval import predict_image_track_with_precomputed_ref_features
results = predict_image_track_with_precomputed_ref_features(img, self._ff_gt_feats, self._pred_func)
det_boxes = np.array([r.box for r in results])
det_scores = np.array([r.score for r in results])
if len(det_boxes) > 0:
return det_boxes[0], det_scores[0]
else:
return None, None
# just there to test the precomputing on against
# not intended to be used anymore
class NonPrecomputingArgmaxTracker(Tracker):
def __init__(self):
super().__init__(name='ArgmaxTracker', is_deterministic=True)
self._ref_img = None
self._ref_bbox = None
self._prev_box = None
model = self._init_model()
load = "train_log/condrcnn_onlygot/model-460000"
predcfg = PredictConfig(
model=model,
session_init=get_model_loader(load),
input_names=model.get_inference_tensor_names()[0],
output_names=model.get_inference_tensor_names()[1])
self._pred_func = OfflinePredictor(predcfg)
def _init_model(self):
logger.set_logger_dir("/tmp/test_log/", 'd')
from dataset import DetectionDataset
from train import ResNetFPNTrackModel
# init tensorpack model
cfg.freeze(False)
model = ResNetFPNTrackModel()
DetectionDataset() # initialize the config with information from our dataset
finalize_configs(is_training=False)
return model
def init(self, image, box):
self._ref_img = cv2.imread(image.filename, cv2.IMREAD_COLOR)
if self._ref_img is None:
raise ValueError("failed to load img" + str(self._ref_img))
box[2] += box[0]
box[3] += box[1]
self._ref_bbox = box
self._prev_box = box
def update(self, image):
target_img = cv2.imread(image.filename, cv2.IMREAD_COLOR)
# assert target_img is not None
if target_img is None:
raise ValueError("failed to load img" + str(target_img))
from eval import predict_image_track
results = predict_image_track(target_img, self._ref_img, self._ref_bbox, self._pred_func)
det_boxes = np.array([r.box for r in results])
det_scores = np.array([r.score for r in results])
if len(det_boxes) > 0:
self._prev_box = det_boxes[0]
ret_box = self._prev_box.copy()
ret_box[2] -= ret_box[0]
ret_box[3] -= ret_box[1]
return ret_box
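# A minimal evaluation sketch, assuming the got10k toolkit's experiment helpers
# are available and a trained checkpoint exists for ArgmaxTracker; the dataset
# and result paths are placeholders.
if __name__ == "__main__":
    from got10k.experiments import ExperimentGOT10k
    experiment = ExperimentGOT10k("/data/GOT-10k", subset="val",
                                  result_dir="results", report_dir="reports")
    experiment.run(ArgmaxTracker(), visualize=False)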
|
mask-rcnn/mask_rcnn.py | mimigreg/basketballVideoAnalysis | 203 | 12675620 | <filename>mask-rcnn/mask_rcnn.py
# USAGE
# python mask_rcnn.py --mask-rcnn mask-rcnn-coco --image images/example_01.jpg
# python mask_rcnn.py --mask-rcnn mask-rcnn-coco --image images/example_03.jpg --visualize 1
# import the necessary packages
import numpy as np
import argparse
import random
import time
import cv2
import os
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
ap.add_argument("-m", "--mask-rcnn", required=True, help="base path to mask-rcnn directory")
ap.add_argument("-v", "--visualize", type=int, default=0, help="whether or not we are going to visualize each instance")
ap.add_argument("-c", "--confidence", type=float, default=0.5, help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3, help="minimum threshold for pixel-wise mask segmentation")
args = vars(ap.parse_args())
# load the COCO class labels our Mask R-CNN was trained on
labelsPath = os.path.sep.join([args["mask_rcnn"],"object_detection_classes_coco.txt"])
LABELS = open(labelsPath).read().strip().split("\n")
# load the set of colors that will be used when visualizing a given instance segmentation
# colorsPath = os.path.sep.join([args["mask_rcnn"], "colors.txt"])
# COLORS = open(colorsPath).read().strip().split("\n")
RED_COLOR = np.array([255, 0, 0])
WHITE_COLOR = np.array([255, 255, 255])
# derive the paths to the Mask R-CNN weights and model configuration
weightsPath = os.path.sep.join([args["mask_rcnn"], "frozen_inference_graph.pb"])
configPath = os.path.sep.join([args["mask_rcnn"], "mask_rcnn_inception_v2_coco_2018_01_28.pbtxt"])
# load our Mask R-CNN trained on the COCO dataset (90 classes) from disk
print("[INFO] loading Mask R-CNN from disk...")
net = cv2.dnn.readNetFromTensorflow(weightsPath, configPath)
# load our input image and grab its spatial dimensions
image = cv2.imread(args["image"], cv2.IMREAD_UNCHANGED)
(H, W) = image.shape[:2]
print("[INFO] image size: {}x{} pixels".format(W, H))
# construct a blob from the input image and then perform a forward
# pass of the Mask R-CNN, giving us (1) the bounding box coordinates
# of the objects in the image along with (2) the pixel-wise segmentation
# for each specific object
blob = cv2.dnn.blobFromImage(image, swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
(boxes, masks) = net.forward(["detection_out_final", "detection_masks"])
end = time.time()
# show timing information and volume information on Mask R-CNN
print("[INFO] Mask R-CNN took {:.6f} seconds".format(end - start))
print("[INFO] boxes shape: {}".format(boxes.shape))
print("[INFO] boxes size: {}".format(boxes.size))
print("[INFO] masks shape: {}".format(masks.shape))
# loop over the number of detected objects
for i in range(0, boxes.shape[2]):
# extract the class ID of the detection along with the confidence
# (i.e., probability) associated with the prediction
classID = int(boxes[0, 0, i, 1])
confidence = boxes[0, 0, i, 2]
# filter out weak predictions by ensuring the detected probability
# is greater than the minimum probability
if confidence > args["confidence"]:
# clone our original image so we can draw on it
# clone = image.copy()
# scale the bounding box coordinates back relative to the
# size of the image and then compute the width and the height
# of the bounding box
box = boxes[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = box.astype("int")
boxW = endX - startX
boxH = endY - startY
# extract the pixel-wise segmentation for the object, resize
# the mask such that it's the same dimensions of the bounding
# box, and then finally threshold to create a *binary* mask
mask = masks[i, classID]
mask = cv2.resize(mask, (boxW, boxH), interpolation = cv2.INTER_NEAREST)
mask = (mask > args["threshold"])
# extract the ROI of the image
roi = image[startY:endY, startX:endX]
# check to see if are going to visualize how to extract the masked region itself
if args["visualize"] > 0:
# convert the mask from a boolean to an integer mask with
# to values: 0 or 255, then apply the mask
visMask = (mask * 255).astype("uint8")
instance = cv2.bitwise_and(roi, roi, mask=visMask)
# show the extracted ROI, the mask, along with the segmented instance
# cv2.imshow("ROI", roi)
# cv2.imshow("Mask", visMask)
# cv2.imshow("Segmented", instance)
# write the segmented image to disk
cv2.imwrite("output/segmented{}.png".format(i), instance)
# now, extract *only* the masked region of the ROI by passing in the boolean mask array as our slice condition
roi = roi[mask]
# Red will be used to visualize this particular instance segmentation
# then create a transparent overlay by blending the randomly selected color with the ROI
blended = ((0.4 * RED_COLOR) + (0.6 * roi)).astype("uint8")
# store the blended ROI in the original image
image[startY:endY, startX:endX][mask] = blended
# draw the bounding box of the instance on the image
cv2.rectangle(image, (startX, startY), (endX, endY), (255,255,255), 2)
# draw the predicted label and associated probability of the instance segmentation on the image
text = "{}: {:.4f}".format("Person", confidence)
cv2.putText(image, text, (startX, startY - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 2)
# show the output image
# cv2.imshow("Output", image)
# cv2.waitKey(0)
cv2.imwrite("output/result.jpg", image)
|
rllab/envs/mujoco/swimmer_env.py | ICRA-2018/gcg | 120 | 12675621 | from rllab.envs.base import Step
from rllab.misc.overrides import overrides
from .mujoco_env import MujocoEnv
import numpy as np
from rllab.core.serializable import Serializable
from rllab.misc import logger
from rllab.misc import autoargs
class SwimmerEnv(MujocoEnv, Serializable):
FILE = 'swimmer.xml'
@autoargs.arg('ctrl_cost_coeff', type=float,
help='cost coefficient for controls')
def __init__(
self,
ctrl_cost_coeff=1e-2,
*args, **kwargs):
self.ctrl_cost_coeff = ctrl_cost_coeff
super(SwimmerEnv, self).__init__(*args, **kwargs)
Serializable.quick_init(self, locals())
def get_current_obs(self):
return np.concatenate([
self.model.data.qpos.flat,
self.model.data.qvel.flat,
self.get_body_com("torso").flat,
]).reshape(-1)
def step(self, action):
self.forward_dynamics(action)
next_obs = self.get_current_obs()
lb, ub = self.action_bounds
scaling = (ub - lb) * 0.5
ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum(
np.square(action / scaling))
forward_reward = self.get_body_comvel("torso")[0]
reward = forward_reward - ctrl_cost
done = False
return Step(next_obs, reward, done)
@overrides
def log_diagnostics(self, paths):
progs = [
path["observations"][-1][-3] - path["observations"][0][-3]
for path in paths
]
logger.record_tabular('AverageForwardProgress', np.mean(progs))
logger.record_tabular('MaxForwardProgress', np.max(progs))
logger.record_tabular('MinForwardProgress', np.min(progs))
logger.record_tabular('StdForwardProgress', np.std(progs))
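# A minimal rollout sketch, assuming a working rllab/MuJoCo setup; the horizon
# of 10 random-action steps is arbitrary.
if __name__ == "__main__":
    env = SwimmerEnv()
    obs = env.reset()
    for _ in range(10):
        step = env.step(env.action_space.sample())
        print("reward: {:.4f}".format(step.reward))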
|
torrent_file/get_files.py | DazEB2/SimplePyScripts | 117 | 12675627 | <filename>torrent_file/get_files.py<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import effbot_bencode
import another_bencode
import bencode_py3
with open('_.torrent', 'rb') as f:
torrent_file_bytes = f.read()
torrent_file_text = torrent_file_bytes.decode('latin1')
torrent = effbot_bencode.decode(torrent_file_text)
print('effbot_bencode:')
print(f' {torrent}')
print(' Files:')
for file in torrent["info"]["files"]:
print(f" {'/'.join(file['path'])!r} - {file['length']:d} bytes")
print('\n')
torrent = another_bencode.decode(torrent_file_bytes)[0]
print('another_bencode:')
print(f' {torrent}')
print(' Files:')
for file in torrent[b"info"][b"files"]:
print(f" {b'/'.join(file[b'path']).decode('utf-8')!r} - {file[b'length']:d} bytes")
print('\n')
torrent = bencode_py3.bdecode(torrent_file_text)
print('bencode_py3:')
print(f' {torrent}')
print(' Files:')
for file in torrent["info"]["files"]:
print(f" {'/'.join(file['path'])!r} - {file['length']:d} bytes")
|
tests/tile_test.py | jnice-81/dace | 227 | 12675630 | <reponame>jnice-81/dace
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from __future__ import print_function
import argparse
import dace
import math
import numpy as np
W = dace.symbol('W')
H = dace.symbol('H')
TW = dace.symbol('TW')
TH = dace.symbol('TH')
@dace.program
def transpose_tiled(A: dace.float32[H, W], B: dace.float32[H, W]):
for tile_i, tile_j in dace.map[0:H:TH, 0:W:TW]:
for i, j in dace.map[0:TH, 0:TW]:
with dace.tasklet:
a << A[tile_j + j, tile_i + i]
b >> B[tile_i + i, tile_j + j]
b = a
def test():
W.set(128)
H.set(128)
TW.set(16)
TH.set(16)
print('Transpose (Tiled) %dx%d (tile size: %dx%d)' %
(W.get(), H.get(), TW.get(), TH.get()))
A = dace.ndarray([H, W], dtype=dace.float32)
B = dace.ndarray([H, W], dtype=dace.float32)
A[:] = np.random.rand(H.get(), W.get()).astype(dace.float32.type)
B[:] = dace.float32(0)
transpose_tiled(A, B, TW=TW, TH=TH)
diff = np.linalg.norm(np.transpose(A) - B) / (H.get() * W.get())
print("Difference:", diff)
assert diff <= 1e-5
if __name__ == "__main__":
test()
|
dask/bytes/tests/test_local.py | aeisenbarth/dask | 9,684 | 12675631 | <gh_stars>1000+
import gzip
import os
import pathlib
import sys
from functools import partial
from time import sleep
import cloudpickle
import pytest
from fsspec.compression import compr
from fsspec.core import open_files
from fsspec.implementations.local import LocalFileSystem
from packaging.version import parse as parse_version
from tlz import concat, valmap
from dask import compute
from dask.bytes.core import read_bytes
from dask.bytes.utils import compress
from dask.utils import filetexts
compute = partial(compute, scheduler="sync")
files = {
".test.accounts.1.json": (
b'{"amount": 100, "name": "Alice"}\n'
b'{"amount": 200, "name": "Bob"}\n'
b'{"amount": 300, "name": "Charlie"}\n'
b'{"amount": 400, "name": "Dennis"}\n'
),
".test.accounts.2.json": (
b'{"amount": 500, "name": "Alice"}\n'
b'{"amount": 600, "name": "Bob"}\n'
b'{"amount": 700, "name": "Charlie"}\n'
b'{"amount": 800, "name": "Dennis"}\n'
),
}
csv_files = {
".test.fakedata.1.csv": (b"a,b\n" b"1,2\n"),
".test.fakedata.2.csv": (b"a,b\n" b"3,4\n"),
"subdir/.test.fakedata.2.csv": (b"a,b\n" b"5,6\n"),
}
def to_uri(path):
return pathlib.Path(os.path.abspath(path)).as_uri()
def test_unordered_urlpath_errors():
# Unordered urlpath argument
with pytest.raises(TypeError):
read_bytes(
{
"sets/are.csv",
"unordered/so/they.csv",
"should/not/be.csv",
"allowed.csv",
}
)
def test_read_bytes():
with filetexts(files, mode="b"):
sample, values = read_bytes(".test.accounts.*")
assert isinstance(sample, bytes)
assert sample[:5] == files[sorted(files)[0]][:5]
assert sample.endswith(b"\n")
assert isinstance(values, (list, tuple))
assert isinstance(values[0], (list, tuple))
assert hasattr(values[0][0], "dask")
assert sum(map(len, values)) >= len(files)
results = compute(*concat(values))
assert set(results) == set(files.values())
def test_read_bytes_sample_delimiter():
with filetexts(files, mode="b"):
sample, values = read_bytes(".test.accounts.*", sample=80, delimiter=b"\n")
assert sample.endswith(b"\n")
sample, values = read_bytes(".test.accounts.1.json", sample=80, delimiter=b"\n")
assert sample.endswith(b"\n")
sample, values = read_bytes(".test.accounts.1.json", sample=2, delimiter=b"\n")
assert sample.endswith(b"\n")
def test_parse_sample_bytes():
with filetexts(files, mode="b"):
sample, values = read_bytes(".test.accounts.*", sample="40 B")
assert len(sample) == 40
def test_read_bytes_no_sample():
with filetexts(files, mode="b"):
sample, _ = read_bytes(".test.accounts.1.json", sample=False)
assert sample is False
def test_read_bytes_blocksize_none():
with filetexts(files, mode="b"):
sample, values = read_bytes(".test.accounts.*", blocksize=None)
assert sum(map(len, values)) == len(files)
@pytest.mark.parametrize("blocksize", [5.0, "5 B"])
def test_read_bytes_blocksize_types(blocksize):
with filetexts(files, mode="b"):
sample, vals = read_bytes(".test.account*", blocksize=blocksize)
results = compute(*concat(vals))
ourlines = b"".join(results).split(b"\n")
testlines = b"".join(files.values()).split(b"\n")
assert set(ourlines) == set(testlines)
def test_read_bytes_blocksize_float_errs():
with filetexts(files, mode="b"):
with pytest.raises(TypeError):
read_bytes(".test.account*", blocksize=5.5)
def test_read_bytes_include_path():
with filetexts(files, mode="b"):
_, _, paths = read_bytes(".test.accounts.*", include_path=True)
assert {os.path.split(path)[1] for path in paths} == files.keys()
def test_with_urls():
with filetexts(files, mode="b"):
# OS-independent file:// URI with glob *
url = to_uri(".test.accounts.") + "*"
sample, values = read_bytes(url, blocksize=None)
assert sum(map(len, values)) == len(files)
@pytest.mark.skipif(sys.platform == "win32", reason="pathlib and moto clash on windows")
def test_with_paths():
with filetexts(files, mode="b"):
url = pathlib.Path("./.test.accounts.*")
sample, values = read_bytes(url, blocksize=None)
assert sum(map(len, values)) == len(files)
with pytest.raises(OSError):
# relative path doesn't work
url = pathlib.Path("file://.test.accounts.*")
read_bytes(url, blocksize=None)
def test_read_bytes_block():
with filetexts(files, mode="b"):
for bs in [5, 15, 45, 1500]:
sample, vals = read_bytes(".test.account*", blocksize=bs)
assert list(map(len, vals)) == [
max((len(v) // bs), 1) for v in files.values()
]
results = compute(*concat(vals))
assert sum(len(r) for r in results) == sum(len(v) for v in files.values())
ourlines = b"".join(results).split(b"\n")
testlines = b"".join(files.values()).split(b"\n")
assert set(ourlines) == set(testlines)
def test_read_bytes_delimited():
with filetexts(files, mode="b"):
for bs in [5, 15, 45, "1.5 kB"]:
_, values = read_bytes(".test.accounts*", blocksize=bs, delimiter=b"\n")
_, values2 = read_bytes(".test.accounts*", blocksize=bs, delimiter=b"foo")
assert [a.key for a in concat(values)] != [b.key for b in concat(values2)]
results = compute(*concat(values))
res = [r for r in results if r]
assert all(r.endswith(b"\n") for r in res)
ourlines = b"".join(res).split(b"\n")
testlines = b"".join(files[k] for k in sorted(files)).split(b"\n")
assert ourlines == testlines
# delimiter not at the end
d = b"}"
_, values = read_bytes(".test.accounts*", blocksize=bs, delimiter=d)
results = compute(*concat(values))
res = [r for r in results if r]
# All should end in } except EOF
assert sum(r.endswith(b"}") for r in res) == len(res) - 2
ours = b"".join(res)
test = b"".join(files[v] for v in sorted(files))
assert ours == test
fmt_bs = [(fmt, None) for fmt in compr] + [(fmt, 10) for fmt in compr]
@pytest.mark.parametrize("fmt,blocksize", fmt_bs)
def test_compression(fmt, blocksize):
if fmt not in compress:
pytest.skip("compression function not provided")
files2 = valmap(compress[fmt], files)
with filetexts(files2, mode="b"):
if fmt and blocksize:
with pytest.raises(ValueError):
read_bytes(
".test.accounts.*.json",
blocksize=blocksize,
delimiter=b"\n",
compression=fmt,
)
return
sample, values = read_bytes(
".test.accounts.*.json",
blocksize=blocksize,
delimiter=b"\n",
compression=fmt,
)
assert sample[:5] == files[sorted(files)[0]][:5]
assert sample.endswith(b"\n")
results = compute(*concat(values))
assert b"".join(results) == b"".join([files[k] for k in sorted(files)])
def test_open_files():
with filetexts(files, mode="b"):
myfiles = open_files(".test.accounts.*")
assert len(myfiles) == len(files)
for lazy_file, data_file in zip(myfiles, sorted(files)):
with lazy_file as f:
x = f.read()
assert x == files[data_file]
@pytest.mark.parametrize("encoding", ["utf-8", "ascii"])
def test_open_files_text_mode(encoding):
with filetexts(files, mode="b"):
myfiles = open_files(".test.accounts.*", mode="rt", encoding=encoding)
assert len(myfiles) == len(files)
data = []
for file in myfiles:
with file as f:
data.append(f.read())
assert list(data) == [files[k].decode(encoding) for k in sorted(files)]
@pytest.mark.parametrize("mode", ["rt", "rb"])
@pytest.mark.parametrize("fmt", list(compr))
def test_open_files_compression(mode, fmt):
if fmt not in compress:
pytest.skip("compression function not provided")
files2 = valmap(compress[fmt], files)
with filetexts(files2, mode="b"):
myfiles = open_files(".test.accounts.*", mode=mode, compression=fmt)
data = []
for file in myfiles:
with file as f:
data.append(f.read())
sol = [files[k] for k in sorted(files)]
if mode == "rt":
sol = [b.decode() for b in sol]
assert list(data) == sol
def test_bad_compression():
with filetexts(files, mode="b"):
for func in [read_bytes, open_files]:
with pytest.raises(ValueError):
sample, values = func(".test.accounts.*", compression="not-found")
def test_not_found():
fn = "not-a-file"
with pytest.raises((FileNotFoundError, OSError), match=fn):
read_bytes(fn)
@pytest.mark.slow
def test_names():
with filetexts(files, mode="b"):
_, a = read_bytes(".test.accounts.*")
_, b = read_bytes(".test.accounts.*")
a = list(concat(a))
b = list(concat(b))
assert [aa._key for aa in a] == [bb._key for bb in b]
sleep(1)
for fn in files:
with open(fn, "ab") as f:
f.write(b"x")
_, c = read_bytes(".test.accounts.*")
c = list(concat(c))
assert [aa._key for aa in a] != [cc._key for cc in c]
@pytest.mark.parametrize("compression_opener", [(None, open), ("gzip", gzip.open)])
def test_open_files_write(tmpdir, compression_opener):
compression, opener = compression_opener
tmpdir = str(tmpdir)
files = open_files(tmpdir, num=2, mode="wb", compression=compression)
assert len(files) == 2
assert {f.mode for f in files} == {"wb"}
for fil in files:
with fil as f:
f.write(b"000")
files = sorted(os.listdir(tmpdir))
assert files == ["0.part", "1.part"]
with opener(os.path.join(tmpdir, files[0]), "rb") as f:
d = f.read()
assert d == b"000"
def test_pickability_of_lazy_files(tmpdir):
tmpdir = str(tmpdir)
with filetexts(files, mode="b"):
myfiles = open_files(".test.accounts.*")
myfiles2 = cloudpickle.loads(cloudpickle.dumps(myfiles))
for f, f2 in zip(myfiles, myfiles2):
assert f.path == f2.path
assert type(f.fs) == type(f2.fs)
with f as f_open, f2 as f2_open:
assert f_open.read() == f2_open.read()
def test_py2_local_bytes(tmpdir):
fn = str(tmpdir / "myfile.txt.gz")
with gzip.open(fn, mode="wb") as f:
f.write(b"hello\nworld")
files = open_files(fn, compression="gzip", mode="rt")
with files[0] as f:
assert all(isinstance(line, str) for line in f)
def test_abs_paths(tmpdir):
tmpdir = str(tmpdir)
here = os.getcwd()
os.chdir(tmpdir)
with open("tmp", "w") as f:
f.write("hi")
out = LocalFileSystem().glob("*")
assert len(out) == 1
assert "/" in out[0]
assert "tmp" in out[0]
fs = LocalFileSystem()
os.chdir(here)
with fs.open(out[0], "r") as f:
res = f.read()
assert res == "hi"
def test_get_pyarrow_filesystem():
from fsspec.implementations.local import LocalFileSystem
pa = pytest.importorskip("pyarrow")
if parse_version(pa.__version__).major >= 2:
pytest.skip("fsspec no loger inherits from pyarrow>=2.0.")
fs = LocalFileSystem()
assert isinstance(fs, pa.filesystem.FileSystem)
|
tests/components/cover/test_device_action.py | liangleslie/core | 30,023 | 12675667 | <reponame>liangleslie/core
"""The tests for Cover device actions."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.cover import (
DOMAIN,
SUPPORT_CLOSE,
SUPPORT_CLOSE_TILT,
SUPPORT_OPEN,
SUPPORT_OPEN_TILT,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
SUPPORT_STOP_TILT,
)
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.const import CONF_PLATFORM
from homeassistant.helpers import device_registry
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_registry import RegistryEntryHider
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.mark.parametrize(
"set_state,features_reg,features_state,expected_action_types",
[
(False, 0, 0, []),
(False, SUPPORT_CLOSE_TILT, 0, ["close_tilt"]),
(False, SUPPORT_CLOSE, 0, ["close"]),
(False, SUPPORT_OPEN_TILT, 0, ["open_tilt"]),
(False, SUPPORT_OPEN, 0, ["open"]),
(False, SUPPORT_SET_POSITION, 0, ["set_position"]),
(False, SUPPORT_SET_TILT_POSITION, 0, ["set_tilt_position"]),
(False, SUPPORT_STOP, 0, ["stop"]),
(True, 0, 0, []),
(True, 0, SUPPORT_CLOSE_TILT, ["close_tilt"]),
(True, 0, SUPPORT_CLOSE, ["close"]),
(True, 0, SUPPORT_OPEN_TILT, ["open_tilt"]),
(True, 0, SUPPORT_OPEN, ["open"]),
(True, 0, SUPPORT_SET_POSITION, ["set_position"]),
(True, 0, SUPPORT_SET_TILT_POSITION, ["set_tilt_position"]),
(True, 0, SUPPORT_STOP, ["stop"]),
],
)
async def test_get_actions(
hass,
device_reg,
entity_reg,
set_state,
features_reg,
features_state,
expected_action_types,
):
"""Test we get the expected actions from a cover."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
"5678",
device_id=device_entry.id,
supported_features=features_reg,
)
if set_state:
hass.states.async_set(
f"{DOMAIN}.test_5678", "attributes", {"supported_features": features_state}
)
await hass.async_block_till_done()
expected_actions = []
expected_actions += [
{
"domain": DOMAIN,
"type": action,
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": False},
}
for action in expected_action_types
]
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
assert_lists_same(actions, expected_actions)
@pytest.mark.parametrize(
"hidden_by,entity_category",
(
(RegistryEntryHider.INTEGRATION, None),
(RegistryEntryHider.USER, None),
(None, EntityCategory.CONFIG),
(None, EntityCategory.DIAGNOSTIC),
),
)
async def test_get_actions_hidden_auxiliary(
hass,
device_reg,
entity_reg,
hidden_by,
entity_category,
):
"""Test we get the expected actions from a hidden or auxiliary entity."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
"5678",
device_id=device_entry.id,
entity_category=entity_category,
hidden_by=hidden_by,
supported_features=SUPPORT_CLOSE,
)
expected_actions = []
expected_actions += [
{
"domain": DOMAIN,
"type": action,
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": True},
}
for action in ["close"]
]
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
assert_lists_same(actions, expected_actions)
async def test_get_action_capabilities(
hass, device_reg, entity_reg, enable_custom_integrations
):
"""Test we get the expected capabilities from a cover action."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init(empty=True)
platform.ENTITIES.append(
platform.MockCover(
name="Set position cover",
is_on=True,
unique_id="unique_set_pos_cover",
current_cover_position=50,
supported_features=SUPPORT_OPEN
| SUPPORT_CLOSE
| SUPPORT_STOP
| SUPPORT_OPEN_TILT
| SUPPORT_CLOSE_TILT
| SUPPORT_STOP_TILT,
),
)
ent = platform.ENTITIES[0]
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN, "test", ent.unique_id, device_id=device_entry.id
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
    assert len(actions) == 5  # open, close, stop, open_tilt, close_tilt
action_types = {action["type"] for action in actions}
assert action_types == {"open", "close", "stop", "open_tilt", "close_tilt"}
for action in actions:
capabilities = await async_get_device_automation_capabilities(
hass, DeviceAutomationType.ACTION, action
)
assert capabilities == {"extra_fields": []}
async def test_get_action_capabilities_set_pos(
hass, device_reg, entity_reg, enable_custom_integrations
):
"""Test we get the expected capabilities from a cover action."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
ent = platform.ENTITIES[1]
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN, "test", ent.unique_id, device_id=device_entry.id
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
expected_capabilities = {
"extra_fields": [
{
"name": "position",
"optional": True,
"type": "integer",
"default": 0,
"valueMax": 100,
"valueMin": 0,
}
]
}
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
assert len(actions) == 1 # set_position
action_types = {action["type"] for action in actions}
assert action_types == {"set_position"}
for action in actions:
capabilities = await async_get_device_automation_capabilities(
hass, DeviceAutomationType.ACTION, action
)
if action["type"] == "set_position":
assert capabilities == expected_capabilities
else:
assert capabilities == {"extra_fields": []}
async def test_get_action_capabilities_set_tilt_pos(
hass, device_reg, entity_reg, enable_custom_integrations
):
"""Test we get the expected capabilities from a cover action."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
ent = platform.ENTITIES[3]
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN, "test", ent.unique_id, device_id=device_entry.id
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
expected_capabilities = {
"extra_fields": [
{
"name": "position",
"optional": True,
"type": "integer",
"default": 0,
"valueMax": 100,
"valueMin": 0,
}
]
}
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
assert len(actions) == 3
action_types = {action["type"] for action in actions}
assert action_types == {"open", "close", "set_tilt_position"}
for action in actions:
capabilities = await async_get_device_automation_capabilities(
hass, DeviceAutomationType.ACTION, action
)
if action["type"] == "set_tilt_position":
assert capabilities == expected_capabilities
else:
assert capabilities == {"extra_fields": []}
async def test_action(hass, enable_custom_integrations):
"""Test for cover actions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event_open"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "open",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event_close"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "close",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event_stop"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "stop",
},
},
]
},
)
await hass.async_block_till_done()
open_calls = async_mock_service(hass, "cover", "open_cover")
close_calls = async_mock_service(hass, "cover", "close_cover")
stop_calls = async_mock_service(hass, "cover", "stop_cover")
hass.bus.async_fire("test_event_open")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 0
assert len(stop_calls) == 0
hass.bus.async_fire("test_event_close")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 1
assert len(stop_calls) == 0
hass.bus.async_fire("test_event_stop")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 1
assert len(stop_calls) == 1
async def test_action_tilt(hass, enable_custom_integrations):
"""Test for cover tilt actions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event_open"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "open_tilt",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event_close"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "close_tilt",
},
},
]
},
)
await hass.async_block_till_done()
open_calls = async_mock_service(hass, "cover", "open_cover_tilt")
close_calls = async_mock_service(hass, "cover", "close_cover_tilt")
hass.bus.async_fire("test_event_open")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 0
hass.bus.async_fire("test_event_close")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 1
hass.bus.async_fire("test_event_stop")
await hass.async_block_till_done()
assert len(open_calls) == 1
assert len(close_calls) == 1
async def test_action_set_position(hass, enable_custom_integrations):
"""Test for cover set position actions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "event",
"event_type": "test_event_set_pos",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "set_position",
"position": 25,
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_set_tilt_pos",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "cover.entity",
"type": "set_tilt_position",
"position": 75,
},
},
]
},
)
await hass.async_block_till_done()
cover_pos_calls = async_mock_service(hass, "cover", "set_cover_position")
tilt_pos_calls = async_mock_service(hass, "cover", "set_cover_tilt_position")
hass.bus.async_fire("test_event_set_pos")
await hass.async_block_till_done()
assert len(cover_pos_calls) == 1
assert cover_pos_calls[0].data["position"] == 25
assert len(tilt_pos_calls) == 0
hass.bus.async_fire("test_event_set_tilt_pos")
await hass.async_block_till_done()
assert len(cover_pos_calls) == 1
assert len(tilt_pos_calls) == 1
assert tilt_pos_calls[0].data["tilt_position"] == 75
|
skidl/libs/wiznet_sklib.py | arjenroodselaar/skidl | 700 | 12675677 | from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
SKIDL_lib_version = '0.0.1'
wiznet = SchLib(tool=SKIDL).add_parts(*[
Part(name='W5100',dest=TEMPLATE,tool=SKIDL,keywords='Wiznet Ethernet controller',description='WizNet W5100 10/100Mb Ethernet controller with TCP/IP stack',ref_prefix='U',num_units=1,fplist=['LQFP*10x10mm*Pitch0.4mm*'],do_erc=True,pins=[
Pin(num='1',name='RSET_BG',func=Pin.OUTPUT,do_erc=True),
Pin(num='2',name='VCC3V3A',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='4',name='GNDA',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='RXIP',do_erc=True),
Pin(num='6',name='RXIN',do_erc=True),
Pin(num='7',name='VCC1V8A',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='TXOP',func=Pin.OUTPUT,do_erc=True),
Pin(num='9',name='TXON',func=Pin.OUTPUT,do_erc=True),
Pin(num='10',name='GNDA',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='DATA6',func=Pin.BIDIR,do_erc=True),
Pin(num='30',name='SCLK',do_erc=True),
Pin(num='40',name='ADDR12',do_erc=True),
Pin(num='50',name='ADDR4',do_erc=True),
Pin(num='60',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='70',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='11',name='V18',func=Pin.PWROUT,do_erc=True),
Pin(num='21',name='DATA5',func=Pin.BIDIR,do_erc=True),
Pin(num='31',name='SEN',do_erc=True),
Pin(num='41',name='ADDR11',do_erc=True),
Pin(num='51',name='ADDR3',do_erc=True),
Pin(num='61',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='71',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='12',name='VCC3V3D',func=Pin.PWRIN,do_erc=True),
Pin(num='22',name='DATA4',func=Pin.BIDIR,do_erc=True),
Pin(num='32',name='GNDD',func=Pin.PWRIN,do_erc=True),
Pin(num='42',name='ADDR10',do_erc=True),
Pin(num='52',name='ADDR2',do_erc=True),
Pin(num='62',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='72',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='13',name='GNDD',func=Pin.PWRIN,do_erc=True),
Pin(num='23',name='DATA3',func=Pin.BIDIR,do_erc=True),
Pin(num='33',name='VCC1V8D',func=Pin.PWRIN,do_erc=True),
Pin(num='43',name='GNDD',func=Pin.PWRIN,do_erc=True),
Pin(num='53',name='ADDR1',do_erc=True),
Pin(num='63',name='OPMODE0',do_erc=True),
Pin(num='73',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='14',name='GNDD',func=Pin.PWRIN,do_erc=True),
Pin(num='24',name='DATA2',func=Pin.BIDIR,do_erc=True),
Pin(num='34',name='TEST_MODE3',do_erc=True),
Pin(num='44',name='VCC3V3D',func=Pin.PWRIN,do_erc=True),
Pin(num='54',name='ADDR0',do_erc=True),
Pin(num='64',name='OPMODE1',do_erc=True),
Pin(num='74',name='VCC1V8A',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='VCC1V8D',func=Pin.PWRIN,do_erc=True),
Pin(num='25',name='DATA1',func=Pin.BIDIR,do_erc=True),
Pin(num='35',name='TEST_MODE2',do_erc=True),
Pin(num='45',name='ADDR9',do_erc=True),
Pin(num='55',name='CS',do_erc=True),
Pin(num='65',name='OPMODE2',func=Pin.OUTPUT,do_erc=True),
Pin(num='75',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='16',name='VCC1V8D',func=Pin.PWRIN,do_erc=True),
Pin(num='26',name='DATA0',func=Pin.BIDIR,do_erc=True),
Pin(num='36',name='TEST_MODE1',do_erc=True),
Pin(num='46',name='ADDR8',do_erc=True),
Pin(num='56',name='INT',func=Pin.OUTPUT,do_erc=True),
Pin(num='66',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='76',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='17',name='GNDD',func=Pin.PWRIN,do_erc=True),
Pin(num='27',name='MISO',do_erc=True),
Pin(num='37',name='TEST_MODE0',do_erc=True),
Pin(num='47',name='ADDR7',do_erc=True),
Pin(num='57',name='WR',do_erc=True),
Pin(num='67',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='77',name='GNDA',func=Pin.PWRIN,do_erc=True),
Pin(num='18',name='VCC3V3D',func=Pin.PWRIN,do_erc=True),
Pin(num='28',name='MOSI',do_erc=True),
Pin(num='38',name='ADDR14',do_erc=True),
Pin(num='48',name='ADDR6',do_erc=True),
Pin(num='58',name='RD',do_erc=True),
Pin(num='68',name='GNDD',func=Pin.PWRIN,do_erc=True),
Pin(num='19',name='DATA7',func=Pin.BIDIR,do_erc=True),
Pin(num='29',name='SCS',do_erc=True),
Pin(num='39',name='ADDR13',do_erc=True),
Pin(num='49',name='ADDR5',do_erc=True),
Pin(num='59',name='RESET',do_erc=True),
Pin(num='69',name='VCC1V8D',func=Pin.PWRIN,do_erc=True)])])
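# Usage sketch (not part of the generated library): instantiate the W5100
# TEMPLATE defined above and list its pins. Passing the SchLib object straight
# to Part() is an assumption about how this library is meant to be consumed.
if __name__ == '__main__':
    u1 = Part(wiznet, 'W5100')  # copy the template into a real part instance
    for pin in u1.pins:
        print(pin.num, pin.name)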
|
tests/test_extraction_metrics.py | pwang00/PrivacyRaven | 121 | 12675682 | # This test code was modified from code written by the `hypothesis.extra.ghostwriter` module
# and is provided under the Creative Commons Zero public domain dedication.
import argparse
import numpy as np
import pytest
import torch
from hypothesis import assume, given
from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays
import privacyraven.extraction.metrics
import privacyraven.extraction.synthesis
import privacyraven.utils.query
from privacyraven.models.victim import train_four_layer_mnist_victim
from privacyraven.utils import model_creation
from privacyraven.utils.data import get_emnist_data
from privacyraven.utils.query import get_target
# Establish strategies
device = torch.device("cpu")
model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())
def query_mnist(input_data):
return get_target(model, input_data, (1, 28, 28, 1))
def valid_query():
return st.just(query_mnist)
def valid_data():
return arrays(np.float64, (10, 28, 28, 1), st.floats())
@given(
test_data=valid_data(),
substitute_model=st.just(model),
query_victim=valid_query(),
victim_input_shape=st.just((1, 28, 28, 1)),
substitute_input_shape=st.just((1, 28, 28, 1)),
)
def test_label_agreement_returns_agreed(
test_data,
substitute_model,
query_victim,
victim_input_shape,
substitute_input_shape,
):
x = privacyraven.extraction.metrics.label_agreement(
test_data=test_data,
substitute_model=substitute_model,
query_victim=query_victim,
victim_input_shape=victim_input_shape,
substitute_input_shape=substitute_input_shape,
)
    # Technically, x should be 10, but the exact value depends on how well the
    # victim network happens to train - an invariant we should not be
    # testing here
assert x > 8
|
SoftLayer/fixtures/SoftLayer_Billing_Invoice.py | dvzrv/softlayer-python | 126 | 12675683 | getInvoiceTopLevelItems = [
{
'categoryCode': 'sov_sec_ip_addresses_priv',
'createDate': '2018-04-04T23:15:20-06:00',
'description': '64 Portable Private IP Addresses',
'id': 724951323,
'oneTimeAfterTaxAmount': '0',
'recurringAfterTaxAmount': '0',
'hostName': 'bleg',
'domainName': 'beh.com',
'category': {'name': 'Private (only) Secondary VLAN IP Addresses'},
'children': [
{
'id': 12345,
'category': {'name': 'Fake Child Category'},
'description': 'Blah',
'oneTimeAfterTaxAmount': 55.50,
'recurringAfterTaxAmount': 0.10
}
],
'location': {'name': 'fra02'}
}
]
|
Docs/torch_code_examples/model_validator_code_example.py | lipovsek/aimet | 945 | 12675742 | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
# pylint: disable=missing-docstring
""" These are code examples to be used when generating AIMET documentation via Sphinx """
import torch
import torch.nn.functional as F
# Model Validator related imports
from aimet_torch.model_validator.model_validator import ModelValidator
class ModelWithReusedNodes(torch.nn.Module):
""" Model that reuses a relu module. Expects input of shape (1, 3, 32, 32) """
def __init__(self):
super(ModelWithReusedNodes, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 8, kernel_size=2, stride=2, padding=2, bias=False)
self.bn1 = torch.nn.BatchNorm2d(8)
self.relu1 = torch.nn.ReLU(inplace=True)
self.linear = torch.nn.Linear(2592, 10)
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.relu1(x)
x = self.bn1(x)
x = self.relu1(x)
x = x.view(x.size(0), -1)
x = self.linear(x)
return x
class ModelWithoutReusedNodes(torch.nn.Module):
""" Model that is fixed to not reuse modules. Expects input of shape (1, 3, 32, 32) """
def __init__(self):
super(ModelWithoutReusedNodes, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 8, kernel_size=2, stride=2, padding=2, bias=False)
self.bn1 = torch.nn.BatchNorm2d(8)
self.relu1 = torch.nn.ReLU(inplace=True)
self.relu2 = torch.nn.ReLU(inplace=True)
self.linear = torch.nn.Linear(2592, 10)
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.relu1(x)
x = self.bn1(x)
x = self.relu2(x)
x = x.view(x.size(0), -1)
x = self.linear(x)
return x
class ModelWithFunctionalLinear(torch.nn.Module):
""" Model that uses a torch functional linear layer. Expects input of shape (1, 3, 32, 32) """
def __init__(self):
super(ModelWithFunctionalLinear, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 8, kernel_size=2, stride=2, padding=2, bias=False)
self.bn1 = torch.nn.BatchNorm2d(8)
self.relu1 = torch.nn.ReLU(inplace=True)
self.relu2 = torch.nn.ReLU(inplace=True)
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.relu1(x)
x = self.bn1(x)
x = self.relu2(x)
x = x.view(x.size(0), -1)
x = F.linear(x, torch.randn(10, 2592))
return x
class ModelWithoutFunctionalLinear(torch.nn.Module):
""" Model that is fixed to use a linear module instead of functional. Expects input of shape (1, 3, 32, 32) """
def __init__(self):
super(ModelWithoutFunctionalLinear, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 8, kernel_size=2, stride=2, padding=2, bias=False)
self.bn1 = torch.nn.BatchNorm2d(8)
self.relu1 = torch.nn.ReLU(inplace=True)
self.relu2 = torch.nn.ReLU(inplace=True)
self.linear = torch.nn.Linear(2592, 10)
with torch.no_grad():
self.linear.weight = torch.nn.Parameter(torch.randn(10, 2592))
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.relu1(x)
x = self.bn1(x)
x = self.relu2(x)
x = x.view(x.size(0), -1)
x = self.linear(x)
return x
def validate_example_model():
# Load the model to validate
model = ModelWithReusedNodes()
# Output of ModelValidator.validate_model will be True if model is valid, False otherwise
ModelValidator.validate_model(model, model_input=torch.rand(1, 3, 32, 32))
if __name__ == '__main__':
validate_example_model()
|
tests/importer/onnx_/basic/test_expand.py | xhuohai/nncase | 510 | 12675744 | # Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
import pytest
import onnx
from onnx import helper
from onnx import AttributeProto, TensorProto, GraphProto
from onnx_test_runner import OnnxTestRunner
import numpy as np
def _make_module(in_shape, expand_shape, value_format):
inputs = []
outputs = []
initializers = []
nodes = []
# input
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, in_shape)
inputs.append('input')
# output
out = np.ones(in_shape) * np.ones(expand_shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, out.shape)
outputs.append('output')
# shape
shape_tensor = helper.make_tensor(
'shape',
TensorProto.INT64,
dims=[len(expand_shape)],
vals=expand_shape
)
if value_format == 'initializer':
initializers.append(shape_tensor)
else:
shape_node = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_tensor
)
nodes.append(shape_node)
inputs.append('shape')
# Expand
expand = onnx.helper.make_node(
'Expand',
inputs=inputs,
outputs=outputs,
)
nodes.append(expand)
graph_def = helper.make_graph(
nodes,
'test-model',
[input],
[output],
initializer=initializers
)
model_def = helper.make_model(graph_def, producer_name='kendryte')
return model_def
in_shapes = [
[3, 1]
]
expand_shapes = [
[1],
[1, 1],
[3, 4],
[2, 1, 6]
]
value_formats = [
['initializer'],
['constant']
]
@pytest.mark.parametrize('in_shape', in_shapes)
@pytest.mark.parametrize('expand_shape', expand_shapes)
@pytest.mark.parametrize('value_format', value_formats)
def test_expand(in_shape, expand_shape, value_format, request):
model_def = _make_module(in_shape, expand_shape, value_format)
runner = OnnxTestRunner(request.node.name)
model_file = runner.from_onnx_helper(model_def)
runner.run(model_file)
if __name__ == "__main__":
pytest.main(['-vv', 'test_expand.py'])
|
ndscheduler/corescheduler/datastore/base.py | JonathanCalderon/ndscheduler | 1,038 | 12675748 | """Base class to represent datastore."""
import dateutil.tz
import dateutil.parser
from apscheduler.jobstores import sqlalchemy as sched_sqlalchemy
from sqlalchemy import desc, select, MetaData
from ndscheduler.corescheduler import constants
from ndscheduler.corescheduler import utils
from ndscheduler.corescheduler.datastore import tables
class DatastoreBase(sched_sqlalchemy.SQLAlchemyJobStore):
instance = None
@classmethod
def get_instance(cls, db_config=None, table_names=None):
if not cls.instance:
cls.instance = cls(db_config, table_names)
return cls.instance
@classmethod
def destroy_instance(cls):
cls.instance = None
def __init__(self, db_config, table_names):
"""
:param dict db_config: dictionary containing values for db connection
:param dict table_names: dictionary containing the names for the jobs,
executions, or audit logs table, e.g. {
'executions_tablename': 'scheduler_executions',
'jobs_tablename': 'scheduler_jobs',
'auditlogs_tablename': 'scheduler_auditlogs'
}
If any of these keys is not provided, the default table name is selected from constants.py
"""
self.metadata = MetaData()
self.table_names = table_names
self.db_config = db_config
executions_tablename = constants.DEFAULT_EXECUTIONS_TABLENAME
jobs_tablename = constants.DEFAULT_JOBS_TABLENAME
auditlogs_tablename = constants.DEFAULT_AUDIT_LOGS_TABLENAME
if table_names:
if 'executions_tablename' in table_names:
executions_tablename = table_names['executions_tablename']
if 'jobs_tablename' in table_names:
jobs_tablename = table_names['jobs_tablename']
if 'auditlogs_tablename' in table_names:
auditlogs_tablename = table_names['auditlogs_tablename']
self.executions_table = tables.get_execution_table(self.metadata, executions_tablename)
self.auditlogs_table = tables.get_auditlogs_table(self.metadata, auditlogs_tablename)
super(DatastoreBase, self).__init__(url=self.get_db_url(), tablename=jobs_tablename)
self.metadata.create_all(self.engine)
def get_db_url(self):
"""We can use the dict passed from db_config_dict to construct a db url.
:return: Database url. See: http://docs.sqlalchemy.org/en/latest/core/engines.html
:rtype: str
"""
raise NotImplementedError('Please implement this function.')
def add_execution(self, execution_id, job_id, state, **kwargs):
"""Insert a record of execution to database.
:param str execution_id: Execution id.
:param str job_id: Job id.
:param int state: Execution state. See ndscheduler.constants.EXECUTION_*
"""
execution = {
'eid': execution_id,
'job_id': job_id,
'state': state
}
execution.update(kwargs)
execution_insert = self.executions_table.insert().values(**execution)
self.engine.execute(execution_insert)
def get_execution(self, execution_id):
"""Returns execution dict.
:param str execution_id: Execution id.
        :return: Dictionary of execution info.
:rtype: dict
"""
selectable = select('*').where(self.executions_table.c.eid == execution_id)
rows = self.engine.execute(selectable)
for row in rows:
return self._build_execution(row)
def update_execution(self, execution_id, **kwargs):
"""Update execution in database.
:param str execution_id: Execution id.
:param kwargs: Keyword arguments.
"""
execution_update = self.executions_table.update().where(
self.executions_table.c.eid == execution_id).values(**kwargs)
self.engine.execute(execution_update)
def _build_execution(self, row):
"""Return job execution info from a row of scheduler_execution table.
:param obj row: A row instance of scheduler_execution table.
:return: A dictionary of job execution info.
:rtype: dict
"""
return_json = {
'execution_id': row.eid,
'state': constants.EXECUTION_STATUS_DICT[row.state],
'hostname': row.hostname,
'pid': row.pid,
'task_id': row.task_id,
'description': row.description,
'result': row.result,
'scheduled_time': self.get_time_isoformat_from_db(row.scheduled_time),
'updated_time': self.get_time_isoformat_from_db(row.updated_time)}
job = self.lookup_job(row.job_id)
if job:
return_json['job'] = {
'job_id': job.id,
'name': job.name,
'task_name': utils.get_job_name(job),
'pub_args': utils.get_job_args(job)}
return_json['job'].update(utils.get_cron_strings(job))
return return_json
def get_time_isoformat_from_db(self, time_object):
"""Convert time object from database to iso 8601 format.
:param object time_object: a time object from database, which is different on different
databases. Subclass of this class for specific database has to override this function.
:return: iso8601 format string
:rtype: str
"""
return time_object.isoformat()
def get_executions(self, time_range_start, time_range_end):
"""Returns info for multiple job executions.
:param str time_range_start: ISO format for time range starting point.
        :param str time_range_end: ISO format for time range ending point.
:return: A dictionary of multiple execution info, e.g.,
{
'executions': [...]
}
Sorted by updated_time.
:rtype: dict
"""
utc = dateutil.tz.gettz('UTC')
start_time = dateutil.parser.parse(time_range_start).replace(tzinfo=utc)
end_time = dateutil.parser.parse(time_range_end).replace(tzinfo=utc)
selectable = select('*').where(
self.executions_table.c.scheduled_time.between(
start_time, end_time)).order_by(desc(self.executions_table.c.updated_time))
rows = self.engine.execute(selectable)
return_json = {
'executions': [self._build_execution(row) for row in rows]}
return return_json
def add_audit_log(self, job_id, job_name, event, **kwargs):
"""Insert an audit log.
:param str job_id: string for job id.
:param str job_name: string for job name.
:param int event: integer for an event.
"""
audit_log = {
'job_id': job_id,
'job_name': job_name,
'event': event
}
audit_log.update(kwargs)
log_insert = self.auditlogs_table.insert().values(**audit_log)
self.engine.execute(log_insert)
def get_audit_logs(self, time_range_start, time_range_end):
"""Returns a list of audit logs.
:param str time_range_start: ISO format for time range starting point.
        :param str time_range_end: ISO format for time range ending point.
:return: A dictionary of multiple audit logs, e.g.,
{
'logs': [
{
'job_id': ...
'event': ...
'user': ...
'description': ...
}
]
}
Sorted by created_time.
:rtype: dict
"""
utc = dateutil.tz.gettz('UTC')
start_time = dateutil.parser.parse(time_range_start).replace(tzinfo=utc)
end_time = dateutil.parser.parse(time_range_end).replace(tzinfo=utc)
selectable = select('*').where(
self.auditlogs_table.c.created_time.between(
start_time, end_time)).order_by(desc(self.auditlogs_table.c.created_time))
rows = self.engine.execute(selectable)
return_json = {
'logs': [self._build_audit_log(row) for row in rows]}
return return_json
def _build_audit_log(self, row):
"""Return audit_log from a row of scheduler_auditlog table.
:param obj row: A row instance of scheduler_auditlog table.
:return: A dictionary of audit log.
:rtype: dict
"""
return_dict = {
'job_id': row.job_id,
'job_name': row.job_name,
'event': constants.AUDIT_LOG_DICT[row.event],
'user': row.user,
'created_time': self.get_time_isoformat_from_db(row.created_time),
'description': row.description}
return return_dict
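# Illustrative sketch only: a minimal concrete datastore built on DatastoreBase.
# The 'file_path' key in db_config is an assumption made for this example and is
# not required by the base class itself.
class ExampleSqliteDatastore(DatastoreBase):
    def get_db_url(self):
        """Build a SQLite url from the db_config dict."""
        return 'sqlite:///' + self.db_config['file_path']
    def get_time_isoformat_from_db(self, time_object):
        """SQLite can hand back plain strings, so parse before formatting."""
        return dateutil.parser.parse(str(time_object)).isoformat()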
|
src/tests/analyzer_helpers.py | swellander/tryceratops | 269 | 12675752 | import ast
import os
from typing import Iterable
from tryceratops.violations import Violation
def read_sample(filename: str) -> ast.AST:
ref_dir = f"{os.path.dirname(__file__)}/samples/violations/"
path = f"{ref_dir}{filename}.py"
with open(path) as sample:
content = sample.read()
loaded = ast.parse(content)
return loaded
def read_sample_lines(filename: str, dir: str = "violations") -> Iterable[str]:
ref_dir = f"{os.path.dirname(__file__)}/samples/{dir}/"
path = f"{ref_dir}{filename}.py"
with open(path) as sample:
return sample.readlines()
def assert_violation(code: str, msg: str, line: int, col: int, violation: Violation):
assert violation.line == line
assert violation.col == col
assert violation.code == code
assert violation.description == msg
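# Usage sketch: these helpers are typically combined in an analyzer test. The
# sample name and analyzer class below are hypothetical placeholders, not part
# of this module.
#
# def test_example_violation():
#     tree = read_sample("some_sample")
#     violations = SomeAnalyzer().check(tree, "filename")
#     assert_violation("TC000", "expected message", 10, 4, violations[0])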
|
scale/error/__init__.py | kaydoh/scale | 121 | 12675761 | <filename>scale/error/__init__.py
"""The standard interface for errors in scale"""
default_app_config = 'error.apps.ErrorConfig'
|
rfcn/function/train_rpn.py | YAMLONG/Deformable-ConvNets | 344 | 12675764 | <reponame>YAMLONG/Deformable-ConvNets
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by <NAME>
# --------------------------------------------------------
import argparse
import logging
import pprint
import mxnet as mx
from symbols import *
from core import callback, metric
from core.loader import AnchorLoader
from core.module import MutableModule
from utils.load_data import load_gt_roidb, merge_roidb, filter_roidb
from utils.load_model import load_param
from utils.PrefetchingIter import PrefetchingIter
from utils.lr_scheduler import WarmupMultiFactorScheduler
def train_rpn(cfg, dataset, image_set, root_path, dataset_path,
frequent, kvstore, flip, shuffle, resume,
ctx, pretrained, epoch, prefix, begin_epoch, end_epoch,
train_shared, lr, lr_step, logger=None, output_path=None):
# set up logger
if not logger:
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# set up config
cfg.TRAIN.BATCH_IMAGES = cfg.TRAIN.ALTERNATE.RPN_BATCH_IMAGES
# load symbol
sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
sym = sym_instance.get_symbol_rpn(cfg, is_train=True)
feat_sym = sym.get_internals()['rpn_cls_score_output']
# setup multi-gpu
batch_size = len(ctx)
input_batch_size = cfg.TRAIN.BATCH_IMAGES * batch_size
# print cfg
pprint.pprint(cfg)
logger.info('training rpn cfg:{}\n'.format(pprint.pformat(cfg)))
# load dataset and prepare imdb for training
image_sets = [iset for iset in image_set.split('+')]
roidbs = [load_gt_roidb(dataset, image_set, root_path, dataset_path, result_path=output_path,
flip=flip)
for image_set in image_sets]
roidb = merge_roidb(roidbs)
roidb = filter_roidb(roidb, cfg)
# load training data
train_data = AnchorLoader(feat_sym, roidb, cfg, batch_size=input_batch_size, shuffle=shuffle,
ctx=ctx, feat_stride=cfg.network.RPN_FEAT_STRIDE, anchor_scales=cfg.network.ANCHOR_SCALES,
anchor_ratios=cfg.network.ANCHOR_RATIOS, aspect_grouping=cfg.TRAIN.ASPECT_GROUPING)
# infer max shape
max_data_shape = [('data', (cfg.TRAIN.BATCH_IMAGES, 3, max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]
max_data_shape, max_label_shape = train_data.infer_shape(max_data_shape)
print('providing maximum shape', max_data_shape, max_label_shape)
# infer shape
data_shape_dict = dict(train_data.provide_data_single + train_data.provide_label_single)
sym_instance.infer_shape(data_shape_dict)
# load and initialize params
if resume:
print('continue training from ', begin_epoch)
arg_params, aux_params = load_param(prefix, begin_epoch, convert=True)
else:
arg_params, aux_params = load_param(pretrained, epoch, convert=True)
sym_instance.init_weight_rpn(cfg, arg_params, aux_params)
# check parameter shapes
sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict)
# create solver
data_names = [k[0] for k in train_data.provide_data_single]
label_names = [k[0] for k in train_data.provide_label_single]
if train_shared:
fixed_param_prefix = cfg.network.FIXED_PARAMS_SHARED
else:
fixed_param_prefix = cfg.network.FIXED_PARAMS
mod = MutableModule(sym, data_names=data_names, label_names=label_names,
logger=logger, context=ctx, max_data_shapes=[max_data_shape for _ in xrange(batch_size)],
max_label_shapes=[max_label_shape for _ in xrange(batch_size)], fixed_param_prefix=fixed_param_prefix)
# decide training params
# metric
eval_metric = metric.RPNAccMetric()
cls_metric = metric.RPNLogLossMetric()
bbox_metric = metric.RPNL1LossMetric()
eval_metrics = mx.metric.CompositeEvalMetric()
for child_metric in [eval_metric, cls_metric, bbox_metric]:
eval_metrics.add(child_metric)
# callback
batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=frequent)
# epoch_end_callback = mx.callback.do_checkpoint(prefix)
epoch_end_callback = mx.callback.module_checkpoint(mod, prefix, period=1, save_optimizer_states=True)
# decide learning rate
base_lr = lr
lr_factor = cfg.TRAIN.lr_factor
lr_epoch = [int(epoch) for epoch in lr_step.split(',')]
lr_epoch_diff = [epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch]
lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
lr_iters = [int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff]
print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)
lr_scheduler = WarmupMultiFactorScheduler(lr_iters, lr_factor, cfg.TRAIN.warmup, cfg.TRAIN.warmup_lr, cfg.TRAIN.warmup_step)
# optimizer
optimizer_params = {'momentum': cfg.TRAIN.momentum,
'wd': cfg.TRAIN.wd,
'learning_rate': lr,
'lr_scheduler': lr_scheduler,
'rescale_grad': 1.0,
'clip_gradient': None}
if not isinstance(train_data, PrefetchingIter):
train_data = PrefetchingIter(train_data)
# train
mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback, kvstore=kvstore,
optimizer='sgd', optimizer_params=optimizer_params,
arg_params=arg_params, aux_params=aux_params, begin_epoch=begin_epoch, num_epoch=end_epoch)
|
modules/people.py | groovychoons/yoda | 747 | 12675765 | from __future__ import absolute_import
from builtins import str
from builtins import input
import datetime
from .config import get_config_file_paths
from .util import *
# config file path
PEOPLE_CONFIG_FILE_PATH = get_config_file_paths()["PEOPLE_CONFIG_FILE_PATH"]
PEOPLE_CONFIG_FOLDER_PATH = get_folder_path_from_file_path(PEOPLE_CONFIG_FILE_PATH)
def get_friends_file_path(friend_name):
"""
get file path for friend's entry file
:return:
"""
return PEOPLE_CONFIG_FOLDER_PATH + "/" + friend_name + ".yaml"
def friend_name_exists(friend_name):
file_name = get_friends_file_path(friend_name)
return os.path.isfile(file_name)
def append_data_into_file(data, file_path):
"""
append data into existing file
:param data:
:param file_path:
"""
with open(file_path) as todays_tasks_entry:
# read contents
contents = yaml.load(todays_tasks_entry)
contents["entries"].append(data)
# enter data
with open(file_path, "w") as todays_tasks_entry:
yaml.dump(contents, todays_tasks_entry, default_flow_style=False)
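# Example (illustrative values) of the YAML layout these helpers maintain:
# entries:
# - dob: '1990-01-01'
#   mobile: '5550100'
#   name: sam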
def status():
"""
check status
"""
if os.path.isfile(PEOPLE_CONFIG_FILE_PATH):
with open(PEOPLE_CONFIG_FILE_PATH) as config_file:
contents = yaml.load(config_file)
entries = contents["entries"]
click.echo("People:")
click.echo("--------------------------------------")
click.echo(" Mob | DOB | Name ")
click.echo("------------|------------|------------")
for i, entry in enumerate(entries):
s_no = str(i)
name = entry["name"]
dob = entry["dob"]
mob = entry["mobile"]
click.echo(" " + mob + " | " + dob + " | " + name)
else:
click.echo(
chalk.red(
'The configuration file for this module does not exist. Please type "yoda people setup" to create a new one'
)
)
def setup():
"""
create new setup
:return:
"""
create_folder(PEOPLE_CONFIG_FOLDER_PATH)
click.echo(chalk.blue("Enter their name:"))
name = input().strip().lower()
if friend_name_exists(name):
click.echo(
chalk.red(
                'A configuration with this friend name already exists. Please type "yoda people --help"'
            )
        )
        return
click.echo(chalk.blue("Input their DOB (YYYY-MM-DD):"))
incorrect_date_format = True
while incorrect_date_format:
dob = input().strip()
try:
date_str = datetime.datetime.strptime(dob, "%Y-%m-%d").strftime("%Y-%m-%d")
if date_str != dob:
raise ValueError
incorrect_date_format = False
except ValueError:
click.echo(
chalk.red("Incorrect data format, should be YYYY-MM-DD. Please repeat:")
)
click.echo(chalk.blue("Enter their Mobile Number:"))
mobile = input().strip()
if os.path.isfile(PEOPLE_CONFIG_FILE_PATH):
setup_data = dict(name=name, mobile=mobile, dob=dob)
append_data_into_file(setup_data, PEOPLE_CONFIG_FILE_PATH)
else:
setup_data = dict(entries=[dict(name=name, mobile=mobile, dob=dob)])
input_data(setup_data, PEOPLE_CONFIG_FILE_PATH)
input_data(dict(entries=[]), get_friends_file_path(name))
def like():
"""
Adds likes
"""
click.echo(chalk.blue("For whom you want to add like for"))
friend_name = input().strip().lower()
FRIENDS_FILE_PATH = get_friends_file_path(friend_name)
if os.path.isfile(FRIENDS_FILE_PATH):
hashtags = []
with open(FRIENDS_FILE_PATH) as fin:
contents = yaml.load(fin)
entries = contents["entries"]
if "likes" in entries:
                hashtags = entries["likes"]
del entries["likes"]
continue_adding_hashtags = True
while continue_adding_hashtags:
click.echo(chalk.blue("Enter what they like or -"))
hashtag = input().strip()
if hashtag == "-":
continue_adding_hashtags = False
else:
hashtags.append("#" + hashtag)
setup_data = dict(likes=hashtags)
append_data_into_file(setup_data, FRIENDS_FILE_PATH)
else:
click.echo(
chalk.red(
"Friend's config file doesn't exist. Type 'yoda people setup' to setup a friend"
)
)
def note():
"""
Adds notes
"""
click.echo(chalk.blue("For whom you want to add a note for"))
friend_name = input().strip().lower()
FRIENDS_FILE_PATH = get_friends_file_path(friend_name)
if os.path.isfile(FRIENDS_FILE_PATH):
notes = []
with open(FRIENDS_FILE_PATH) as fin:
contents = yaml.load(fin)
entries = contents["entries"]
if "notes" in entries:
notes = entries["notes"]
del entries["notes"]
continue_adding_notes = True
while continue_adding_notes:
click.echo(chalk.blue("Enter note or press -"))
note = input().strip()
if note == "-":
continue_adding_notes = False
else:
notes.append(note)
setup_data = dict(notes=notes)
append_data_into_file(setup_data, FRIENDS_FILE_PATH)
else:
click.echo(
chalk.red(
"Friend's config file doesn't exist. Type 'yoda people setup' to setup a friend"
)
)
def likes():
"""
view the things they like
"""
click.echo(chalk.blue("For whom you want to view likes for"))
friend_name = input().strip().lower()
FRIENDS_FILE_PATH = get_friends_file_path(friend_name)
if os.path.isfile(FRIENDS_FILE_PATH):
with open(FRIENDS_FILE_PATH) as fin:
contents = yaml.load(fin)
entries = contents["entries"]
likes = []
for entry in entries:
if "likes" in entry:
likes.extend(entry["likes"])
click.echo("Likes:")
for i, n in enumerate(likes):
click.echo(str(i) + ": " + n)
else:
click.echo(
chalk.red(
'The Likes file path for this module does not exist. Please type "yoda people like" to create a new one'
)
)
def notes():
"""
view notes
"""
click.echo(chalk.blue("For whom you want to view notes for"))
friend_name = input().strip().lower()
FRIENDS_FILE_PATH = get_friends_file_path(friend_name)
if os.path.isfile(FRIENDS_FILE_PATH):
with open(FRIENDS_FILE_PATH) as fin:
contents = yaml.load(fin)
entries = contents["entries"]
notes = []
for entry in entries:
if "notes" in entry:
notes.extend(entry["notes"])
click.echo("Notes:")
for i, n in enumerate(notes):
click.echo(str(i) + ": " + n)
else:
click.echo(
chalk.red(
'The Notes file path for this module does not exist. Please type "yoda people note" to create a new one'
)
)
def check_sub_command(c):
"""
command checker
:param c:
:return:
"""
sub_commands = {
"setup": setup,
"status": status,
"note": note,
"notes": notes,
"likes": likes,
"like": like,
# 'addbirth': addbirth,
# 'showbirth': showbirth
}
try:
return sub_commands[c]()
except KeyError:
click.echo(chalk.red("Command does not exist!"))
click.echo('Try "yoda love --help" for more info')
def process(input):
"""
the main process
:param input:
"""
_input = input.lower().strip()
check_sub_command(_input)
|
armada_backend/runtime_settings.py | firesoft/armada | 281 | 12675775 | from armada_backend import consul_config
from armada_backend.models.ships import get_ship_name, get_other_ship_ips
from armada_backend.utils import get_current_datacenter, is_ship_commander, \
setup_sentry, get_logger
from armada_command.dockyard import alias
from armada_command.scripts.compat import json
def _save_runtime_settings():
consul_settings = {
'is_commander': is_ship_commander(),
'name': get_ship_name(),
'ships': get_other_ship_ips(),
'datacenter': get_current_datacenter(),
'dockyards': alias.get_list(),
}
with open(consul_config.RUNTIME_SETTINGS_PATH, 'w') as runtime_settings:
runtime_settings.write(json.dumps(consul_settings, sort_keys=True, indent=4))
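# Example (illustrative values only) of the JSON written above; keys are sorted
# by json.dumps(sort_keys=True):
# {
#     "datacenter": "dc1",
#     "dockyards": [{"address": "dockyard.example.com", "name": "example"}],
#     "is_commander": true,
#     "name": "ship-01",
#     "ships": ["10.0.0.2", "10.0.0.3"]
# }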
def override_runtime_settings(consul_mode=None, ship_name=None, ship_ips=None, datacenter=None):
consul_settings = {}
if consul_mode is not None:
consul_settings['is_commander'] = consul_mode != consul_config.ConsulMode.CLIENT
if ship_name is not None:
consul_settings['name'] = ship_name
if ship_ips is not None:
consul_settings['ships'] = ship_ips
if datacenter is not None:
consul_settings['datacenter'] = datacenter
with open(consul_config.OVERRIDE_RUNTIME_SETTINGS_PATH, 'w') as runtime_settings:
runtime_settings.write(json.dumps(consul_settings, sort_keys=True, indent=4))
def _init_dockyards():
try:
with open(consul_config.ORIGINAL_RUNTIME_SETTINGS_PATH) as runtime_settings_json:
runtime_settings = json.load(runtime_settings_json)
except Exception as e:
get_logger().exception(e)
runtime_settings = {}
# Initialize dockyard list with fallback dockyard.
if not alias.get_alias(alias.DOCKYARD_FALLBACK_ALIAS):
alias.set_alias(alias.DOCKYARD_FALLBACK_ALIAS, alias.DOCKYARD_FALLBACK_ADDRESS)
dockyards = runtime_settings.get('dockyards', {})
default_alias = None
for info in dockyards:
dockyard_alias = info.get('name')
if dockyard_alias and not alias.get_alias(dockyard_alias):
alias.set_alias(dockyard_alias, info.get('address'), info.get('user'), info.get('password'))
if info.get('is_default'):
default_alias = dockyard_alias
if default_alias:
alias.set_default(default_alias)
def main():
setup_sentry()
if not alias.get_initialized():
_init_dockyards()
alias.set_initialized()
_save_runtime_settings()
if __name__ == '__main__':
main()
|
terrascript/data/AdrienneCohea/nomadutility.py | mjuenema/python-terrascript | 507 | 12675778 | # terrascript/data/AdrienneCohea/nomadutility.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:22:45 UTC)
__all__ = []
|
test/functional/interface_zmq_dash.py | Easonyule/dash | 1,573 | 12675798 | #!/usr/bin/env python3
# Copyright (c) 2018-2021 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the dash specific ZMQ notification interfaces."""
import configparser
from enum import Enum
import io
import json
import random
import struct
import time
try:
import zmq
finally:
pass
from test_framework.test_framework import (
DashTestFramework, skip_if_no_bitcoind_zmq, skip_if_no_py3_zmq)
from test_framework.mininode import P2PInterface, network_thread_start
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str
from test_framework.messages import (
CBlock,
CGovernanceObject,
CGovernanceVote,
CInv,
COutPoint,
CRecoveredSig,
CTransaction,
FromHex,
hash256,
msg_clsig,
msg_inv,
msg_islock,
msg_tx,
ser_string,
uint256_from_str,
uint256_to_string
)
class ZMQPublisher(Enum):
hash_chain_lock = "hashchainlock"
hash_tx_lock = "hashtxlock"
hash_governance_vote = "hashgovernancevote"
hash_governance_object = "hashgovernanceobject"
hash_instantsend_doublespend = "hashinstantsenddoublespend"
hash_recovered_sig = "hashrecoveredsig"
raw_chain_lock = "rawchainlock"
raw_chain_lock_sig = "rawchainlocksig"
raw_tx_lock = "rawtxlock"
raw_tx_lock_sig = "rawtxlocksig"
raw_governance_vote = "rawgovernancevote"
raw_governance_object = "rawgovernanceobject"
raw_instantsend_doublespend = "rawinstantsenddoublespend"
raw_recovered_sig = "rawrecoveredsig"
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.islocks = {}
self.txes = {}
def send_islock(self, islock):
hash = uint256_from_str(hash256(islock.serialize()))
self.islocks[hash] = islock
inv = msg_inv([CInv(30, hash)])
self.send_message(inv)
def send_tx(self, tx):
hash = uint256_from_str(hash256(tx.serialize()))
self.txes[hash] = tx
inv = msg_inv([CInv(30, hash)])
self.send_message(inv)
def on_getdata(self, message):
for inv in message.inv:
if inv.hash in self.islocks:
self.send_message(self.islocks[inv.hash])
if inv.hash in self.txes:
self.send_message(self.txes[inv.hash])
class DashZMQTest (DashTestFramework):
def set_test_params(self):
# That's where the zmq publisher will listen for subscriber
self.address = "tcp://127.0.0.1:28333"
# node0 creates all available ZMQ publisher
node0_extra_args = ["-zmqpub%s=%s" % (pub.value, self.address) for pub in ZMQPublisher]
node0_extra_args.append("-whitelist=127.0.0.1")
node0_extra_args.append("-watchquorums") # have to watch quorums to receive recsigs and trigger zmq
self.set_dash_test_params(4, 3, fast_dip3_enforcement=True, extra_args=[node0_extra_args, [], [], []])
def run_test(self):
# Check that dashd has been built with ZMQ enabled.
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
skip_if_no_py3_zmq()
skip_if_no_bitcoind_zmq(self)
try:
# Setup the ZMQ subscriber socket
self.zmq_context = zmq.Context()
self.socket = self.zmq_context.socket(zmq.SUB)
self.socket.connect(self.address)
# Initialize the network
self.activate_dip8()
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
# Create an LLMQ for testing
self.quorum_type = 100 # llmq_test
self.quorum_hash = self.mine_quorum()
self.sync_blocks()
self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())
# Wait a moment to avoid subscribing to recovered sig in the test before the one from the chainlock
# has been sent which leads to test failure.
time.sleep(1)
# Test all dash related ZMQ publisher
self.test_recovered_signature_publishers()
self.test_chainlock_publishers()
self.test_instantsend_publishers()
self.test_governance_publishers()
finally:
# Destroy the ZMQ context.
self.log.debug("Destroying ZMQ context")
self.zmq_context.destroy(linger=None)
def subscribe(self, publishers):
# Subscribe to a list of ZMQPublishers
for pub in publishers:
self.socket.subscribe(pub.value)
def unsubscribe(self, publishers):
# Unsubscribe from a list of ZMQPublishers
for pub in publishers:
self.socket.unsubscribe(pub.value)
def receive(self, publisher, flags=0):
# Receive a ZMQ message and validate it's sent from the correct ZMQPublisher
topic, body, seq = self.socket.recv_multipart(flags)
# Topic should match the publisher value
assert_equal(topic.decode(), publisher.value)
return io.BytesIO(body)
def test_recovered_signature_publishers(self):
def validate_recovered_sig(request_id, msg_hash):
# Make sure the recovered sig exists by RPC
rpc_recovered_sig = self.get_recovered_sig(request_id, msg_hash)
# Validate hashrecoveredsig
zmq_recovered_sig_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_recovered_sig).read(32))
assert_equal(zmq_recovered_sig_hash, msg_hash)
# Validate rawrecoveredsig
zmq_recovered_sig_raw = CRecoveredSig()
zmq_recovered_sig_raw.deserialize(self.receive(ZMQPublisher.raw_recovered_sig))
assert_equal(zmq_recovered_sig_raw.llmqType, rpc_recovered_sig['llmqType'])
assert_equal(uint256_to_string(zmq_recovered_sig_raw.quorumHash), rpc_recovered_sig['quorumHash'])
assert_equal(uint256_to_string(zmq_recovered_sig_raw.id), rpc_recovered_sig['id'])
assert_equal(uint256_to_string(zmq_recovered_sig_raw.msgHash), rpc_recovered_sig['msgHash'])
assert_equal(bytes_to_hex_str(zmq_recovered_sig_raw.sig), rpc_recovered_sig['sig'])
recovered_sig_publishers = [
ZMQPublisher.hash_recovered_sig,
ZMQPublisher.raw_recovered_sig
]
self.log.info("Testing %d recovered signature publishers" % len(recovered_sig_publishers))
# Subscribe to recovered signature messages
self.subscribe(recovered_sig_publishers)
# Generate a ChainLock and make sure this leads to valid recovered sig ZMQ messages
rpc_last_block_hash = self.nodes[0].generate(1)[0]
self.wait_for_chainlocked_block_all_nodes(rpc_last_block_hash)
height = self.nodes[0].getblockcount()
rpc_request_id = hash256(ser_string(b"clsig") + struct.pack("<I", height))[::-1].hex()
validate_recovered_sig(rpc_request_id, rpc_last_block_hash)
# Sign an arbitrary and make sure this leads to valid recovered sig ZMQ messages
sign_id = uint256_to_string(random.getrandbits(256))
sign_msg_hash = uint256_to_string(random.getrandbits(256))
for mn in self.get_quorum_masternodes(self.quorum_hash):
mn.node.quorum("sign", self.quorum_type, sign_id, sign_msg_hash)
validate_recovered_sig(sign_id, sign_msg_hash)
# Unsubscribe from recovered signature messages
self.unsubscribe(recovered_sig_publishers)
def test_chainlock_publishers(self):
chain_lock_publishers = [
ZMQPublisher.hash_chain_lock,
ZMQPublisher.raw_chain_lock,
ZMQPublisher.raw_chain_lock_sig
]
self.log.info("Testing %d ChainLock publishers" % len(chain_lock_publishers))
# Subscribe to ChainLock messages
self.subscribe(chain_lock_publishers)
# Generate ChainLock
generated_hash = self.nodes[0].generate(1)[0]
self.wait_for_chainlocked_block_all_nodes(generated_hash)
rpc_best_chain_lock = self.nodes[0].getbestchainlock()
rpc_best_chain_lock_hash = rpc_best_chain_lock["blockhash"]
rpc_best_chain_lock_sig = rpc_best_chain_lock["signature"]
assert_equal(generated_hash, rpc_best_chain_lock_hash)
rpc_chain_locked_block = self.nodes[0].getblock(rpc_best_chain_lock_hash)
rpc_chain_lock_height = rpc_chain_locked_block["height"]
rpc_chain_lock_hash = rpc_chain_locked_block["hash"]
assert_equal(generated_hash, rpc_chain_lock_hash)
# Validate hashchainlock
zmq_chain_lock_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_chain_lock).read(32))
assert_equal(zmq_chain_lock_hash, rpc_best_chain_lock_hash)
# Validate rawchainlock
zmq_chain_locked_block = CBlock()
zmq_chain_locked_block.deserialize(self.receive(ZMQPublisher.raw_chain_lock))
assert(zmq_chain_locked_block.is_valid())
assert_equal(zmq_chain_locked_block.hash, rpc_chain_lock_hash)
# Validate rawchainlocksig
zmq_chain_lock_sig_stream = self.receive(ZMQPublisher.raw_chain_lock_sig)
zmq_chain_locked_block = CBlock()
zmq_chain_locked_block.deserialize(zmq_chain_lock_sig_stream)
assert(zmq_chain_locked_block.is_valid())
zmq_chain_lock = msg_clsig()
zmq_chain_lock.deserialize(zmq_chain_lock_sig_stream)
assert_equal(zmq_chain_lock.height, rpc_chain_lock_height)
assert_equal(uint256_to_string(zmq_chain_lock.blockHash), rpc_chain_lock_hash)
assert_equal(zmq_chain_locked_block.hash, rpc_chain_lock_hash)
assert_equal(bytes_to_hex_str(zmq_chain_lock.sig), rpc_best_chain_lock_sig)
# Unsubscribe from ChainLock messages
self.unsubscribe(chain_lock_publishers)
def test_instantsend_publishers(self):
instantsend_publishers = [
ZMQPublisher.hash_tx_lock,
ZMQPublisher.raw_tx_lock,
ZMQPublisher.raw_tx_lock_sig,
ZMQPublisher.hash_instantsend_doublespend,
ZMQPublisher.raw_instantsend_doublespend
]
self.log.info("Testing %d InstantSend publishers" % len(instantsend_publishers))
# Subscribe to InstantSend messages
self.subscribe(instantsend_publishers)
# Initialize test node
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
# Make sure all nodes agree
self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())
# Create two raw TXs, they will conflict with each other
rpc_raw_tx_1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)
rpc_raw_tx_2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)
# Send the first transaction and wait for the InstantLock
rpc_raw_tx_1_hash = self.nodes[0].sendrawtransaction(rpc_raw_tx_1['hex'])
self.wait_for_instantlock(rpc_raw_tx_1_hash, self.nodes[0])
# Validate hashtxlock
zmq_tx_lock_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_tx_lock).read(32))
assert_equal(zmq_tx_lock_hash, rpc_raw_tx_1['txid'])
# Validate rawtxlock
zmq_tx_lock_raw = CTransaction()
zmq_tx_lock_raw.deserialize(self.receive(ZMQPublisher.raw_tx_lock))
assert(zmq_tx_lock_raw.is_valid())
assert_equal(zmq_tx_lock_raw.hash, rpc_raw_tx_1['txid'])
# Validate rawtxlocksig
zmq_tx_lock_sig_stream = self.receive(ZMQPublisher.raw_tx_lock_sig)
zmq_tx_lock_tx = CTransaction()
zmq_tx_lock_tx.deserialize(zmq_tx_lock_sig_stream)
assert(zmq_tx_lock_tx.is_valid())
assert_equal(zmq_tx_lock_tx.hash, rpc_raw_tx_1['txid'])
zmq_tx_lock = msg_islock()
zmq_tx_lock.deserialize(zmq_tx_lock_sig_stream)
assert_equal(uint256_to_string(zmq_tx_lock.txid), rpc_raw_tx_1['txid'])
# Try to send the second transaction. This must throw an RPC error because it conflicts with rpc_raw_tx_1
# which already got the InstantSend lock.
assert_raises_rpc_error(-26, "tx-txlock-conflict", self.nodes[0].sendrawtransaction, rpc_raw_tx_2['hex'])
# Validate hashinstantsenddoublespend
zmq_double_spend_hash2 = bytes_to_hex_str(self.receive(ZMQPublisher.hash_instantsend_doublespend).read(32))
zmq_double_spend_hash1 = bytes_to_hex_str(self.receive(ZMQPublisher.hash_instantsend_doublespend).read(32))
assert_equal(zmq_double_spend_hash2, rpc_raw_tx_2['txid'])
assert_equal(zmq_double_spend_hash1, rpc_raw_tx_1['txid'])
# Validate rawinstantsenddoublespend
zmq_double_spend_tx_2 = CTransaction()
zmq_double_spend_tx_2.deserialize(self.receive(ZMQPublisher.raw_instantsend_doublespend))
assert (zmq_double_spend_tx_2.is_valid())
assert_equal(zmq_double_spend_tx_2.hash, rpc_raw_tx_2['txid'])
zmq_double_spend_tx_1 = CTransaction()
zmq_double_spend_tx_1.deserialize(self.receive(ZMQPublisher.raw_instantsend_doublespend))
assert(zmq_double_spend_tx_1.is_valid())
assert_equal(zmq_double_spend_tx_1.hash, rpc_raw_tx_1['txid'])
# No islock notifications when tx is not received yet
self.nodes[0].generate(1)
rpc_raw_tx_3 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)
islock = self.create_islock(rpc_raw_tx_3['hex'])
self.test_node.send_islock(islock)
# Validate NO hashtxlock
time.sleep(1)
try:
self.receive(ZMQPublisher.hash_tx_lock, zmq.NOBLOCK)
assert(False)
except zmq.ZMQError:
# this is expected
pass
# Now send the tx itself
self.test_node.send_tx(FromHex(msg_tx(), rpc_raw_tx_3['hex']))
self.wait_for_instantlock(rpc_raw_tx_3['txid'], self.nodes[0])
# Validate hashtxlock
zmq_tx_lock_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_tx_lock).read(32))
assert_equal(zmq_tx_lock_hash, rpc_raw_tx_3['txid'])
# Drop test node connection
self.nodes[0].disconnect_p2ps()
# Unsubscribe from InstantSend messages
self.unsubscribe(instantsend_publishers)
def test_governance_publishers(self):
governance_publishers = [
ZMQPublisher.hash_governance_object,
ZMQPublisher.raw_governance_object,
ZMQPublisher.hash_governance_vote,
ZMQPublisher.raw_governance_vote
]
self.log.info("Testing %d governance publishers" % len(governance_publishers))
# Subscribe to governance messages
self.subscribe(governance_publishers)
# Create a proposal and submit it to the network
proposal_rev = 1
proposal_time = int(time.time())
proposal_data = {
"type": 1, # GOVERNANCE_OBJECT_PROPOSAL
"name": "Test",
"start_epoch": proposal_time,
"end_epoch": proposal_time + 60,
"payment_amount": 5,
"payment_address": self.nodes[0].getnewaddress(),
"url": "https://dash.org"
}
proposal_hex = ''.join(format(x, '02x') for x in json.dumps(proposal_data).encode())
collateral = self.nodes[0].gobject("prepare", "0", proposal_rev, proposal_time, proposal_hex)
self.wait_for_instantlock(collateral, self.nodes[0])
self.nodes[0].generate(6)
self.sync_blocks()
rpc_proposal_hash = self.nodes[0].gobject("submit", "0", proposal_rev, proposal_time, proposal_hex, collateral)
# Validate hashgovernanceobject
zmq_governance_object_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_governance_object).read(32))
assert_equal(zmq_governance_object_hash, rpc_proposal_hash)
zmq_governance_object_raw = CGovernanceObject()
zmq_governance_object_raw.deserialize(self.receive(ZMQPublisher.raw_governance_object))
assert_equal(zmq_governance_object_raw.nHashParent, 0)
assert_equal(zmq_governance_object_raw.nRevision, proposal_rev)
assert_equal(zmq_governance_object_raw.nTime, proposal_time)
assert_equal(json.loads(zmq_governance_object_raw.vchData.decode()), proposal_data)
assert_equal(zmq_governance_object_raw.nObjectType, proposal_data["type"])
assert_equal(zmq_governance_object_raw.masternodeOutpoint.hash, COutPoint().hash)
assert_equal(zmq_governance_object_raw.masternodeOutpoint.n, COutPoint().n)
# Vote for the proposal and validate the governance vote message
map_vote_outcomes = {
0: "none",
1: "yes",
2: "no",
3: "abstain"
}
map_vote_signals = {
0: "none",
1: "funding",
2: "valid",
3: "delete",
4: "endorsed"
}
self.nodes[0].gobject("vote-many", rpc_proposal_hash, map_vote_signals[1], map_vote_outcomes[1])
rpc_proposal_votes = self.nodes[0].gobject('getcurrentvotes', rpc_proposal_hash)
# Validate hashgovernancevote
zmq_governance_vote_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_governance_vote).read(32))
assert(zmq_governance_vote_hash in rpc_proposal_votes)
# Validate rawgovernancevote
zmq_governance_vote_raw = CGovernanceVote()
zmq_governance_vote_raw.deserialize(self.receive(ZMQPublisher.raw_governance_vote))
assert_equal(uint256_to_string(zmq_governance_vote_raw.nParentHash), rpc_proposal_hash)
rpc_vote_parts = rpc_proposal_votes[zmq_governance_vote_hash].split(':')
rpc_outpoint_parts = rpc_vote_parts[0].split('-')
assert_equal(uint256_to_string(zmq_governance_vote_raw.masternodeOutpoint.hash), rpc_outpoint_parts[0])
assert_equal(zmq_governance_vote_raw.masternodeOutpoint.n, int(rpc_outpoint_parts[1]))
assert_equal(zmq_governance_vote_raw.nTime, int(rpc_vote_parts[1]))
assert_equal(map_vote_outcomes[zmq_governance_vote_raw.nVoteOutcome], rpc_vote_parts[2])
assert_equal(map_vote_signals[zmq_governance_vote_raw.nVoteSignal], rpc_vote_parts[3])
# Unsubscribe from governance messages
self.unsubscribe(governance_publishers)
if __name__ == '__main__':
DashZMQTest().main()
|
convlab/modules/nlu/multiwoz/svm/corpora/scripts/score.py | ngduyanhece/ConvLab | 405 | 12675800 | from __future__ import print_function
# Modified by Microsoft Corporation.
# Licensed under the MIT license.
###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2019
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import argparse
import json
import math
import os
import sys
import traceback
from collections import defaultdict
import misc
SCHEDULES = [1,2]
LABEL_SCHEMES = ["a","b"]
EPS = 0.00001
def main(argv):
install_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
utils_dirname = os.path.join(install_path,'lib')
sys.path.append(utils_dirname)
from dataset_walker import dataset_walker
list_dir = os.path.join(install_path,'config')
parser = argparse.ArgumentParser(description='Evaluate output from a belief tracker.')
parser.add_argument('--dataset', dest='dataset', action='store', metavar='DATASET', required=True,
help='The dataset to analyze')
parser.add_argument('--dataroot',dest='dataroot',action='store', metavar='PATH', required=True,
help='Will look for corpus in <destroot>/<dataset>/...')
parser.add_argument('--trackfile',dest='scorefile',action='store',metavar='JSON_FILE',required=True,
help='File containing score JSON')
parser.add_argument('--scorefile',dest='csv',action='store',metavar='CSV_FILE',required=True,
help='File to write with CSV scoring data')
parser.add_argument('--ontology',dest='ontology',action='store',metavar='JSON_FILE',required=True,
help='JSON Ontology file')
parser.add_argument('--rocdump',dest='rocdump',action='store',metavar='FILE_STEM',
help='If present, use this file stem to write out ROC plot data: filestem.<schedule>.<slot>.<type>.csv, where type is either roc (which contains the ROC curve coordinates) or scores (which contains the raw scores used to compute the ROC curves).')
args = parser.parse_args()
sessions = dataset_walker(args.dataset,dataroot=args.dataroot,labels=True)
tracker_output = json.load(open(args.scorefile))
ontology = json.load(open(args.ontology))
slots_informable = ontology["informable"].keys()
slots_requestable = ontology["requestable"]
csvfile = open(args.csv,'w')
# what stats are there?
stats = []
stat_classes = [Stat_Accuracy, Stat_Probs, Stat_MRR, Stat_Updates, Stat_ROC]
for schedule in SCHEDULES:
for label_scheme in LABEL_SCHEMES:
for component in ['goal','requested', 'method', 'all']:
if component == 'goal' :
for slot in slots_informable + ['all','joint','joint_independent'] :
for stat_class in stat_classes:
stats.append((('goal', slot), (schedule, label_scheme), stat_class()))
elif component == 'requested' :
if label_scheme != "a" :
continue
for slot in slots_requestable + ['all'] :
for stat_class in stat_classes:
stats.append((('requested', slot), (schedule, label_scheme), stat_class()))
elif component == 'method' :
for stat_class in stat_classes:
stats.append((('method',), (schedule, label_scheme), stat_class()))
elif component == 'all' :
for stat_class in stat_classes:
stats.append((('all',), (schedule, label_scheme), stat_class()))
turn_counter = 0.0
for session_num, (session_tracker, session) in enumerate(zip(tracker_output['sessions'], sessions)):
for _, _, stat_class in stats:
stat_class.newDialog()
session_id = session.log['session-id']
try:
# this is the set of slots 'mentioned so far', used for schedule 2
S = defaultdict(lambda : set([]))
S_requested = set([])
session_length = len(session)
goal_labels_b, method_labels_b = misc.LabelsB(session, ontology)
method_schedule_2 = False # whether schedule 2 is active for method
for turn_num, ((log_turn,label_turn),_tracker_turn) in enumerate(zip(session,session_tracker['turns'])):
turn_counter += 1.0
S_new = misc.S(log_turn, ontology)
for slot in S_new :
S[slot] = S[slot].union(S_new[slot])
# remove just informed slots from S_requested
S_requested = S_requested.difference(misc.SysInformed(log_turn))
# add in ones from slu hyps
S_requested = S_requested.union(set(misc.S_requested(log_turn)))
tracker_goal_labels = _tracker_turn["goal-labels"]
for slot in slots_informable:
if slot in tracker_goal_labels :
tracker_goal_labels[slot] = normalise_dist(tracker_goal_labels[slot].items(), (session_id, turn_num, "goal."+slot))
else :
tracker_goal_labels[slot] = [(None, 1.0)]
# prepare for joint goals scoring:
tracker_goal_joint_labels = "independent"
if "goal-labels-joint" in _tracker_turn :
tracker_goal_joint_labels = _tracker_turn["goal-labels-joint"]
if tracker_goal_joint_labels != "independent" :
# tracker_goal_joint_labels must be a list of joint hyps
tracker_goal_joint_labels = [(hyp["slots"], hyp["score"]) for hyp in tracker_goal_joint_labels]
tracker_goal_joint_labels = normalise_dist(tracker_goal_joint_labels, (session_id, turn_num, "goal.joint"))
# also gather the correct joint label
true_goal_joint = None
for slot in label_turn["goal-labels"]:
if true_goal_joint == None :
true_goal_joint = {}
true_goal_joint[slot] = label_turn["goal-labels"][slot]
true_goal_joint_b = None
for slot in goal_labels_b[turn_num]:
if true_goal_joint_b == None :
true_goal_joint_b = {}
true_goal_joint_b[slot] = goal_labels_b[turn_num][slot]
tracker_requested_slots = _tracker_turn["requested-slots"]
for slot in tracker_requested_slots:
dist = [(True, tracker_requested_slots[slot]), (False,1.0-tracker_requested_slots[slot])]
tracker_requested_slots[slot] = normalise_dist(dist, (session_id, turn_num, "requested."+slot))
tracker_method_label = normalise_dist(_tracker_turn["method-label"].items(), (session_id, turn_num,"method"))
# for method schedule 2, work out whether any slu-hyp has been given
# which informs the method:
if not method_schedule_2 :
mact = log_turn["output"]["dialog-acts"]
for slu_hyp in log_turn["input"]["live"]["slu-hyps"] :
user_act = slu_hyp["slu-hyp"]
method_label = misc.MethodLabel(user_act, mact)
if method_label != "none" :
method_schedule_2 = True
break
for component, (schedule, label_scheme), stat_class in stats:
if component[0] == "goal" and (component[1] == "joint" or component[1] == "joint_independent"):
if schedule == 2:
# calculate schedule2 applicability
applies = False
for slot in slots_informable:
if len(S[slot]) > 0:
applies = True
break
if not applies :
continue
this_true_label = true_goal_joint
if label_scheme == "b" :
this_true_label = true_goal_joint_b
if tracker_goal_joint_labels == "independent" or component[1] == "joint_independent" :
stat_class.add(tracker_goal_labels, this_true_label, (session_id, turn_num, component, schedule, label_scheme), independent=True)
else :
stat_class.add(tracker_goal_joint_labels, this_true_label, (session_id, turn_num, component, schedule, label_scheme))
if (component[0] == "goal" or component[0] == "all") and (len(component)==1 or ("joint" not in component[1])) :
if component[0] == "all" or component[1] == "all" :
slots = slots_informable[:]
else :
slots = [component[1]]
for slot in slots:
if schedule ==2 and len(S[slot]) == 0 :
continue
dist = tracker_goal_labels[slot]
true_label = None
if slot in label_turn["goal-labels"] :
true_label = label_turn["goal-labels"][slot]
if label_scheme == "b" :
true_label = None
if slot in goal_labels_b[turn_num] :
true_label = goal_labels_b[turn_num][slot]
stat_class.add(dist, true_label, (session_id, turn_num, component, schedule, label_scheme))
if component[0] == "requested" or component[0] == "all" :
if component[0] == "all" or component[1] == "all":
slots = slots_requestable[:]
else :
slots = [component[1]]
for slot in slots:
if schedule ==2 and (slot not in S_requested):
continue
dist = [(False,1.0), (True,0.0)]
if slot in tracker_requested_slots :
dist = tracker_requested_slots[slot]
true_label = (slot in label_turn["requested-slots"])
stat_class.add(dist, true_label, (session_id, turn_num, component, schedule, label_scheme))
if component[0] == "method" or component[0] == "all":
if schedule == 2 and not method_schedule_2:
continue # no slu hyp informing the method has been given yet.
dist = tracker_method_label
true_label = label_turn["method-label"]
if label_scheme == "b" :
true_label = method_labels_b[turn_num]
stat_class.add(dist, true_label, (session_id, turn_num, component, schedule, label_scheme))
except KeyboardInterrupt :
raise
except:
traceback.print_exc(file=sys.stdout)
print("While scoring " + str(session_id))
# output to csv
print(( "state_component, stat, schedule, label_scheme, N, result"), file=csvfile)
for stat in stats:
component, (schedule, label_scheme), stat_class = stat
results = stat_class.results()
for stat_subname, N, result in results:
if result == None :
result = "-"
else :
result = "%.7f"%result
print(( "%s, %s, %i, %s, %i, %s"%(".".join(component), stat_subname, schedule, label_scheme, N, result)), file=csvfile)
if isinstance(stat_class, Stat_ROC) and (args.rocdump):
rocfile = args.rocdump + '.schedule' + str(schedule) + str(label_scheme)+'.' + (".".join(component)) + '.roc.csv'
scoresfile = args.rocdump + '.schedule' + str(schedule) + str(label_scheme)+'.' + (".".join(component)) + '.scores.csv'
stat_class.DumpROCToFile(rocfile)
stat_class.DumpScoresToFile(scoresfile)
print('basic,total_wall_time,,,,%s' % (tracker_output['wall-time']), file=csvfile)
print('basic,sessions,,,,%s' % (len(sessions)), file=csvfile)
print('basic,turns,,,,%i' % (int(turn_counter)), file=csvfile)
print('basic,wall_time_per_turn,,,,%s' % (tracker_output['wall-time'] / turn_counter), file=csvfile)
print('basic,dataset,,,,%s' % (tracker_output['dataset'] ), file=csvfile)
csvfile.close()
def normalise_dist(dist, this_id=None):
# take dist and convert it to a new list of tuples, sorted by descending score and
# padded with a None hypothesis so the scores sum to 1
out = dist[:]
context_string = ""
if this_id != None :
context_string = this_id[0] + (", turn %i" % this_id[1]) + ", "+this_id[2]
for i in range(len(out)):
if out[i][1] < 0.0 :
print('WARNING: Score is less than 0.0, changing to 0.0',context_string, file=sys.stderr)
total_p = sum([x[1] for x in out])
if total_p >1.0 :
if abs(total_p - 1.0) > EPS :
print('WARNING: scores sum to more than 1, renormalising',context_string, file=sys.stderr)
out = [(x[0],x[1]/total_p) for x in out]
total_p = 1.0
out.append((None, 1.0-total_p))
out.sort(key = lambda x:-x[1])
return out
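# Illustrative behaviour of normalise_dist (hypothetical scores, not from a real tracker
# run): normalise_dist([('dontcare', 0.6), ('north', 0.2)]) pads the missing probability
# mass onto a None hypothesis and sorts by descending score, giving
#   [('dontcare', 0.6), ('north', 0.2), (None, 0.2)]
# If the scores summed to more than 1 + EPS, they would be renormalised (with a warning)
# before a zero-mass None hypothesis is appended.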
class Stat(object):
def __init__(self, ):
pass
def add(self, dist, true_label, this_id, independent=False):
pass
def results(self, ):
return []
def newDialog(self) :
return
class Stat_Accuracy(Stat):
def __init__(self, ):
self.N = 0.0
self.correct = 0.0
def add(self, dist, true_label, this_id, independent=False):
if independent :
top_hyp, _ = tophyp_independent(dist)
self.correct += int(top_hyp == true_label)
else :
self.correct += int(dist[0][0]== true_label)
self.N += 1
def results(self, ):
acc = None
if self.N > 0.0:
acc = self.correct/self.N
return [
("acc", self.N, acc)
]
class Stat_MRR(Stat):
def __init__(self, ):
self.N = 0.0
self.numerator = 0.0
def add(self, dist, true_label, this_id, independent=False):
recip_rank = 0.0
if independent :
ranks = []
for slot in dist:
found = False
for i, (hyp, _) in enumerate(dist[slot]):
if ((true_label == None or slot not in true_label) and hyp == None) or (true_label != None and slot in true_label and hyp == true_label[slot]) :
ranks.append(i)
found = True
break
if not found :
ranks.append(None)
if None in ranks :
recip_rank = 0.0
else :
rank = 1.0
for r in ranks:
rank *= (1+r)
recip_rank = 1.0/rank
else :
for i, (hyp, _) in enumerate(dist):
if hyp == true_label :
recip_rank = 1.0/(1.0+i)
break
self.numerator += recip_rank
self.N += 1
def results(self, ):
mrr = None
if self.N > 0.0:
mrr = self.numerator/self.N
return [
("mrr", self.N, mrr)
]
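# Worked example for Stat_MRR (hypothetical distribution): with
#   dist = [('A', 0.5), ('B', 0.3), (None, 0.2)]
# and true label 'B', the correct hypothesis sits at rank 2, so this turn adds 1/2 to the
# numerator. In the independent (per-slot) case the reciprocal rank is 1 over the product
# of the per-slot ranks, and 0 if any slot lacks the correct hypothesis altogether.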
class Stat_Probs(Stat):
def __init__(self, ):
self.N = 0.0
self.numerator_l2 = 0.0
self.numerator_brier = 0.0
self.numerator_avgp = 0.0
self.numerator_neglogp = 0.0
self.dialog_acc = []
def add(self, dist, true_label, this_id,independent=False):
if independent :
ps = []
for slot in dist:
found = False
for (hyp, score) in dist[slot]:
if ((true_label == None or slot not in true_label) and hyp == None) or (true_label != None and slot in true_label and hyp == true_label[slot]) :
ps.append(score)
found = True
if not found :
ps.append(0.0)
p = 1.0
for p_ in ps:
p *= p_
sum_q = 1-p
sum_q2 = 1.0
for slot in dist:
sum_q2 *= sum([score**2 for hyp_,score in dist[slot]])
sum_q2 = sum_q2 - p**2
self.numerator_l2 += (1-p)**2 + sum_q2
self.numerator_brier += (1-p)**2 +(sum_q)**2
self.numerator_avgp += p
self.numerator_neglogp += -math.log(max(0.0001, p))
else :
p = 0.0
qs = []
for hyp, _p in dist:
if hyp == true_label :
p = _p
else :
qs.append(_p)
self.numerator_l2 += (1-p)**2 + sum([q**2 for q in qs])
self.numerator_brier += (1-p)**2 + sum(qs)**2
self.numerator_avgp += p
self.numerator_neglogp += -math.log(max(0.0001, p))
self.N += 1
def results(self, ):
l2 = None
brier = None
avgp = None
neglogp = None
if self.N > 0.0:
l2 = self.numerator_l2/self.N
brier = self.numerator_brier/self.N
avgp = self.numerator_avgp/self.N
neglogp = self.numerator_neglogp/self.N
return [
("l2", self.N, l2),
("l2.binary", self.N, brier),
("avgp", self.N, avgp),
("neglogp", self.N, neglogp),
]
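# Worked example for Stat_Probs (hypothetical distribution): with
#   dist = [('A', 0.7), ('B', 0.3)]
# and true label 'A', p = 0.7 and qs = [0.3], so this turn contributes
#   l2        = (1 - 0.7)**2 + 0.3**2 = 0.18
#   l2.binary = (1 - 0.7)**2 + 0.3**2 = 0.18   (identical here since there is one wrong hyp)
#   avgp      = 0.7
#   neglogp   = -log(0.7), roughly 0.357
# p is floored at 0.0001 inside the log so neglogp stays finite.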
class Stat_Updates(Stat):
def __init__(self, ):
# page 10 of <NAME> et al,
# Evaluating Discourse Understanding in Spoken Dialogue Systems
self.N = 0.0
self.correct_updates = 0.0
self.update_insertions = 0.0
self.update_substitutions = 0.0
self.update_deletions = 0.0
def add(self, dist, true_label, this_id, independent=False):
if independent :
current, _ = tophyp_independent(dist)
else :
current = dist[0][0]
self.correct_updates += int((self.previous != true_label) \
and (self.previous != current) \
and (true_label == current))
self.update_insertions += int((self.previous == true_label) \
and (self.previous != current) )
self.update_substitutions += int((self.previous != true_label) \
and (self.previous != current) \
and (true_label != current))
self.update_deletions += int((self.previous != true_label) \
and (self.previous == current) )
self.previous = current
self.N += 1
def results(self, ):
acc = None
prec = None
acc_denom = (self.correct_updates+self.update_substitutions+self.update_deletions)
prec_denom = (self.correct_updates+self.update_substitutions+self.update_insertions)
if acc_denom > 0 :
acc = self.correct_updates/acc_denom
if prec_denom > 0:
prec = self.correct_updates/prec_denom
return [
("update.acc", self.N, acc),
("update.prec", self.N, prec),
]
def newDialog(self) :
self.previous = None
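# Worked example for Stat_Updates (hypothetical turn sequence): previous starts at None,
# so a first turn whose top hypothesis and true label are both 'A' counts as a correct
# update (previous != true, previous != current, true == current). If the next turn's top
# hypothesis switches to 'B' while the truth stays 'A', that is an update insertion
# (previous == true, previous != current); if the truth had moved to 'C' instead, the
# switch to 'B' would be an update substitution.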
def _changingIndices(x) :
out = [0]
value = x[0]
for i, x_value in enumerate(x) :
if x_value != value :
out.append(i)
value = x_value
return out
def _cumSum(x) :
out = []
cum = 0.0
for x_value in x:
cum += x_value
out.append(cum)
return out
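# Illustrative behaviour of the two helpers above (hypothetical inputs):
#   _changingIndices([5, 5, 3, 3, 2]) -> [0, 2, 4]   (indices where the value changes)
#   _cumSum([1, 2, 3])                -> [1.0, 3.0, 6.0]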
class Stat_ROC(Stat):
def __init__(self):
self.data = []
self.N = 0
def add(self, dist, true_label, this_id, independent=False):
if independent :
top_hyp, score = tophyp_independent(dist)
label = top_hyp == true_label
else :
label = dist[0][0]== true_label
score = dist[0][1]
self.data.append(
(label, score)
)
self.N = len(self.data)
def results(self, ):
self._calculateROC()
return (
('roc.v1_eer', self.N, self.EER() ),
('roc.v1_ca05', self.N, self.CA_at_FA(0.05) ),
('roc.v1_ca10', self.N, self.CA_at_FA(0.10) ),
('roc.v1_ca20', self.N, self.CA_at_FA(0.20) ),
('roc.v2_ca05', self.N, self.CA_at_FA(0.05,version=2) ),
('roc.v2_ca10', self.N, self.CA_at_FA(0.10,version=2) ),
('roc.v2_ca20', self.N, self.CA_at_FA(0.20,version=2) ),
)
def EER(self) :
if (self.N < 2):
return None
for (t,ta,fa,tr,fr) in self.roc_curve:
if (fr >= fa):
return float(fr + fa)/self.N
print('Could not find a place where FR >= FA')
return None
def _calculateROC(self) :
self.data.sort(key=lambda x:-x[1])
N = len(self.data)
if N <= 2 :
self.roc_curve = []
return
indices = _changingIndices([x[1] for x in self.data[:-1]]) + [N-1]
# true/false accepts/rejects
cumsum = _cumSum([int(x[0]) for x in self.data])
N_true = sum([int(x[0]) for x in self.data])
N_false = N-N_true
frs = [N_true-cumsum[i] for i in indices]
trs = [N_false-i+cumsum[i]-1 for i in indices]
fas = [i-cumsum[i]+1 for i in indices]
tas = [cumsum[i] for i in indices]
thresholds = [self.data[i][1] for i in indices]
self.roc_curve = zip(thresholds,tas, fas, trs, frs)
self.roc_curve.reverse() # so thresholds are increasing
def CA_at_FA(self,fa_thresh,version=1):
assert (version in [1,2]), "Don't know version %s" % (version)
if (self.N < 2):
return None
if (version == 1):
for (t,ta,fa,tr,fr) in self.roc_curve:
if (float(fa)/self.N <= fa_thresh):
return float(ta)/self.N
print('Could not find a place where FA <= FA_THRESH')
return None
else:
for (t,ta,fa,tr,fr) in self.roc_curve:
try :
ta_rate = ta/(ta + fr)
fa_rate = fa/(fa + tr)
if (fa_rate <= fa_thresh):
return ta_rate
except ZeroDivisionError :
continue
return None
def DumpROCToFile(self,filename):
pass
def DumpScoresToFile(self,filename):
print("creating", filename)
f = open(filename,'w')
print('label,score', file=f)
for label, score in self.data:
print('%s,%s'%(label,score), file=f)
f.close()
def tophyp_independent(dists) :
top_hyp = None
top_score = 1.0
for slot in dists :
top,score = dists[slot][0]
if top != None:
if top_hyp == None :
top_hyp = {}
top_hyp[slot] = top
top_score *= score
return (top_hyp, top_score)
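# Illustrative behaviour of tophyp_independent (hypothetical per-slot distributions):
#   dists = {'food': [('indian', 0.8), (None, 0.2)],
#            'area': [('north', 0.6), (None, 0.4)]}
# gives ({'food': 'indian', 'area': 'north'}, 0.48): each slot contributes its top
# non-None value to the joint hypothesis, and the score multiplies the top scores
# together (0.8 * 0.6). A slot whose best hypothesis is None adds no key.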
if (__name__ == '__main__'):
main(sys.argv)
|
nemo_text_processing/inverse_text_normalization/de/taggers/fraction.py | hamjam/NeMo | 4,145 | 12675825 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
convert_space,
delete_space,
)
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class FractionFst(GraphFst):
"""
Finite state transducer for classifying fraction
e.g. ein halb -> tokens { name: "1/2" }
e.g. ein ein halb -> tokens { name: "1 1/2" }
e.g. drei zwei ein hundertstel -> tokens { name: "3 2/100" }
Args:
itn_cardinal_tagger: ITN cardinal tagger
tn_fraction_verbalizer: TN fraction verbalizer
"""
def __init__(self, itn_cardinal_tagger: GraphFst, tn_fraction_verbalizer: GraphFst, deterministic: bool = True):
super().__init__(name="fraction", kind="classify", deterministic=deterministic)
tagger = tn_fraction_verbalizer.graph.invert().optimize()
delete_optional_sign = pynini.closure(pynutil.delete("negative: ") + pynini.cross("\"true\" ", "-"), 0, 1)
delete_integer_marker = (
pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
) @ itn_cardinal_tagger.graph_no_exception
delete_numerator_marker = (
pynutil.delete("numerator: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
) @ itn_cardinal_tagger.graph_no_exception
delete_denominator_marker = (
pynutil.insert('/')
+ (pynutil.delete("denominator: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\""))
@ itn_cardinal_tagger.graph_no_exception
)
graph = (
pynini.closure(delete_integer_marker + pynini.accep(" "), 0, 1)
+ delete_numerator_marker
+ delete_space
+ delete_denominator_marker
).optimize()
verbalizer = delete_optional_sign + graph
self.graph = tagger @ verbalizer
graph = pynutil.insert("name: \"") + convert_space(self.graph) + pynutil.insert("\"")
self.fst = graph.optimize()
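# Rough sketch of the plumbing above (illustrative; exact coverage of spoken forms comes
# from the TN fraction verbalizer grammar): the inverted TN graph yields a serialized
# token string with the fields
#   [negative: "true"] [integer_part: "..."] numerator: "..." denominator: "..."
# and the small verbalizer built here deletes those markers, runs each field through the
# ITN cardinal graph, and joins the pieces as [-][integer ]numerator/denominator, e.g.
# "3 2/100" as in the class docstring.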
|
pypy/module/_pypyjson/test/test__pypyjson.py | nanjekyejoannah/pypy | 333 | 12675829 | # -*- encoding: utf-8 -*-
import pytest
from pypy.module._pypyjson.interp_decoder import JSONDecoder, Terminator, MapBase
from rpython.rtyper.lltypesystem import lltype, rffi
class TestJson(object):
def test_skip_whitespace(self):
s = ' hello '
dec = JSONDecoder(self.space, s)
assert dec.pos == 0
assert dec.skip_whitespace(0) == 3
assert dec.skip_whitespace(3) == 3
assert dec.skip_whitespace(8) == len(s)
dec.close()
def test_json_map(self):
m = Terminator(self.space)
w_a = self.space.newutf8("a", 1)
w_b = self.space.newutf8("b", 1)
w_c = self.space.newutf8("c", 1)
m1 = m.get_next(w_a, '"a"', 0, 3, m)
assert m1.w_key == w_a
assert m1.nextmap_first is None
assert m1.key_repr == '"a"'
assert m1.key_repr_cmp('"a": 123', 0)
assert not m1.key_repr_cmp('b": 123', 0)
assert m.nextmap_first.w_key == w_a
m2 = m.get_next(w_a, '"a"', 0, 3, m)
assert m2 is m1
m3 = m.get_next(w_b, '"b"', 0, 3, m)
assert m3.w_key == w_b
assert m3.nextmap_first is None
assert m3.key_repr == '"b"'
assert m.nextmap_first is m1
m4 = m3.get_next(w_c, '"c"', 0, 3, m)
assert m4.w_key == w_c
assert m4.nextmap_first is None
assert m4.key_repr == '"c"'
assert m3.nextmap_first is m4
def test_json_map_get_index(self):
m = Terminator(self.space)
w_a = self.space.newutf8("a", 1)
w_b = self.space.newutf8("b", 1)
w_c = self.space.newutf8("c", 1)
m1 = m.get_next(w_a, 'a"', 0, 2, m)
assert m1.get_index(w_a) == 0
assert m1.get_index(w_b) == -1
m2 = m.get_next(w_b, 'b"', 0, 2, m)
assert m2.get_index(w_b) == 0
assert m2.get_index(w_a) == -1
m3 = m2.get_next(w_c, 'c"', 0, 2, m)
assert m3.get_index(w_b) == 0
assert m3.get_index(w_c) == 1
assert m3.get_index(w_a) == -1
def test_jsonmap_fill_dict(self):
from collections import OrderedDict
m = Terminator(self.space)
space = self.space
w_a = space.newutf8("a", 1)
w_b = space.newutf8("b", 1)
w_c = space.newutf8("c", 1)
m1 = m.get_next(w_a, 'a"', 0, 2, m)
m2 = m1.get_next(w_b, 'b"', 0, 2, m)
m3 = m2.get_next(w_c, 'c"', 0, 2, m)
d = OrderedDict()
m3.fill_dict(d, [space.w_None, space.w_None, space.w_None])
assert list(d) == [w_a, w_b, w_c]
def test_repeated_key_get_next(self):
m = Terminator(self.space)
w_a = self.space.newutf8("a", 1)
w_b = self.space.newutf8("b", 1)
w_c = self.space.newutf8("c", 1)
m1 = m.get_next(w_a, '"a"', 0, 3, m)
m1 = m1.get_next(w_b, '"b"', 0, 3, m)
m1 = m1.get_next(w_c, '"c"', 0, 3, m)
m2 = m1.get_next(w_a, '"a"', 0, 3, m)
assert m2 is None
def test_decode_key_map(self):
m = Terminator(self.space)
m_diff = Terminator(self.space)
for s1 in ["abc", "1001" * 10, u"ä".encode("utf-8")]:
s = ' "%s" "%s" "%s"' % (s1, s1, s1)
dec = JSONDecoder(self.space, s)
assert dec.pos == 0
m1 = dec.decode_key_map(dec.skip_whitespace(0), m)
assert m1.w_key._utf8 == s1
assert m1.key_repr == '"%s"' % s1
# check caching on w_key level
m2 = dec.decode_key_map(dec.skip_whitespace(dec.pos), m_diff)
assert m1.w_key is m2.w_key
# check caching on map level
m3 = dec.decode_key_map(dec.skip_whitespace(dec.pos), m_diff)
assert m3 is m2
dec.close()
def test_decode_string_caching(self):
for s1 in ["abc", u"ä".encode("utf-8")]:
s = '"%s" "%s" "%s"' % (s1, s1, s1)
dec = JSONDecoder(self.space, s)
dec.MIN_SIZE_FOR_STRING_CACHE = 0
assert dec.pos == 0
w_x = dec.decode_string(1)
w_y = dec.decode_string(dec.skip_whitespace(dec.pos) + 1)
assert w_x is not w_y
# check caching
w_z = dec.decode_string(dec.skip_whitespace(dec.pos) + 1)
assert w_z is w_y
dec.close()
def _make_some_maps(self):
# base -> m1 -> m2 -> m3
# \-> m4
w_a = self.space.newutf8("a", 1)
w_b = self.space.newutf8("b", 1)
w_c = self.space.newutf8("c", 1)
w_d = self.space.newutf8("d", 1)
base = Terminator(self.space)
base.instantiation_count = 6
m1 = base.get_next(w_a, 'a"', 0, 2, base)
m2 = m1.get_next(w_b, 'b"', 0, 2, base)
m3 = m2.get_next(w_c, 'c"', 0, 2, base)
m4 = m2.get_next(w_d, 'd"', 0, 2, base)
return base, m1, m2, m3, m4
# unit tests for map state transitions
def test_fringe_to_useful(self):
base, m1, m2, m3, m4 = self._make_some_maps()
base.instantiation_count = 6
assert m1.state == MapBase.FRINGE
m1.instantiation_count = 6
assert m2.state == MapBase.PRELIMINARY
m2.instantiation_count = 6
assert m3.state == MapBase.PRELIMINARY
m3.instantiation_count = 2
assert m2.nextmap_first is m3
assert m4.state == MapBase.PRELIMINARY
m4.instantiation_count = 4
m1.mark_useful(base)
assert m1.state == MapBase.USEFUL
assert m2.state == MapBase.USEFUL
assert m3.state == MapBase.FRINGE
assert m4.state == MapBase.USEFUL
assert m2.nextmap_first is m4
assert m1.number_of_leaves == 2
base._check_invariants()
def test_number_of_leaves(self):
w_x = self.space.newutf8("x", 1)
base, m1, m2, m3, m4 = self._make_some_maps()
assert base.number_of_leaves == 2
assert m1.number_of_leaves == 2
assert m2.number_of_leaves == 2
assert m3.number_of_leaves == 1
assert m4.number_of_leaves == 1
m5 = m2.get_next(w_x, 'x"', 0, 2, base)
assert base.number_of_leaves == 3
assert m1.number_of_leaves == 3
assert m2.number_of_leaves == 3
assert m5.number_of_leaves == 1
def test_number_of_leaves_after_mark_blocked(self):
w_x = self.space.newutf8("x", 1)
base, m1, m2, m3, m4 = self._make_some_maps()
m5 = m2.get_next(w_x, 'x"', 0, 2, base)
assert base.number_of_leaves == 3
m2.mark_blocked(base)
assert base.number_of_leaves == 1
def test_mark_useful_cleans_fringe(self):
base, m1, m2, m3, m4 = self._make_some_maps()
base.instantiation_count = 6
assert m1.state == MapBase.FRINGE
m1.instantiation_count = 6
m2.instantiation_count = 6
m3.instantiation_count = 2
m4.instantiation_count = 4
assert base.current_fringe == {m1: None}
m1.mark_useful(base)
assert base.current_fringe == {m3: None}
def test_cleanup_fringe(self):
w_a = self.space.newutf8("a", 1)
w_b = self.space.newutf8("b", 1)
w_c = self.space.newutf8("c", 1)
w_d = self.space.newutf8("d", 1)
base = Terminator(self.space)
base.instantiation_count = 6
m1 = base.get_next(w_a, 'a"', 0, 2, base)
m2 = base.get_next(w_b, 'b"', 0, 2, base)
m3 = base.get_next(w_c, 'c"', 0, 2, base)
m4 = base.get_next(w_d, 'd"', 0, 2, base)
m5 = m4.get_next(w_a, 'a"', 0, 2, base)
base.instantiation_count = 7
m1.instantiation_count = 2
m2.instantiation_count = 2
m3.instantiation_count = 2
m4.instantiation_count = 1
m5.instantiation_count = 1
assert base.current_fringe == dict.fromkeys([m1, m2, m3, m4])
base.cleanup_fringe()
assert base.current_fringe == dict.fromkeys([m1, m2, m3])
assert m4.state == MapBase.BLOCKED
assert m4.nextmap_first is None
assert m4.nextmap_all is None
assert m5.state == MapBase.BLOCKED
assert m5.nextmap_first is None
assert m5.nextmap_all is None
def test_deal_with_blocked(self):
w_a = self.space.newutf8("a", 1)
w_b = self.space.newutf8("b", 1)
w_c = self.space.newutf8("c", 1)
space = self.space
s = '{"a": 1, "b": 2, "c": 3}'
dec = JSONDecoder(space, s)
dec.startmap = base = Terminator(space)
m1 = base.get_next(w_a, 'a"', 0, 2, base)
m2 = m1.get_next(w_b, 'b"', 0, 2, base)
m2.mark_blocked(base)
w_res = dec.decode_object(1)
assert space.int_w(space.len(w_res)) == 3
assert space.int_w(space.getitem(w_res, w_a)) == 1
assert space.int_w(space.getitem(w_res, w_b)) == 2
assert space.int_w(space.getitem(w_res, w_c)) == 3
dec.close()
def test_deal_with_blocked_number_of_leaves(self):
w_a = self.space.newutf8("a", 1)
w_b = self.space.newutf8("b", 1)
w_x = self.space.newutf8("x", 1)
w_u = self.space.newutf8("u", 1)
space = self.space
base = Terminator(space)
m1 = base.get_next(w_a, 'a"', 0, 2, base)
m2 = m1.get_next(w_b, 'b"', 0, 2, base)
m2.get_next(w_x, 'x"', 0, 2, base)
m2.get_next(w_u, 'u"', 0, 2, base)
assert base.number_of_leaves == 2
m2.mark_blocked(base)
assert base.number_of_leaves == 1
def test_instantiation_count(self):
m = Terminator(self.space)
dec = JSONDecoder(self.space, '"abc" "def"')
m1 = dec.decode_key_map(dec.skip_whitespace(0), m)
m2 = dec.decode_key_map(dec.skip_whitespace(6), m1)
m1 = dec.decode_key_map(dec.skip_whitespace(0), m)
m2 = dec.decode_key_map(dec.skip_whitespace(6), m1)
m1 = dec.decode_key_map(dec.skip_whitespace(0), m)
assert m1.instantiation_count == 3
assert m2.instantiation_count == 2
dec.close()
class AppTest(object):
spaceconfig = {"objspace.usemodules._pypyjson": True}
def test_raise_on_unicode(self):
import _pypyjson
raises(TypeError, _pypyjson.loads, u"42")
def test_decode_constants(self):
import _pypyjson
assert _pypyjson.loads('null') is None
raises(ValueError, _pypyjson.loads, 'nul')
raises(ValueError, _pypyjson.loads, 'nu')
raises(ValueError, _pypyjson.loads, 'n')
raises(ValueError, _pypyjson.loads, 'nuXX')
#
assert _pypyjson.loads('true') is True
raises(ValueError, _pypyjson.loads, 'tru')
raises(ValueError, _pypyjson.loads, 'tr')
raises(ValueError, _pypyjson.loads, 't')
raises(ValueError, _pypyjson.loads, 'trXX')
#
assert _pypyjson.loads('false') is False
raises(ValueError, _pypyjson.loads, 'fals')
raises(ValueError, _pypyjson.loads, 'fal')
raises(ValueError, _pypyjson.loads, 'fa')
raises(ValueError, _pypyjson.loads, 'f')
raises(ValueError, _pypyjson.loads, 'falXX')
def test_decode_string(self):
import _pypyjson
res = _pypyjson.loads('"hello"')
assert res == u'hello'
assert type(res) is unicode
def test_decode_string_utf8(self):
import _pypyjson
s = u'àèìòù'
res = _pypyjson.loads('"%s"' % s.encode('utf-8'))
assert res == s
def test_skip_whitespace(self):
import _pypyjson
s = ' "hello" '
assert _pypyjson.loads(s) == u'hello'
s = ' "hello" extra'
raises(ValueError, "_pypyjson.loads(s)")
def test_unterminated_string(self):
import _pypyjson
s = '"hello' # missing the trailing "
raises(ValueError, "_pypyjson.loads(s)")
def test_escape_sequence(self):
import _pypyjson
assert _pypyjson.loads(r'"\\"') == u'\\'
assert _pypyjson.loads(r'"\""') == u'"'
assert _pypyjson.loads(r'"\/"') == u'/'
assert _pypyjson.loads(r'"\b"') == u'\b'
assert _pypyjson.loads(r'"\f"') == u'\f'
assert _pypyjson.loads(r'"\n"') == u'\n'
assert _pypyjson.loads(r'"\r"') == u'\r'
assert _pypyjson.loads(r'"\t"') == u'\t'
def test_escape_sequence_in_the_middle(self):
import _pypyjson
s = r'"hello\nworld"'
assert _pypyjson.loads(s) == "hello\nworld"
def test_unterminated_string_after_escape_sequence(self):
import _pypyjson
s = r'"hello\nworld' # missing the trailing "
raises(ValueError, "_pypyjson.loads(s)")
def test_escape_sequence_unicode(self):
import _pypyjson
s = r'"\u1234"'
assert _pypyjson.loads(s) == u'\u1234'
def test_escape_sequence_mixed_with_utf8(self):
import _pypyjson
utf8 = u'ä"'.encode("utf-8")
assert _pypyjson.loads(r'"abc\\' + utf8) == u'abc\\ä'
assert _pypyjson.loads(r'"abc\"' + utf8) == u'abc"ä'
assert _pypyjson.loads(r'"def\u1234' + utf8) == u'def\u1234ä'
def test_invalid_utf_8(self):
import _pypyjson
s = '"\xe0"' # this is an invalid UTF8 sequence inside a string
raises(UnicodeDecodeError, "_pypyjson.loads(s)")
def test_decode_numeric(self):
import sys
import _pypyjson
def check(s, val):
res = _pypyjson.loads(s)
assert type(res) is type(val)
assert res == val
#
check('42', 42)
check('-42', -42)
check('42.123', 42.123)
check('42E0', 42.0)
check('42E3', 42000.0)
check('42E-1', 4.2)
check('42E+1', 420.0)
check('42.123E3', 42123.0)
check('0', 0)
check('-0', 0)
check('0.123', 0.123)
check('0E3', 0.0)
check('5E0001', 50.0)
check(str(1 << 32), 1 << 32)
check(str(1 << 64), 1 << 64)
#
x = str(sys.maxint+1) + '.123'
check(x, float(x))
x = str(sys.maxint+1) + 'E1'
check(x, float(x))
x = str(sys.maxint+1) + 'E-1'
check(x, float(x))
#
check('1E400', float('inf'))
# these are non-standard but supported by CPython json
check('Infinity', float('inf'))
check('-Infinity', float('-inf'))
def test_nan(self):
import math
import _pypyjson
res = _pypyjson.loads('NaN')
assert math.isnan(res)
def test_decode_numeric_invalid(self):
import _pypyjson
def error(s):
raises(ValueError, _pypyjson.loads, s)
#
error(' 42 abc')
error('.123')
error('+123')
error('12.')
error('12.-3')
error('12E')
error('12E-')
error('0123') # numbers can't start with 0
def test_decode_object(self):
import _pypyjson
assert _pypyjson.loads('{}') == {}
assert _pypyjson.loads('{ }') == {}
#
s = '{"hello": "world", "aaa": "bbb"}'
assert _pypyjson.loads(s) == {'hello': 'world',
'aaa': 'bbb'}
assert _pypyjson.loads(s) == {'hello': 'world',
'aaa': 'bbb'}
raises(ValueError, _pypyjson.loads, '{"key"')
raises(ValueError, _pypyjson.loads, '{"key": 42')
assert _pypyjson.loads('{"neighborhood": ""}') == {
"neighborhood": ""}
def test_decode_object_nonstring_key(self):
import _pypyjson
raises(ValueError, "_pypyjson.loads('{42: 43}')")
def test_decode_array(self):
import _pypyjson
assert _pypyjson.loads('[]') == []
assert _pypyjson.loads('[ ]') == []
assert _pypyjson.loads('[1]') == [1]
assert _pypyjson.loads('[1, 2]') == [1, 2]
raises(ValueError, "_pypyjson.loads('[1: 2]')")
raises(ValueError, "_pypyjson.loads('[1, 2')")
raises(ValueError, """_pypyjson.loads('["extra comma",]')""")
def test_unicode_surrogate_pair(self):
import _pypyjson
expected = u'z\U0001d120x'
res = _pypyjson.loads('"z\\ud834\\udd20x"')
assert res == expected
def test_unicode_not_a_surrogate_pair(self):
import _pypyjson
res = _pypyjson.loads('"z\\ud800\\ud800x"')
assert list(res) == [u'z', u'\ud800', u'\ud800', u'x']
res = _pypyjson.loads('"z\\udbff\\uffffx"')
assert list(res) == [u'z', u'\udbff', u'\uffff', u'x']
res = _pypyjson.loads('"z\\ud800\\ud834\\udd20x"')
assert res == u'z\ud800\U0001d120x'
res = _pypyjson.loads('"z\\udc00\\udc00x"')
assert list(res) == [u'z', u'\udc00', u'\udc00', u'x']
def test_lone_surrogate(self):
import _pypyjson
json = '{"a":"\\uD83D"}'
res = _pypyjson.loads(json)
assert res == {u'a': u'\ud83d'}
def test_cache_keys(self):
import _pypyjson
json = '[{"a": 1}, {"a": 2}]'
res = _pypyjson.loads(json)
assert res == [{u'a': 1}, {u'a': 2}]
def test_huge_map(self):
import _pypyjson
import __pypy__
s = '{' + ",".join('"%s": %s' % (i, i) for i in range(200)) + '}'
res = _pypyjson.loads(s)
assert len(res) == 200
assert __pypy__.strategy(res) == "UnicodeDictStrategy"
def test_tab_in_string_should_fail(self):
import _pypyjson
# http://json.org/JSON_checker/test/fail25.json
s = '["\ttab\tcharacter\tin\tstring\t"]'
raises(ValueError, "_pypyjson.loads(s)")
def test_raw_encode_basestring_ascii(self):
import _pypyjson
def check(s):
s = _pypyjson.raw_encode_basestring_ascii(s)
assert type(s) is str
return s
assert check("") == ""
assert check(u"") == ""
assert check("abc ") == "abc "
assert check(u"abc ") == "abc "
raises(UnicodeDecodeError, check, "\xc0")
assert check("\xc2\x84") == "\\u0084"
assert check("\xf0\x92\x8d\x85") == "\\ud808\\udf45"
assert check(u"\ud808\udf45") == "\\ud808\\udf45"
assert check(u"\U00012345") == "\\ud808\\udf45"
assert check("a\"c") == "a\\\"c"
assert check("\\\"\b\f\n\r\t") == '\\\\\\"\\b\\f\\n\\r\\t'
assert check("\x07") == "\\u0007"
def test_error_position(self):
import _pypyjson
test_cases = [
('[,', "No JSON object could be decoded: unexpected ',' at char 1"),
('{"spam":[}', "No JSON object could be decoded: unexpected '}' at char 9"),
('[42:', "Unexpected ':' when decoding array (char 3)"),
('[42 "spam"', "Unexpected '\"' when decoding array (char 4)"),
('[42,]', "No JSON object could be decoded: unexpected ']' at char 4"),
('{"spam":[42}', "Unexpected '}' when decoding array (char 11)"),
('["]', 'Unterminated string starting at char 1'),
('["spam":', "Unexpected ':' when decoding array (char 7)"),
('[{]', "Key name must be string at char 2"),
('"\\X"', "Invalid \\escape: X (char 1)"),
('"\\ "', "Invalid \\escape: (char 1)"),
('"\\', "Invalid \\escape: (char 1)"),
]
for inputtext, errmsg in test_cases:
exc = raises(ValueError, _pypyjson.loads, inputtext)
assert str(exc.value) == errmsg
def test_repeated_key(self):
import _pypyjson
a = '{"abc": "4", "k": 1, "k": 2}'
d = _pypyjson.loads(a)
assert d == {u"abc": u"4", u"k": 2}
a = '{"abc": "4", "k": 1, "k": 1.5, "c": null, "k": 2}'
d = _pypyjson.loads(a)
assert d == {u"abc": u"4", u"c": None, u"k": 2}
|
instant/tests/base.py | synw/django-instant | 103 | 12675844 | from pathlib import Path
from django.test import TestCase
from django.test.client import RequestFactory
from django.contrib.auth.models import User
from django.conf import settings
from instant.models import Channel
class InstantBaseTest(TestCase):
user = None
def setUp(self):
self.factory = RequestFactory()
self.user = User.objects.create_user( # type: ignore
"myuser", "<EMAIL>", "password"
)
self.superuser = User.objects.create_superuser( # type: ignore
"superuser", "<EMAIL>", "password"
)
@property
def base_dir(self) -> Path:
d = settings.BASE_DIR
if isinstance(d, str):
d = Path(d)
return d
def reset(self):
for chan in Channel.objects.all():
chan.delete()
|
software/fpga/ov3/sim/test_sdramctlmux.py | twam/ov_ftdi | 247 | 12675862 | from migen import *
from ovhw.sdram_mux import SDRAMMux
import unittest
import sim.sdram_test_util
class SDRAMMultiTester(sim.sdram_test_util.SDRAMUTFramework):
class TestBench(Module):
"""
Test module consisting of the Emulated SDRAM + Controller complex, and an
SDRAM mux.
For each generator passed as an argument, a master will be created and
attached to the SDRAM mux
"""
def __init__(self, sdram_modname):
self.submodules.cpx = sim.sdram_test_util.TestSDRAMComplex(sdram_modname)
self.submodules.mux = SDRAMMux(self.cpx.hostif)
self.complete = False
# Create and attach the masters
self.masters = []
def bind(self, gen):
master = sim.sdram_test_util.TestMaster(
self.mux.getPort(), stop_on_finish=False)
master.setSeq(gen(master))
self.masters.append(master)
setattr(self.submodules, "master_%d" % len(self.masters), master)
def do_simulation(self, selfp):
# Finalize simulation when all masters have run to completion
self.complete = all(m.complete for m in self.masters)
if self.complete:
raise StopSimulation
def setUp(self):
self.tb = self.TestBench(self.SDRAM_MODNAME)
def _run_gen(self, gens, n=25000):
# wrapper: each element of gens is bound to its own master; if the element is a
# list, its generators are run in sequence on that master
def wrap(gl):
if not isinstance(gl, list):
gl = [gl]
return lambda master: (g(master) for g in gl)
for gen in gens:
self.tb.bind(wrap(gen))
# We defer the inner setup to here as the fragment emitted will
# depend on how many master generators we're using
self._inner_setup()
with self.sim as sim:
sim.run(n)
def tearDown(self):
# Test ran to completion
self.assertTrue(all(m.complete for m in self.tb.masters))
class SDRAMMultiMasterTests:
def testBytes0(self):
self._run_gen(
[
[
self._rw(0, 128),
self._rw(800, 10),
self._rw(900, 10),
],
[
self._rw(128,128),
self._wait(1000),
self._rw(700,10),
],
self._rw(256, 128),
self._rw(256+128,128),
])
def testBytesEndOfMem(self):
self._run_gen([
self._rw(480, 50),
self._rw(480+512, 50)
])
def testWriteTermination(self):
self._run_gen([
self._overlap(80,100),
self._overlap(80+512,100),
])
def testWriteEOMTermination(self):
self._run_gen([
self._overlap(500,13),
self._overlap(1012, 13)
])
def testBackBackReads(self):
self._run_gen([
self._b2b_read(0,128),
self._b2b_read(512,128)
])
def testBackBackReadsOVL(self):
self._run_gen([
self._b2b_read(512-64,128),
self._b2b_read(1024-64,128)
])
class SDRAMMuxTests_mt48lc16m16a2(SDRAMMultiMasterTests,
sim.sdram_test_util.SDRAMTestSequences,
SDRAMMultiTester,
unittest.TestCase):
SDRAM_MODNAME = "mt48lc16m16a2"
if __name__ == "__main__":
unittest.main()
|
components/partition_table/tests/gen_esp32part_tests.py | ghsecuritylab/git_hub_other | 104 | 12675868 | #!/usr/bin/env python
import unittest
import struct
import csv
import sys
import subprocess
import tempfile
import os
sys.path.append("..")
from gen_esp32part import *
SIMPLE_CSV = """
# Name,Type,SubType,Offset,Size,Flags
factory,0,2,65536,1048576,
"""
LONGER_BINARY_TABLE = ""
# type 0x00, subtype 0x00,
# offset 64KB, size 1MB
LONGER_BINARY_TABLE += "\xAA\x50\x00\x00" + \
"\x00\x00\x01\x00" + \
"\x00\x00\x10\x00" + \
"factory\0" + ("\0"*8) + \
"\x00\x00\x00\x00"
# type 0x01, subtype 0x20,
# offset 0x110000, size 128KB
LONGER_BINARY_TABLE += "\xAA\x50\x01\x20" + \
"\x00\x00\x11\x00" + \
"\x00\x02\x00\x00" + \
"data" + ("\0"*12) + \
"\x00\x00\x00\x00"
# type 0x10, subtype 0x00,
# offset 0x150000, size 1MB
LONGER_BINARY_TABLE += "\xAA\x50\x10\x00" + \
"\x00\x00\x15\x00" + \
"\x00\x10\x00\x00" + \
"second" + ("\0"*10) + \
"\x00\x00\x00\x00"
LONGER_BINARY_TABLE += "\xFF" * 32
def _strip_trailing_ffs(binary_table):
"""
Strip all FFs down to the last 32 bytes (terminating entry)
"""
while binary_table.endswith("\xFF"*64):
binary_table = binary_table[0:len(binary_table)-32]
return binary_table
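# Illustrative behaviour of _strip_trailing_ffs: to_binary() output is typically padded
# with 0xFF, so three 32-byte entries followed by many 0xFF bytes get trimmed back until
# only one 32-byte run of 0xFF (the terminating entry) is left -- which is why
# LONGER_BINARY_TABLE above ends with exactly "\xFF" * 32.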
class CSVParserTests(unittest.TestCase):
def test_simple_partition(self):
table = PartitionTable.from_csv(SIMPLE_CSV)
self.assertEqual(len(table), 1)
self.assertEqual(table[0].name, "factory")
self.assertEqual(table[0].type, 0)
self.assertEqual(table[0].subtype, 2)
self.assertEqual(table[0].offset, 65536)
self.assertEqual(table[0].size, 1048576)
def test_require_type(self):
csv = """
# Name,Type, SubType,Offset,Size
ihavenotype,
"""
with self.assertRaisesRegexp(InputError, "type"):
PartitionTable.from_csv(csv)
def test_type_subtype_names(self):
csv_magicnumbers = """
# Name, Type, SubType, Offset, Size
myapp, 0, 0,, 0x100000
myota_0, 0, 0x10,, 0x100000
myota_1, 0, 0x11,, 0x100000
myota_15, 0, 0x1f,, 0x100000
mytest, 0, 0x20,, 0x100000
myota_status, 1, 0,, 0x100000
"""
csv_nomagicnumbers = """
# Name, Type, SubType, Offset, Size
myapp, app, factory,, 0x100000
myota_0, app, ota_0,, 0x100000
myota_1, app, ota_1,, 0x100000
myota_15, app, ota_15,, 0x100000
mytest, app, test,, 0x100000
myota_status, data, ota,, 0x100000
"""
# make two equivalent partition tables, one using
# magic numbers and one using shortcuts. Ensure they match
magic = PartitionTable.from_csv(csv_magicnumbers)
magic.verify()
nomagic = PartitionTable.from_csv(csv_nomagicnumbers)
nomagic.verify()
self.assertEqual(nomagic["myapp"].type, 0)
self.assertEqual(nomagic["myapp"].subtype, 0)
self.assertEqual(nomagic["myapp"], magic["myapp"])
self.assertEqual(nomagic["myota_0"].type, 0)
self.assertEqual(nomagic["myota_0"].subtype, 0x10)
self.assertEqual(nomagic["myota_0"], magic["myota_0"])
self.assertEqual(nomagic["myota_15"], magic["myota_15"])
self.assertEqual(nomagic["mytest"], magic["mytest"])
self.assertEqual(nomagic["myota_status"], magic["myota_status"])
#self.assertEqual(nomagic.to_binary(), magic.to_binary())
def test_unit_suffixes(self):
csv = """
# Name, Type, Subtype, Offset, Size
one_megabyte, app, factory, 32k, 1M
"""
t = PartitionTable.from_csv(csv)
t.verify()
self.assertEqual(t[0].offset, 32*1024)
self.assertEqual(t[0].size, 1*1024*1024)
def test_default_offsets(self):
csv = """
# Name, Type, Subtype, Offset, Size
first, app, factory,, 1M
second, data, 0x15,, 1M
minidata, data, 0x40,, 32K
otherapp, app, factory,, 1M
"""
t = PartitionTable.from_csv(csv)
# 'first'
self.assertEqual(t[0].offset, 0x010000) # 64KB boundary as it's an app image
self.assertEqual(t[0].size, 0x100000) # Size specified in CSV
# 'second'
self.assertEqual(t[1].offset, 0x110000) # prev offset+size
self.assertEqual(t[1].size, 0x100000) # Size specified in CSV
# 'minidata'
self.assertEqual(t[2].offset, 0x210000)
# 'otherapp'
self.assertEqual(t[3].offset, 0x220000) # 64KB boundary as it's an app image
def test_negative_size_to_offset(self):
csv = """
# Name, Type, Subtype, Offset, Size
first, app, factory, 0x10000, -2M
second, data, 0x15, , 1M
"""
t = PartitionTable.from_csv(csv)
t.verify()
# 'first'
self.assertEqual(t[0].offset, 0x10000) # in CSV
self.assertEqual(t[0].size, 0x200000 - t[0].offset) # Up to 2M
# 'second'
self.assertEqual(t[1].offset, 0x200000) # prev offset+size
def test_overlapping_offsets_fail(self):
csv = """
first, app, factory, 0x100000, 2M
second, app, ota_0, 0x200000, 1M
"""
t = PartitionTable.from_csv(csv)
with self.assertRaisesRegexp(InputError, "overlap"):
t.verify()
class BinaryOutputTests(unittest.TestCase):
def test_binary_entry(self):
csv = """
first, 0x30, 0xEE, 0x100400, 0x300000
"""
t = PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 64)
self.assertEqual('\xAA\x50', tb[0:2]) # magic
self.assertEqual('\x30\xee', tb[2:4]) # type, subtype
eo, es = struct.unpack("<LL", tb[4:12])
self.assertEqual(eo, 0x100400) # offset
self.assertEqual(es, 0x300000) # size
def test_multiple_entries(self):
csv = """
first, 0x30, 0xEE, 0x100400, 0x300000
second,0x31, 0xEF, , 0x100000
"""
t = PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 96)
self.assertEqual('\xAA\x50', tb[0:2])
self.assertEqual('\xAA\x50', tb[32:34])
def test_encrypted_flag(self):
csv = """
# Name, Type, Subtype, Offset, Size, Flags
first, app, factory,, 1M, encrypted
"""
t = PartitionTable.from_csv(csv)
self.assertTrue(t[0].encrypted)
tb = _strip_trailing_ffs(t.to_binary())
tr = PartitionTable.from_binary(tb)
self.assertTrue(tr[0].encrypted)
class BinaryParserTests(unittest.TestCase):
def test_parse_one_entry(self):
# type 0x30, subtype 0xee,
# offset 1MB, size 2MB
entry = "\xAA\x50\x30\xee" + \
"\x00\x00\x10\x00" + \
"\x00\x00\x20\x00" + \
"0123456789abc\0\0\0" + \
"\x00\x00\x00\x00" + \
"\xFF" * 32
# verify that parsing 32 bytes as a table
# or as a single Definition gives the same result
t = PartitionTable.from_binary(entry)
self.assertEqual(len(t), 1)
t[0].verify()
e = PartitionDefinition.from_binary(entry[:32])
self.assertEqual(t[0], e)
e.verify()
self.assertEqual(e.type, 0x30)
self.assertEqual(e.subtype, 0xEE)
self.assertEqual(e.offset, 0x100000)
self.assertEqual(e.size, 0x200000)
self.assertEqual(e.name, "0123456789abc")
def test_multiple_entries(self):
t = PartitionTable.from_binary(LONGER_BINARY_TABLE)
t.verify()
self.assertEqual(3, len(t))
self.assertEqual(t[0].type, PartitionDefinition.APP_TYPE)
self.assertEqual(t[0].name, "factory")
self.assertEqual(t[1].type, PartitionDefinition.DATA_TYPE)
self.assertEqual(t[1].name, "data")
self.assertEqual(t[2].type, 0x10)
self.assertEqual(t[2].name, "second")
round_trip = _strip_trailing_ffs(t.to_binary())
self.assertEqual(round_trip, LONGER_BINARY_TABLE)
def test_bad_magic(self):
bad_magic = "OHAI" + \
"\x00\x00\x10\x00" + \
"\x00\x00\x20\x00" + \
"0123456789abc\0\0\0" + \
"\x00\x00\x00\x00"
with self.assertRaisesRegexp(InputError, "Invalid magic bytes"):
PartitionTable.from_binary(bad_magic)
def test_bad_length(self):
bad_length = "OHAI" + \
"\x00\x00\x10\x00" + \
"\x00\x00\x20\x00" + \
"0123456789"
with self.assertRaisesRegexp(InputError, "32 bytes"):
PartitionTable.from_binary(bad_length)
class CSVOutputTests(unittest.TestCase):
def test_output_simple_formatting(self):
table = PartitionTable.from_csv(SIMPLE_CSV)
as_csv = table.to_csv(True)
c = csv.reader(as_csv.split("\n"))
# first two lines should start with comments
self.assertEqual(c.next()[0][0], "#")
self.assertEqual(c.next()[0][0], "#")
row = c.next()
self.assertEqual(row[0], "factory")
self.assertEqual(row[1], "0")
self.assertEqual(row[2], "2")
self.assertEqual(row[3], "0x10000") # reformatted as hex
self.assertEqual(row[4], "0x100000") # also hex
# round trip back to a PartitionTable and check is identical
roundtrip = PartitionTable.from_csv(as_csv)
self.assertEqual(roundtrip, table)
def test_output_smart_formatting(self):
table = PartitionTable.from_csv(SIMPLE_CSV)
as_csv = table.to_csv(False)
c = csv.reader(as_csv.split("\n"))
# first two lines should start with comments
self.assertEqual(c.next()[0][0], "#")
self.assertEqual(c.next()[0][0], "#")
row = c.next()
self.assertEqual(row[0], "factory")
self.assertEqual(row[1], "app")
self.assertEqual(row[2], "2")
self.assertEqual(row[3], "0x10000")
self.assertEqual(row[4], "1M")
# round trip back to a PartitionTable and check is identical
roundtrip = PartitionTable.from_csv(as_csv)
self.assertEqual(roundtrip, table)
class CommandLineTests(unittest.TestCase):
def test_basic_cmdline(self):
try:
binpath = tempfile.mktemp()
csvpath = tempfile.mktemp()
# copy binary contents to temp file
with open(binpath, 'w') as f:
f.write(LONGER_BINARY_TABLE)
# run gen_esp32part.py to convert binary file to CSV
subprocess.check_call([sys.executable, "../gen_esp32part.py",
binpath, csvpath])
# reopen the CSV and check the generated binary is identical
with open(csvpath, 'r') as f:
from_csv = PartitionTable.from_csv(f.read())
self.assertEqual(_strip_trailing_ffs(from_csv.to_binary()), LONGER_BINARY_TABLE)
# run gen_esp32part.py to convert the CSV to binary again
subprocess.check_call([sys.executable, "../gen_esp32part.py",
csvpath, binpath])
# assert that file reads back as identical
with open(binpath, 'rb') as f:
binary_readback = f.read()
binary_readback = _strip_trailing_ffs(binary_readback)
self.assertEqual(binary_readback, LONGER_BINARY_TABLE)
finally:
for path in binpath, csvpath:
try:
os.remove(path)
except OSError:
pass
if __name__ =="__main__":
unittest.main()
|
examples/27_get_module_properties.py | drbitboy/pylogix | 350 | 12675875 |
'''
the following import is only necessary because eip.py is not in this directory
'''
import sys
sys.path.append('..')
'''
Get the properties of a module in the specified slot
In this example, we're getting the slot 0 module
properties
'''
from pylogix import PLC
with PLC() as comm:
comm.IPAddress = '192.168.1.9'
prop = comm.GetModuleProperties(0)
print(prop.Value.ProductName, prop.Value.Revision)
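# A variation on the same call (hypothetical slot numbers; empty slots may not return a
# populated Value, so real code should check prop.Status before printing):
# with PLC() as comm:
#     comm.IPAddress = '192.168.1.9'
#     for slot in range(4):
#         prop = comm.GetModuleProperties(slot)
#         print(slot, prop.Status, prop.Value.ProductName)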
|
plotdevice/lib/cocoa.py | plotdevice/plotdevice | 110 | 12675900 | # all the NSBits and NSPieces
from Quartz import CALayer, CGColorCreate, CGContextAddPath, CGContextAddRect, CGContextBeginPath, \
CGContextBeginTransparencyLayer, CGContextBeginTransparencyLayerWithRect, \
CGContextClip, CGContextClipToMask, CGContextDrawPath, CGContextEOClip, \
CGContextEndTransparencyLayer, CGContextRestoreGState, CGContextSaveGState, \
CGContextSetAlpha, CGContextSetBlendMode, CGContextSetFillColorWithColor, \
CGContextSetLineCap, CGContextSetLineDash, CGContextSetLineJoin, CGContextSetLineWidth, \
CGContextSetStrokeColorWithColor, CGImageGetBitsPerComponent, CGImageGetBitsPerPixel, \
CGImageGetBytesPerRow, CGImageGetDataProvider, CGImageGetHeight, CGImageGetWidth, \
CGImageMaskCreate, CGPathAddCurveToPoint, CGPathAddLineToPoint, CGPathCloseSubpath, \
CGPathCreateCopy, CGPathCreateMutable, CGPathRelease, CGPathMoveToPoint, kCGBlendModeClear, \
kCGBlendModeColor, kCGBlendModeColorBurn, kCGBlendModeColorDodge, kCGBlendModeCopy, \
kCGBlendModeDarken, kCGBlendModeDestinationAtop, kCGBlendModeDestinationIn, \
kCGBlendModeDestinationOut, kCGBlendModeDestinationOver, kCGBlendModeDifference, \
kCGBlendModeExclusion, kCGBlendModeHardLight, kCGBlendModeHue, kCGBlendModeLighten, \
kCGBlendModeLuminosity, kCGBlendModeMultiply, kCGBlendModeNormal, kCGBlendModeOverlay, \
kCGBlendModePlusDarker, kCGBlendModePlusLighter, kCGBlendModeSaturation, \
kCGBlendModeScreen, kCGBlendModeSoftLight, kCGBlendModeSourceAtop, kCGBlendModeSourceIn, \
kCGBlendModeSourceOut, kCGBlendModeXOR, kCGLineCapButt, kCGLineCapRound, kCGLineCapSquare, \
kCGLineJoinBevel, kCGLineJoinMiter, kCGLineJoinRound, kCGPathFill, kCGPathFillStroke, \
kCGPathStroke, kCIInputImageKey
from AppKit import NSAlert, NSApp, NSApplication, NSApplicationActivationPolicyAccessory, \
NSBackingStoreBuffered, NSBeep, NSBezierPath, NSBitmapImageRep, NSBorderlessWindowMask, \
NSButton, NSCenterTextAlignment, NSChangeAutosaved, NSChangeCleared, NSChangeDone, \
NSChangeReadOtherContents, NSChangeRedone, NSChangeUndone, NSClipView, \
NSClosePathBezierPathElement, NSColor, NSColorSpace, NSCompositeCopy, \
NSCompositeSourceOver, NSContentsCellMask, NSCriticalAlertStyle, NSCursor, \
NSCurveToBezierPathElement, NSDeviceCMYKColorSpace, NSDeviceRGBColorSpace, NSDocument, \
NSDocumentController, NSFindPboard, NSFixedPitchFontMask, NSFocusRingTypeExterior, \
NSFont, NSFontDescriptor, NSFontManager, NSForegroundColorAttributeName, NSGIFFileType, \
NSGradient, NSGraphicsContext, NSGraphiteControlTint, NSImage, NSImageCacheNever, \
NSImageCompressionFactor, NSImageInterpolationHigh, NSItalicFontMask, NSJPEGFileType, \
NSJustifiedTextAlignment, NSLayoutManager, NSLeftTextAlignment, NSLineBreakByWordWrapping, \
NSLineToBezierPathElement, NSMenu, NSMenuItem, NSMiniControlSize, NSMoveToBezierPathElement, \
NSMutableParagraphStyle, NSNib, NSOffState, NSOnState, NSPDFPboardType, NSPNGFileType, \
NSParagraphStyleAttributeName, NSPasteboard, NSPasteboardURLReadingContentsConformToTypesKey, \
NSPasteboardURLReadingFileURLsOnlyKey, NSPostScriptPboardType, NSPrintOperation, NSRectFill, \
NSRectFillUsingOperation, NSResponder, NSRightTextAlignment, NSSavePanel, NSScreen, NSShadow, \
NSSlider, NSSmallControlSize, NSSplitView, NSStringPboardType, NSSwitchButton, NSTIFFFileType, \
NSTIFFPboardType, NSTextContainer, NSTextField, NSTextFinder, NSTextStorage, NSTextView, \
NSTrackingActiveInActiveApp, NSTrackingArea, NSTrackingMouseEnteredAndExited, NSUnboldFontMask, \
NSUnitalicFontMask, NSUnionRect, NSView, NSViewFrameDidChangeNotification, NSWindow, \
NSWindowBackingLocationVideoMemory, NSWindowController, NSWorkspace, NSKernAttributeName
from Foundation import CIAffineTransform, CIColorMatrix, CIContext, CIFilter, CIImage, \
CIVector, Foundation, NO, NSAffineTransform, NSAffineTransformStruct, \
NSAttributedString, NSAutoreleasePool, NSBundle, NSData, NSDate, NSDateFormatter, \
NSFileCoordinator, NSFileHandle, NSFileHandleDataAvailableNotification, NSIntersectionRange, \
NSHeight, NSInsetRect, NSIntersectionRect, NSLocale, NSLog, NSMacOSRomanStringEncoding, \
NSMakeRange, NSMidX, NSMidY, NSMutableAttributedString, NSNotificationCenter, NSObject,\
NSOffsetRect, NSOperationQueue, NSPoint, NSRect, NSRectFromString, NSSelectorFromString, \
NSSize, NSString, NSStringFromRect, NSTimeZone, NSTimer, NSURL, NSUserDefaults, \
NSUTF8StringEncoding, NSWidth
from WebKit import WebView
from objc import IBOutlet, IBAction |
tests/python/unittest/test_contrib_krprod.py | Vikas-kum/incubator-mxnet | 228 | 12675911 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import numpy as np
import mxnet as mx
from numpy.testing import assert_allclose
def assert_mx_allclose(A, B, **kwds):
return assert_allclose(A.asnumpy(), B.asnumpy(), **kwds)
def test_krprod_one_input():
A = mx.nd.arange(1,9).reshape((2,4))
out = mx.nd.khatri_rao(A)
assert_mx_allclose(out, A, rtol=1e-12)
def test_krprod_two_inputs():
A = mx.nd.arange(1,7).reshape((3,2))
B = mx.nd.arange(1,3).reshape((1,2))
out = mx.nd.khatri_rao(A, B)
expected = mx.nd.array([[1,4],[3,8],[5,12]])
assert_mx_allclose(out, expected, rtol=1e-12)
A = mx.nd.arange(1,7).reshape((3,2))
B = mx.nd.arange(1,9).reshape((4,2))
out = mx.nd.khatri_rao(A, B)
expected = mx.nd.array([[1,4],[3,8],[5,12],[7,16],[3,8],[9,16],[15,24],
[21,32],[5,12],[15,24],[25,36],[35,48]])
assert_mx_allclose(out, expected, rtol=1e-12)
def test_krprod_three_inputs():
A = mx.nd.arange(1,7).reshape((3,2))
B = mx.nd.arange(1,3).reshape((1,2))
C = mx.nd.arange(1,5).reshape((2,2))
out = mx.nd.khatri_rao(A, B, C)
expected = mx.nd.array([[1,8],[3,16],[3,16],[9,32],[5,24],[15,48]])
assert_mx_allclose(out, expected, rtol=1e-12)
out_AB = mx.nd.khatri_rao(A, B)
out = mx.nd.khatri_rao(out_AB, C)
assert_mx_allclose(out, expected, rtol=1e-12)
out_BC = mx.nd.khatri_rao(B, C)
out = mx.nd.khatri_rao(A, out_BC)
assert_mx_allclose(out, expected, rtol=1e-12)
|
tools/perf/page_sets/blank_page_with_large_profile.py | google-ar/chromium | 777 | 12675931 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from page_sets import pregenerated_large_profile_shared_state
from telemetry.page import page as page_module
from telemetry import story
class BlankPageWithLargeProfile(page_module.Page):
def __init__(self, url, page_set):
super(BlankPageWithLargeProfile, self).__init__(
url=url, page_set=page_set,
shared_page_state_class=pregenerated_large_profile_shared_state.
PregeneratedLargeProfileSharedState)
class BlankPageSetWithLargeProfile(story.StorySet):
"""A single blank page loaded with a large profile."""
def __init__(self):
super(BlankPageSetWithLargeProfile, self).__init__()
self.AddStory(BlankPageWithLargeProfile(
'file://blank_page/blank_page.html', self))
|
GP/Assets/code/extensions/IOV2.py | EnviralDesign/GeoPix | 134 | 12675973 |
import os
TDF = op.TDModules.mod.TDFunctions
TDJ = op.TDModules.mod.TDJSON
# define IO globals.
# IO_BACKEND = op.IOV2
# IO_TEMPLATES = op.IO_templates
# IO_SCENE = parent.IO.par.Scenegeocomp.eval()
# IO_TOOLS = op.IO_logic
# IO_VIEWPORT = op.IOV2.op('Graph')
# IO_NOTIFICATIONS = op.IO_notif_center
# IO_RENDERPICK = op.IOV2.op('Graph').par.Graphrenderpick.eval()
# define EDITOR globals.
# EDITOR_BACKEND = op.sceneOutliner
# define IO misc variables.
# Undeletable_Io_Nodes = op.IOV2.par.Undeletablenodes.eval().split(' ')
'''
NEW way to select IO relative global OPs
parent.IO.IO_BACKEND()
parent.IO.IO_TEMPLATES()
parent.IO.IO_SCENE()
parent.IO.IO_TOOLS()
parent.IO.IO_VIEWPORT()
parent.IO.IO_NOTIFICATIONS()
parent.IO.IO_RENDERPICK()
parent.IO.EDITOR_BACKEND()
parent.IO.Undeletable_Io_Nodes()
'''
class IOV2:
def __init__(self, ownerComp):
# The component to which this extension is attached
self.ownerComp = ownerComp
self.DraggedItem = None # promoted attribute
self.SelectedItems = [] # promoted attribute
TDF.createProperty(self, 'HoveredItem', value=None, dependable=True, readOnly=False)
def RESET(self):
op.IO_viewport.Reset()
nodesToNotDelete = op.IOV2.UNDELETABLE_IO_NODES()
for each in op.IO_scene.children:
if each.name not in nodesToNotDelete:
each.destroy()
op.IOV2.par.Scenegeocomp = '/software/top/IOV2/IO_ROOT'
# reset UI
op.IoGraphUI.RESET()
return
def TriggerActiveMacros(self):
if parent.IO.IO_VIEWPORT().par.Isgraphroot.eval():
sel = self.GetSelectedObjects()
sel = [ each for each in sel if each.par.Objtype == 10 ]
for each in sel:
each.Start()
else:
parent.IO.IO_SCENE().parent.obj.Start()
def StopActiveMacros(self):
if parent.IO.IO_VIEWPORT().par.Isgraphroot.eval():
sel = self.GetSelectedObjects()
sel = [ each for each in sel if each.par.Objtype == 10 ]
for each in sel:
each.Stop()
else:
parent.IO.IO_SCENE().parent.obj.Stop()
def StopAllMacros(self):
sel = op.IO_scene.findChildren(depth=1)
sel = [ each for each in sel if each.par.Objtype == 10 ]
for each in sel:
each.Stop()
def GetNamesByType(self,tag=None):
alreadyExistingName = []
if tag != None:
alreadyExisting = parent.IO.IO_BACKEND().GetObjectsByTagPrefix([tag])
alreadyExistingName = [x.par.Name.eval() for x in alreadyExisting]
return alreadyExistingName
def GetMaxOffsets(self,tag=None):
alreadyExistingXpos = 0
if tag != None:
alreadyExisting = parent.IO.IO_BACKEND().GetObjectsByTagPrefix([tag])
alreadyExistingName = [x.par.Name.eval() for x in alreadyExisting]
if len(alreadyExisting):
alreadyExistingXpos += max([x.par.Tx.val for x in alreadyExisting]) + 100
return alreadyExistingXpos
def IsFileReal(self, filePath):
return os.path.isfile(filePath)
def GetPathSafely(self, filePath):
filePath = tdu.expandPath(filePath)
if self.IsFileReal(filePath):
return filePath
else:
return ''
def GetAllUpstreamNodes_______________________(self, InitialNodeList ):
		##### Recursive function: searches upstream and finds all IO nodes that are eventual inputs
		##### of the given nodes.
def getUpstreamComps(comp):
returnList = []
if len(comp.inputs) > 0:
for x in comp.inputs:
returnList += [ x.parent() ]
return returnList
def recursiveUpstreamSearch(listOfComps = []):
returnlist = []
if len(listOfComps) > 0:
for comp in listOfComps:
returnlist += [ comp ]
upstreamComps = recursiveUpstreamSearch( getUpstreamComps( comp ) )
returnlist += upstreamComps
return returnlist
allUpstreamNodes = recursiveUpstreamSearch(InitialNodeList)
allUpstreamNodes = list(set(allUpstreamNodes))
return allUpstreamNodes
def GetOriginNodesUpstream___________________(self, InitialNodeList ):
allUpstreamNodes = self.GetAllUpstreamNodes(InitialNodeList)
return [ x for x in allUpstreamNodes if len(x.inputs) == 0 ]
def CalculateSelectedObjectsBounds(self, optionalOps=[]):
if len(optionalOps) == 0:
sel = self.GetSelected()
else:
sel = optionalOps
returnDict = {}
returnDict['minx'] = 0
returnDict['miny'] = 0
returnDict['maxx'] = 0
returnDict['maxy'] = 0
returnDict['maxDim'] = 0
returnDict['centerx'] = 0
returnDict['centery'] = 0
if len(sel) > 0:
# compute min/max stuff manually using dims of geo comp, and the user TX /TY
boundsStuff = [ x.op('GEO').computeBounds() for x in sel ]
wd2 = [ x.size[0]/2 for x in boundsStuff ]
hd2 = [ x.size[1]/2 for x in boundsStuff ]
minx = min([ x.par.Tx - wd2[i] for i,x in enumerate(sel) ])
miny = min([ x.par.Ty - hd2[i] for i,x in enumerate(sel) ])
maxx = max([ x.par.Tx + wd2[i] for i,x in enumerate(sel) ])
maxy = max([ x.par.Ty + hd2[i] for i,x in enumerate(sel) ])
maxDim = max( (maxx - minx) , (maxy - miny) )
# print(maxDim)
centerx = (minx + maxx) / 2
centery = (miny + maxy) / 2
# returnDict = {}
returnDict['minx'] = minx
returnDict['miny'] = miny
returnDict['maxx'] = maxx
returnDict['maxy'] = maxy
returnDict['maxDim'] = maxDim
returnDict['centerx'] = centerx
returnDict['centery'] = centery
# print(returnDict)
return returnDict
def GetAllObjects(self):
f = parent.IO.IO_SCENE().findChildren(type=geometryCOMP, depth=1, maxDepth=1, parName="Objtype")
return f
def GetSelectedObjects(self):
f = parent.IO.IO_SCENE().findChildren(type=geometryCOMP, depth=1, maxDepth=1, parName="Objtype")
f = [x for x in f if x.par.Selected.eval() == True]
return f
def GetObjectsByObjtype(self, macroOp=None, typeList=[]):
macroOp = op(macroOp)
if macroOp == None:
f = parent.IO.IO_SCENE().findChildren(type=geometryCOMP, depth=1, maxDepth=1 )
else:
f = macroOp.op('GRAPH').findChildren(type=geometryCOMP, depth=1, maxDepth=1 )
f = [x for x in f if x.par.Objtype.eval() in typeList]
return f
def GetObjectsFromIoNames(self, nameList=[]):
# this gets us objects from a search of the currently active path.
f = parent.IO.IO_SCENE().findChildren(type=geometryCOMP, maxDepth=1)
# print(f)
f = [x for x in f if x.par.Name.eval() in nameList]
return f
def GetObjectsByBounds(self, min, max):
txMin = min[0]
tyMin = min[1]
txMax = max[0]
tyMax = max[1]
f = parent.IO.IO_SCENE().findChildren(type=geometryCOMP, depth=1)
# f = [ x for x in f if x.par.Objtype not in [5] ] # get rid of macros.
f = [ x for x in f if x.par.render == True ] # get rid of things that are invisible - not rendering.
filteredItems = []
for item in f:
mat = item.worldTransform
s,r,t = mat.decompose()
tx,ty = t[0],t[1]
if tx > txMin and tx < txMax and ty > tyMin and ty < tyMax:
filteredItems += [ item ]
return filteredItems
def Select(self, ops = [None]):
# gather all objects
found = parent.IO.IO_SCENE().findChildren(parName="Selected", depth=1)
# deselect all objects.
for x in found:
x.par.Selected = 0
x.selected = 0
# get only real ops
ops = [ op(x) for x in ops if op(x) ]
for thisOp in ops:
# select the provided object.
if hasattr( thisOp.par , 'Selected' ):
thisOp.par.Selected = 1
thisOp.selected = 1
else:
debug('could not set Selected par.. probably safe to ignore unless this happens often.')
parent.IO.IO_BACKEND().par.Lastselected = ops[-1] if len(ops) else ''
return
def SelectAdd(self, ops = [None]):
# get only real ops.
ops = [ op(x) for x in ops if op(x) ]
for thisOp in ops:
# select the provided object.
thisOp.par.Selected = 1
thisOp.selected = 1
parent.IO.IO_BACKEND().par.Lastselected = thisOp
return
def SelectRemove(self, ops = [None]):
# get only real ops.
ops = [ op(x) for x in ops if op(x) ]
for thisOp in ops:
# select the provided object.
thisOp.par.Selected = 0
thisOp.selected = 0
if parent.IO.IO_BACKEND().par.Lastselected.eval() == thisOp:
parent.IO.IO_BACKEND().par.Lastselected = ''
return
def DeselectAll(self):
# gather all objects
found = parent.IO.IO_SCENE().findChildren(parName="Selected", depth=1)
# deselect all objects.
for x in found:
x.par.Selected = 0
x.selected = 0
parent.IO.IO_BACKEND().par.Lastselected = ''
return
def Refresh_IO_Connections(self, target=None):
target = op(target)
if target == None:
target = parent.IO.IO_SCENE()
GatherIOs = target.findChildren(tags=['_GatherIO_'])
for x in GatherIOs:
x.run()
def Get_Node_Coordinates(self, nodeList = [] , coordinateSpace = "World" ):
'''
Given a list of valid TD geomCOMP objects, this function returns a list
of world x/y coordinates.
ie. worldCoords = parent.IO.IO_BACKEND().Get_Node_Coordinates([parent.obj] , coordinateSpace="World" )
'''
coordList = []
if coordinateSpace == "World":
for node in nodeList:
mat = node.worldTransform
s,r,t = mat.decompose()
coordList += [ [t[0],t[1]] ]
return coordList
elif coordinateSpace == "Local":
for node in nodeList:
mat = node.localTransform
s,r,t = mat.decompose()
coordList += [ [t[0],t[1]] ]
return coordList
else:
debug('coordinate space type "%s" not supported'%(coordinateSpace))
return None
def Set_Node_Coordinates(self, nodeList = [] , coordList = [] , coordinateSpace = "World" ):
'''
		Given a list of valid TD geomCOMP objects, this function sets their x/y
		positions in the chosen coordinate space.
		ie. parent.IO.IO_BACKEND().Set_Node_Coordinates( nodeList=[parent.obj] , coordList=[] , coordinateSpace="World" )
'''
if coordinateSpace == "World":
for i,node in enumerate(nodeList):
# nodeMat = tdu.Matrix()
parentMat = node.parent().worldTransform
parentMat.invert()
parentMat.translate(coordList[i][0],coordList[i][1],0)
s,r,t = parentMat.decompose()
node.par.Tx = t[0]
# node.par.Ty = t[1]
# node.nodeCenterX = node.par.Tx * 2
# node.nodeCenterY = node.par.Ty * 2
elif coordinateSpace == "Local":
for i,node in enumerate(nodeList):
node.par.Tx =coordList[i][0]
node.par.Ty = coordList[i][1]
# node.nodeCenterX = node.par.Tx * 2
# node.nodeCenterY = node.par.Ty * 2
else:
debug('coordinate space type "%s" not supported'%(coordinateSpace))
return None
def DragClipToIO(self, ClipPath=None, baseName=None, extension=None):
'''
this function facilitates importing of video clips by dragging them from
windows explorer to the IO graph
'''
if parent.IO.IO_VIEWPORT().par.Isgraphroot:
op.NOTIFV2.Notify('Content type nodes can only be created from within a Macro. To use these, please create a Macro first, then click -Enter Macro- at the top, and then create your content graph.')
else:
if ClipPath != None:
if extension in tdu.fileTypes['image']+tdu.fileTypes['movie']:
nameStr = tdu.legalName(baseName)
templateFileInNode = op.IO_TEMPLATES_V3.op("TEX/Load/ExternalFile")
newlyMadeOp = parent.IO.CreateNode(templateFileInNode.id)
newlyMadeOp.par.Clip = ClipPath
newlyMadeOp.par.Clipreload.pulse()
if extension in tdu.fileTypes['audio']:
nameStr = tdu.legalName(baseName)
templateFileInNode = op.IO_TEMPLATES_V3.op("AUDIO/Load_/AudioFile")
newlyMadeOp = parent.IO.CreateNode(templateFileInNode.id)
newlyMadeOp.par.Clip = ClipPath
newlyMadeOp.par.Clipreload.pulse()
return
def UniquifyObjName(self, someOpName):
uniqueName = None
if someOpName:
found = parent.IO.IO_SCENE().findChildren(parName="Name", depth=1, maxDepth=1)
foundNames = [x.par.Name.eval() for x in found]
# print(someOpName, foundNames)
# if someOpName in foundNames:
# if someOpName[-1].isalpha():
# someOpName[1:-1] + int(float(someOpName[-1]))+1
uniqueName = mod.globalFuncs.uniquifyString( someOpName, foundNames )
# someOp.par.Name = uniqueName
return uniqueName
def DeleteSelected(self):
'''
Delete selected IO objects.
'''
# gather all objects
found = parent.IO.IO_SCENE().findChildren(parName="Selected", depth=1, maxDepth=1)
found = [x for x in found if x.par.Selected.eval() == 1]
parent.IO.EDITOR_BACKEND().mod.tdUtils_V2.killAllRuns()
GlobalNodeCount = 0
# '''
# deselect all objects.
for x in found:
if x.name not in parent.IO.UNDELETABLE_IO_NODES():
if x.parent() == op.IO_scene:
GlobalNodeCount += 1
x.destroy()
else:
# debug('Not allowed to delete system nodes.')
op.NOTIFV2.Notify('Not allowed to delete system nodes.')
parent.IO.IO_BACKEND().par.Lastselected = ''
if GlobalNodeCount > 0:
# print('Some nodes that were deleted were global nodes, attempting to prune all macro inputs')
self.MacroInput_PruneInvalidConnections_GLOBAL()
# '''
return
def StoreSelected(self, usePlaceTarget = False):
if usePlaceTarget == False:
f = self.GetSelected()
else:
f = [ parent.IO.IO_BACKEND().par.Placetarget.eval() ]
f = [ x for x in f if op(x) != None ]
self.SelectedItems = f
return
def GetSelected(self):
'''
returns list of selected items.
'''
found = parent.IO.IO_SCENE().findChildren(parName="Selected", depth=1)
found = [x for x in found if x.par.Selected == 1]
# self.SelectedItems = found
return found
def TranslateNodes_Init(self):
self.StoreNodePositions()
self.StoreSelected()
def StoreNodePositions(self, usePlaceTarget = False):
'''
Stores x/y pos so that we can transform or reset nodes.
'''
# macroX = 0
# macroY = 0
if usePlaceTarget == False:
found = self.GetSelected()
else:
# graph = parent.IO.IO_VIEWPORT()
# if graph.par.Isgraphroot.eval() == False:
# macroX = - parent.IO.IO_VIEWPORT().par.Scenegeocomp.eval().parent.obj.par.Tx
# macroY = - parent.IO.IO_VIEWPORT().par.Scenegeocomp.eval().parent.obj.par.Ty
found = [ parent.IO.IO_BACKEND().par.Placetarget.eval() ]
found = [ x for x in found if op(x) != None ]
storedItems = {}
startPos = {"Tx":parent.IO.IO_RENDERPICK()['pik_tx'].eval() , "Ty":parent.IO.IO_RENDERPICK()['pik_ty'].eval() }
for x in found:
# print(x.par.Tx, x.par.Ty)
storedItems[x.name] = {'Tx':x.par.Tx.eval(), 'Ty':x.par.Ty.eval()}
# print(startPos)
# print(storedItems)
parent.IO.IO_VIEWPORT().store('storedItems', storedItems)
parent.IO.IO_VIEWPORT().store('startPos', startPos)
return
def TranslateNodes_Move(self, usePlaceTarget = False):
'''
		moves selected nodes by the amount the IO_RENDERPICK coordinates have moved.
happens based on the stored start positions, so can be called blindly
until the transform is accepted or cancelled.
'''
# macroX = 0
# macroY = 0
# if usePlaceTarget == True:
# graph = parent.IO.IO_VIEWPORT()
# if graph.par.Isgraphroot.eval() == False:
# macroX = - parent.IO.IO_VIEWPORT().par.Scenegeocomp.eval().parent.obj.par.Tx
# macroY = - parent.IO.IO_VIEWPORT().par.Scenegeocomp.eval().parent.obj.par.Ty
currentPikPos = {"Tx":parent.IO.IO_RENDERPICK()['pik_tx'].eval() , "Ty":parent.IO.IO_RENDERPICK()['pik_ty'].eval() }
storedItems = parent.IO.IO_VIEWPORT().fetch('storedItems', {})
startPos = parent.IO.IO_VIEWPORT().fetch('startPos', currentPikPos)
# found = self.GetSelected()
found = self.SelectedItems
for x in found:
if op(x) != None:
fetchedPos = storedItems[x.name]
x.par.Tx = -(startPos['Tx'] - currentPikPos['Tx']) + fetchedPos['Tx']
x.par.Ty = -(startPos['Ty'] - currentPikPos['Ty']) + fetchedPos['Ty']
# x.nodeCenterX = x.par.Tx * 2
# x.nodeCenterY = x.par.Ty * 2
def TranslateNodes_Finish(self):
'''
finishes the translate, by doing nothing, just turning off transform mode.
'''
# found = self.SelectedItems
# print(found)
# for x in found:
# x.nodeX = x.par.Tx
# x.nodeY = x.par.Ty
# debug('ending translate...')
parent.IO.IO_VIEWPORT().par.Translatemode = 0
parent.IO.IO_TOOLS().par.Mode = 0
def TranslateNodes_Cancel(self):
'''
		cancels the translate by restoring the old values saved when the translate was initiated.
'''
# debug('canceling Translate...')
# if we're even in translate mode to begin with...
if parent.IO.IO_VIEWPORT().par.Translatemode:
storedItems = parent.IO.IO_VIEWPORT().fetch('storedItems', {})
# foundw = self.GetSelected()
found = self.SelectedItems
for x in found:
fetchedPos = storedItems[x.name]
x.par.Tx = fetchedPos['Tx']
x.par.Ty = fetchedPos['Ty']
parent.IO.IO_VIEWPORT().par.Translatemode = 0
parent.IO.IO_TOOLS().par.Mode = 0
def DeleteAllObjects(self, typeIgnore=[]):
# gather all objects
found = parent.IO.IO_SCENE().findChildren(parName="Selected", depth=1, maxDepth=1)
parent.IO.EDITOR_BACKEND().mod.tdUtils_V2.killAllRuns()
for x in found:
if x.name not in parent.IO.UNDELETABLE_IO_NODES():
x.destroy()
else:
# if x.name == "Default_0":
# x.nodeCenterX = -200
# x.nodeCenterY = 0
# x.par.Tx = -100
# x.par.Ty = 0
# debug('Not allowed to delete system nodes.')
# op.NOTIFV2.Notify('Not allowed to delete system nodes.')
pass
parent.IO.IO_BACKEND().par.Lastselected = ''
return
def DuplicateSelectedObjects(self):
sel = self.GetSelected()
translationDict = {}
if len(sel):
newSel = []
before = set(parent.IO.IO_SCENE().children)
ui.copyOPs( sel )
ui.pasteOPs( parent.IO.IO_SCENE() )
after = set(parent.IO.IO_SCENE().children)
newSel = list(after.difference( before ))
for new in newSel:
new.par.Name = self.UniquifyObjName(new.par.Name.eval())
boundsDict = self.CalculateSelectedObjectsBounds( optionalOps = sel )
xSize = 0 # no value for x size, since we want to offset duplicates DOWN.
ySize = ((boundsDict['maxy']-boundsDict['miny']) + 10) * -1
# offset duplicated objects to be non overlapping by bounding size.
for newOp in newSel:
newOp.par.Tx += xSize
newOp.par.Ty += ySize
newOp.nodeX += xSize*2
newOp.nodeY += ySize*2
for newlyMadeOp in newSel:
# IoGatherScripts = newlyMadeOp.findChildren(tags=['_GatherIO_'])
# for x in IoGatherScripts:
# x.run()
self.Refresh_IO_Connections(newlyMadeOp)
# update selection.
parent.IO.IO_BACKEND().par.Lastselected = newSel[-1]
self.Select(newSel)
return
def Node_Disconnect(self, connectionsTab, A ):
A += 1
a_SOURCE = connectionsTab[A,'Name'].val
try:
a_path = op( connectionsTab[A,'path'] ).parent.obj
except:
a_path = op( connectionsTab[A,'path'] )
a_connId = connectionsTab[A,'connectorID']
a_dir = connectionsTab[A,'direction']
try:
if a_dir == "in":
a_path.inputConnectors[a_connId].disconnect()
elif a_dir == "out":
a_path.outputConnectors[a_connId].disconnect()
except:
op.NOTIFV2.Notify('Could not disconnect : %s'%(a_path) )
return
def Node_Connect(self, connectionsTab, A, B):
A += 1
B += 1
a_SOURCE = connectionsTab[A,'Name'].val
try:
a_path = op( connectionsTab[A,'path'] ).parent.obj
except:
a_path = op( connectionsTab[A,'path'] )
# print(op( connectionsTab[A,'path'] ))
# inputOpPath = '/'.join(connectionsTab[B,'path'].val.split('/')[0:-1])
B_DEST = op(connectionsTab[B,'path'].val)
try:
# b_path = op( inputOpPath ).parent.obj
b_path = op( B_DEST ).parent.obj
except:
# b_path = op( inputOpPath )
b_path = op( B_DEST )
# print(b_path)
a_type = connectionsTab[A,'GP_type']
b_type = connectionsTab[B,'GP_type']
a_connId = connectionsTab[A,'connectorID']
b_connId = connectionsTab[B,'connectorID']
a_dir = connectionsTab[A,'direction']
b_dir = connectionsTab[B,'direction']
canWeConnect = 1
# print(a_path,b_path)
if a_path != None and b_path != None:
# first check, are we connecting two connectors on the same object?
# we're not allowed to do this.
if a_path == b_path:
canWeConnect = 0
# we also need to make sure we're connecting like operators. chop-chop or top-top
if a_type != b_type:
canWeConnect = 0
op.NOTIFV2.Notify('You can only connect similar inputs and outputs.')
# if user tried to connect output to output or input to input
if a_dir == b_dir:
canWeConnect = 0
op.NOTIFV2.Notify('You can only connect out->in or in<-out')
# connect it!!!
if canWeConnect == 1:
if a_dir == "out":
a_path.outputConnectors[a_connId].connect(b_path.inputConnectors[b_connId])
elif a_dir == "in":
b_path.outputConnectors[b_connId].connect(a_path.inputConnectors[a_connId])
# print( ' Connecting! ' if canWeConnect else "can't connect..." )
# disconnect
elif a_path != None and b_path == None:
if a_dir == "out":
a_path.outputConnectors[a_connId].disconnect()
elif a_dir == "in":
a_path.inputConnectors[a_connId].disconnect()
else:
pass
return
#############################################################################################
########################### begin macro OUTPUT functions ####################################
#############################################################################################
def GetParentMacroRefIfExists(self, someOp ):
return someOp.parent.obj.parent.obj if someOp.parent.obj.parent.obj.name != "IO_ROOT" else None
def GetOutputsByNameFromMacro(self, macroOp=None, nameList=[] ):
assert macroOp != None, "You must provide a valid macro operator for this function to work."
# this gets us objects from a search of the currently active path.
f = parent.IO.IO_SCENE().findChildren(type=geometryCOMP, maxDepth=1, depth=1)
f = [ x for x in f if x.par.Selected == 1]
f = [ x for x in f if x.par.Objtype == 10 ]
assert len(f) <= 1, "Should not be trying to retrieve output nodes for more than 1 macro at a time... "
f2 = macroOp.op('GRAPH').findChildren( type=geometryCOMP , depth=1, maxDepth=1, parName="Objtype" )
f2 = [ x for x in f2 if x.par.Objtype.eval() in [ 14,15,16 ] ]
f2 = [ x for x in f2 if x.par.Name in nameList ]
return f2
def GetOutputsIndexDictFromMacro(self, macroOp=None ):
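		# Builds a {output Name: index} dict of the macro's output nodes
		# (Objtype 14/15/16), indexed top to bottom by their node Y position.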
assert macroOp != None, "You must provide a valid macro operator for this function to work."
f = macroOp.op('GRAPH').findChildren( type=geometryCOMP , depth=1, maxDepth=1, parName="Objtype")
f = [ x for x in f if x.par.Objtype.eval() in [ 14,15,16 ] ]
f = reversed(sorted(f, key=lambda node: node.nodeY))
f2 = { myOp.par.Name.eval():i for i,myOp in enumerate(f) }
return f2
def MacroOutput_Connect(self , A , B ):
'''
Connects a texture [or audio etc] output of a macro, to an input tile on the right.
'''
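		# A indexes the selected macro's outputs (via the Tex_Out_Names lookup);
		# B is the destination tile index, though the destination's type and name
		# are read from the tile currently hovered in the TileManager.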
def getOutputType( index ):
outputLookupDat = parent.IO.op('sceneData/null_current_macros_Tex_Out_Names')
lookupDat = op.IO_TEMPLATES_V3.op('null_PIK_LOOKUP')
Objtype = int(outputLookupDat[index+1,'Objtype'])
# now find the macro output node type ie.tex,chan,audio
d = {}
for i in range(lookupDat.numRows):
try:
d[ int(lookupDat[i,'-objtype-']) ] = str(lookupDat[i,'-type-'])
except:
pass
texType = d[Objtype]
# now find the objtype of the macro output. 60,61,62, etc.
d = {}
for i in range(lookupDat.numRows):
try:
if int(lookupDat[i,'-objtype-']) >= 60 and int(lookupDat[i,'-objtype-']) < 65:
d[ lookupDat[i,'-type-'].val ] = str(lookupDat[i,'-objtype-'])
except:
pass
return d[texType]
def getInputType( index ):
TileInfo = op.TileManager.GetInfoFromHovered()
Objtype = TileInfo['Objtype']
return Objtype
def getInputInfo( index ):
TileInfo = op.TileManager.GetInfoFromHovered()
return TileInfo
connCompat = op.IO_TEMPLATES_V3.op('null_connectionCompatibility')
currentRoot = parent.IO.IO_BACKEND().par.Scenegeocomp.eval()
assert currentRoot != None, 'root object should definitely not be None, check this out..'
sel = self.GetSelectedObjects()
		assert len(sel) == 1, 'should not even be able to connect a macro node output when multiple or no nodes are selected, check this out.'
macroOp = sel[0]
OutList = macroOp.par.Out.eval()
assert isinstance( OutList , list ) , 'Outlist is not a list! this should not happen'
indexLookupDict = self.GetOutputsIndexDictFromMacro( macroOp )
invertedDict = {v:k for k,v in indexLookupDict.items()}
OutType = int(getOutputType(A))
InType = int(getInputType(B))
allowedTypes = connCompat[ str(InType) ,1].val.split(',')
allowedTypes = [int(x) for x in allowedTypes if x.isnumeric()]
# print(OutType, allowedTypes)
if OutType in allowedTypes:
# print(OutType,InType, allowedTypes)
OUT_NAME = invertedDict[A]
IN_NAME = getInputInfo(B)['Name']
InOnlyNames = [x['B'] for x in OutList]
if IN_NAME not in InOnlyNames:
OutList += [ {"A":OUT_NAME , "B":IN_NAME} ]
macroOp.par.Out = OutList
elif IN_NAME in InOnlyNames:
indexOfInName = InOnlyNames.index(IN_NAME)
OutList[indexOfInName]["A"] = OUT_NAME
macroOp.par.Out = OutList
else:
debug('wtf? check this out')
else:
op.NOTIFV2.Notify('Connection between those types is not compatible.')
def MacroOutput_PruneInvalidConnections( self ):
'''
		This function will check the currently selected macro, examine its connections, and determine if any are invalid.
If any are, they will be removed from the connections list so that it becomes valid again.
'''
tilesDat = op.TileManager.op('null_tileNames')
currentRoot = parent.IO.IO_BACKEND().par.Scenegeocomp.eval()
assert currentRoot != None, 'root object should definitely not be None, check this out..'
sel = self.GetSelectedObjects()
assert len(sel) == 1, 'should not even be able to disconnect a macro nodes output when multiple nodes are selected, check this out.'
macroOp = sel[0]
OutList = macroOp.par.Out.eval()
NewOutList = []
assert isinstance( OutList , list ), 'variable stored in Out parameter was not a list, this is an error. check this out'
A_Names = self.GetOutputsByNameFromMacro(macroOp , [x['A'] for x in OutList] )
A_Names = [ each.par.Name.eval() for each in A_Names ]
B_Names = list(map(str,tilesDat.col('Name')[1::]))
for each in OutList:
if each['A'] in A_Names and each['B'] in B_Names:
NewOutList += [each]
macroOp.par.Out = NewOutList
return
def MacroOutput_UpdateConnectionNames( self , FindName , ReplaceName , ObjNameType , ObjRef ):
'''
Given a find and replace string, this function goes through ALL macros, and attempts to find and replace the connection names.
		This is intended to be used when the user changes the name of a projector or a macro output connector. Since connections are
		maintained by user-given name rather than operator name, renaming things (a common task when cleaning up
		and organizing a file after some work has been done) can break them.
Last argument(ObjNameType) can be the following: [ 'local' , 'global' , ]
'''
# Isgraphroot = parent.IO.IO_VIEWPORT().par.Isgraphroot.eval()
# print(op.IO_viewport.par.Isgraphroot.eval())
# if op.IO_viewport.par.Isgraphroot.eval() == False:
# import inspect
# print('asd')
# print( inspect.stack() )
if FindName not in [None]:
TileNamesDat = op.TileManager.op('null_tileNames')
TileNames = list(map(str,TileNamesDat.col('Name')[1::]))
if ObjNameType in ['global']: # if the changed thing is a global thing, like a projector, we need to update all macros.
allObjects = parent.IO.op('IO_ROOT').findChildren(type=geometryCOMP, maxDepth=1, parName='Objtype')
allMacros = [ x for x in allObjects if x.par.Objtype.eval() in [10] ] # get all macros
elif ObjNameType in ['local']: # if the changed thing is a tex out node OF a macro, we only need to update that specific macro.
allMacros = [ ObjRef.parent.obj ]
for macro in allMacros:
OutList = macro.par.Out.eval()
NewOutList = []
for ConnectionDict in OutList:
NewOutList += [
{
"A":ConnectionDict['A'].replace(FindName,ReplaceName) if ConnectionDict['A'].endswith(FindName) else ConnectionDict['A'],
"B":ConnectionDict['B'].replace(FindName,ReplaceName) if ConnectionDict['B'].endswith(FindName) else ConnectionDict['B']
}
]
macro.par.Out = NewOutList
return
def MacroOutput_Disconnect(self, OutputIndex ):
'''
		Disconnects a macro output from its target.
'''
self.MacroOutput_PruneInvalidConnections()
currentRoot = parent.IO.IO_BACKEND().par.Scenegeocomp.eval()
assert currentRoot != None, 'root object should definitely not be None, check this out..'
sel = self.GetSelectedObjects()
assert len(sel) == 1, 'should not even be able to disconnect a macro nodes output when multiple nodes are selected, check this out.'
macroOp = sel[0]
OutList = macroOp.par.Out.eval()
assert isinstance( OutList , list ), 'variable stored in Out parameter was not a list, this is an error. check this out'
indexLookupDict = self.GetOutputsIndexDictFromMacro( macroOp )
invertedDict = {v:k for k,v in indexLookupDict.items()}
fetchedNameToDelete = invertedDict.get(OutputIndex,':ERR:')
assert fetchedNameToDelete != ':ERR:', 'Output index did not correspond to any output stored in dict... check this out.'
NewOutList = [ each for each in OutList if each['A'] != fetchedNameToDelete ]
macroOp.par.Out = NewOutList
return
def MacroOutput_Reset(self):
'''
		Resets the selected macro's output connection list to an empty list.
'''
currentRoot = parent.IO.IO_BACKEND().par.Scenegeocomp.eval()
if currentRoot != None:
sel = self.GetSelectedObjects()
if len(sel) == 1:
macroOp = sel[0]
macroOp.par.Out = []
else:
debug('multiple macros selected... this should not happen.')
else:
debug('root object should definitely not be None, check this out..')
#############################################################################################
########################### begin macro INPUT functions #####################################
#############################################################################################
def GetInputsByNameFromMacro(self, macroOp=None, nameList=[] ):
assert macroOp != None, "You must provide a valid macro operator for this function to work."
# this gets us objects from a search of the currently active path.
f = parent.IO.IO_SCENE().findChildren(type=geometryCOMP, maxDepth=1, depth=1)
f = [ x for x in f if x.par.Selected == 1] # filter down to selected only
f = [ x for x in f if x.par.Objtype == 10 ] # filter down to Macros only.
		assert len(f) <= 1, "Should not be trying to retrieve input nodes for more than 1 macro at a time... "
f2 = macroOp.op('GRAPH').findChildren( type=geometryCOMP , depth=1, maxDepth=1, parName="Objtype" )
f2 = [ x for x in f2 if x.par.Objtype.eval() in [ 17,18,19 ] ]
f2 = [ x for x in f2 if x.par.Name in nameList ]
return f2
def GetInputsIndexDictFromMacro(self, macroOp=None ):
assert macroOp != None, "You must provide a valid macro operator for this function to work."
f = macroOp.op('GRAPH').findChildren( type=geometryCOMP , depth=1, maxDepth=1, parName="Objtype")
f = [ x for x in f if x.par.Objtype.eval() in [ 17,18,19 ] ]
f = reversed(sorted(f, key=lambda node: node.nodeY))
f2 = { myOp.par.Name.eval():i for i,myOp in enumerate(f) }
return f2
def MacroInput_Connect(self , connectionsTab , A , B ):
'''
		Connects a texture [or audio etc] node to an input of a macro, on its left.
'''
def getInputInfo( index ):
inputLookupDat = parent.IO.op('sceneData/null_current_macros_Tex_In_Names')
lookupDat = op.IO_TEMPLATES_V3.op('null_PIK_LOOKUP')
Objtype = int(inputLookupDat[index+1,'Objtype'])
# now find the macro output node type ie.tex,chan,audio
d = {}
for i in range(lookupDat.numRows):
try:
d[ int(lookupDat[i,'-objtype-']) ] = str(lookupDat[i,'-type-'])
except:
pass
texType = d[Objtype]
# now find the objtype of the macro output. 65,66,67, etc.
d = {}
for i in range(lookupDat.numRows):
try:
if int(lookupDat[i,'-objtype-']) >= 65 and int(lookupDat[i,'-objtype-']) < 70:
d[ lookupDat[i,'-type-'].val ] = str(lookupDat[i,'-objtype-'])
except:
pass
returnDict = {}
returnDict['objtype'] = int(d[texType])
returnDict['textype'] = texType
return returnDict
def getOutputInfo( connectionsTab , index ):
index += 1
try:
a_path = op( connectionsTab[index,'path'] ).parent.obj
except:
a_path = op( connectionsTab[index,'path'] )
d = {}
d['path'] = str(connectionsTab[index,'path'])
d['source'] = str(connectionsTab[index,'Name'])
d['type'] = str(connectionsTab[index,'GP_type'])
d['connId'] = int(connectionsTab[index,'connectorID'])
d['dir'] = str(connectionsTab[index,'direction'])
return d
currentRoot = parent.IO.IO_BACKEND().par.Scenegeocomp.eval()
assert currentRoot != None, 'root object should definitely not be None, check this out..'
sel = self.GetSelectedObjects()
		assert len(sel) == 1, 'should not even be able to connect a macro node input when multiple or no nodes are selected, check this out.'
macroOp = sel[0]
InList = macroOp.par.In.eval()
		assert isinstance( InList , list ) , 'InList is not a list! this should not happen'
indexLookupDict = self.GetInputsIndexDictFromMacro( macroOp )
invertedDict = {v:k for k,v in indexLookupDict.items()}
# print(indexLookupDict)
OutInfo = getOutputInfo(connectionsTab,A)
InInfo = getInputInfo(B)
# print(OutInfo)
out_direction = OutInfo['dir']
out_textype = OutInfo['type']
in_textype = InInfo['textype']
textype_check = out_textype == in_textype
direction_check = out_direction == "out"
if textype_check == False:
op.NOTIFV2.Notify('Connection between those types is not compatible.')
if direction_check == False:
op.NOTIFV2.Notify('Can only connect outputs of IO nodes to inputs of Macros.')
if textype_check and direction_check:
OUT_NAME = OutInfo["source"] + "/" + str(OutInfo["connId"])
IN_NAME = invertedDict[B]
# print(IN_NAME)
InOnlyNames = [x['B'] for x in InList]
# print(OUT_NAME)
# print(IN_NAME)
if IN_NAME not in InOnlyNames:
InList += [ {"A":OUT_NAME , "B":IN_NAME} ]
macroOp.par.In = InList
elif IN_NAME in InOnlyNames:
indexOfInName = InOnlyNames.index(IN_NAME)
InList[indexOfInName]["A"] = OUT_NAME
macroOp.par.In = InList
else:
debug('wtf? check this out')
else:
op.NOTIFV2.Notify('Connection between those types is not compatible.')
return
def MacroInput_PruneInvalidConnections( self ):
'''
		This function will check the currently selected macro, examine its input connections, and determine if any are invalid.
If any are, they will be removed from the connections list so that it becomes valid again.
'''
rootNodesDat = op.IOV2.op('sceneData/null_IOV2_ROOTNODES')
currentRoot = parent.IO.IO_BACKEND().par.Scenegeocomp.eval()
assert currentRoot != None, 'root object should definitely not be None, check this out..'
sel = self.GetSelectedObjects()
assert len(sel) == 1, 'should not even be able to disconnect a macro nodes output when multiple nodes are selected, check this out.'
macroOp = sel[0]
InList = macroOp.par.In.eval()
NewInList = []
assert isinstance( InList , list ), 'variable stored in In parameter was not a list, this is an error. check this out'
A_Names = list(map(str,rootNodesDat.col('Name')[1::]))
B_Names = self.GetInputsByNameFromMacro(macroOp , [x['B'] for x in InList] )
B_Names = [ each.par.Name.eval() for each in B_Names ]
for each in InList:
if each['A'].split('/')[0] in A_Names and each['B'] in B_Names:
NewInList += [each]
macroOp.par.In = NewInList
return
def MacroInput_PruneInvalidConnections_GLOBAL( self ):
'''
This function will check all macros, examine their input connections, and determine if any are invalid.
If any are, they will be removed from the connections list so that it becomes valid again.
'''
rootNodesDat = op.IOV2.op('sceneData/null_IOV2_ROOTNODES')
allObjects = parent.IO.op('IO_ROOT').findChildren(type=geometryCOMP, maxDepth=1, parName='Objtype')
allMacros = [ x for x in allObjects if x.par.Objtype.eval() in [10] ] # get all macros
for macroOp in allMacros:
InList = macroOp.par.In.eval()
NewInList = []
assert isinstance( InList , list ), 'variable stored in In parameter was not a list, this is an error. check this out'
A_Names = list(map(str,rootNodesDat.col('Name')[1::]))
B_Names = self.GetInputsByNameFromMacro(macroOp , [x['B'] for x in InList] )
B_Names = [ each.par.Name.eval() for each in B_Names ]
for each in InList:
if each['A'].split('/')[0] in A_Names and each['B'] in B_Names:
NewInList += [each]
macroOp.par.In = NewInList
return
def MacroInput_UpdateConnectionNames( self , FindName , ReplaceName , ObjNameType , ObjRef ):
'''
Given a find and replace string, this function goes through ALL macros, and attempts to find and replace the connection names.
		This is intended to be used when the user changes the name of a projector or a macro output connector. Since connections are
		maintained by user-given name rather than operator name, renaming things (a common task when cleaning up
		and organizing a file after some work has been done) can break them.
		Last argument(ObjNameType) can be the following: [ 'global' , 'local' ]
'''
# Isgraphroot = parent.IO.IO_VIEWPORT().par.Isgraphroot.eval()
# if op.IO_viewport.par.Isgraphroot.eval() == False:
if ObjNameType in ["global"]: # if the changed thing is an ALL, we want to update all macros since outputs can be connected to several macros.
allObjects = parent.IO.op('IO_ROOT').findChildren(type=geometryCOMP, maxDepth=1, parName='Objtype')
allMacros = [ x for x in allObjects if x.par.Objtype.eval() in [10] ] # get all macros
elif ObjNameType in ["local"]: # if the changed thing is a tex out node OF a macro, we only need to update that specific macro.
allMacros = [ ObjRef.parent.obj ]
# print(FindName,ReplaceName,ObjNameType)
for macro in allMacros:
InList = macro.par.In.eval()
NewInList = []
for ConnectionDict in InList:
NewInList += [
{
"A":ConnectionDict['A'].replace(FindName,ReplaceName) ,
"B":ConnectionDict['B'].replace(FindName,ReplaceName)
}
]
macro.par.In = NewInList
return
def MacroInput_Disconnect(self, OutputIndex ):
'''
		Disconnects a macro input from its source.
'''
self.MacroInput_PruneInvalidConnections()
currentRoot = parent.IO.IO_BACKEND().par.Scenegeocomp.eval()
assert currentRoot != None, 'root object should definitely not be None, check this out..'
sel = self.GetSelectedObjects()
assert len(sel) == 1, 'should not even be able to disconnect a macro nodes output when multiple nodes are selected, check this out.'
macroOp = sel[0]
InList = macroOp.par.In.eval()
assert isinstance( InList , list ), 'variable stored in In parameter was not a list, this is an error. check this out'
indexLookupDict = self.GetInputsIndexDictFromMacro( macroOp )
invertedDict = {v:k for k,v in indexLookupDict.items()}
fetchedNameToDelete = invertedDict.get(OutputIndex,':ERR:')
assert fetchedNameToDelete != ':ERR:', 'Input index did not correspond to any output stored in dict... check this out.'
NewInList = [ each for each in InList if each['B'] != fetchedNameToDelete ]
macroOp.par.In = NewInList
return
def MacroInput_Reset(self):
'''
		resets the macro's input connection parameter to its default (an empty list)
'''
currentRoot = parent.IO.IO_BACKEND().par.Scenegeocomp.eval()
if currentRoot != None:
sel = self.GetSelectedObjects()
if len(sel) == 1:
macroOp = sel[0]
macroOp.par.In = []
else:
debug('multiple macros selected... this should not happen.')
else:
debug('root object should definitely not be None, check this out..')
def UpdateWidgetCaptureNames(self,old,new):
f = op.IO_scene.findChildren(parName = "clone")
allWidgetCaptures = [ x for x in f if x.par.clone.eval() == op.IO_TEMPLATES_V3.op('CHAN/Capture_/WidgetCapture') ]
numChanged = 0
for widgetCapture in allWidgetCaptures:
oldString = widgetCapture.par.Widgetselector.eval()
oldStringSplit = oldString.split('/')
oldStringSplit[0] = oldStringSplit[0].replace( old , new )
newString = '/'.join(oldStringSplit)
print(widgetCapture , old,new)
widgetCapture.par.Widgetselector = newString
numChanged += (newString != oldString)*1
if numChanged > 0:
op.NOTIFV2.Notify('%i Widget Capture node(s) in IO had their Selector names updated.'%(numChanged))
op.IO_RightBar_V2.par.Regen.pulse()
return
def Load_Conns____________________________(self, connsDict=None, translationDict = None, target=None):
assert connsDict != None, 'Please supply a connection DICT.'
# debug('Loading IO graph Connections from Project Save.')
if op(target) == None:
t = parent.IO.IO_SCENE()
else:
t = op(target)
# exectionString = ""
for k,v in connsDict.items():
# try:
if translationDict == None:
A_name = v['A_name']
B_name = v['B_name']
else:
A_name = translationDict.get( v['A_name'] , v['A_name'] )
B_name = translationDict.get( v['B_name'] )
A_index = v['A_index']
B_index = v['B_index']
try:
t.op( A_name ).outputConnectors[ A_index ].connect( t.op( B_name ).inputConnectors[ B_index ] )
except:
debug('Could not connect %s:%i to %s:%i, this may be normal during pruning.'%( A_name,A_index,B_name,B_index ) )
def Dict_to_JSON_______________________________(self, myDict=None):
return TDJ.jsonToText(myDict)
def DICT_to_IO_____________________________(self, Dict=None, target=None):
if op(target) == None:
target = parent.IO.IO_SCENE()
# define some attribute types as things we want to handle first, and last.
# everything else can go in the middle.
firstOrder = [ '_name',
'_typeID',
]
lastOrder = ['_parent']
translationDict = {}
# print(jsonFile)
# get the actual dict.
ioDict = Dict
### first pass.
# we can iterate through our IO dict, we can discard the key, it's just a counter, though we might want it later.
for k_,v_ in ioDict.items():
# print(k_)
if k_ not in ['connections'] :
opName = v_['_name']
opType = v_['_typeID']
# find children in IO
IO_Found = parent.IO.IO_TEMPLATES().findChildren(tags=[opType], depth=1)
# alternatively, find children in the sampler module where our scheme template lies.
Sampler_Found = parent.IO.EDITOR_BACKEND().op('Sampler').findChildren(tags=[opType], depth=1)
# combine all sources into 1 list. we should only have 1 item in the sum of lists.
found = IO_Found + Sampler_Found
# print(opType, "-", found)
found = found[0]
newObj = target.copy(found, name=opName)
# if we're loading or importing things that exist by name, track name change TD does so we can still perform post ops..
translationDict[opName] = newObj.name
for k2,v2 in v_.items():
# now we deal with intermediate, attribute setting.
if k2[0] != "_":
foundAttr = getattr( newObj.par, k2, ':PAR-ERR:' )
if foundAttr != ':PAR-ERR:':
foundAttr.val = v2
else:
debug('Could not set parameter %s, probably due to format change.'%(k2))
# try:
# newObj.nodeCenterX = newObj.par.Tx * 2
# newObj.nodeCenterY = newObj.par.Ty * 2
# newObj.nodeWidth = 160
# newObj.nodeHeight = 130
# pass
# except:
# debug('-- could not set nodex/y from Tx Ty on this IO node: %s'%(newObj))
return translationDict
def GetIoJson___________________________(self):
'''
		Creates an undo snapshot of the current state of the IO scene and returns its JSON.
'''
selectedOnly = False
ioDict = self.IO_To_Dict(selectedOnly)
ioJson = self.Dict_to_JSON(ioDict)
# print(ioJson)
return ioJson
def Save_IO_________________________(self, selectedOnly = False):
'''
		converts the IO scene into a dict, serializes it to JSON, and writes it to a file path the user chooses.
'''
debug('saving IO graph...')
IO_VIEWPORT_ = {}
IO_VIEWPORT_['IO'] = self.IO_To_Dict(selectedOnly)
IO_VIEWPORT_['IOCONNS'] = self.GatherConnections()
ioJson = self.Dict_to_JSON(IO_VIEWPORT_)
path = parent.IO.EDITOR_BACKEND().mod.tdUtils_V2.LaunchFileBrowser(load=False, start='USER\IO_VIEWPORTS',fileTypes=['GPgraph'],title='Save IO graph AS:')
if path:
f = open(path,'w')
f.write(ioJson)
f.close()
return
def Load_IO_________________________(self, ioDict=None, clearFirst = True):
# print('attempting to load damnit')
# parent.IO.IO_BACKEND().Refresh_Folders()
templateArea = parent.IO.IO_TEMPLATES()
# assume the user did not pass in a dict from a master save file somewhere...
# we need to handle prompting the user with a file load dialogue.
if ioDict == None:
# pass
path = parent.IO.EDITOR_BACKEND().mod.tdUtils_V2.LaunchFileBrowser(load=True, start='USER\IO_VIEWPORTS',fileTypes=['GPgraph'],title='Load IO graph :')
if path:
f = open(path,'r')
fRead = f.read()
# get the actual dict.
ioDict = TDJ.textToJSON(fRead, showErrors=True)
if clearFirst:
self.DeleteAllObjects()
before = set(parent.IO.IO_SCENE().children)
translationDict = self.DICT_to_IO( ioDict.get('IO', {}) )
after = set(parent.IO.IO_SCENE().children)
conns = self.Load_Conns( ioDict.get("IOCONNS", {}) , translationDict )
newSel = list(after.difference(before))
# fix name overlaps.
for new in newSel:
new.par.Name = self.UniquifyObjName(new.par.Name.eval())
for newlyMadeOp in newSel:
self.Refresh_IO_Connections(newlyMadeOp)
# IoGatherScripts = newlyMadeOp.findChildren(tags=['_GatherIO_'])
# for x in IoGatherScripts:
# x.run()
# update selection.
parent.IO.IO_BACKEND().par.Lastselected = newSel[-1]
self.Select(newSel)
else:
if clearFirst:
self.DeleteAllObjects()
debug('Loading IO from supplied dict...')
self.DICT_to_IO( ioDict )
# '''
found = parent.IO.IO_SCENE().findChildren(tags=['_GatherIO_'])
for x in found:
x.run(delayFrames=1)
# '''
def GetDrivableByName(self,itemName):
'''
This function is generically named, because it must also exist in extensions for other areas that must be able to fetch
		an object by its user-given name. The contents of these functions will differ from area to area, but abstract that away.
This function is mostly used by the UI mapper.
'''
retVal = None
macroName = None
nodeName = None
# print(itemName)
#
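		# Names of the form 'MacroName:NodeName' refer to a node inside that macro's
		# GRAPH; a bare name refers to a top-level node in the IO scene.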
if ':' in itemName:
macroName = itemName.split(':')[0]
nodeName = itemName.split(':')[1]
foundMacros = op.IO_scene.findChildren( parName='Name', parValue=macroName, maxDepth=1, depth=1 )
if len(foundMacros) == 1:
foundNodes = foundMacros[0].op('GRAPH').findChildren( parName='Name', parValue=nodeName, maxDepth=1, depth=1 )
# debug(foundNodes)
if len(foundNodes) == 1:
# assert len(foundNodes) == 1,'did not find exactly one node by this name<%s>, something is wrong, check this out'%(nodeName)
retVal = foundNodes[0]
else:
nodeName = itemName
# debug('------B-------', nodeName)
foundNodes = op.IO_scene.findChildren( parName='Name', parValue=nodeName, maxDepth=1, depth=1 )
# assert len(foundNodes) == 1,'did not find exactly one node by this name<%s>, something is wrong, check this out'%(nodeName)
if len(foundNodes) == 1:
retVal = foundNodes[0]
return retVal
|
 cogdl/data/__init__.py | YuHuang42/cogdl | 824 | 12675982 | 
from .data import Data
from .batch import Batch
from .dataset import Dataset
from .dataloader import DataLoader, DataListLoader, DenseDataLoader
from .download import download_url
from .extract import extract_tar, extract_zip, extract_bz2, extract_gz
__all__ = [
"Data",
"Batch",
"Dataset",
"DataLoader",
"DataListLoader",
"DenseDataLoader",
"download_url",
"extract_tar",
"extract_zip",
"extract_bz2",
"extract_gz",
]
|
 milk/tests/test_normalise.py | luispedro/milk | 284 | 12675984 | 
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2012, <NAME> <<EMAIL>>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
import numpy
import numpy as np
from milk.supervised.normalise import sample_to_2min
import milk.supervised.normalise
def test_zscore_normalise():
I=milk.supervised.normalise.zscore_normalise()
numpy.random.seed(1234)
features = numpy.random.rand(20,100)
L = numpy.zeros(100)
model = I.train(features, L)
transformed = np.array([model.apply(f) for f in features])
assert np.all( transformed.mean(0)**2 < 1e-7 )
assert np.all( np.abs(transformed.std(0) - 1) < 1e-3 )
def test_sample_to_2min():
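    # sample_to_2min should subsample the labels so that, for any two classes,
    # the larger class keeps at most twice as many samples as the smaller one.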
A = np.zeros(256, np.int32)
def test_one(A):
selected = sample_to_2min(A)
ratios = []
for l0 in set(A):
for l1 in set(A):
ratios.append( (A[selected] == l0).sum() / (A[selected] == l1).sum() )
assert np.max(ratios) <= 2.001
A[20:] = 1
yield test_one, A
A[21:] = 1
yield test_one, A
A[129:] = 2
yield test_one, A
def test_sample_to_2min_list():
from collections import defaultdict
def count(xs):
counts = defaultdict(int)
for x in xs:
counts[x] += 1
return counts
labels = ["A"]*8 + ["B"]*12 + ["C"]*16 + ["D"] * 24 + ["E"] * 1000
selected = sample_to_2min(labels)
before = count(labels)
after = count(np.array(labels)[selected])
assert max(after.values()) == min(before.values())*2
def test_interval_normalise():
interval = milk.supervised.normalise.interval_normalise()
np.random.seed(105)
features = np.random.randn(100, 5)
model = interval.train(features, features[0] > 0)
transformed = np.array([model.apply(f) for f in features])
assert np.allclose(transformed.min(0), -1)
assert np.allclose(transformed.max(0), +1)
def test_nanstd():
from milk.unsupervised.normalise import _nanstd
np.random.seed(234)
for i in range(8):
x = np.random.rand(200,231)
np.allclose(_nanstd(x,0), x.std(0))
np.allclose(_nanstd(x,1), x.std(1))
|
 mmcv/video/__init__.py | WanEnNg/mmcv | 384 | 12676045 | 
from .io import Cache, VideoReader, frames2video
from .processing import convert_video, resize_video, cut_video, concat_video
from .optflow import (flowread, flowwrite, quantize_flow, dequantize_flow,
flow_warp)
__all__ = [
'Cache', 'VideoReader', 'frames2video', 'convert_video', 'resize_video',
'cut_video', 'concat_video', 'flowread', 'flowwrite', 'quantize_flow',
'dequantize_flow', 'flow_warp'
]
|
 become_yukarin/config/config.py | nameless-writer/become-yukarin | 562 | 12676059 | 
import json
from pathlib import Path
from typing import Dict
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Union
from become_yukarin.param import Param
class DatasetConfig(NamedTuple):
param: Param
input_glob: Path
target_glob: Path
input_mean_path: Path
input_var_path: Path
target_mean_path: Path
target_var_path: Path
features: List[str]
train_crop_size: int
input_global_noise: float
input_local_noise: float
target_global_noise: float
target_local_noise: float
seed: int
num_test: int
class ModelConfig(NamedTuple):
in_channels: int
out_channels: int
generator_base_channels: int
generator_extensive_layers: int
discriminator_base_channels: int
discriminator_extensive_layers: int
weak_discriminator: bool
class LossConfig(NamedTuple):
mse: float
adversarial: float
class TrainConfig(NamedTuple):
batchsize: int
gpu: int
log_iteration: int
snapshot_iteration: int
class ProjectConfig(NamedTuple):
name: str
tags: List[str]
class Config(NamedTuple):
dataset: DatasetConfig
model: ModelConfig
loss: LossConfig
train: TrainConfig
project: ProjectConfig
def save_as_json(self, path):
d = _namedtuple_to_dict(self)
json.dump(d, open(path, 'w'), indent=2, sort_keys=True, default=_default_path)
def _default_path(o):
if isinstance(o, Path):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
def _namedtuple_to_dict(o: NamedTuple):
return {
k: v if not hasattr(v, '_asdict') else _namedtuple_to_dict(v)
for k, v in o._asdict().items()
}
def create_from_json(s: Union[str, Path]):
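    # Accepts either a JSON string or a path to a JSON file; older config files
    # are upgraded in place via backward_compatible() before the Config is built.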
try:
d = json.loads(s)
except TypeError:
d = json.load(open(s))
backward_compatible(d)
return Config(
dataset=DatasetConfig(
param=Param(),
input_glob=Path(d['dataset']['input_glob']),
target_glob=Path(d['dataset']['target_glob']),
input_mean_path=Path(d['dataset']['input_mean_path']),
input_var_path=Path(d['dataset']['input_var_path']),
target_mean_path=Path(d['dataset']['target_mean_path']),
target_var_path=Path(d['dataset']['target_var_path']),
features=d['dataset']['features'],
train_crop_size=d['dataset']['train_crop_size'],
input_global_noise=d['dataset']['input_global_noise'],
input_local_noise=d['dataset']['input_local_noise'],
target_global_noise=d['dataset']['target_global_noise'],
target_local_noise=d['dataset']['target_local_noise'],
seed=d['dataset']['seed'],
num_test=d['dataset']['num_test'],
),
model=ModelConfig(
in_channels=d['model']['in_channels'],
out_channels=d['model']['out_channels'],
generator_base_channels=d['model']['generator_base_channels'],
generator_extensive_layers=d['model']['generator_extensive_layers'],
discriminator_base_channels=d['model']['discriminator_base_channels'],
discriminator_extensive_layers=d['model']['discriminator_extensive_layers'],
weak_discriminator=d['model']['weak_discriminator'],
),
loss=LossConfig(
mse=d['loss']['mse'],
adversarial=d['loss']['adversarial'],
),
train=TrainConfig(
batchsize=d['train']['batchsize'],
gpu=d['train']['gpu'],
log_iteration=d['train']['log_iteration'],
snapshot_iteration=d['train']['snapshot_iteration'],
),
project=ProjectConfig(
name=d['project']['name'],
tags=d['project']['tags'],
)
)
def backward_compatible(d: Dict):
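    # Fills in defaults for fields that were added after older config files were
    # written (noise settings, model channel/layer counts, weak_discriminator).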
if 'input_global_noise' not in d['dataset']:
d['dataset']['input_global_noise'] = d['dataset']['global_noise']
d['dataset']['input_local_noise'] = d['dataset']['local_noise']
if 'target_global_noise' not in d['dataset']:
d['dataset']['target_global_noise'] = d['dataset']['global_noise']
d['dataset']['target_local_noise'] = d['dataset']['local_noise']
if 'generator_base_channels' not in d['model']:
d['model']['generator_base_channels'] = 64
d['model']['generator_extensive_layers'] = 8
d['model']['discriminator_base_channels'] = 32
d['model']['discriminator_extensive_layers'] = 5
if 'weak_discriminator' not in d['model']:
d['model']['weak_discriminator'] = False
|
 configs/pointnet2/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class.py | Guangyun-Xu/mmdetection3d | 2,216 | 12676076 | 
_base_ = [
'../_base_/datasets/s3dis_seg-3d-13class.py',
'../_base_/models/pointnet2_msg.py',
'../_base_/schedules/seg_cosine_50e.py', '../_base_/default_runtime.py'
]
# data settings
data = dict(samples_per_gpu=16)
evaluation = dict(interval=2)
# model settings
model = dict(
backbone=dict(in_channels=9), # [xyz, rgb, normalized_xyz]
decode_head=dict(
num_classes=13, ignore_index=13,
loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight
test_cfg=dict(
num_points=4096,
block_size=1.0,
sample_rate=0.5,
use_normalized_coord=True,
batch_size=24))
# runtime settings
checkpoint_config = dict(interval=2)
# PointNet2-MSG needs longer training time than PointNet2-SSG
runner = dict(type='EpochBasedRunner', max_epochs=80)
|
 mara_pipelines/incremental_processing/processed_files.py | timgates42/mara-pipelines | 1,398 | 12676098 | 
"""Functions for keeping track whether an input file has already been 'processed' """
import datetime
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
import mara_db.config
import mara_db.dbs
import mara_db.postgresql
Base = declarative_base()
class ProcessedFile(Base):
"""A local file that has been 'processed' (e.g. has been read)"""
__tablename__ = 'data_integration_processed_file'
node_path = sqlalchemy.Column(sqlalchemy.ARRAY(sqlalchemy.Text), primary_key=True)
file_name = sqlalchemy.Column(sqlalchemy.Text, primary_key=True)
last_modified_timestamp = sqlalchemy.Column(sqlalchemy.TIMESTAMP(timezone=True))
def track_processed_file(node_path: str, file_name: str, last_modified_timestamp: datetime):
"""
Records that a file has been 'processed' by a node
Args:
node_path: The path of the node that processed the file
file_name: The name of the file that has been processed
last_modified_timestamp: The time when the file was modified last
Returns: True
"""
with mara_db.postgresql.postgres_cursor_context('mara') as cursor:
cursor.execute(f'''
INSERT INTO data_integration_processed_file (node_path, file_name, last_modified_timestamp)
VALUES ({'%s,%s,%s'})
ON CONFLICT (node_path, file_name)
DO UPDATE SET last_modified_timestamp = EXCLUDED.last_modified_timestamp
''', (node_path, file_name, last_modified_timestamp))
return True
def already_processed_files(node_path: str) -> {str: datetime}:
"""
Returns all files that already have been processed by a node
Args:
node_path: The path of the node that processed the file
Returns:
A mapping of file names to timestamps of last modification
"""
with mara_db.postgresql.postgres_cursor_context('mara') as cursor:
cursor.execute(f"""
SELECT file_name, last_modified_timestamp
FROM data_integration_processed_file WHERE node_path = {'%s'}
""", (node_path,))
return {row[0]: row[1] for row in cursor.fetchall()}
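# Illustrative (hypothetical) usage from a file-processing pipeline node; the
# names `node`, `new_files` and `process` below are placeholders, not part of this module:
#
#   done = already_processed_files(node.path)
#   for f in new_files:
#       if done.get(f.name) != f.last_modified:
#           process(f)
#           track_processed_file(node.path, f.name, f.last_modified)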
|
saleor/giftcard/events.py | MertIV/saleor | 15,337 | 12676099 | from typing import Iterable, List, Optional, Tuple
from ..account.models import User
from ..app.models import App
from ..core.utils.validators import user_is_valid
from . import GiftCardEvents
from .models import GiftCard, GiftCardEvent
UserType = Optional[User]
AppType = Optional[App]
def gift_card_issued_event(
gift_card: GiftCard,
user: UserType,
app: AppType,
):
if not user_is_valid(user):
user = None
balance_data = {
"currency": gift_card.currency,
"initial_balance": gift_card.initial_balance_amount,
"current_balance": gift_card.current_balance_amount,
}
return GiftCardEvent.objects.create(
gift_card=gift_card,
user=user,
app=app,
type=GiftCardEvents.ISSUED,
parameters={"balance": balance_data, "expiry_date": gift_card.expiry_date},
)
def gift_card_sent_event(
gift_card_id: int, user_id: Optional[int], app_id: Optional[int], email: str
):
return GiftCardEvent.objects.create(
gift_card_id=gift_card_id,
user_id=user_id,
app_id=app_id,
type=GiftCardEvents.SENT_TO_CUSTOMER,
parameters={"email": email},
)
def gift_card_resent_event(
gift_card_id: int, user_id: Optional[int], app_id: Optional[int], email: str
):
return GiftCardEvent.objects.create(
gift_card_id=gift_card_id,
user_id=user_id,
app_id=app_id,
type=GiftCardEvents.RESENT,
parameters={"email": email},
)
def gift_card_balance_reset_event(
gift_card: GiftCard,
old_gift_card: GiftCard,
user: UserType,
app: AppType,
):
if not user_is_valid(user):
user = None
balance_data = {
"currency": gift_card.currency,
"initial_balance": gift_card.initial_balance_amount,
"current_balance": gift_card.current_balance_amount,
"old_currency": gift_card.currency,
"old_initial_balance": old_gift_card.initial_balance_amount,
"old_current_balance": old_gift_card.current_balance_amount,
}
return GiftCardEvent.objects.create(
gift_card=gift_card,
user=user,
app=app,
type=GiftCardEvents.BALANCE_RESET,
parameters={"balance": balance_data},
)
def gift_card_expiry_date_updated_event(
gift_card: GiftCard,
old_gift_card: GiftCard,
user: UserType,
app: AppType,
):
if not user_is_valid(user):
user = None
return GiftCardEvent.objects.create(
gift_card=gift_card,
user=user,
app=app,
type=GiftCardEvents.EXPIRY_DATE_UPDATED,
parameters={
"expiry_date": gift_card.expiry_date,
"old_expiry_date": old_gift_card.expiry_date,
},
)
def gift_card_tags_updated_event(
gift_card: GiftCard,
old_tags: List[str],
user: UserType,
app: AppType,
):
if not user_is_valid(user):
user = None
return GiftCardEvent.objects.create(
gift_card=gift_card,
user=user,
app=app,
type=GiftCardEvents.TAGS_UPDATED,
parameters={
"tags": list(
gift_card.tags.order_by("name").values_list("name", flat=True)
),
"old_tags": old_tags,
},
)
def gift_card_activated_event(
gift_card: GiftCard,
user: UserType,
app: AppType,
):
if not user_is_valid(user):
user = None
return GiftCardEvent.objects.create(
gift_card=gift_card,
user=user,
app=app,
type=GiftCardEvents.ACTIVATED,
)
def gift_card_deactivated_event(
gift_card: GiftCard,
user: UserType,
app: AppType,
):
if not user_is_valid(user):
user = None
return GiftCardEvent.objects.create(
gift_card=gift_card,
user=user,
app=app,
type=GiftCardEvents.DEACTIVATED,
)
def gift_cards_activated_event(
gift_card_ids: Iterable[int],
user: UserType,
app: AppType,
):
if not user_is_valid(user):
user = None
events = [
GiftCardEvent(
gift_card_id=gift_card_id,
user=user,
app=app,
type=GiftCardEvents.ACTIVATED,
)
for gift_card_id in gift_card_ids
]
return GiftCardEvent.objects.bulk_create(events)
def gift_cards_deactivated_event(
gift_card_ids: Iterable[int],
user: UserType,
app: AppType,
):
if not user_is_valid(user):
user = None
events = [
GiftCardEvent(
gift_card_id=gift_card_id,
user=user,
app=app,
type=GiftCardEvents.DEACTIVATED,
)
for gift_card_id in gift_card_ids
]
return GiftCardEvent.objects.bulk_create(events)
def gift_card_note_added_event(
gift_card: GiftCard, user: UserType, app: AppType, message: str
) -> GiftCardEvent:
if not user_is_valid(user):
user = None
return GiftCardEvent.objects.create(
gift_card=gift_card,
user=user,
app=app,
type=GiftCardEvents.NOTE_ADDED,
parameters={"message": message},
)
def gift_cards_used_in_order_event(
balance_data: Iterable[Tuple[GiftCard, float]],
order_id: int,
user: UserType,
app: AppType,
):
if not user_is_valid(user):
user = None
events = [
GiftCardEvent(
gift_card=gift_card,
user=user,
app=app,
type=GiftCardEvents.USED_IN_ORDER,
parameters={
"order_id": order_id,
"balance": {
"currency": gift_card.currency,
"current_balance": gift_card.current_balance.amount,
"old_current_balance": previous_balance,
},
},
)
for gift_card, previous_balance in balance_data
]
return GiftCardEvent.objects.bulk_create(events)
def gift_cards_bought_event(
gift_cards: Iterable[GiftCard], order_id: int, user: UserType, app: AppType
):
if not user_is_valid(user):
user = None
events = [
GiftCardEvent(
gift_card=gift_card,
user=user,
app=app,
type=GiftCardEvents.BOUGHT,
parameters={"order_id": order_id, "expiry_date": gift_card.expiry_date},
)
for gift_card in gift_cards
]
return GiftCardEvent.objects.bulk_create(events)
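# Illustrative sketch (not part of the original module): the `balance_data` argument of
# `gift_cards_used_in_order_event` is an iterable of (gift_card, previous_balance) pairs,
# where `previous_balance` is the card's balance amount before the order was charged.
# A hypothetical call could look like:
#   gift_cards_used_in_order_event(
#       balance_data=[(gift_card, Decimal("50.00"))],
#       order_id=order.id,
#       user=request_user,
#       app=None,
#   )
# The `gift_card`, `order` and `request_user` objects here are assumed to exist.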
|
tests/functional/context_methods/first_dependency.py | tomasfarias/dbt-core | 799 | 12676120 | <gh_stars>100-1000
import pytest
from dbt.tests.fixtures.project import write_project_files
first_dependency__dbt_project_yml = """
name: 'first_dep'
version: '1.0'
config-version: 2
profile: 'default'
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
require-dbt-version: '>=0.1.0'
target-path: "target" # directory which will store compiled SQL files
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
vars:
first_dep:
first_dep_global: 'first_dep_global_value_overridden'
seeds:
quote_columns: True
"""
first_dependency__models__nested__first_dep_model_sql = """
select
'{{ var("first_dep_global") }}' as first_dep_global,
'{{ var("from_root_to_first") }}' as from_root
"""
first_dependency__seeds__first_dep_expected_csv = """first_dep_global,from_root
first_dep_global_value_overridden,root_first_value
"""
class FirstDependencyProject:
@pytest.fixture(scope="class")
def first_dependency(self, project):
first_dependency_files = {
"dbt_project.yml": first_dependency__dbt_project_yml,
"models": {
"nested": {
"first_dep_model.sql": first_dependency__models__nested__first_dep_model_sql
}
},
"seeds": {"first_dep_expected.csv": first_dependency__seeds__first_dep_expected_csv},
}
write_project_files(project.project_root, "first_dependency", first_dependency_files)
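# Illustrative sketch (not part of the original module): a test class would typically mix in
# FirstDependencyProject and request the `first_dependency` fixture so the dependency files
# are written into the project root before the test body runs. The class, fixture wiring and
# assertion below are hypothetical.
#   class TestUsesFirstDependency(FirstDependencyProject):
#       def test_dependency_files_written(self, project, first_dependency):
#           assert os.path.exists(
#               os.path.join(project.project_root, "first_dependency", "dbt_project.yml")
#           )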
|
vigranumpy/examples/graph_smoothing.py | BSeppke/vigra | 316 | 12676139 | <reponame>BSeppke/vigra
import vigra
from vigra import graphs
# parameter:
filepath = '12003.jpg' # input image path
sigmaGradMag = 2.0 # sigma Gaussian gradient
superpixelDiameter = 10 # super-pixel size
slicWeight = 10.0 # SLIC color - spatial weight
gamma = 0.15 # exp(-gamma * edgeIndicator)
edgeThreshold = 2.5 # values higher are considered as edges
scale = 1.0 # how much smoothing
iterations = 10              # how many smoothing iterations
# load image and convert to LAB
img = vigra.impex.readImage(filepath)
res = vigra.filters.nonlinearDiffusion(img, 1.9, 20.0)
vigra.imshow(res)
vigra.show() |
spacy/tests/lang/vi/test_serialize.py | rynoV/spaCy | 22,040 | 12676143 | <reponame>rynoV/spaCy
from spacy.lang.vi import Vietnamese
from ...util import make_tempdir
def test_vi_tokenizer_serialize(vi_tokenizer):
tokenizer_bytes = vi_tokenizer.to_bytes()
nlp = Vietnamese()
nlp.tokenizer.from_bytes(tokenizer_bytes)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
assert nlp.tokenizer.use_pyvi is True
with make_tempdir() as d:
file_path = d / "tokenizer"
vi_tokenizer.to_disk(file_path)
nlp = Vietnamese()
nlp.tokenizer.from_disk(file_path)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
assert nlp.tokenizer.use_pyvi is True
# mode is (de)serialized correctly
nlp = Vietnamese.from_config({"nlp": {"tokenizer": {"use_pyvi": False}}})
nlp_bytes = nlp.to_bytes()
nlp_r = Vietnamese()
nlp_r.from_bytes(nlp_bytes)
assert nlp_bytes == nlp_r.to_bytes()
assert nlp_r.tokenizer.use_pyvi is False
with make_tempdir() as d:
nlp.to_disk(d)
nlp_r = Vietnamese()
nlp_r.from_disk(d)
assert nlp_bytes == nlp_r.to_bytes()
assert nlp_r.tokenizer.use_pyvi is False
|
orochi/website/migrations/0025_dump_banner.py | garanews/orochi | 121 | 12676145 | <reponame>garanews/orochi<filename>orochi/website/migrations/0025_dump_banner.py
# Generated by Django 3.1.3 on 2020-11-06 16:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0024_auto_20200930_1428'),
]
operations = [
migrations.AddField(
model_name='dump',
name='banner',
field=models.CharField(blank=True, max_length=500, null=True),
),
]
|
syfertext/data/units/token_meta.py | Dat-Boi-Arjun/SyferText | 204 | 12676168 | <reponame>Dat-Boi-Arjun/SyferText<gh_stars>100-1000
from typing import Dict, List
class TokenMeta:
"""This class holds some meta data about a token from the text held by a Doc object.
This allows to create a Token object when needed.
"""
def __init__(self, text: str, space_after: bool):
"""Initializes a TokenMeta object
Args:
text (str): The token's text.
space_after (bool): Whether the token is followed by a single white
space (True) or not (False).
"""
self.text = text
self.space_after = space_after
# A dictionary to hold custom attributes
self.attributes: Dict[str, List[str]] = dict()
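# Illustrative sketch (not part of the original module): constructing a TokenMeta and
# attaching a custom attribute. The "pos" key used below is a hypothetical example.
#   meta = TokenMeta(text="hello", space_after=True)
#   meta.attributes["pos"] = ["NOUN"]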
|
test/unit/agent/collectors/plus/stream.py | dp92987/nginx-amplify-agent | 308 | 12676186 | # -*- coding: utf-8 -*-
from hamcrest import *
from test.base import BaseTestCase
from amplify.agent.common.context import context
from amplify.agent.objects.plus.object import NginxStreamObject
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class StreamCollectorTestCase(BaseTestCase):
def setup_method(self, method):
super(StreamCollectorTestCase, self).setup_method(method)
context.plus_cache = None
context._setup_plus_cache()
def test_gather_data(self):
stream = NginxStreamObject(local_name='some_stream', parent_local_id='nginx123', root_uuid='root123')
stream.plus_status_internal_url_cache = 'test_status'
# Get the stream collector
stream_collector = stream.collectors[-1]
context.plus_cache.put('test_status', (
{
u'streams': {
u'some_stream': {
u'connections': 0,
u'discarded': 0,
u'processing': 0,
u'received': 0,
u'sent': 0,
u'sessions': {
u'2xx': 0,
u'4xx': 0,
u'5xx': 0,
u'total': 0
}
},
u'udp_dns': {
u'connections': 0,
u'discarded': 0,
u'processing': 0,
u'received': 0,
u'sent': 0,
u'sessions': {
u'2xx': 0,
u'4xx': 0,
u'5xx': 0,
u'total': 0
}
}
}
},
1
))
data = stream_collector.gather_data()
assert_that(data, not_(equal_to([])))
assert_that(data, has_length(1))
def test_collect(self):
stream = NginxStreamObject(local_name='some_stream', parent_local_id='nginx123', root_uuid='root123')
stream.plus_status_internal_url_cache = 'test_status'
# Get the stream collector
stream_collector = stream.collectors[-1]
assert_that(stream_collector.last_collect, equal_to(None))
context.plus_cache.put('test_status', (
{
u'streams': {
u'some_stream': {
u'connections': 2,
u'discarded': 10,
u'processing': 60,
u'received': 0,
u'sent': 0,
u'sessions': {
u'2xx': 0,
u'4xx': 0,
u'5xx': 0,
u'total': 0
}
},
u'udp_dns': {
u'connections': 0,
u'discarded': 1,
u'processing': 0,
u'received': 0,
u'sent': 0,
u'sessions': {
u'2xx': 0,
u'4xx': 0,
u'5xx': 0,
u'total': 0
}
}
}
},
1
))
context.plus_cache.put('test_status', (
{
u'streams': {
u'some_stream': {
u'connections': 30,
u'discarded': 5,
u'processing': 5,
u'received': 0,
u'sent': 0,
u'sessions': {
u'2xx': 0,
u'4xx': 0,
u'5xx': 0,
u'total': 0
}
},
u'udp_dns': {
u'connections': 0,
u'discarded': 2,
u'processing': 0,
u'received': 0,
u'sent': 0,
u'sessions': {
u'2xx': 0,
u'4xx': 0,
u'5xx': 0,
u'total': 0
}
}
}
},
2
))
stream_collector.collect()
assert_that(stream_collector.last_collect, equal_to(2))
assert_that(stream.statsd.current, not_(has_length(0)))
assert_that(stream.statsd.current, has_key('counter'))
counters = stream.statsd.current['counter']
for key in (
'plus.stream.bytes_sent',
'plus.stream.bytes_rcvd',
'plus.stream.status.2xx',
'plus.stream.status.4xx',
'plus.stream.status.5xx',
'plus.stream.conn.accepted',
):
assert_that(counters, has_key(key))
assert_that(counters['plus.stream.conn.accepted'][0], equal_to([2, 28]))
gauges = stream.statsd.current['gauge']
assert_that(gauges, has_key('plus.stream.conn.active'))
assert_that(gauges['plus.stream.conn.active'][0], equal_to((1, 60)))
|
tools/bin/ext/figleaf/annotate.py | YangHao666666/hawq | 450 | 12676188 | """
Common functions for annotating files with figleaf coverage information.
"""
import sys, os
from optparse import OptionParser
import ConfigParser
import re
import logging
import figleaf
thisdir = os.path.dirname(__file__)
try: # 2.3 compatibility
logging.basicConfig(format='%(message)s', level=logging.WARNING)
except TypeError:
pass
logger = logging.getLogger('figleaf.annotate')
DEFAULT_CONFIGURE_FILE = ".figleafrc"
### utilities
def safe_conf_get(conf, section, name, default):
try:
val = conf.get(section, name)
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
val = default
return val
def configure(parser):
"""
Configure the optparse.OptionParser object with defaults, optionally
loaded from a configuration file.
"""
CONFIG_FILE = os.environ.get('FIGLEAFRC', DEFAULT_CONFIGURE_FILE)
parser.add_option("-c", "--coverage-file", action="store",
type="string", dest="coverage_file",
help="File containing figleaf coverage information.")
parser.add_option("-s", "--sections-file", action="store",
type="string", dest="sections_file",
help="File containing figleaf sections coverage info.")
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose")
conf_file = ConfigParser.ConfigParser()
conf_file.read(CONFIG_FILE) # ignores if not present
default_coverage_file = safe_conf_get(conf_file,
'figleaf', 'coverage_file',
'.figleaf')
default_sections_file = safe_conf_get(conf_file,
'figleaf', 'sections_file',
'.figleaf_sections')
default_verbose = int(safe_conf_get(conf_file, 'figleaf', 'verbose',
0))
parser.set_defaults(coverage_file=default_coverage_file,
sections_file=default_sections_file,
verbose=default_verbose)
def filter_coverage(coverage, re_match):
"""
...
"""
if not re_match:
return coverage
regexp = re.compile(re_match)
d = {}
for filename, lines in coverage.items():
if regexp.match(filename):
d[filename] = lines
return d
### commands
def list(options, match=""):
"""
    List the filenames in the coverage file, optionally limiting it to
    those files matching the regexp 'match'.
"""
if options.verbose:
print>>sys.stderr, '** Reading coverage from coverage file %s' % \
(options.coverage_file,)
if match:
print>>sys.stderr, '** Filtering against regexp "%s"' % (match,)
coverage = figleaf.read_coverage(options.coverage_file)
coverage = filter_coverage(coverage, match)
for filename in coverage.keys():
print filename
def list_sections(options, match=""):
"""
    List the filenames in the sections coverage file, optionally limiting it to
    those files matching the regexp 'match'.
"""
if options.verbose:
print>>sys.stderr, '** Reading sections info from sections file %s' % \
(options.sections_file,)
if match:
print>>sys.stderr, '** Filtering against regexp "%s"' % (match,)
fp = open(options.sections_file)
figleaf.load_pickled_coverage(fp) # @CTB
data = figleaf.internals.CoverageData(figleaf._t)
coverage = data.gather_files()
coverage = filter_coverage(coverage, match)
for filename in coverage.keys():
print filename
###
def read_exclude_patterns(filename):
"""
Read in exclusion patterns from a file; these are just regexps.
"""
if not filename:
return []
exclude_patterns = []
fp = open(filename)
for line in fp:
line = line.rstrip()
if line and not line.startswith('#'):
pattern = re.compile(line)
exclude_patterns.append(pattern)
return exclude_patterns
def read_files_list(filename):
"""
Read in a list of files from a file; these are relative or absolute paths.
"""
s = {}
for line in open(filename):
f = line.strip()
s[os.path.abspath(f)] = 1
return s
def filter_files(filenames, exclude_patterns = [], files_list = {}):
files_list = dict(files_list) # make copy
# list of files specified?
if files_list:
for filename in files_list.keys():
yield filename
        filenames = [ os.path.abspath(x) for x in filenames ]
        for filename in filenames:
            try:
                del files_list[filename]
            except KeyError:
                logger.info('SKIPPING %s -- not in files list' % (filename,))
        return
### no files list given -- handle differently
for filename in filenames:
abspath = os.path.abspath(filename)
# check to see if we match anything in the exclude_patterns list
skip = False
for pattern in exclude_patterns:
if pattern.search(filename):
logger.info('SKIPPING %s -- matches exclusion pattern' % \
(filename,))
skip = True
break
if skip:
continue
# next, check to see if we're part of the figleaf package.
if thisdir in filename:
logger.debug('SKIPPING %s -- part of the figleaf package' % \
(filename,))
continue
# also, check for <string> (source file from things like 'exec'):
if filename == '<string>':
continue
# miscellaneous other things: doctests
if filename.startswith('<doctest '):
continue
yield filename
###
def main():
parser = OptionParser()
configure(parser)
options, args = parser.parse_args()
if not len(args):
print "ERROR: You must specify a command like 'list' or 'report'. Use"
print "\n %s -h\n" % (sys.argv[0],)
print "for help on commands and options."
sys.exit(-1)
cmd = args.pop(0)
if cmd == 'list':
list(options, *args)
elif cmd == 'list_sections':
list_sections(options, *args)
sys.exit(0)
|
muddery/launcher/upgrader/upgrader_0_6_4.py | dongwudanci/muddery | 127 | 12676208 | <reponame>dongwudanci/muddery<gh_stars>100-1000
"""
Upgrade custom's game dir to the latest version.
"""
import traceback
import os
import django.core.management
from evennia.server.evennia_launcher import init_game_directory
from muddery.launcher.upgrader.base_upgrader import BaseUpgrader
from muddery.launcher.upgrader.utils import file_append
from muddery.launcher.utils import import_system_data
from muddery.server.utils.exception import MudderyError, ERR
class Upgrader(BaseUpgrader):
"""
Upgrade a game dir to a specified version.
"""
# Can upgrade the game of version between from_version and to_version.
    # from min version 0.6.4 (include this version)
    from_min_version = (0, 6, 4)
    # from max version 0.6.5 (not include this version)
from_max_version = (0, 6, 5)
target_version = None
def upgrade_game(self, game_dir, game_template, muddery_lib):
"""
Upgrade a game.
Args:
game_dir: (string) the game dir to be upgraded.
game_template: (string) the game template used to upgrade the game dir.
muddery_lib: (string) muddery's dir
"""
os.chdir(game_dir)
init_game_directory(game_dir, check_db=False)
# make new migrations
django_args = ["makemigrations", "worlddata"]
django_kwargs = {}
django.core.management.call_command(*django_args, **django_kwargs)
django_args = ["migrate", "worlddata"]
django_kwargs = {"database": "worlddata"}
django.core.management.call_command(*django_args, **django_kwargs)
def upgrade_data(self, data_path, game_template, muddery_lib):
"""
Upgrade game data.
Args:
data_path: (string) the data path to be upgraded.
game_template: (string) the game template used to upgrade the game dir.
muddery_lib: (string) muddery's dir
"""
pass
|
bookwyrm/migrations/0110_auto_20211015_1734.py | mouse-reeve/fedireads | 270 | 12676223 | <filename>bookwyrm/migrations/0110_auto_20211015_1734.py<gh_stars>100-1000
# Generated by Django 3.2.5 on 2021-10-15 17:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bookwyrm", "0109_status_edited_date"),
]
operations = [
migrations.AddField(
model_name="quotation",
name="raw_quote",
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name="status",
name="raw_content",
field=models.TextField(blank=True, null=True),
),
]
|
src/UQpy/Distributions/collection/uniform.py | marrov/UQpy | 132 | 12676227 | import scipy.stats as stats
from UQpy.Distributions.baseclass import DistributionContinuous1D
class Uniform(DistributionContinuous1D):
"""
Uniform distribution having probability density function
.. math:: f(x|a, b) = \dfrac{1}{b-a}
where :math:`a=loc` and :math:`b=loc+scale`
**Inputs:**
* **loc** (`float`):
lower bound
* **scale** (`float`):
range
The following methods are available for ``Uniform``:
* ``cdf``, ``pdf``, ``log_pdf``, ``icdf``, ``rvs``, ``moments``, ``fit``.
"""
def __init__(self, loc=0., scale=1.):
super().__init__(loc=loc, scale=scale, order_params=('loc', 'scale'))
self._construct_from_scipy(scipy_name=stats.uniform)
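# Illustrative usage sketch (not part of the original module): Uniform(loc=2, scale=3) is
# supported on [2, 5], so its pdf equals 1/3 inside that interval. Keyword names follow the
# methods listed in the docstring above.
#   dist = Uniform(loc=2., scale=3.)
#   samples = dist.rvs(nsamples=5)
#   density = dist.pdf(x=samples)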
|
examples/pubsub.py | eoghanmurray/aredis | 832 | 12676239 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import aredis
import asyncio
import concurrent.futures
import time
import logging
async def wait_for_message(pubsub, timeout=2, ignore_subscribe_messages=False):
now = time.time()
timeout = now + timeout
while now < timeout:
message = await pubsub.get_message(
ignore_subscribe_messages=ignore_subscribe_messages,
timeout=1
)
if message is not None:
print(message)
await asyncio.sleep(0.01)
now = time.time()
return None
async def subscribe(client):
await client.flushdb()
pubsub = client.pubsub()
assert pubsub.subscribed is False
await pubsub.subscribe('foo')
# assert await pubsub.subscribe() is True
await wait_for_message(pubsub)
async def publish(client):
# sleep to wait for subscriber to listen
await asyncio.sleep(1)
await client.publish('foo', 'test message')
await client.publish('foo', 'quit')
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
client = aredis.StrictRedis()
loop = asyncio.get_event_loop()
loop.set_debug(enabled=True)
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(asyncio.run_coroutine_threadsafe, publish(client), loop)
loop.run_until_complete(subscribe(client))
|
Models/dataloader/cub/grid/cub.py | icoz69/DeepEMD | 444 | 12676241 | <gh_stars>100-1000
import os
import os.path as osp
import random
import torch
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
class CUB(Dataset):
def __init__(self, setname, args=None):
IMAGE_PATH = os.path.join(args.data_dir, 'cub/')
SPLIT_PATH = os.path.join(args.data_dir, 'cub/split/')
txt_path = osp.join(SPLIT_PATH, setname + '.csv')
lines = [x.strip() for x in open(txt_path, 'r').readlines()][1:]
data = []
label = []
lb = -1
self.wnids = []
if setname == 'train':
lines.pop(5864)#this image file is broken
self.setname = setname
for l in lines:
context = l.split(',')
name = context[0]
wnid = context[1]
path = osp.join(IMAGE_PATH, name)
if wnid not in self.wnids:
self.wnids.append(wnid)
lb += 1
data.append(path)
label.append(lb)
self.data = data
self.label = label
self.num_class = np.unique(np.array(label)).shape[0]
if 'patch_list' not in vars(args).keys():
self.patch_list = [2, 3]
print('do not assign num_patch parameter, set as default:',self.patch_list)
else:
self.patch_list = args.patch_list
if 'patch_ratio' not in vars(args).keys():
self.patch_ratio = 2
print('do not assign patch_ratio parameter, set as default:',self.patch_ratio)
else:
self.patch_ratio = args.patch_ratio
if setname == 'val' or setname == 'test':
image_size = 84
self.transform = transforms.Compose([
transforms.Resize([image_size, image_size]),
transforms.ToTensor(),
transforms.Normalize(np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
np.array([x / 255.0 for x in [63.0, 62.1, 66.7]]))
])
elif setname == 'train':
image_size = 84
self.transform = transforms.Compose([
transforms.Resize([image_size, image_size]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
np.array([x / 255.0 for x in [63.0, 62.1, 66.7]]))
])
else:
            raise ValueError('Unknown set')
def __len__(self):
return len(self.data)
def get_grid_location(self, size, ratio, num_grid):
'''
:param size: size of the height/width
:param ratio: generate grid size/ even divided grid size
:param num_grid: number of grid
:return: a list containing the coordinate of the grid
'''
raw_grid_size = int(size / num_grid)
enlarged_grid_size = int(size / num_grid * ratio)
center_location = raw_grid_size // 2
location_list = []
for i in range(num_grid):
location_list.append((max(0, center_location - enlarged_grid_size // 2),
min(size, center_location + enlarged_grid_size // 2)))
center_location = center_location + raw_grid_size
return location_list
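    # Worked example for get_grid_location (added for clarity, not in the original code):
    # with size=84, ratio=2 and num_grid=3, raw_grid_size=28 and enlarged_grid_size=56, so
    # the centers 14/42/70 give the clipped intervals [(0, 42), (14, 70), (42, 84)].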
def get_pyramid(self, img, num_patch):
if self.setname == 'val' or self.setname == 'test':
num_grid = num_patch
grid_ratio = self.patch_ratio
elif self.setname == 'train':
num_grid = num_patch
grid_ratio = 1 + 2 * random.random()
else:
            raise ValueError('Unknown set')
w, h = img.size
grid_locations_w = self.get_grid_location(w, grid_ratio, num_grid)
grid_locations_h = self.get_grid_location(h, grid_ratio, num_grid)
patches_list = []
for i in range(num_grid):
for j in range(num_grid):
patch_location_w = grid_locations_w[j]
patch_location_h = grid_locations_h[i]
left_up_corner_w = patch_location_w[0]
left_up_corner_h = patch_location_h[0]
right_down_cornet_w = patch_location_w[1]
right_down_cornet_h = patch_location_h[1]
patch = img.crop((left_up_corner_w, left_up_corner_h, right_down_cornet_w, right_down_cornet_h))
patch = self.transform(patch)
patches_list.append(patch)
return patches_list
def __getitem__(self, i):
path, label = self.data[i], self.label[i]
image = Image.open(path).convert('RGB')
patch_list = []
for num_patch in self.patch_list:
patches = self.get_pyramid(image, num_patch)
patch_list.extend(patches)
patch_list = torch.stack(patch_list, dim=0)
return patch_list, label
if __name__ == '__main__':
pass
|
exercises/twelve-days/twelve_days.py | kishankj/python | 1,177 | 12676244 | def recite(start_verse, end_verse):
pass
|
airflow/api_connexion/schemas/enum_schemas.py | ChaseKnowlden/airflow | 15,947 | 12676280 | <gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marshmallow import fields, validate
from airflow.utils.state import State
class DagStateField(fields.String):
"""Schema for DagState Enum"""
def __init__(self, **metadata):
super().__init__(**metadata)
self.validators = [validate.OneOf(State.dag_states)] + list(self.validators)
class TaskInstanceStateField(fields.String):
"""Schema for TaskInstanceState Enum"""
def __init__(self, **metadata):
super().__init__(**metadata)
self.validators = [validate.OneOf(State.task_states)] + list(self.validators)
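# Illustrative sketch (not part of the original module): these fields can be dropped into a
# Marshmallow schema; values outside the allowed states are rejected by the OneOf validator.
# The schema below is hypothetical.
#   from marshmallow import Schema
#   class _ExampleSchema(Schema):
#       dag_state = DagStateField()
#       task_state = TaskInstanceStateField()
#   _ExampleSchema().load({"dag_state": "running", "task_state": "queued"})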
|
tools/pythonpkg/tests/fast/arrow/test_timestamps.py | AldoMyrtaj/duckdb | 2,816 | 12676315 | import duckdb
import os
import datetime
import pytest
try:
import pyarrow as pa
import pandas as pd
can_run = True
except:
can_run = False
class TestArrowTimestamps(object):
def test_timestamp_types(self, duckdb_cursor):
if not can_run:
return
data = (pa.array([datetime.datetime.now()], type=pa.timestamp('ns')),pa.array([datetime.datetime.now()], type=pa.timestamp('us')),pa.array([datetime.datetime.now()], pa.timestamp('ms')),pa.array([datetime.datetime.now()], pa.timestamp('s')))
arrow_table = pa.Table.from_arrays([data[0],data[1],data[2],data[3]],['a','b','c','d'])
rel = duckdb.from_arrow_table(arrow_table).arrow()
assert (rel['a'] == arrow_table['a'])
assert (rel['b'] == arrow_table['b'])
assert (rel['c'] == arrow_table['c'])
assert (rel['d'] == arrow_table['d'])
def test_timestamp_nulls(self, duckdb_cursor):
if not can_run:
return
data = (pa.array([None], type=pa.timestamp('ns')),pa.array([None], type=pa.timestamp('us')),pa.array([None], pa.timestamp('ms')),pa.array([None], pa.timestamp('s')))
arrow_table = pa.Table.from_arrays([data[0],data[1],data[2],data[3]],['a','b','c','d'])
rel = duckdb.from_arrow_table(arrow_table).arrow()
assert (rel['a'] == arrow_table['a'])
assert (rel['b'] == arrow_table['b'])
assert (rel['c'] == arrow_table['c'])
assert (rel['d'] == arrow_table['d'])
def test_timestamp_overflow(self, duckdb_cursor):
if not can_run:
return
data = (pa.array([9223372036854775807], pa.timestamp('s')),pa.array([9223372036854775807], pa.timestamp('ms')),pa.array([9223372036854775807], pa.timestamp('us')))
arrow_table = pa.Table.from_arrays([data[0],data[1],data[2]],['a','b','c'])
arrow_from_duck = duckdb.from_arrow_table(arrow_table).arrow()
assert (arrow_from_duck['a'] == arrow_table['a'])
assert (arrow_from_duck['b'] == arrow_table['b'])
assert (arrow_from_duck['c'] == arrow_table['c'])
with pytest.raises(Exception):
duck_rel = duckdb.from_arrow_table(arrow_table)
res = duck_rel.project('a::TIMESTAMP_US')
res.fetchone()
with pytest.raises(Exception):
duck_rel = duckdb.from_arrow_table(arrow_table)
res = duck_rel.project('b::TIMESTAMP_US')
res.fetchone()
with pytest.raises(Exception):
duck_rel = duckdb.from_arrow_table(arrow_table)
res = duck_rel.project('c::TIMESTAMP_NS')
res.fetchone()
|
resource_tracker/migrations/0003_auto_20211006_1722.py | LaudateCorpus1/squest | 112 | 12676318 | # Generated by Django 3.2.7 on 2021-10-06 15:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('resource_tracker', '0002_auto_20210929_2131'),
]
operations = [
migrations.AlterField(
model_name='resourcegroupattributedefinition',
name='consume_from',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='consumers', related_query_name='consumer', to='resource_tracker.resourcepoolattributedefinition'),
),
migrations.AlterField(
model_name='resourcegroupattributedefinition',
name='produce_for',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='producers', related_query_name='producer', to='resource_tracker.resourcepoolattributedefinition'),
),
migrations.AlterField(
model_name='resourcegroupattributedefinition',
name='resource_group_definition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='attribute_definitions', related_query_name='attribute_definition', to='resource_tracker.resourcegroup'),
),
migrations.AlterField(
model_name='resourcegrouptextattributedefinition',
name='resource_group_definition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='text_attribute_definitions', related_query_name='text_attribute_definition', to='resource_tracker.resourcegroup'),
),
]
|
wav2clip/pre_training/model.py | LianQi-Kevin/wav2clip-changed | 102 | 12676337 | <reponame>LianQi-Kevin/wav2clip-changed
import pytorch_lightning as pl
import torch
from ..model.encoder import MLPLayers
from ..model.resnet import BasicBlock
from ..model.resnet import ResNet
from .loss import CLIPLoss1D
class LightningBase(pl.LightningModule):
def training_step(self, batch, batch_idx):
loss = self.step(batch, batch_idx)
self.log(
"train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True
)
return {"loss": loss, "log": {"train_loss": loss}}
def validation_step(self, batch, batch_idx):
loss = self.step(batch, batch_idx)
return {"val_loss": loss}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
self.log("val_loss", avg_loss, prog_bar=True)
return {
"avg_val_loss": avg_loss,
"log": {"val_loss": avg_loss},
"progress_bar": {"val_loss": avg_loss},
}
def test_step(self, batch, batch_idx):
loss = self.step(batch, batch_idx)
return {"test_loss": loss}
def test_epoch_end(self, outputs):
avg_loss = torch.stack([x["test_loss"] for x in outputs]).mean()
self.log("test_loss", avg_loss, prog_bar=True)
return {
"avg_test_loss": avg_loss,
"log": {"test_loss": avg_loss},
"progress_bar": {"test_loss": avg_loss},
}
def configure_optimizers(self):
if self.args["optimizer"] == "SGD":
optimizer = torch.optim.SGD(
                self.parameters(), lr=self.args["learning_rate"], momentum=0.9
)
elif self.args["optimizer"] == "Adam":
optimizer = torch.optim.Adam(
self.parameters(), lr=self.args["learning_rate"]
)
else:
assert False
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
mode="min",
factor=0.1,
patience=self.args["lr_scheduler_patience"],
min_lr=1e-6,
verbose=True,
)
return {
"optimizer": optimizer,
"lr_scheduler": scheduler,
"monitor": "val_loss",
}
class ResNetDistillation(LightningBase):
def __init__(
self,
args,
):
super().__init__()
assert args["loss"] in (
"clip_loss",
"clip_loss_x",
"clip_loss_i",
)
self.loss = args["loss"]
self.args = args
self.audio_encoder = ResNet(
BasicBlock,
[2, 2, 2, 2],
num_classes=309,
pool="avgpool",
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
)
self.image_transform = None
self.audio_transform = None
if self.loss == "clip_loss":
self.loss_fn = CLIPLoss1D()
elif self.loss == "clip_loss_i":
self.image_transform = MLPLayers(
units=args["MLPLayers.units"], dropout=args["MLPLayers.dropout"]
)
self.loss_fn = CLIPLoss1D()
elif self.loss == "clip_loss_x":
self.image_transform = MLPLayers(
units=args["MLPLayers.units"], dropout=args["MLPLayers.dropout"]
)
self.audio_transform = MLPLayers(
units=args["MLPLayers.units"], dropout=args["MLPLayers.dropout"]
)
self.loss_fn_i = CLIPLoss1D()
self.loss_fn_a = CLIPLoss1D()
else:
assert False
def forward(self, audio, images):
audio_output = self.audio_encoder(audio.float())
image_output = torch.mean(images.float(), 1)
if self.loss == "clip_loss_i":
image_output = self.image_transform(image_output)
elif self.loss == "clip_loss_x":
transformed_image = self.image_transform(image_output)
transformed_audio = self.audio_transform(audio_output)
return audio_output, image_output, transformed_audio, transformed_image
return audio_output, image_output
def step(self, batch, batch_idx):
audio, images = batch
if self.loss == "clip_loss_x":
audio_out, image_out, transformed_audio, transformed_image = self.forward(
audio, images
)
loss = (
self.loss_fn_a(audio_out, transformed_image)
+ self.loss_fn_i(transformed_audio, image_out)
) / 2
else:
audio_out, image_out = self.forward(audio, images)
loss = self.loss_fn(audio_out, image_out)
return loss
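# Illustrative sketch (not part of the original module): the `args` dict consumed above is
# expected to provide at least the keys referenced in this file. The values below are
# assumptions chosen only to show the shape of the configuration.
#   args = {
#       "loss": "clip_loss_x",            # one of: clip_loss, clip_loss_i, clip_loss_x
#       "optimizer": "Adam",              # or "SGD"
#       "learning_rate": 1e-3,
#       "lr_scheduler_patience": 10,
#       "MLPLayers.units": [512, 512],    # only used by clip_loss_i / clip_loss_x
#       "MLPLayers.dropout": 0.1,
#   }
#   model = ResNetDistillation(args)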
|
docs/build/main.py | cclauss/python-devtools | 487 | 12676350 | <filename>docs/build/main.py<gh_stars>100-1000
#!/usr/bin/env python3
import re
import sys
from importlib.machinery import SourceFileLoader
from pathlib import Path
THIS_DIR = Path(__file__).parent
PROJECT_ROOT = THIS_DIR / '..' / '..'
def main():
history = (PROJECT_ROOT / 'HISTORY.md').read_text()
history = re.sub(r'#(\d+)', r'[#\1](https://github.com/samuelcolvin/python-devtools/issues/\1)', history)
history = re.sub(r'( +)@([\w\-]+)', r'\1[@\2](https://github.com/\2)', history, flags=re.I)
history = re.sub('@@', '@', history)
(PROJECT_ROOT / 'docs/.history.md').write_text(history)
version = SourceFileLoader('version', str(PROJECT_ROOT / 'devtools/version.py')).load_module()
(PROJECT_ROOT / 'docs/.version.md').write_text(f'Documentation for version: **v{version.VERSION}**\n')
sys.path.append(str(THIS_DIR.resolve()))
from gen_html import gen_examples_html
return gen_examples_html()
if __name__ == '__main__':
sys.exit(main())
|
core/REST_views.py | gcnoopy/YaraGuardian | 178 | 12676394 | import re
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from .services import get_group_or_404
from .REST_permissions import (group_member,
IsGroupOwner,
IsGroupAdmin,
IsGroupAdminOrMemberReadOnly,
IsGroupOwnerOrPublicReadOnly)
from .REST_serializers import (PublicGroupSerializer,
GroupMetaUpdateSerializer,
PrivateUserSerializer,
PrivateGroupSerializer)
from .patterns import group_name_pattern
from .models import GroupMeta
User = get_user_model()
group_name_regex = re.compile('^' + group_name_pattern + '$')
class AccountView(RetrieveAPIView):
"""
List authenticated user details
"""
permission_classes = [IsAuthenticated]
serializer_class = PrivateUserSerializer
def get_object(self):
return self.request.user
class AccountGroupsView(APIView):
"""
get:
List authenticated user groups.
post:
Create a new group context.
"""
permission_classes = [IsAuthenticated]
def get(self, request):
serializer = PrivateGroupSerializer(request.user.groups.all(), many=True)
return Response(serializer.data)
def post(self, request, **kwargs):
group_name = request.data.get('name', None)
# Ensure group name was specified
if not group_name:
return Response({'errors': ['No Group Name Specified']},
status=status.HTTP_400_BAD_REQUEST)
# Verify group does not already exist
elif Group.objects.filter(name=group_name).exists():
return Response({'errors': ['Group Already Exists']},
status=status.HTTP_400_BAD_REQUEST)
# Verify a user with the same name does not already exist
elif User.objects.filter(username=group_name).exists():
return Response({'errors': ['Group Already Exists']},
status=status.HTTP_400_BAD_REQUEST)
# Verify group is allowed in URL routing
elif not group_name_regex.match(group_name):
return Response({'errors': ['Invalid Group Name']},
status=status.HTTP_400_BAD_REQUEST)
# Create group and group meta
else:
group_object = Group.objects.create(name=group_name)
group_object.save()
group_meta = GroupMeta.objects.create(group=group_object, owner=request.user)
group_meta.save()
request.user.groups.add(group_object)
return Response(status=status.HTTP_201_CREATED)
class GroupsView(ListAPIView):
"""
List details on all groups.
"""
permission_classes = [IsAuthenticated]
serializer_class = PublicGroupSerializer
queryset = Group.objects.all()
class GroupDetailsView(APIView):
"""
get:
Retrieve details on a specific group.
    patch:
    Update specified group details.
delete:
Delete specified group.
"""
permission_classes = [IsGroupOwnerOrPublicReadOnly]
def get(self, request, group_name):
group_object = get_group_or_404(group_name)
if group_member(request):
serializer = PrivateGroupSerializer(group_object)
else:
serializer = PublicGroupSerializer(group_object)
return Response(serializer.data)
def patch(self, request, group_name):
group_object = get_group_or_404(group_name)
serializer = GroupMetaUpdateSerializer(group_object.groupmeta,
data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
def delete(self, request, group_name):
group_object = get_group_or_404(group_name)
if group_name != request.user.username:
group_object.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
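# Illustrative sketch (not part of the original module): exercising GroupDetailsView directly
# with DRF's test request factory; the group name, path and user object are hypothetical.
#   from rest_framework.test import APIRequestFactory, force_authenticate
#   factory = APIRequestFactory()
#   request = factory.get('/groups/examplegroup/')
#   force_authenticate(request, user=some_user)
#   response = GroupDetailsView.as_view()(request, group_name='examplegroup')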
class GroupMembersView(APIView):
"""
get:
List group members.
patch:
Add new specified group members.
delete:
Remove specified members from group.
"""
permission_classes = [IsGroupOwner]
def get(self, request, group_name):
group_object = get_group_or_404(group_name)
return Response(group_object.user_set.all().values_list('username', flat=True))
def patch(self, request, group_name):
group_object = get_group_or_404(group_name)
group_owner_name = group_object.groupmeta.owner.username
# Gather the submitted member name list
try:
new_member_names = request.data.getlist('member')
except AttributeError:
new_member_names = request.data.get('member', list())
if not isinstance(new_member_names, list):
new_member_names = [new_member_names]
# Ensure group owner doesn't get inadvertently processed
if group_owner_name in new_member_names:
new_member_names = list(set(new_member_names))
new_member_names.remove(group_owner_name)
# Gather user objects for processing
new_members = User.objects.filter(username__in=new_member_names)
# Add members
for member in new_members:
member.groups.add(group_object)
return Response(group_object.user_set.all().values_list('username', flat=True))
def delete(self, request, group_name):
group_object = get_group_or_404(group_name)
group_owner_name = group_object.groupmeta.owner.username
# Gather user objects for processing
member_name_list = request.query_params.getlist('member')
removed_members = User.objects.filter(username__in=member_name_list)
# Remove from group
for member in removed_members.exclude(username=group_owner_name):
group_object.groupmeta.admins.remove(member)
member.groups.remove(group_object)
return Response(group_object.user_set.all().values_list('username', flat=True))
class GroupAdminsView(APIView):
"""
get:
List group admins.
patch:
Add new specified group admins.
delete:
Remove specified admins from group.
"""
permission_classes = [IsGroupOwner]
def get(self, request, group_name):
group_object = get_group_or_404(group_name)
return Response(group_object.groupmeta.admins.all().values_list('username', flat=True))
def patch(self, request, group_name):
group_object = get_group_or_404(group_name)
group_owner_name = group_object.groupmeta.owner.username
# Gather the submitted admin name list
try:
new_admin_names = request.data.getlist('admin')
except AttributeError:
new_admin_names = request.data.get('admin', list())
if not isinstance(new_admin_names, list):
new_admin_names = [new_admin_names]
# Ensure group owner doesn't get inadvertently processed
if group_owner_name in new_admin_names:
new_admin_names = list(set(new_admin_names))
new_admin_names.remove(group_owner_name)
# Gather user objects for processing
new_admin_users = User.objects.filter(username__in=new_admin_names)
# Add admins
for admin in new_admin_users:
admin.groups.add(group_object)
group_object.groupmeta.admins.add(admin)
return Response(group_object.groupmeta.admins.all().values_list('username', flat=True))
def delete(self, request, group_name):
group_object = get_group_or_404(group_name)
admin_name_list = request.query_params.getlist('admin')
# Gather user objects for processing
removed_admins = User.objects.filter(username__in=admin_name_list)
# Remove from group
for admin in removed_admins:
group_object.groupmeta.admins.remove(admin)
return Response(group_object.groupmeta.admins.all().values_list('username', flat=True))
class GroupSourcesView(APIView):
"""
get:
List group sources.
patch:
Add new specified group sources.
delete:
Remove specified sources from group.
"""
permission_classes = [IsGroupAdminOrMemberReadOnly]
def get(self, request, group_name):
group_object = get_group_or_404(group_name)
group_metadata = group_object.groupmeta
return Response(group_metadata.source_options)
def patch(self, request, group_name):
group_object = get_group_or_404(group_name)
group_metadata = group_object.groupmeta
# Gather the submitted source list
try:
new_sources = request.data.getlist('source')
except AttributeError:
new_sources = request.data.get('source', list())
if not isinstance(new_sources, list):
new_sources = [new_sources]
for source in new_sources:
if source not in group_metadata.source_options:
group_metadata.source_options.append(source)
group_metadata.save()
return Response(group_metadata.source_options)
def delete(self, request, group_name):
group_object = get_group_or_404(group_name)
group_metadata = group_object.groupmeta
source_list = request.query_params.getlist('source')
for source in source_list:
try:
group_metadata.source_options.remove(source)
except ValueError:
pass
group_metadata.save()
return Response(group_metadata.source_options)
class GroupCategoriesView(APIView):
"""
get:
List group categories.
patch:
Add new specified group categories.
delete:
Remove specified categories from group.
"""
permission_classes = [IsGroupAdminOrMemberReadOnly]
def get(self, request, group_name):
group_object = get_group_or_404(group_name)
group_metadata = group_object.groupmeta
return Response(group_metadata.category_options)
def patch(self, request, group_name):
group_object = get_group_or_404(group_name)
group_metadata = group_object.groupmeta
# Gather the submitted category list
try:
new_categories = request.data.getlist('category')
except AttributeError:
new_categories = request.data.get('category', list())
if not isinstance(new_categories, list):
new_categories = [new_categories]
for category in new_categories:
if category not in group_metadata.category_options:
group_metadata.category_options.append(category)
group_metadata.save()
return Response(group_metadata.category_options)
def delete(self, request, group_name):
group_object = get_group_or_404(group_name)
group_metadata = group_object.groupmeta
category_list = request.query_params.getlist('category')
for category in category_list:
try:
group_metadata.category_options.remove(category)
except ValueError:
pass
group_metadata.save()
return Response(group_metadata.category_options)
|
examples/language_model/gpt/predict.py | JeremyZhao1998/PaddleNLP | 7,091 | 12676400 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2020 TsinghuaAI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Many thanks for following projects.
# https://github.com/TsinghuaAI/CPM-Generate
# https://github.com/jm12138/CPM-Generate-Paddle
import sys
import argparse
import numpy as np
import paddle
from paddlenlp.transformers import GPTModel, GPTForGreedyGeneration
from paddlenlp.transformers import GPTChineseTokenizer, GPTTokenizer
from paddlenlp.utils.log import logger
MODEL_CLASSES = {
"gpt-cn": (GPTForGreedyGeneration, GPTChineseTokenizer),
"gpt": (GPTForGreedyGeneration, GPTTokenizer),
}
class Demo:
def __init__(self,
model_type="gpt-cn",
model_name_or_path="gpt-cpm-large-cn",
max_predict_len=32):
model_class, tokenizer_class = MODEL_CLASSES[model_type]
self.tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
logger.info('Loading the model parameters, please wait...')
self.model = model_class.from_pretrained(
model_name_or_path,
max_predict_len=max_predict_len,
eol_token_id=self.tokenizer.eol_token_id)
self.model.eval()
logger.info('Model loaded.')
# prediction function
def predict(self, text):
ids = self.tokenizer(text)["input_ids"]
input_ids = paddle.to_tensor(
np.array(ids).reshape(1, -1).astype('int64'))
out = self.model(input_ids)
out = [int(x) for x in out.numpy().reshape([-1])]
logger.info(self.tokenizer.convert_ids_to_string(out))
# One shot example
def ask_question_cn(self, question):
self.predict("问题:中国的首都是哪里?答案:北京。\n问题:%s 答案:" % question)
def ask_question_en(self, question):
self.predict(
"Question: Where is the capital of China? Answer: Beijing. \n Question:%s Answer:"
% question)
# dictation poetry
def dictation_poetry_cn(self, front):
self.predict('''默写古诗: 大漠孤烟直,长河落日圆。\n%s''' % front)
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "gpt-cn":
demo = Demo("gpt-cn", "gpt-cpm-large-cn")
demo.ask_question_cn("苹果的CEO是谁?")
demo.dictation_poetry_cn("举杯邀明月,")
else:
demo = Demo("gpt", "gpt2-medium-en")
demo.ask_question_en("Who is the CEO of Apple?")
|
flask_reddit/users/views.py | huhansan666666/flask_reddit | 461 | 12676434 | # -*- coding: utf-8 -*-
"""
"""
from flask import (Blueprint, request, render_template, flash, g, session,
redirect, url_for, abort)
from flask_reddit import db
from flask_reddit.users.models import User
from flask_reddit.frontends.views import get_subreddits
from flask_reddit.users.decorators import requires_login
mod = Blueprint('users', __name__, url_prefix='/users')
@mod.before_request
def before_request():
g.user = None
if 'user_id' in session:
g.user = User.query.get(session['user_id'])
@mod.route('/<username>/')
def home_page(username=None):
if not username:
abort(404)
user = User.query.filter_by(username=username).first()
if not user:
abort(404)
return render_template('users/profile.html', user=g.user, current_user=user,
subreddits = get_subreddits())
|
examples/advanced_tensorflow/server.py | Chris-george-anil/flower | 895 | 12676440 | from typing import Any, Callable, Dict, List, Optional, Tuple
import flwr as fl
import tensorflow as tf
def main() -> None:
# Load and compile model for
# 1. server-side parameter initialization
# 2. server-side parameter evaluation
model = tf.keras.applications.EfficientNetB0(
input_shape=(32, 32, 3), weights=None, classes=10
)
model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"])
# Create strategy
strategy = fl.server.strategy.FedAvg(
fraction_fit=0.3,
fraction_eval=0.2,
min_fit_clients=3,
min_eval_clients=2,
min_available_clients=10,
eval_fn=get_eval_fn(model),
on_fit_config_fn=fit_config,
on_evaluate_config_fn=evaluate_config,
initial_parameters=fl.common.weights_to_parameters(model.get_weights()),
)
# Start Flower server for four rounds of federated learning
fl.server.start_server("[::]:8080", config={"num_rounds": 4}, strategy=strategy)
def get_eval_fn(model):
"""Return an evaluation function for server-side evaluation."""
# Load data and model here to avoid the overhead of doing it in `evaluate` itself
(x_train, y_train), _ = tf.keras.datasets.cifar10.load_data()
# Use the last 5k training examples as a validation set
x_val, y_val = x_train[45000:50000], y_train[45000:50000]
# The `evaluate` function will be called after every round
def evaluate(
weights: fl.common.Weights,
) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]:
model.set_weights(weights) # Update model with the latest parameters
loss, accuracy = model.evaluate(x_val, y_val)
return loss, {"accuracy": accuracy}
return evaluate
def fit_config(rnd: int):
"""Return training configuration dict for each round.
Keep batch size fixed at 32, perform two rounds of training with one
local epoch, increase to two local epochs afterwards.
"""
config = {
"batch_size": 32,
"local_epochs": 1 if rnd < 2 else 2,
}
return config
def evaluate_config(rnd: int):
"""Return evaluation configuration dict for each round.
Perform five local evaluation steps on each client (i.e., use five
batches) during rounds one to three, then increase to ten local
evaluation steps.
"""
val_steps = 5 if rnd < 4 else 10
return {"val_steps": val_steps}
if __name__ == "__main__":
main()
|
examples/Redfish/set_ethernet_management_iface_static_ip.py | JohnAZoidberg/python-ilorest-library | 214 | 12676449 | # Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
"""
An example of setting the Manager ethernet interface static IP
"""
import sys
import json
from redfish import RedfishClient
from redfish.rest.v1 import ServerDownOrUnreachableError
from get_resource_directory import get_resource_directory
def set_ilo_static_ipv4(_redfishobj, ipv4_dict, dns_dict):
ethernet_data = {}
body = dict()
resource_instances = get_resource_directory(_redfishobj)
if DISABLE_RESOURCE_DIR or not resource_instances:
#if we do not have a resource directory or want to force it's non use to find the
#relevant URI
managers_uri = _redfishobj.root.obj['Managers']['@odata.id']
managers_response = _redfishobj.get(managers_uri)
managers_members_uri = next(iter(managers_response.obj['Members']))['@odata.id']
managers_members_response = _redfishobj.get(managers_members_uri)
manager_ethernet_interfaces = managers_members_response.obj['EthernetInterfaces']\
['@odata.id']
manager_ethernet_interfaces_response = _redfishobj.get(manager_ethernet_interfaces)
manager_ethernet_interfaces_members = manager_ethernet_interfaces_response.\
obj['Members']
for _member in manager_ethernet_interfaces_members:
_tmp = _redfishobj.get(_member['@odata.id']).obj
ethernet_data[_member['@odata.id']] = _tmp
else:
#Use Resource directory to find the relevant URI
for instance in resource_instances:
if '#EthernetInterfaceCollection.' in instance['@odata.type'] and 'Managers' in \
instance['@odata.id']:
ethernet_uri = instance['@odata.id']
ethernet_interfaces = _redfishobj.get(ethernet_uri).obj['Members']
for _ethernet_interface in ethernet_interfaces:
ethernet_data[_ethernet_interface['@odata.id']] = _redfishobj.\
get(_ethernet_interface['@odata.id']).dict
break
if ethernet_data:
print("\n\nShowing all available ethernet management interfaces before changes:\n\n")
for interface in ethernet_data:
sys.stdout.write("Ethernet Management Inteface \'%s\'\n" % ethernet_data\
[interface].get('Id'))
sys.stdout.write("\'DHCPv4\':\n")
if ethernet_data[interface].get('DHCPv4'):
print(json.dumps(ethernet_data[interface].get('DHCPv4'), indent=4, sort_keys=True))
else:
print(json.dumps(ethernet_data[interface]['Oem']['Hpe'].get('DHCPv4'), indent=4, \
sort_keys=True))
sys.stdout.write("\'IPv4\':\n")
if ethernet_data[interface].get('IPv4StaticAddresses'):
print(json.dumps(ethernet_data[interface].get('IPv4Addresses'), indent=4, \
sort_keys=True))
sys.stdout.write("\'StaticNameServers\':\n")
if ethernet_data[interface].get('StaticNameServers'):
print(json.dumps(ethernet_data[interface].get('StaticNameServers'), indent=4, \
sort_keys=True))
for ethernet in ethernet_data:
sys.stdout.write("Ethernet Interface: %s\n" % ethernet)
ans = input("Would you like to modify this interface? (y/n)\n")
if "n" in ans:
continue
if 'DHCPv4' in ethernet_data[ethernet]:
if ethernet_data[ethernet]['DHCPv4'].get('UseDNSServers'):
resp = _redfishobj.patch(ethernet, {"DHCPv4": {"UseDNSServers": False}})
ilo_response(_redfishobj, resp)
if ethernet_data[ethernet]['DHCPv4'].get('UseGateway'):
resp = _redfishobj.patch(ethernet, {"DHCPv4": {"UseGateway": False}})
ilo_response(_redfishobj, resp)
if 'IPv4StaticAddresses' in ethernet_data[ethernet]:
body.update({"IPv4Addresses": [ipv4_dict]})
if 'StaticNameServers' in ethernet_data[ethernet]:
body.update({"StaticNameServers" : [dns_dict.get('PrimaryDNS'), \
dns_dict.get('SecondaryDNS')]})
else:
body.update({"Oem": {"Hpe": {"IPv4": {"DNSServers": [dns_dict.get('PrimaryDNS'), \
dns_dict.get('SecondaryDNS')]}}}})
resp = _redfishobj.patch(ethernet, body)
ilo_response(_redfishobj, resp)
break
def ilo_response(_redfishobj, resp):
if resp.status == 400:
try:
print(json.dumps(resp.obj['error']['@Message.ExtendedInfo'], indent=4, \
sort_keys=True))
except Exception as excp:
sys.stderr.write("A response error occurred, unable to access iLO Extended "\
"Message Info...")
elif resp.status != 200:
sys.stderr.write("An http response of \'%s\' was returned.\n" % resp.status)
else:
print("Success! Suggest to reset iLO for settings to take effect.\n")
print(json.dumps(resp.dict, indent=4, sort_keys=True))
if __name__ == "__main__":
# When running on the server locally use the following commented values
#SYSTEM_URL = None
#LOGIN_ACCOUNT = None
#LOGIN_PASSWORD = <PASSWORD>
# When running remotely connect using the secured (https://) address,
# account name, and password to send https requests
# SYSTEM_URL acceptable examples:
# "https://10.0.0.100"
# "https://ilo.hostname"
SYSTEM_URL = "https://10.0.0.100"
LOGIN_ACCOUNT = "admin"
LOGIN_PASSWORD = "password"
#IPv4 settings for Address, Gateway and SubnetMask as well as DNS.
IPV4_DICT = {'Address':'172.16.58.3', \
'Gateway':'192.168.3.11', \
'SubnetMask':'255.255.252.0' \
}
DNS_DICT = {'PrimaryDNS':'172.16.31.10', \
'SecondaryDNS':'172.16.58.3' \
}
# flag to force disable resource directory. Resource directory and associated operations are
# intended for HPE servers.
DISABLE_RESOURCE_DIR = False
try:
# Create a Redfish client object
REDFISHOBJ = RedfishClient(base_url=SYSTEM_URL, username=LOGIN_ACCOUNT, \
password=<PASSWORD>)
# Login with the Redfish client
REDFISHOBJ.login()
except ServerDownOrUnreachableError as excp:
sys.stderr.write("ERROR: server not reachable or does not support RedFish.\n")
sys.exit()
set_ilo_static_ipv4(REDFISHOBJ, IPV4_DICT, DNS_DICT)
REDFISHOBJ.logout()
|
mapper_examples/postgresql_hostname_tenant_mapper.py | leapoli/django-db-multitenant | 126 | 12676450 | """
https://gist.github.com/stephane/08b649ea818bd9dce2ff33903ba94aba
Maps a request to a tenant using the first part of the hostname.
For example:
foo.example.com:8000 -> foo
bar.baz.example.com -> bar
This is a simple example; you should probably verify tenant names
are valid against a whitelist before returning them, since the returned
tenant name will be issued in a `SET search_path TO` SQL query.
Take care to create the corresponding schema first, with ``psql``:
db=# CREATE SCHEMA foo;
You can set the tenant in command line with:
    TENANT_NAME=foo ./manage.py migrate
With PostgreSQL, it's possible to have complex setups where
some tables are public so you can set the schema to:
SET search_path TO foo,public;
To have access to public and foo tables at the same time.
https://www.postgresql.org/docs/current/static/ddl-schemas.html
"""
import re
from db_multitenant import mapper
HOST_REGEX = re.compile(r'(\w+)[\.|$]')
class TenantMapper(mapper.TenantMapper):
def get_tenant_name(self, request):
"""Takes the first part of the hostname as the tenant"""
hostname = request.get_host()
match = HOST_REGEX.search(hostname)
tenant_name = match.groups()[0].lower() if match else None
# Compare against a whitelist or fallback to 'public'?
if not tenant_name:
raise ValueError('Unable to find the tenant name from `%s`.' % hostname)
return tenant_name
def get_db_name(self, request, tenant_name):
# Still use the DB name of settings
return None
def get_cache_prefix(self, request, tenant_name, db_name):
"""The arguments db_name and tenant_name are provided by the methods of this TenantMapper"""
return 'tenant-%s' % tenant_name
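# Illustrative sketch (not part of this file): for a request whose Host header is
# "foo.example.com:8000", HOST_REGEX captures "foo", so get_tenant_name() returns "foo" and
# get_cache_prefix() returns "tenant-foo". The request stub below is hypothetical.
#   class _FakeRequest:
#       def get_host(self):
#           return "foo.example.com:8000"
#   mapper = TenantMapper()
#   assert mapper.get_tenant_name(_FakeRequest()) == "foo"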
|
app/tests/service_container_test.py | golem4300/quattran | 183 | 12676457 | <filename>app/tests/service_container_test.py
import unittest
import pytest
from mock import Mock
from app import ServiceContainer
from app.exceptions import ServiceNotFoundException, ContainerAlreadyBootedException
class PluginsTest(unittest.TestCase):
@staticmethod
def test_register_singleton():
service_container = ServiceContainer()
service = Mock()
service_container.register_singleton('mock_service', service)
service_container.boot()
assert service_container.has('mock_service') is True
assert service_container.get('mock_service') is service
@staticmethod
def test_register_decorator():
service_container = ServiceContainer()
@service_container.register('test_service')
class TestService(object):
pass
service_container.boot()
assert service_container.has('test_service') is True
assert isinstance(service_container.get('test_service'), TestService) is True
@staticmethod
def test_get_service_unknown():
service_container = ServiceContainer()
service_container.boot()
with pytest.raises(ServiceNotFoundException):
service_container.get('test_service')
@staticmethod
def test_register_decorator_args():
service_container = ServiceContainer()
another_service = Mock()
service_container.register_singleton('another_service', another_service)
param_service = Mock()
service_container.register_singleton('param_service', param_service)
service_container.set_parameter('test_param', 'hello')
service_container.set_parameter('service_param', 'param_service')
@service_container.register('test_service', ['@another_service', '%test_param%', '%service_param%', 'static'])
class TestService(object):
def __init__(self, ts_another_service, ts_test_param, ts_param_service, ts_static_val):
self.another_service = ts_another_service
self.test_param = ts_test_param
self.param_service = ts_param_service
self.static_val = ts_static_val
service_container.boot()
test_service = service_container.get('test_service')
assert service_container.has('test_service') is True
assert isinstance(test_service, TestService) is True
assert test_service.another_service is another_service
assert test_service.test_param is 'hello'
assert test_service.param_service is param_service
assert test_service.static_val is 'static'
@staticmethod
def test_register_decorator_kwargs():
service_container = ServiceContainer()
another_service = Mock()
service_container.register_singleton('another_service', another_service)
param_service = Mock()
service_container.register_singleton('param_service', param_service)
service_container.set_parameter('test_param', 'hello')
service_container.set_parameter('service_param', 'param_service')
@service_container.register('test_service', keywordsargs={'ts_another_service': '@another_service', 'ts_test_param': '%test_param%', 'ts_param_service': '%service_param%', 'ts_static_val': 'static'})
class TestService(object):
def __init__(self, ts_another_service=None, ts_test_param=None, ts_param_service=None, ts_static_val=None):
self.another_service = ts_another_service
self.test_param = ts_test_param
self.param_service = ts_param_service
self.static_val = ts_static_val
service_container.boot()
test_service = service_container.get('test_service')
assert service_container.has('test_service') is True
assert isinstance(test_service, TestService) is True
assert test_service.another_service is another_service
assert test_service.test_param is 'hello'
assert test_service.param_service is param_service
assert test_service.static_val is 'static'
@staticmethod
def test_register_tags():
service_container = ServiceContainer()
another_service = Mock()
service_container.register_singleton('another_service', another_service, tags=['tag_one', 'tag_two', 'tag_three'])
@service_container.register('test_service', tags=['tag_one', 'tag_two'])
# pylint: disable=unused-variable
class TestService(object):
def __init__(self, ts_another_service=None, ts_test_param=None, ts_param_service=None, ts_static_val=None):
self.another_service = ts_another_service
self.test_param = ts_test_param
self.param_service = ts_param_service
self.static_val = ts_static_val
service_container.boot()
tag_one_services = service_container.get_by_tag('tag_one')
tag_two_services = service_container.get_by_tag('tag_two')
tag_three_services = service_container.get_by_tag('tag_three')
tag_four_services = service_container.get_by_tag('tag_four')
assert len(tag_one_services) is 2
assert len(tag_two_services) is 2
assert len(tag_three_services) is 1
assert len(tag_four_services) is 0
@staticmethod
def test_compiler_pass():
service_container = ServiceContainer()
@service_container.register_compiler_pass()
# pylint: disable=unused-variable
def compiler_pass(sc):
sc.set_parameter('compiler_set', 'test')
service_container.boot()
assert service_container.get_parameter('compiler_set') is 'test'
@staticmethod
def test_compiler_pass_already_booted():
service_container = ServiceContainer()
service_container.boot()
with pytest.raises(ContainerAlreadyBootedException):
@service_container.register_compiler_pass()
# pylint: disable=unused-variable
def compiler_pass(sc):
sc.set_parameter('compiler_set', 'test')
@staticmethod
def test_boot_already_booted():
service_container = ServiceContainer()
service_container.boot()
with pytest.raises(ContainerAlreadyBootedException):
service_container.boot()
|
airbyte-integrations/bases/base-normalization/integration_tests/test_ephemeral.py | weltam/airbyte | 6,215 | 12676458 | <gh_stars>1000+
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
import os
import pathlib
import re
import shutil
import tempfile
from distutils.dir_util import copy_tree
from typing import Any, Dict
import pytest
from integration_tests.dbt_integration_test import DbtIntegrationTest
from normalization.destination_type import DestinationType
from normalization.transform_catalog.catalog_processor import CatalogProcessor
temporary_folders = set()
dbt_test_utils = DbtIntegrationTest()
@pytest.fixture(scope="module", autouse=True)
def before_all_tests(request):
destinations_to_test = dbt_test_utils.get_test_targets()
if DestinationType.POSTGRES.value not in destinations_to_test:
destinations_to_test.append(DestinationType.POSTGRES.value)
dbt_test_utils.set_target_schema("test_ephemeral")
dbt_test_utils.change_current_test_dir(request)
dbt_test_utils.setup_db(destinations_to_test)
os.environ["PATH"] = os.path.abspath("../.venv/bin/") + ":" + os.environ["PATH"]
yield
dbt_test_utils.tear_down_db()
for folder in temporary_folders:
print(f"Deleting temporary test folder {folder}")
shutil.rmtree(folder, ignore_errors=True)
@pytest.fixture
def setup_test_path(request):
dbt_test_utils.change_current_test_dir(request)
print(f"Running from: {pathlib.Path().absolute()}")
print(f"Current PATH is: {os.environ['PATH']}")
yield
os.chdir(request.config.invocation_dir)
@pytest.mark.parametrize("column_count", [1000])
@pytest.mark.parametrize("destination_type", list(DestinationType))
def test_destination_supported_limits(destination_type: DestinationType, column_count: int):
if destination_type.value not in dbt_test_utils.get_test_targets() or destination_type.value == DestinationType.MYSQL.value:
# In MySQL, the max number of columns is limited by row size (8KB),
# not by absolute column count. It is way fewer than 1000.
pytest.skip(f"Destinations {destination_type} is not in NORMALIZATION_TEST_TARGET env variable (MYSQL is also skipped)")
if destination_type.value == DestinationType.ORACLE.value:
column_count = 998
run_test(destination_type, column_count)
@pytest.mark.parametrize(
"integration_type, column_count, expected_exception_message",
[
("Postgres", 1665, "target lists can have at most 1664 entries"),
("BigQuery", 2500, "The view is too large."),
("Snowflake", 2000, "Operation failed because soft limit on objects of type 'Column' per table was exceeded."),
("Redshift", 1665, "target lists can have at most 1664 entries"),
("MySQL", 250, "Row size too large"),
("Oracle", 1001, "ORA-01792: maximum number of columns in a table or view is 1000"),
("MSSQL", 1025, "exceeds the maximum of 1024 columns."),
],
)
def test_destination_failure_over_limits(integration_type: str, column_count: int, expected_exception_message: str, setup_test_path):
destination_type = DestinationType.from_string(integration_type)
if destination_type.value not in dbt_test_utils.get_test_targets():
pytest.skip(f"Destinations {destination_type} is not in NORMALIZATION_TEST_TARGET env variable")
run_test(destination_type, column_count, expected_exception_message)
def test_empty_streams(setup_test_path):
run_test(DestinationType.POSTGRES, 0)
def test_stream_with_1_airbyte_column(setup_test_path):
run_test(DestinationType.POSTGRES, 1)
def run_test(destination_type: DestinationType, column_count: int, expected_exception_message: str = ""):
if destination_type.value == DestinationType.ORACLE.value:
# Oracle does not allow changing to random schema
dbt_test_utils.set_target_schema("test_normalization")
else:
dbt_test_utils.set_target_schema("test_ephemeral")
print("Testing ephemeral")
integration_type = destination_type.value
# Create the test folder with dbt project and appropriate destination settings to run integration tests from
test_root_dir = setup_test_dir(integration_type)
destination_config = dbt_test_utils.generate_profile_yaml_file(destination_type, test_root_dir)
# generate a catalog and associated dbt models files
generate_dbt_models(destination_type, test_root_dir, column_count)
# Use destination connector to create empty _airbyte_raw_* tables to use as input for the test
assert setup_input_raw_data(integration_type, test_root_dir, destination_config)
if expected_exception_message:
with pytest.raises(AssertionError):
dbt_test_utils.dbt_run(destination_type, test_root_dir)
assert search_logs_for_pattern(test_root_dir + "/dbt_output.log", expected_exception_message)
else:
dbt_test_utils.dbt_run(destination_type, test_root_dir)
def search_logs_for_pattern(log_file: str, pattern: str):
with open(log_file, "r") as file:
for line in file:
if re.search(pattern, line):
return True
return False
def setup_test_dir(integration_type: str) -> str:
"""
We prepare a clean folder to run the tests from.
"""
test_root_dir = f"{pathlib.Path().joinpath('..', 'build', 'normalization_test_output', integration_type.lower()).resolve()}"
os.makedirs(test_root_dir, exist_ok=True)
test_root_dir = tempfile.mkdtemp(dir=test_root_dir)
temporary_folders.add(test_root_dir)
shutil.rmtree(test_root_dir, ignore_errors=True)
print(f"Setting up test folder {test_root_dir}")
copy_tree("../dbt-project-template", test_root_dir)
if integration_type == DestinationType.MSSQL.value:
copy_tree("../dbt-project-template-mysql", test_root_dir)
elif integration_type == DestinationType.MYSQL.value:
copy_tree("../dbt-project-template-mysql", test_root_dir)
elif integration_type == DestinationType.ORACLE.value:
copy_tree("../dbt-project-template-oracle", test_root_dir)
return test_root_dir
def setup_input_raw_data(integration_type: str, test_root_dir: str, destination_config: Dict[str, Any]) -> bool:
"""
    This should populate the associated "raw" tables that normalization reads from when running the dbt CLI.
"""
config_file = os.path.join(test_root_dir, "destination_config.json")
with open(config_file, "w") as f:
f.write(json.dumps(destination_config))
commands = [
"docker",
"run",
"--rm",
"--init",
"-v",
f"{test_root_dir}:/data",
"--network",
"host",
"-i",
f"airbyte/destination-{integration_type.lower()}:dev",
"write",
"--config",
"/data/destination_config.json",
"--catalog",
"/data/catalog.json",
]
# Force a reset in destination raw tables
return dbt_test_utils.run_destination_process("", test_root_dir, commands)
def generate_dbt_models(destination_type: DestinationType, test_root_dir: str, column_count: int):
"""
This is the normalization step generating dbt models files from the destination_catalog.json taken as input.
"""
output_directory = os.path.join(test_root_dir, "models", "generated")
shutil.rmtree(output_directory, ignore_errors=True)
catalog_processor = CatalogProcessor(output_directory, destination_type)
catalog_config = {
"streams": [
{
"stream": {
"name": dbt_test_utils.generate_random_string(f"stream_with_{column_count}_columns"),
"json_schema": {
"type": ["null", "object"],
"properties": {},
},
"supported_sync_modes": ["incremental"],
"source_defined_cursor": True,
"default_cursor_field": [],
},
"sync_mode": "incremental",
"cursor_field": [],
"destination_sync_mode": "overwrite",
}
]
}
if column_count == 1:
catalog_config["streams"][0]["stream"]["json_schema"]["properties"]["_airbyte_id"] = {"type": "integer"}
else:
for column in [dbt_test_utils.random_string(5) for _ in range(column_count)]:
catalog_config["streams"][0]["stream"]["json_schema"]["properties"][column] = {"type": "string"}
catalog = os.path.join(test_root_dir, "catalog.json")
with open(catalog, "w") as fh:
fh.write(json.dumps(catalog_config))
catalog_processor.process(catalog, "_airbyte_data", dbt_test_utils.target_schema)
|
src/modules/mod_socketcontrol.py | albertz/music-player | 132 | 12676473 | # -*- coding: utf-8 -*-
# MusicPlayer, https://github.com/albertz/music-player
# Copyright (c) 2013, <NAME>, www.az2000.de
# All rights reserved.
# This code is under the 2-clause BSD license, see License.txt in the root directory of this project.
import sys, os
import appinfo
import utils
import binstruct
def _handleConnection(conn):
conn.setblocking(True)
f = conn.makefile()
binstruct.write(f, (appinfo.appid, "SocketControl", 0.1))
f.flush()
try:
clientappid,clientname,clientver,clientstatus = binstruct.read(f)
except binstruct.FormatError:
print "socketcontrol.handleConnection: wrong signature"
return
if clientstatus != "ok":
print "socketcontrol.handleConnection: status returned %s" % status
return
from State import state
from queue import queue
shellGlobals = {
"state": state,
"queue": queue,
}
globals = locals = shellGlobals
COMPILE_STRING_FN = "<socketcontrol input>"
while True:
try:
idx,s = binstruct.varDecode(f)
except Exception:
# probably closed
return
assert isinstance(s, (str,unicode))
try:
c = utils.interactive_py_compile(s, COMPILE_STRING_FN)
except Exception as e:
answer = (idx, "compile-exception", (e.__class__.__name__, str(e)))
else:
try:
ret = eval(c, globals, locals)
except Exception as e:
answer = (idx, "eval-exception", (e.__class__.__name__, str(e)))
else:
if ret is not None:
try:
ret = repr(ret)
except Exception as e:
ret = "<repr exception: %s: %s>" % (e.__class__.__name__, str(e))
answer = (idx, "return", ret)
f.write(binstruct.varEncode(answer).tostring())
f.flush()
def handleConnection(conn, address):
print "socketcontrol: accepted", address
utils.daemonThreadCall(lambda: _handleConnection(conn), name="socketcontrol.handleConnection")
def socketcontrolMain():
import tempfile
tmpdir = tempfile.gettempdir() or "/tmp"
sockfilename = "%s/%s-%i-socketcontrol" % (tmpdir, appinfo.appid, os.getpid())
globals()["socketfile"] = sockfilename # copy
import socket
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind(sockfilename)
os.chmod(sockfilename, 0700)
s.listen(1)
def listenThread():
print "socketcontrol: listening on", sockfilename
while True:
conn, address = s.accept()
handleConnection(conn, address)
conn, address = None, None # remove refs here
utils.daemonThreadCall(listenThread, name="socketcontrol.listen")
from State import state
for ev,args,kwargs in state.updates.read():
pass
try: s.shutdown(socket.SHUT_RDWR)
except Exception: pass
try: s.close()
except Exception: pass
try: os.unlink(sockfilename)
except Exception: pass
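

# --- Illustration (not part of the original module) ---------------------------
# A rough client-side sketch of the handshake and eval protocol implemented in
# _handleConnection() above, assuming the same `binstruct` helpers are usable on
# the client side. The socket path, client name and expression are placeholders.
def _exampleClientRequest(sockFilename, pythonExpr, idx=0):
    import socket
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(sockFilename)
    f = s.makefile()
    serverAppId, serverName, serverVer = binstruct.read(f)  # greeting from the server
    binstruct.write(f, (serverAppId, "exampleClient", 0.1, "ok"))
    f.flush()
    # Requests are varEncoded (idx, source-string) pairs; replies are
    # (idx, status, payload) with status "return", "eval-exception" or
    # "compile-exception".
    f.write(binstruct.varEncode((idx, pythonExpr)).tostring())
    f.flush()
    replyIdx, status, payload = binstruct.varDecode(f)
    return status, payload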
|
tests/integration/test_replicated_database/test.py | mrk-andreev/ClickHouse | 8,629 | 12676476 | <reponame>mrk-andreev/ClickHouse
import os
import shutil
import time
import re
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry, assert_logs_contain
from helpers.network import PartitionManager
test_recover_staled_replica_run = 1
cluster = ClickHouseCluster(__file__)
main_node = cluster.add_instance(
"main_node",
main_configs=["configs/config.xml"],
user_configs=["configs/settings.xml"],
with_zookeeper=True,
stay_alive=True,
macros={"shard": 1, "replica": 1},
)
dummy_node = cluster.add_instance(
"dummy_node",
main_configs=["configs/config.xml"],
user_configs=["configs/settings.xml"],
with_zookeeper=True,
stay_alive=True,
macros={"shard": 1, "replica": 2},
)
competing_node = cluster.add_instance(
"competing_node",
main_configs=["configs/config.xml"],
user_configs=["configs/settings.xml"],
with_zookeeper=True,
macros={"shard": 1, "replica": 3},
)
snapshotting_node = cluster.add_instance(
"snapshotting_node",
main_configs=["configs/config.xml"],
user_configs=["configs/settings.xml"],
with_zookeeper=True,
macros={"shard": 2, "replica": 1},
)
snapshot_recovering_node = cluster.add_instance(
"snapshot_recovering_node",
main_configs=["configs/config.xml"],
user_configs=["configs/settings.xml"],
with_zookeeper=True,
)
all_nodes = [
main_node,
dummy_node,
competing_node,
snapshotting_node,
snapshot_recovering_node,
]
uuid_regex = re.compile("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}")
def assert_create_query(nodes, table_name, expected):
replace_uuid = lambda x: re.sub(uuid_regex, "uuid", x)
query = "show create table {}".format(table_name)
for node in nodes:
assert_eq_with_retry(node, query, expected, get_result=replace_uuid)
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_create_replicated_table(started_cluster):
main_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica' || '1');"
)
dummy_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');"
)
assert (
"Explicit zookeeper_path and replica_name are specified"
in main_node.query_and_get_error(
"CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) "
"ENGINE=ReplicatedMergeTree('/test/tmp', 'r') ORDER BY k PARTITION BY toYYYYMM(d);"
)
)
assert (
"Explicit zookeeper_path and replica_name are specified"
in main_node.query_and_get_error(
"CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) "
"ENGINE=ReplicatedMergeTree('/test/tmp', 'r', d, k, 8192);"
)
)
assert "Old syntax is not allowed" in main_node.query_and_get_error(
"CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) "
"ENGINE=ReplicatedMergeTree('/test/tmp/{shard}', '{replica}', d, k, 8192);"
)
main_node.query(
"CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree ORDER BY k PARTITION BY toYYYYMM(d);"
)
expected = (
"CREATE TABLE testdb.replicated_table\\n(\\n `d` Date,\\n `k` UInt64,\\n `i32` Int32\\n)\\n"
"ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\n"
"PARTITION BY toYYYYMM(d)\\nORDER BY k\\nSETTINGS index_granularity = 8192"
)
assert_create_query([main_node, dummy_node], "testdb.replicated_table", expected)
# assert without replacing uuid
assert main_node.query("show create testdb.replicated_table") == dummy_node.query(
"show create testdb.replicated_table"
)
main_node.query("DROP DATABASE testdb SYNC")
dummy_node.query("DROP DATABASE testdb SYNC")
@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
def test_simple_alter_table(started_cluster, engine):
main_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');"
)
dummy_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');"
)
# test_simple_alter_table
name = "testdb.alter_test_{}".format(engine)
main_node.query(
"CREATE TABLE {} "
"(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) "
"ENGINE = {} PARTITION BY StartDate ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID);".format(
name, engine
)
)
main_node.query("ALTER TABLE {} ADD COLUMN Added0 UInt32;".format(name))
main_node.query("ALTER TABLE {} ADD COLUMN Added2 UInt32;".format(name))
main_node.query(
"ALTER TABLE {} ADD COLUMN Added1 UInt32 AFTER Added0;".format(name)
)
main_node.query(
"ALTER TABLE {} ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2;".format(
name
)
)
main_node.query(
"ALTER TABLE {} ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B;".format(
name
)
)
main_node.query(
"ALTER TABLE {} ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1;".format(
name
)
)
full_engine = (
engine
if not "Replicated" in engine
else engine + "(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')"
)
expected = (
"CREATE TABLE {}\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n"
" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n"
" `ToDrop` UInt32,\\n `Added0` UInt32,\\n `Added1` UInt32,\\n `Added2` UInt32,\\n"
" `AddedNested1.A` Array(UInt32),\\n `AddedNested1.B` Array(UInt64),\\n `AddedNested1.C` Array(String),\\n"
" `AddedNested2.A` Array(UInt32),\\n `AddedNested2.B` Array(UInt64)\\n)\\n"
"ENGINE = {}\\nPARTITION BY StartDate\\nORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)\\n"
"SETTINGS index_granularity = 8192".format(name, full_engine)
)
assert_create_query([main_node, dummy_node], name, expected)
# test_create_replica_after_delay
competing_node.query(
"CREATE DATABASE IF NOT EXISTS testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');"
)
name = "testdb.alter_test_{}".format(engine)
main_node.query("ALTER TABLE {} ADD COLUMN Added3 UInt32;".format(name))
main_node.query("ALTER TABLE {} DROP COLUMN AddedNested1;".format(name))
main_node.query("ALTER TABLE {} RENAME COLUMN Added1 TO AddedNested1;".format(name))
full_engine = (
engine
if not "Replicated" in engine
else engine + "(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')"
)
expected = (
"CREATE TABLE {}\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n"
" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n"
" `ToDrop` UInt32,\\n `Added0` UInt32,\\n `AddedNested1` UInt32,\\n `Added2` UInt32,\\n"
" `AddedNested2.A` Array(UInt32),\\n `AddedNested2.B` Array(UInt64),\\n `Added3` UInt32\\n)\\n"
"ENGINE = {}\\nPARTITION BY StartDate\\nORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)\\n"
"SETTINGS index_granularity = 8192".format(name, full_engine)
)
assert_create_query([main_node, dummy_node, competing_node], name, expected)
main_node.query("DROP DATABASE testdb SYNC")
dummy_node.query("DROP DATABASE testdb SYNC")
competing_node.query("DROP DATABASE testdb SYNC")
def get_table_uuid(database, name):
return main_node.query(
f"SELECT uuid FROM system.tables WHERE database = '{database}' and name = '{name}'"
).strip()
@pytest.fixture(scope="module", name="attachable_part")
def fixture_attachable_part(started_cluster):
main_node.query(f"CREATE DATABASE testdb_attach_atomic ENGINE = Atomic")
main_node.query(
f"CREATE TABLE testdb_attach_atomic.test (CounterID UInt32) ENGINE = MergeTree ORDER BY (CounterID)"
)
main_node.query(f"INSERT INTO testdb_attach_atomic.test VALUES (123)")
main_node.query(
f"ALTER TABLE testdb_attach_atomic.test FREEZE WITH NAME 'test_attach'"
)
table_uuid = get_table_uuid("testdb_attach_atomic", "test")
return os.path.join(
main_node.path,
f"database/shadow/test_attach/store/{table_uuid[:3]}/{table_uuid}/all_1_1_0",
)
@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
def test_alter_attach(started_cluster, attachable_part, engine):
main_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');"
)
dummy_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');"
)
name = "alter_attach_test_{}".format(engine)
main_node.query(
f"CREATE TABLE testdb.{name} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)"
)
table_uuid = get_table_uuid("testdb", name)
# Provide and attach a part to the main node
shutil.copytree(
attachable_part,
os.path.join(
main_node.path,
f"database/store/{table_uuid[:3]}/{table_uuid}/detached/all_1_1_0",
),
)
main_node.query(f"ALTER TABLE testdb.{name} ATTACH PART 'all_1_1_0'")
# On the main node, data is attached
assert main_node.query(f"SELECT CounterID FROM testdb.{name}") == "123\n"
# On the other node, data is replicated only if using a Replicated table engine
if engine == "ReplicatedMergeTree":
assert dummy_node.query(f"SELECT CounterID FROM testdb.{name}") == "123\n"
else:
assert dummy_node.query(f"SELECT CounterID FROM testdb.{name}") == ""
main_node.query("DROP DATABASE testdb SYNC")
dummy_node.query("DROP DATABASE testdb SYNC")
@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
def test_alter_drop_part(started_cluster, engine):
main_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');"
)
dummy_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');"
)
table = f"alter_drop_{engine}"
part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0"
main_node.query(
f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)"
)
main_node.query(f"INSERT INTO testdb.{table} VALUES (123)")
if engine == "MergeTree":
dummy_node.query(f"INSERT INTO testdb.{table} VALUES (456)")
main_node.query(f"ALTER TABLE testdb.{table} DROP PART '{part_name}'")
assert main_node.query(f"SELECT CounterID FROM testdb.{table}") == ""
if engine == "ReplicatedMergeTree":
# The DROP operation is still replicated at the table engine level
assert dummy_node.query(f"SELECT CounterID FROM testdb.{table}") == ""
else:
assert dummy_node.query(f"SELECT CounterID FROM testdb.{table}") == "456\n"
main_node.query("DROP DATABASE testdb SYNC")
dummy_node.query("DROP DATABASE testdb SYNC")
@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
def test_alter_detach_part(started_cluster, engine):
main_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');"
)
dummy_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');"
)
table = f"alter_detach_{engine}"
part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0"
main_node.query(
f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)"
)
main_node.query(f"INSERT INTO testdb.{table} VALUES (123)")
if engine == "MergeTree":
dummy_node.query(f"INSERT INTO testdb.{table} VALUES (456)")
main_node.query(f"ALTER TABLE testdb.{table} DETACH PART '{part_name}'")
detached_parts_query = f"SELECT name FROM system.detached_parts WHERE database='testdb' AND table='{table}'"
assert main_node.query(detached_parts_query) == f"{part_name}\n"
if engine == "ReplicatedMergeTree":
# The detach operation is still replicated at the table engine level
assert dummy_node.query(detached_parts_query) == f"{part_name}\n"
else:
assert dummy_node.query(detached_parts_query) == ""
main_node.query("DROP DATABASE testdb SYNC")
dummy_node.query("DROP DATABASE testdb SYNC")
@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
def test_alter_drop_detached_part(started_cluster, engine):
main_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');"
)
dummy_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');"
)
table = f"alter_drop_detached_{engine}"
part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0"
main_node.query(
f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)"
)
main_node.query(f"INSERT INTO testdb.{table} VALUES (123)")
main_node.query(f"ALTER TABLE testdb.{table} DETACH PART '{part_name}'")
if engine == "MergeTree":
dummy_node.query(f"INSERT INTO testdb.{table} VALUES (456)")
dummy_node.query(f"ALTER TABLE testdb.{table} DETACH PART '{part_name}'")
main_node.query(f"ALTER TABLE testdb.{table} DROP DETACHED PART '{part_name}'")
detached_parts_query = f"SELECT name FROM system.detached_parts WHERE database='testdb' AND table='{table}'"
assert main_node.query(detached_parts_query) == ""
assert dummy_node.query(detached_parts_query) == f"{part_name}\n"
main_node.query("DROP DATABASE testdb SYNC")
dummy_node.query("DROP DATABASE testdb SYNC")
def test_alter_fetch(started_cluster):
main_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');"
)
dummy_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');"
)
main_node.query(
"CREATE TABLE testdb.fetch_source (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)"
)
main_node.query(
"CREATE TABLE testdb.fetch_target (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)"
)
main_node.query("INSERT INTO testdb.fetch_source VALUES (123)")
table_uuid = get_table_uuid("testdb", "fetch_source")
main_node.query(
f"ALTER TABLE testdb.fetch_target FETCH PART 'all_0_0_0' FROM '/clickhouse/tables/{table_uuid}/{{shard}}' "
)
detached_parts_query = "SELECT name FROM system.detached_parts WHERE database='testdb' AND table='fetch_target'"
assert main_node.query(detached_parts_query) == "all_0_0_0\n"
assert dummy_node.query(detached_parts_query) == ""
main_node.query("DROP DATABASE testdb SYNC")
dummy_node.query("DROP DATABASE testdb SYNC")
def test_alters_from_different_replicas(started_cluster):
main_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');"
)
dummy_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');"
)
# test_alters_from_different_replicas
competing_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');"
)
main_node.query(
"CREATE TABLE testdb.concurrent_test "
"(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) "
"ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192);"
)
main_node.query(
"CREATE TABLE testdb.dist AS testdb.concurrent_test ENGINE = Distributed(testdb, testdb, concurrent_test, CounterID)"
)
dummy_node.stop_clickhouse(kill=True)
settings = {"distributed_ddl_task_timeout": 5}
assert (
"There are 1 unfinished hosts (0 of them are currently active)"
in competing_node.query_and_get_error(
"ALTER TABLE testdb.concurrent_test ADD COLUMN Added0 UInt32;",
settings=settings,
)
)
settings = {
"distributed_ddl_task_timeout": 5,
"distributed_ddl_output_mode": "null_status_on_timeout",
}
assert "shard1|replica2\t\\N\t\\N" in main_node.query(
"ALTER TABLE testdb.concurrent_test ADD COLUMN Added2 UInt32;",
settings=settings,
)
settings = {
"distributed_ddl_task_timeout": 5,
"distributed_ddl_output_mode": "never_throw",
}
assert "shard1|replica2\t\\N\t\\N" in competing_node.query(
"ALTER TABLE testdb.concurrent_test ADD COLUMN Added1 UInt32 AFTER Added0;",
settings=settings,
)
dummy_node.start_clickhouse()
main_node.query(
"ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2;"
)
competing_node.query(
"ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B;"
)
main_node.query(
"ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1;"
)
expected = (
"CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n"
" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32,\\n"
" `Added0` UInt32,\\n `Added1` UInt32,\\n `Added2` UInt32,\\n `AddedNested1.A` Array(UInt32),\\n"
" `AddedNested1.B` Array(UInt64),\\n `AddedNested1.C` Array(String),\\n `AddedNested2.A` Array(UInt32),\\n"
" `AddedNested2.B` Array(UInt64)\\n)\\n"
"ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192)"
)
assert_create_query([main_node, competing_node], "testdb.concurrent_test", expected)
# test_create_replica_after_delay
main_node.query("DROP TABLE testdb.concurrent_test SYNC")
main_node.query(
"CREATE TABLE testdb.concurrent_test "
"(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) "
"ENGINE = ReplicatedMergeTree ORDER BY CounterID;"
)
expected = (
"CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n"
" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n"
"ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192"
)
assert_create_query([main_node, competing_node], "testdb.concurrent_test", expected)
main_node.query(
"INSERT INTO testdb.dist (CounterID, StartDate, UserID) SELECT number, addDays(toDate('2020-02-02'), number), intHash32(number) FROM numbers(10)"
)
# test_replica_restart
main_node.restart_clickhouse()
expected = (
"CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n"
" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n"
"ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192"
)
# test_snapshot_and_snapshot_recover
snapshotting_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard2', 'replica1');"
)
snapshot_recovering_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard2', 'replica2');"
)
assert_create_query(all_nodes, "testdb.concurrent_test", expected)
main_node.query("SYSTEM FLUSH DISTRIBUTED testdb.dist")
main_node.query(
"ALTER TABLE testdb.concurrent_test UPDATE StartDate = addYears(StartDate, 1) WHERE 1"
)
res = main_node.query("ALTER TABLE testdb.concurrent_test DELETE WHERE UserID % 2")
assert (
"shard1|replica1" in res
and "shard1|replica2" in res
and "shard1|replica3" in res
)
assert "shard2|replica1" in res and "shard2|replica2" in res
expected = (
"1\t1\tmain_node\n"
"1\t2\tdummy_node\n"
"1\t3\tcompeting_node\n"
"2\t1\tsnapshotting_node\n"
"2\t2\tsnapshot_recovering_node\n"
)
assert (
main_node.query(
"SELECT shard_num, replica_num, host_name FROM system.clusters WHERE cluster='testdb'"
)
== expected
)
# test_drop_and_create_replica
main_node.query("DROP DATABASE testdb SYNC")
main_node.query(
"CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');"
)
expected = (
"CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n"
" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n"
"ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192"
)
assert_create_query([main_node, competing_node], "testdb.concurrent_test", expected)
assert_create_query(all_nodes, "testdb.concurrent_test", expected)
for node in all_nodes:
node.query("SYSTEM SYNC REPLICA testdb.concurrent_test")
expected = (
"0\t2021-02-02\t4249604106\n"
"1\t2021-02-03\t1343103100\n"
"4\t2021-02-06\t3902320246\n"
"7\t2021-02-09\t3844986530\n"
"9\t2021-02-11\t1241149650\n"
)
assert_eq_with_retry(
dummy_node,
"SELECT CounterID, StartDate, UserID FROM testdb.dist ORDER BY CounterID",
expected,
)
main_node.query("DROP DATABASE testdb SYNC")
dummy_node.query("DROP DATABASE testdb SYNC")
competing_node.query("DROP DATABASE testdb SYNC")
snapshotting_node.query("DROP DATABASE testdb SYNC")
snapshot_recovering_node.query("DROP DATABASE testdb SYNC")
def test_recover_staled_replica(started_cluster):
main_node.query(
"CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica1');"
)
started_cluster.get_kazoo_client("zoo1").set(
"/clickhouse/databases/recover/logs_to_keep", b"10"
)
dummy_node.query(
"CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica2');"
)
settings = {"distributed_ddl_task_timeout": 0}
main_node.query("CREATE TABLE recover.t1 (n int) ENGINE=Memory", settings=settings)
dummy_node.query(
"CREATE TABLE recover.t2 (s String) ENGINE=Memory", settings=settings
)
main_node.query(
"CREATE TABLE recover.mt1 (n int) ENGINE=MergeTree order by n",
settings=settings,
)
dummy_node.query(
"CREATE TABLE recover.mt2 (n int) ENGINE=MergeTree order by n",
settings=settings,
)
main_node.query(
"CREATE TABLE recover.rmt1 (n int) ENGINE=ReplicatedMergeTree order by n",
settings=settings,
)
dummy_node.query(
"CREATE TABLE recover.rmt2 (n int) ENGINE=ReplicatedMergeTree order by n",
settings=settings,
)
main_node.query(
"CREATE TABLE recover.rmt3 (n int) ENGINE=ReplicatedMergeTree order by n",
settings=settings,
)
dummy_node.query(
"CREATE TABLE recover.rmt5 (n int) ENGINE=ReplicatedMergeTree order by n",
settings=settings,
)
main_node.query(
"CREATE MATERIALIZED VIEW recover.mv1 (n int) ENGINE=ReplicatedMergeTree order by n AS SELECT n FROM recover.rmt1",
settings=settings,
)
dummy_node.query(
"CREATE MATERIALIZED VIEW recover.mv2 (n int) ENGINE=ReplicatedMergeTree order by n AS SELECT n FROM recover.rmt2",
settings=settings,
)
main_node.query(
"CREATE DICTIONARY recover.d1 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n "
"SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt1' PASSWORD '' DB 'recover')) "
"LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())"
)
dummy_node.query(
"CREATE DICTIONARY recover.d2 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n "
"SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt2' PASSWORD '' DB 'recover')) "
"LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())"
)
for table in ["t1", "t2", "mt1", "mt2", "rmt1", "rmt2", "rmt3", "rmt5"]:
main_node.query("INSERT INTO recover.{} VALUES (42)".format(table))
for table in ["t1", "t2", "mt1", "mt2"]:
dummy_node.query("INSERT INTO recover.{} VALUES (42)".format(table))
for table in ["rmt1", "rmt2", "rmt3", "rmt5"]:
main_node.query("SYSTEM SYNC REPLICA recover.{}".format(table))
with PartitionManager() as pm:
pm.drop_instance_zk_connections(dummy_node)
dummy_node.query_and_get_error("RENAME TABLE recover.t1 TO recover.m1")
main_node.query_with_retry(
"RENAME TABLE recover.t1 TO recover.m1", settings=settings
)
main_node.query_with_retry(
"ALTER TABLE recover.mt1 ADD COLUMN m int", settings=settings
)
main_node.query_with_retry(
"ALTER TABLE recover.rmt1 ADD COLUMN m int", settings=settings
)
main_node.query_with_retry(
"RENAME TABLE recover.rmt3 TO recover.rmt4", settings=settings
)
main_node.query_with_retry("DROP TABLE recover.rmt5", settings=settings)
main_node.query_with_retry("DROP DICTIONARY recover.d2", settings=settings)
main_node.query_with_retry(
"CREATE DICTIONARY recover.d2 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n "
"SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt1' PASSWORD '' DB 'recover')) "
"LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT());",
settings=settings,
)
inner_table = (
".inner_id."
+ dummy_node.query_with_retry(
"SELECT uuid FROM system.tables WHERE database='recover' AND name='mv1'"
).strip()
)
main_node.query_with_retry(
"ALTER TABLE recover.`{}` MODIFY COLUMN n int DEFAULT 42".format(
inner_table
),
settings=settings,
)
main_node.query_with_retry(
"ALTER TABLE recover.mv1 MODIFY QUERY SELECT m FROM recover.rmt1".format(
inner_table
),
settings=settings,
)
main_node.query_with_retry(
"RENAME TABLE recover.mv2 TO recover.mv3".format(inner_table),
settings=settings,
)
main_node.query_with_retry(
"CREATE TABLE recover.tmp AS recover.m1", settings=settings
)
main_node.query_with_retry("DROP TABLE recover.tmp", settings=settings)
main_node.query_with_retry(
"CREATE TABLE recover.tmp AS recover.m1", settings=settings
)
main_node.query_with_retry("DROP TABLE recover.tmp", settings=settings)
main_node.query_with_retry(
"CREATE TABLE recover.tmp AS recover.m1", settings=settings
)
assert (
main_node.query(
"SELECT name FROM system.tables WHERE database='recover' AND name NOT LIKE '.inner_id.%' ORDER BY name"
)
== "d1\nd2\nm1\nmt1\nmt2\nmv1\nmv3\nrmt1\nrmt2\nrmt4\nt2\ntmp\n"
)
query = (
"SELECT name, uuid, create_table_query FROM system.tables WHERE database='recover' AND name NOT LIKE '.inner_id.%' "
"ORDER BY name SETTINGS show_table_uuid_in_table_create_query_if_not_nil=1"
)
expected = main_node.query(query)
assert_eq_with_retry(dummy_node, query, expected)
assert (
main_node.query(
"SELECT count() FROM system.tables WHERE database='recover' AND name LIKE '.inner_id.%'"
)
== "2\n"
)
assert (
dummy_node.query(
"SELECT count() FROM system.tables WHERE database='recover' AND name LIKE '.inner_id.%'"
)
== "2\n"
)
for table in [
"m1",
"t2",
"mt1",
"mt2",
"rmt1",
"rmt2",
"rmt4",
"d1",
"d2",
"mv1",
"mv3",
]:
assert main_node.query("SELECT (*,).1 FROM recover.{}".format(table)) == "42\n"
for table in ["t2", "rmt1", "rmt2", "rmt4", "d1", "d2", "mt2", "mv1", "mv3"]:
assert dummy_node.query("SELECT (*,).1 FROM recover.{}".format(table)) == "42\n"
for table in ["m1", "mt1"]:
assert dummy_node.query("SELECT count() FROM recover.{}".format(table)) == "0\n"
global test_recover_staled_replica_run
assert (
dummy_node.query(
"SELECT count() FROM system.tables WHERE database='recover_broken_tables'"
)
== f"{test_recover_staled_replica_run}\n"
)
assert (
dummy_node.query(
"SELECT count() FROM system.tables WHERE database='recover_broken_replicated_tables'"
)
== f"{test_recover_staled_replica_run}\n"
)
test_recover_staled_replica_run += 1
table = dummy_node.query(
"SHOW TABLES FROM recover_broken_tables LIKE 'mt1_29_%' LIMIT 1"
).strip()
assert (
dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table))
== "42\n"
)
table = dummy_node.query(
"SHOW TABLES FROM recover_broken_replicated_tables LIKE 'rmt5_29_%' LIMIT 1"
).strip()
assert (
dummy_node.query(
"SELECT (*,).1 FROM recover_broken_replicated_tables.{}".format(table)
)
== "42\n"
)
expected = "Cleaned 6 outdated objects: dropped 1 dictionaries and 3 tables, moved 2 tables"
assert_logs_contain(dummy_node, expected)
dummy_node.query("DROP TABLE recover.tmp")
assert_eq_with_retry(
main_node,
"SELECT count() FROM system.tables WHERE database='recover' AND name='tmp'",
"0\n",
)
main_node.query("DROP DATABASE recover SYNC")
dummy_node.query("DROP DATABASE recover SYNC")
def test_startup_without_zk(started_cluster):
with PartitionManager() as pm:
pm.drop_instance_zk_connections(main_node)
err = main_node.query_and_get_error(
"CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');"
)
assert "ZooKeeper" in err
main_node.query(
"CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');"
)
# main_node.query("CREATE TABLE startup.rmt (n int) ENGINE=ReplicatedMergeTree order by n")
main_node.query("CREATE TABLE startup.rmt (n int) ENGINE=MergeTree order by n")
main_node.query("INSERT INTO startup.rmt VALUES (42)")
with PartitionManager() as pm:
pm.drop_instance_zk_connections(main_node)
main_node.restart_clickhouse(stop_start_wait_sec=30)
assert main_node.query("SELECT (*,).1 FROM startup.rmt") == "42\n"
for _ in range(10):
try:
main_node.query("CREATE TABLE startup.m (n int) ENGINE=Memory")
break
except:
time.sleep(1)
main_node.query("EXCHANGE TABLES startup.rmt AND startup.m")
assert main_node.query("SELECT (*,).1 FROM startup.m") == "42\n"
main_node.query("DROP DATABASE startup SYNC")
def test_server_uuid(started_cluster):
uuid1 = main_node.query("select serverUUID()")
uuid2 = dummy_node.query("select serverUUID()")
assert uuid1 != uuid2
main_node.restart_clickhouse()
uuid1_after_restart = main_node.query("select serverUUID()")
assert uuid1 == uuid1_after_restart
def test_sync_replica(started_cluster):
main_node.query(
"CREATE DATABASE test_sync_database ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');"
)
dummy_node.query(
"CREATE DATABASE test_sync_database ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');"
)
number_of_tables = 1000
settings = {"distributed_ddl_task_timeout": 0}
with PartitionManager() as pm:
pm.drop_instance_zk_connections(dummy_node)
for i in range(number_of_tables):
main_node.query(
"CREATE TABLE test_sync_database.table_{} (n int) ENGINE=MergeTree order by n".format(
i
),
settings=settings,
)
# wait for host to reconnect
dummy_node.query_with_retry("SELECT * FROM system.zookeeper WHERE path='/'")
dummy_node.query("SYSTEM SYNC DATABASE REPLICA test_sync_database")
assert dummy_node.query(
"SELECT count() FROM system.tables where database='test_sync_database'"
).strip() == str(number_of_tables)
assert main_node.query(
"SELECT count() FROM system.tables where database='test_sync_database'"
).strip() == str(number_of_tables)
engine_settings = {"default_table_engine": "ReplicatedMergeTree"}
dummy_node.query(
"CREATE TABLE test_sync_database.table (n int, primary key n) partition by n",
settings=engine_settings,
)
main_node.query("INSERT INTO test_sync_database.table SELECT * FROM numbers(10)")
dummy_node.query("TRUNCATE TABLE test_sync_database.table", settings=settings)
dummy_node.query(
"ALTER TABLE test_sync_database.table ADD COLUMN m int", settings=settings
)
main_node.query(
"SYSTEM SYNC DATABASE REPLICA ON CLUSTER test_sync_database test_sync_database"
)
lp1 = main_node.query(
"select value from system.zookeeper where path='/clickhouse/databases/test1/replicas/shard1|replica1' and name='log_ptr'"
)
lp2 = main_node.query(
"select value from system.zookeeper where path='/clickhouse/databases/test1/replicas/shard1|replica2' and name='log_ptr'"
)
max_lp = main_node.query(
"select value from system.zookeeper where path='/clickhouse/databases/test1/' and name='max_log_ptr'"
)
assert lp1 == max_lp
assert lp2 == max_lp
|
torch_geometric/nn/pool/__init__.py | NucciTheBoss/pytorch_geometric | 2,350 | 12676489 | <filename>torch_geometric/nn/pool/__init__.py
from torch import Tensor
from torch_geometric.typing import OptTensor
from .max_pool import max_pool, max_pool_x, max_pool_neighbor_x
from .avg_pool import avg_pool, avg_pool_x, avg_pool_neighbor_x
from .graclus import graclus
from .voxel_grid import voxel_grid
from .topk_pool import TopKPooling
from .sag_pool import SAGPooling
from .edge_pool import EdgePooling
from .asap import ASAPooling
from .pan_pool import PANPooling
from .mem_pool import MemPooling
try:
import torch_cluster
except ImportError:
torch_cluster = None
def fps(x: Tensor, batch: OptTensor = None, ratio: float = 0.5,
random_start: bool = True) -> Tensor:
r"""A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature
Learning on Point Sets in a Metric Space"
<https://arxiv.org/abs/1706.02413>`_ paper, which iteratively samples the
most distant point with regard to the rest points.
Args:
x (Tensor): Node feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
batch (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
ratio (float, optional): Sampling ratio. (default: :obj:`0.5`)
random_start (bool, optional): If set to :obj:`False`, use the first
            node in :math:`\mathbf{X}` as starting node. (default: :obj:`True`)
:rtype: :class:`LongTensor`
.. code-block:: python
import torch
from torch_geometric.nn import fps
x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
batch = torch.tensor([0, 0, 0, 0])
index = fps(x, batch, ratio=0.5)
"""
return torch_cluster.fps(x, batch, ratio, random_start)
def knn(x: Tensor, y: Tensor, k: int, batch_x: OptTensor = None,
batch_y: OptTensor = None, cosine: bool = False,
num_workers: int = 1) -> Tensor:
r"""Finds for each element in :obj:`y` the :obj:`k` nearest points in
:obj:`x`.
Args:
x (Tensor): Node feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
y (Tensor): Node feature matrix
            :math:`\mathbf{Y} \in \mathbb{R}^{M \times F}`.
k (int): The number of neighbors.
batch_x (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
batch_y (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each
node to a specific example. (default: :obj:`None`)
cosine (boolean, optional): If :obj:`True`, will use the cosine
distance instead of euclidean distance to find nearest neighbors.
(default: :obj:`False`)
num_workers (int): Number of workers to use for computation. Has no
effect in case :obj:`batch_x` or :obj:`batch_y` is not
:obj:`None`, or the input lies on the GPU. (default: :obj:`1`)
:rtype: :class:`LongTensor`
.. code-block:: python
import torch
from torch_geometric.nn import knn
x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
batch_x = torch.tensor([0, 0, 0, 0])
y = torch.Tensor([[-1, 0], [1, 0]])
batch_y = torch.tensor([0, 0])
assign_index = knn(x, y, 2, batch_x, batch_y)
"""
return torch_cluster.knn(x, y, k, batch_x, batch_y, cosine, num_workers)
def knn_graph(x: Tensor, k: int, batch: OptTensor = None, loop: bool = False,
flow: str = 'source_to_target', cosine: bool = False,
num_workers: int = 1) -> Tensor:
r"""Computes graph edges to the nearest :obj:`k` points.
Args:
x (Tensor): Node feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
k (int): The number of neighbors.
batch (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
loop (bool, optional): If :obj:`True`, the graph will contain
self-loops. (default: :obj:`False`)
flow (string, optional): The flow direction when using in combination
with message passing (:obj:`"source_to_target"` or
:obj:`"target_to_source"`). (default: :obj:`"source_to_target"`)
cosine (boolean, optional): If :obj:`True`, will use the cosine
distance instead of euclidean distance to find nearest neighbors.
(default: :obj:`False`)
num_workers (int): Number of workers to use for computation. Has no
effect in case :obj:`batch` is not :obj:`None`, or the input lies
on the GPU. (default: :obj:`1`)
:rtype: :class:`LongTensor`
.. code-block:: python
import torch
from torch_geometric.nn import knn_graph
x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
batch = torch.tensor([0, 0, 0, 0])
edge_index = knn_graph(x, k=2, batch=batch, loop=False)
"""
return torch_cluster.knn_graph(x, k, batch, loop, flow, cosine,
num_workers)
def radius(x: Tensor, y: Tensor, r: float, batch_x: OptTensor = None,
batch_y: OptTensor = None, max_num_neighbors: int = 32,
num_workers: int = 1) -> Tensor:
r"""Finds for each element in :obj:`y` all points in :obj:`x` within
distance :obj:`r`.
Args:
x (Tensor): Node feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
y (Tensor): Node feature matrix
:math:`\mathbf{Y} \in \mathbb{R}^{M \times F}`.
r (float): The radius.
batch_x (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
batch_y (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each
node to a specific example. (default: :obj:`None`)
max_num_neighbors (int, optional): The maximum number of neighbors to
return for each element in :obj:`y`. (default: :obj:`32`)
num_workers (int): Number of workers to use for computation. Has no
effect in case :obj:`batch_x` or :obj:`batch_y` is not
:obj:`None`, or the input lies on the GPU. (default: :obj:`1`)
:rtype: :class:`LongTensor`
.. code-block:: python
import torch
from torch_geometric.nn import radius
x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
batch_x = torch.tensor([0, 0, 0, 0])
y = torch.Tensor([[-1, 0], [1, 0]])
batch_y = torch.tensor([0, 0])
assign_index = radius(x, y, 1.5, batch_x, batch_y)
"""
return torch_cluster.radius(x, y, r, batch_x, batch_y, max_num_neighbors,
num_workers)
def radius_graph(x: Tensor, r: float, batch: OptTensor = None,
loop: bool = False, max_num_neighbors: int = 32,
flow: str = 'source_to_target',
num_workers: int = 1) -> Tensor:
r"""Computes graph edges to all points within a given distance.
Args:
x (Tensor): Node feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
r (float): The radius.
batch (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
loop (bool, optional): If :obj:`True`, the graph will contain
self-loops. (default: :obj:`False`)
max_num_neighbors (int, optional): The maximum number of neighbors to
            return for each element in :obj:`x`. (default: :obj:`32`)
flow (string, optional): The flow direction when using in combination
with message passing (:obj:`"source_to_target"` or
:obj:`"target_to_source"`). (default: :obj:`"source_to_target"`)
num_workers (int): Number of workers to use for computation. Has no
effect in case :obj:`batch` is not :obj:`None`, or the input lies
on the GPU. (default: :obj:`1`)
:rtype: :class:`LongTensor`
.. code-block:: python
import torch
from torch_geometric.nn import radius_graph
x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
batch = torch.tensor([0, 0, 0, 0])
edge_index = radius_graph(x, r=1.5, batch=batch, loop=False)
"""
return torch_cluster.radius_graph(x, r, batch, loop, max_num_neighbors,
flow, num_workers)
def nearest(x: Tensor, y: Tensor, batch_x: OptTensor = None,
batch_y: OptTensor = None) -> Tensor:
r"""Clusters points in :obj:`x` together which are nearest to a given query
point in :obj:`y`.
Args:
x (Tensor): Node feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
y (Tensor): Node feature matrix
:math:`\mathbf{Y} \in \mathbb{R}^{M \times F}`.
batch_x (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
batch_y (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each
node to a specific example. (default: :obj:`None`)
:rtype: :class:`LongTensor`
.. code-block:: python
import torch
from torch_geometric.nn import nearest
x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
batch_x = torch.tensor([0, 0, 0, 0])
y = torch.Tensor([[-1, 0], [1, 0]])
batch_y = torch.tensor([0, 0])
cluster = nearest(x, y, batch_x, batch_y)
"""
return torch_cluster.nearest(x, y, batch_x, batch_y)
__all__ = [
'TopKPooling',
'SAGPooling',
'EdgePooling',
'ASAPooling',
'PANPooling',
'MemPooling',
'max_pool',
'avg_pool',
'max_pool_x',
'max_pool_neighbor_x',
'avg_pool_x',
'avg_pool_neighbor_x',
'graclus',
'voxel_grid',
'fps',
'knn',
'knn_graph',
'radius',
'radius_graph',
'nearest',
]
classes = __all__
|
python/009 Palindrome Number.py | allandproust/leetcode-share | 156 | 12676490 | '''
Determine whether an integer is a palindrome. Do this without extra space.
Could negative integers be palindromes? (ie, -1)
If you are thinking of converting the integer to string, note the restriction of using extra space.
You could also try reversing an integer. However, if you have solved the problem "Reverse Integer", you know that the reversed integer might overflow. How would you handle such case?
There is a more generic way of solving this problem.
'''
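# The solution below compares the most-significant digit (x // div, where div is
# the largest power of ten not exceeding x) with the least-significant digit
# (x % 10), then strips both digits and repeats. No reversed integer is ever
# built, so there is no overflow concern.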
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
if x < 0:
return False
div = 1
        while x // div >= 10:
div *= 10
while x > 0:
l = x // div
r = x % 10
if l != r:
return False
x %= div
x //= 10
            div //= 100
return True
if __name__ == "__main__":
assert Solution().isPalindrome(123) == False
assert Solution().isPalindrome(12321) == True
assert Solution().isPalindrome(-121) == False
|
stonesoup/hypothesiser/tests/test_distance.py | Red-Portal/Stone-Soup-1 | 157 | 12676492 | # -*- coding: utf-8 -*-
from operator import attrgetter
import datetime
import numpy as np
from ..distance import DistanceHypothesiser
from ...types.detection import Detection
from ...types.state import GaussianState
from ...types.track import Track
from ... import measures
def test_mahalanobis(predictor, updater):
timestamp = datetime.datetime.now()
track = Track([GaussianState(np.array([[0]]), np.array([[1]]), timestamp)])
detection1 = Detection(np.array([[2]]))
detection2 = Detection(np.array([[3]]))
detection3 = Detection(np.array([[10]]))
detections = {detection1, detection2, detection3}
measure = measures.Mahalanobis()
hypothesiser = DistanceHypothesiser(
predictor, updater, measure=measure, missed_distance=3)
hypotheses = hypothesiser.hypothesise(track, detections, timestamp)
# There are 3 hypotheses - Detection 1, Detection 2, Missed Detection
assert len(hypotheses) == 3
# And not detection3
assert detection3 not in {hypothesis.measurement
for hypothesis in hypotheses}
# There is a missed detection hypothesis
assert any(not hypothesis.measurement for hypothesis in hypotheses)
# Each hypothesis has a distance attribute
assert all(hypothesis.distance >= 0 for hypothesis in hypotheses)
# The hypotheses are sorted correctly
assert min(hypotheses, key=attrgetter('distance')) is hypotheses[0]
def test_distance_include_all(predictor, updater):
timestamp = datetime.datetime.now()
track = Track([GaussianState(np.array([[0]]), np.array([[1]]), timestamp)])
detection1 = Detection(np.array([[2]]))
detection2 = Detection(np.array([[3]]))
detection3 = Detection(np.array([[10]]))
detections = {detection1, detection2, detection3}
measure = measures.Mahalanobis()
hypothesiser = DistanceHypothesiser(
predictor, updater, measure=measure, missed_distance=1,
include_all=True)
hypotheses = hypothesiser.hypothesise(track, detections, timestamp)
# There are 4 hypotheses - Detections and Missed Detection
assert len(hypotheses) == 4
# detection3 is beyond missed distance and largest distance (last
# hypothesis in list)
last_hypothesis = hypotheses[-1]
assert last_hypothesis.measurement is detection3
assert last_hypothesis.distance > hypothesiser.missed_distance
|
compiler/gdsMill/sram_examples/newcell.py | bsg-external/OpenRAM | 335 | 12676533 | #!/usr/bin/env python
import gdsMill
#we are going to make an array of instances of an existing layout
#assume that we designed the "base cell" in cadence
#step 1 is to stream it out of cadence into a GDS to work with
# create a streamer object to interact with the cadence libraries
gds_file_in = "sram_lib2.gds" #"sram_cell_6t.gds" #"gds_sram_tgate2.gds"
gds_file_out = "newcell.gds"
debug = 0
streamer = gdsMill.GdsStreamer()
# use the streamer to take a cadence layout, and convert it to GDS 2 for us to work with
# the GDS will be named testLayoutA.gds
#streamer.streamFromCadence(cadenceLibraryContainerPath = "~/design/600nmAmi",
# libraryName = "gdsMillTest",
# cellName = "testLayoutA",
# outputPath = "./gdsFiles")
#next, load our base cell layout from the GDS generated above
arrayCellLayout = gdsMill.VlsiLayout()
reader = gdsMill.Gds2reader(arrayCellLayout, debugToTerminal = debug)
reader.loadFromFile(gds_file_in)
##since we will be streaming into the same library that testLayout came from
#let's rename it here so that we don't overwrite accidentally later
#arrayCellLayout.rename("tom_2x2")
#now create a new layout
#be sure to assign a name, since this will be the root object in our hierarchy to which
#all other objects are referenced
#newLayout = gdsMill.VlsiLayout(name="arrayExample", debug=1, units=(5e-4,5e-10)) #
newLayout = gdsMill.VlsiLayout(name="tom_2x2", debug=0, units=(0.001,1.0000000000000001e-09)) #
#now place an instance of our top level layout into the filled layout
#hierarchy looks like this:
# array example
# array cell layout
# layout elements
# layout elements
# layout elements
# cell instance
# cell instance
# cell instance
# connection elements .....
#now create the array of instances
for xIndex in range(0,2):
for yIndex in range(0,2):
if(yIndex%2 == 0):
mirror = "MX"
else:
mirror = "R0"
newLayout.addInstance(arrayCellLayout,
nameOfLayout = "cell_6t",
offsetInMicrons = (xIndex*1.25250,yIndex*1.820),
mirror = mirror,
rotate = 0.0)
#newLayout.addInstance(arrayCellLayout,
# nameOfLayout = "precharge",
# offsetInMicrons = (0*1.25250,1*3.640000),
# mirror = "R0",
# rotate = 0.0)
#newLayout.addInstance(arrayCellLayout,
# nameOfLayout = "precharge",
# offsetInMicrons = (1*1.25250,1*3.640000),
# mirror = "R0",
# rotate = 0.0)
#add a "wire" that in a real example might be a power rail, data bus, etc.
#newLayout.addPath(layerNumber = newLayout.layerNumbersInUse[7],
# coordinates = [(-20.0,0.0),(25.0,0),(25.0,10.0)],
# width = 1.0,
# updateInternalMap = False)
#add some text that in a real example might be an I/O pin
#newLayout.addText(text = "Hello",
# layerNumber = newLayout.layerNumbersInUse[5],
# offsetInMicrons = (0,0),
# magnification = 1,
# rotate = None,
# updateInternalMap=True)
#and now dump the filled layout to a new GDS file
writer = gdsMill.Gds2writer(newLayout)
writer.writeToFile(gds_file_out)
#and stream it into cadence
#streamer.streamToCadence(cadenceLibraryContainerPath = "~/design/600nmAmi",
# libraryName = "gdsMillTest",
# inputPath = "./gdsFiles/arrayLayout.gds")
print "LIB: %s" % gds_file_in
print "\nCompleted ", gds_file_out
|
app/src/thirdparty/telemetry/internal/platform/profiler/android_systrace_profiler.py | ta2edchimp/big-rig | 925 | 12676557 | <filename>app/src/thirdparty/telemetry/internal/platform/profiler/android_systrace_profiler.py
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import StringIO
import subprocess
import zipfile
from telemetry.core import util
from telemetry.internal.backends.chrome import android_browser_finder
from telemetry.internal.platform import profiler
from telemetry.timeline import trace_data as trace_data_module
from telemetry.timeline import tracing_options
_SYSTRACE_CATEGORIES = [
'gfx',
'input',
'view',
'sched',
'freq',
]
class AndroidSystraceProfiler(profiler.Profiler):
"""Collects a Systrace on Android."""
def __init__(self, browser_backend, platform_backend, output_path, state,
device=None):
super(AndroidSystraceProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
assert self._browser_backend.supports_tracing
self._output_path = output_path + '-trace.zip'
self._systrace_output_path = output_path + '.systrace'
    # Use telemetry's own tracing backend instead of the combined mode in
# adb_profile_chrome because some benchmarks also do tracing of their own
# and the two methods conflict.
options = tracing_options.TracingOptions()
options.enable_chrome_trace = True
self._browser_backend.StartTracing(options, timeout=10)
command = ['python', os.path.join(util.GetChromiumSrcDir(), 'tools',
'profile_chrome.py'),
'--categories', '', '--continuous', '--output',
self._systrace_output_path, '--json', '--systrace',
','.join(_SYSTRACE_CATEGORIES)]
if device:
command.extend(['--device', device])
self._profiler = subprocess.Popen(command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
@classmethod
def name(cls):
return 'android-systrace'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._profiler.communicate(input='\n')
trace_result_builder = trace_data_module.TraceDataBuilder()
self._browser_backend.StopTracing(trace_result_builder)
trace_result = trace_result_builder.AsData()
trace_file = StringIO.StringIO()
trace_result.Serialize(trace_file)
# Merge the chrome and systraces into a zip file.
with zipfile.ZipFile(self._output_path, 'w', zipfile.ZIP_DEFLATED) as z:
z.writestr('trace.json', trace_file.getvalue())
z.write(self._systrace_output_path, 'systrace')
os.unlink(self._systrace_output_path)
print 'Systrace saved as %s' % self._output_path
print 'To view, open in chrome://tracing'
return [self._output_path]
|
tensorflow/contrib/nccl/python/ops/nccl_ops_test.py | AlexChrisF/udacity | 384 | 12676575 | <gh_stars>100-1000
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nccl ops. See also the cc test for nccl_communicator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import nccl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class AllReduceTest(test.TestCase):
def testAllReduce(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
for dtype in [np.float32, np.int32, np.int64, np.float64]:
# Create session inside outer loop to test use of
# same communicator across multiple sessions.
with self.test_session(use_gpu=True) as sess:
self._testSingleAllReduce(sess, dtype, nccl.all_sum, lambda x, y: x + y)
self._testSingleAllReduce(sess, dtype, nccl.all_prod,
lambda x, y: x * y)
self._testSingleAllReduce(sess, dtype, nccl.all_min, np.minimum)
self._testSingleAllReduce(sess, dtype, nccl.all_max, np.maximum)
def _testSingleAllReduce(self, sess, np_type, nccl_fn, numpy_accumulation_fn):
for devices in [['/gpu:0', '/gpu:0', '/gpu:0'], ['/gpu:0', '/gpu:0']]:
shape = (3, 4)
np_ans = None
tensors = []
for d in devices:
with ops.device(d):
t = ((np.random.random_sample(shape) - .5) * 1024).astype(np_type)
if np_ans is None:
np_ans = t
else:
np_ans = numpy_accumulation_fn(np_ans, t)
tensors.append(array_ops.identity(t))
all_reduce_tensors = nccl_fn(tensors)
# Test shape inference.
for r in all_reduce_tensors:
self.assertEqual(shape, r.get_shape())
# Test execution and results.
nccl_results = sess.run(all_reduce_tensors)
for r in nccl_results:
self.assertAllClose(r, np_ans)
def testErrors(self):
with self.assertRaisesRegexp(ValueError, 'Device assignment required'):
nccl.all_sum([array_ops.identity(np.random.random_sample((3, 4)))])
with self.assertRaisesRegexp(ValueError, 'Must pass >0 tensors'):
nccl.all_sum([])
class BroadcastTest(test.TestCase):
def testBroadcast(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
for dtype in [np.float32, np.int32, np.int64, np.float64]:
# Create session inside outer loop to test use of
# same communicator across multiple sessions.
with self.test_session(use_gpu=True) as sess:
for devices in [['/gpu:0', '/gpu:0', '/gpu:0'], ['/gpu:0', '/gpu:0']]:
shape = (3, 4)
sender = np.random.randint(0, len(devices) - 1)
with ops.device(devices[sender]):
np_ans = ((
(np.random.random_sample(shape) - .5) * 1024).astype(dtype))
t = array_ops.identity(np_ans)
other_devices = devices[:sender] + devices[sender + 1:]
send_op, received_tensors = nccl.broadcast(t, other_devices)
# Verify shape inference.
for r in received_tensors:
self.assertEqual(shape, r.get_shape())
# Run and verify results.
nccl_results = sess.run(received_tensors + [send_op])
for r in nccl_results[:-1]:
self.assertAllClose(r, np_ans)
class CombinedTest(test.TestCase):
"""Tests using a mix of all-reduce ops in one session.run call."""
def testCombined(self):
if not test.is_gpu_available():
return # Test requires access to a GPU
for dtype in [np.float32, np.int32, np.int64, np.float64]:
# Create session inside outer loop to test use of
# same communicator across multiple sessions.
with self.test_session(use_gpu=True) as sess:
for devices in [['/gpu:0', '/gpu:0', '/gpu:0'], ['/gpu:0', '/gpu:0']]:
shape = (3, 4)
# all-reduce
np_ans = np.zeros(shape=shape, dtype=dtype)
tensors = []
for d in devices:
with ops.device(d):
t = ((np.random.random_sample(shape) - .5) * 1024).astype(dtype)
np_ans += t
tensors.append(array_ops.identity(t))
all_reduce_tensors = nccl.all_sum(tensors)
sender = np.random.randint(0, len(devices) - 1)
other_devices = devices[:sender] + devices[sender + 1:]
send_op, received_tensors = nccl.broadcast(all_reduce_tensors[sender],
other_devices)
# sender doesn't need to be fetched as part of outputs of session.run.
del all_reduce_tensors[sender]
# Verify shape inference.
for r in received_tensors:
self.assertEqual(shape, r.get_shape())
# Run and verify results.
nccl_results = sess.run(
received_tensors + [send_op] + all_reduce_tensors)
for r in nccl_results[:len(received_tensors)]:
self.assertAllClose(r, np_ans)
if __name__ == '__main__':
test.main()
|