the-stack_0_26346
|
import pytest
from skidl import *
from skidl.common import * # pylint: disable=wildcard-import
from .setup_teardown import *
def test_missing_lib():
# Sometimes, loading a part from a non-existent library doesn't throw an
# exception until the second time it's tried. This detects that error.
set_query_backup_lib(
False
) # Don't allow searching backup lib that might exist from previous tests.
with pytest.raises(FileNotFoundError):
a = Part("crap", "R")
with pytest.raises(FileNotFoundError):
b = Part("crap", "C")
def test_lib_import_1():
lib = SchLib("xess.lib")
assert len(lib) > 0
def test_lib_import_2():
lib = SchLib("Device")
def test_lib_export_1():
lib = SchLib("Device")
lib.export("my_device", tool=SKIDL)
my_lib = SchLib("my_device", tool=SKIDL)
assert len(lib) == len(my_lib)
def test_lib_creation_1():
lib = SchLib()
prt1 = SkidlPart(name="Q", dest=TEMPLATE)
lib += prt1
lib += prt1 # Duplicate library entries are not added.
assert len(lib.parts) == 1
assert not lib.get_parts(name="QQ") # Part not in library.
prt2 = SkidlPart(name="QQ", dest=TEMPLATE)
prt2.add_pins(
Pin(num=1, name="Q1", func=Pin.types.TRISTATE),
Pin(num=2, name="Q2", func=Pin.types.PWRIN),
)
lib += prt2
prt2.add_pins(Pin(num=3, name="Q1", func=Pin.types.PWROUT))
assert len(lib.parts) == 2
assert lib["Q"].name == "Q"
assert len(lib["Q"].pins) == 0
assert lib["QQ"].name == "QQ"
assert len(lib["QQ"].pins) == 2
def test_backup_1():
a = Part("Device", "R", footprint="null")
b = Part("Device", "C", footprint="null")
c = Part("Device", "L", footprint="null")
    a & b & c  # Connect devices to keep them from being culled.
generate_netlist(do_backup=True) # This creates the backup parts library.
default_circuit.reset()
set_query_backup_lib(True) # FIXME: this is already True by default!
a = Part("crap", "R", footprint="null")
b = Part("crap", "C", footprint="null")
generate_netlist()
def test_backup_2():
a = Part("Device", "R", footprint="null")
b = Part("Device", "C", footprint="null")
c = Part("Device", "L", footprint="null")
a & b & c # Place parts in series.
num_pins_per_net_1 = {net.name: len(net) for net in default_circuit.get_nets()}
generate_netlist(do_backup=True) # This creates the backup parts library.
num_pins_per_net_2 = {net.name: len(net) for net in default_circuit.get_nets()}
for nm in num_pins_per_net_1:
assert num_pins_per_net_1[nm] == num_pins_per_net_2[nm]
def test_lib_1():
lib_kicad = SchLib("Device")
lib_kicad.export("Device")
SchLib.reset()
lib_skidl = SchLib("Device", tool=SKIDL)
assert len(lib_kicad) == len(lib_skidl)
SchLib.reset()
set_default_tool(SKIDL)
set_query_backup_lib(False)
a = Part("Device", "R")
assert a.tool == SKIDL
b = Part("Device", "L")
assert b.tool == SKIDL
c = Part("Device", "C")
assert c.tool == SKIDL
def test_non_existing_lib_cannot_be_loaded():
for tool in ALL_TOOLS:
with pytest.raises(FileNotFoundError):
lib = SchLib("non-existing", tool=tool)
def test_part_from_non_existing_lib_cannot_be_instantiated():
for tool in ALL_TOOLS:
with pytest.raises(ValueError):
part = Part("non-existing", "P", tool=tool)
def test_lib_kicad_v5():
lib_name = "Device.lib"
lib_v5 = SchLib(lib_name)
v5_part_names = [part.name for part in lib_v5.parts]
with open(lib_name, "r") as fp:
lines = fp.readlines()
part_cnt = len([l for l in lines if l.startswith("ENDDEF")])
print(lib_name, "#parts =", part_cnt)
assert part_cnt == len(v5_part_names)
assert part_cnt == 502
def test_lib_kicad_v6():
lib_name = "Device.kicad_sym"
lib_v6 = SchLib(lib_name)
v6_part_names = [part.name for part in lib_v6.parts]
with open(lib_name, "r") as fp:
parts = parse_sexp("".join(fp.readlines()))
part_cnt = len([part for part in parts if to_list(part)[0] == "symbol"])
print(lib_name, "#parts =", part_cnt)
assert part_cnt == len(v6_part_names)
assert part_cnt == 564
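# Added usage sketch (not part of the original test suite): the same SchLib /
# SkidlPart / Pin API exercised above can be used to build a library in memory,
# export it, and read it back with the SKIDL tool. The library name
# "my_roundtrip" is a placeholder.
def example_lib_roundtrip():
    lib = SchLib(tool=SKIDL)
    q = SkidlPart(name="Q", dest=TEMPLATE)
    q.add_pins(
        Pin(num=1, name="B", func=Pin.types.INPUT),
        Pin(num=2, name="C", func=Pin.types.PASSIVE),
        Pin(num=3, name="E", func=Pin.types.PASSIVE),
    )
    lib += q
    lib.export("my_roundtrip", tool=SKIDL)  # writes a SKiDL-format library file
    reread = SchLib("my_roundtrip", tool=SKIDL)
    assert len(reread) == len(lib)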
|
the-stack_0_26348
|
import cv2
import numpy as np
def normal(img):
return img
def cartoon(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
color = cv2.bilateralFilter(img, 9, 250, 250)
cartoon = cv2.bitwise_and(color, color, mask=edges)
return cartoon
def blur(img):
blur = cv2.blur(img,(5,5))
return blur
def justRED(img):
    # OpenCV images are BGR-ordered, so split accordingly.
    b, g, r = cv2.split(img)
    g = np.zeros_like(g)
    b = np.zeros_like(b)
    result = cv2.merge((b, g, r))
    result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)  # convert to RGB for display
    return result
def Contours(image):
contoured_image = image
gray = cv2.cvtColor(contoured_image, cv2.COLOR_BGR2GRAY)
edged = cv2.Canny(gray, 200, 200)
contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
cv2.drawContours(contoured_image, contours, contourIdx=-1, color=6, thickness=1)
return contoured_image
def ColourQuantization(image, K=9):
Z = image.reshape((-1, 3))
Z = np.float32(Z)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
compactness, label, center = cv2.kmeans(Z, K, None, criteria, 1, cv2.KMEANS_RANDOM_CENTERS)
center = np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape((image.shape))
return res2
def cartoonT1(img):
img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
coloured = ColourQuantization(img)
    contoured = Contours(coloured)
final_image = contoured
return final_image
def noHue(img):
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
h,s,v=cv2.split(img)
h=np.zeros_like(h)
result = cv2.merge((h,s,v))
return result
def noGreen(img):
    b, g, r = cv2.split(img)  # BGR order
    g = np.zeros_like(g)
    result = cv2.merge((b, g, r))
    return result
def noBlue(img):
    b, g, r = cv2.split(img)
    b = np.zeros_like(b)
    result = cv2.merge((b, g, r))
    return result
def noRed(img):
    b, g, r = cv2.split(img)
    r = np.zeros_like(r)
    result = cv2.merge((b, g, r))
    return result
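# Added usage sketch (not part of the original module): load an image with
# OpenCV (BGR order, which the filters above assume), apply a few filters and
# save the results. "input.jpg" and the output names are placeholders.
if __name__ == "__main__":
    img = cv2.imread("input.jpg")
    if img is None:
        raise SystemExit("input.jpg not found")
    cv2.imwrite("cartoon.jpg", cartoon(img))
    cv2.imwrite("blurred.jpg", blur(img))
    cv2.imwrite("quantized.jpg", ColourQuantization(img, K=6))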
|
the-stack_0_26349
|
'''Example showing how to use trend filtering to perform density regression in
a time series model.'''
import numpy as np
from scipy.sparse import coo_matrix
from scipy.stats import norm
def get_1d_penalty_matrix(x, k=0, sparse=False):
'''Create a 1D trend filtering penalty matrix D^(k+1).'''
length = len(x)
if sparse:
rows = np.repeat(np.arange(length-1), 2)
cols = np.repeat(np.arange(length), 2)[1:-1]
data = np.tile([-1, 1], length-1)
D = coo_matrix((data, (rows, cols)), shape=(length-1, length))
else:
D = np.eye(length, dtype=float)[0:-1] * -1
for i in range(len(D)):
D[i,i+1] = 1
return get_delta(D, k, x)
def get_delta(D, k, x=None):
'''Calculate the k-th order trend filtering matrix given the oriented edge
incidence matrix and the value of k. If x is specified, then we use the
    falling factorial basis from Wang et al. (ICML 2014) to specify an
irregular grid.'''
if k < 0:
raise Exception('k must be at least 0th order.')
result = D
for i in range(k):
if x is not None:
z = i+1
W = np.diag(float(z) / (x[z:] - x[:-z]))
result = D.T.dot(W).dot(result) if i % 2 == 0 else D.dot(W).dot(result)
else:
result = D.T.dot(result) if i % 2 == 0 else D.dot(result)
return result
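# Added illustration (not part of the original script): on a regular 5-point
# grid, k=0 yields the first-difference operator D^(1), while k=1 applies the
# alternating D^T / D recursion above to build a higher-order penalty.
def _example_penalty_matrices():
    x = np.arange(5, dtype=float)
    D0 = get_1d_penalty_matrix(x, k=0)  # rows like [-1, 1, 0, 0, 0]
    D1 = get_1d_penalty_matrix(x, k=1)
    print(D0.shape, D0)
    print(D1.shape, D1)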
def sufficient_statistics(x, y):
T = []
X = []
W = []
for i in range(x.min(), x.max()+1):
xi = x[x==i]
if len(xi) > 0:
yi = y[x==i]
t1 = np.sum(yi)
t2 = np.sum(yi**2)
X.append(i)
W.append(len(xi))
T.append([t1,t2])
return np.array(T), np.array(X), np.array(W)
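# Added worked example: for observations grouped by integer time index, T holds
# the per-timepoint sufficient statistics [sum(y), sum(y^2)], X the time indices
# that actually have data, and W the observation counts -- the inputs tf_fit expects.
def _example_sufficient_statistics():
    x = np.array([0, 0, 1, 3, 3, 3])
    y = np.array([1.0, 3.0, 2.0, 4.0, 5.0, 6.0])
    T, X, W = sufficient_statistics(x, y)
    # T == [[4, 10], [2, 4], [15, 77]], X == [0, 1, 3], W == [2, 1, 3]
    print(T, X, W)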
def tf_fit(T, X, W, D0, D1, lam1, lam2,
init_eta1=None,
init_eta2_raw=None,
nsteps=10000,
learning_rate_fn=lambda s: 0.995**(s+1),
verbose=True):
import tensorflow as tf
# Setup the TF model
tf.reset_default_graph()
tf_sess = tf.Session()
if init_eta1 is None:
init_eta1 = np.ones_like(T[:,0]).astype('float32')
if init_eta2_raw is None:
init_eta2_raw = np.ones_like(T[:,1]).astype('float32')
# Create the data tensors
tf_W = tf.constant(W, tf.float32)
tf_T = tf.constant(T, tf.float32)
tf_D0 = tf.constant(D0, tf.float32)
tf_D1 = tf.constant(D1, tf.float32)
# Work in natural parameter space
tf_eta1 = tf.get_variable('Eta1', initializer=init_eta1)
tf_eta2_raw = tf.get_variable('Eta2', initializer=init_eta2_raw)
tf_eta2 = -tf.nn.softplus(tf_eta2_raw)
tf_eta = tf.stack([tf_eta1, tf_eta2], axis=1)
tf_mean, tf_variance = tf_eta1 / (-2 * tf_eta2), 1. / (-2 * tf_eta2)
# Use a gaussian loss
log_kernel = tf.reduce_sum(tf_T * tf_eta, 1)
log_partition = tf_eta1**2 / (4.*tf_eta2) + 0.5 * tf.log(-2 * tf_eta2)
# Use a model that has piecewise-linear means and constant variance
mean_penalty = tf.reduce_sum(tf.abs(tf.matmul(tf_D1, tf_mean[:,None])))
var_penalty = tf.reduce_sum(tf.abs(tf.matmul(tf_D0, tf_variance[:,None])))
# Use a model that has piecewise-linear first natural parameter and
# piecewise-constant second natural parameter
# mean_penalty = tf.reduce_sum(tf.abs(tf.matmul(tf_D1, tf_eta1[:,None])))
# var_penalty = tf.reduce_sum(tf.abs(tf.matmul(tf_D0, tf_eta2[:,None])))
# Get the convex optimization loss
loss = -tf.reduce_sum(log_kernel + W * log_partition) + lam1 * mean_penalty + lam2 * var_penalty
# Setup optimizer
tf_learning_rate = tf.placeholder(tf.float32, shape=[])
opt = tf.train.GradientDescentOptimizer(tf_learning_rate)
tf_train_step = opt.minimize(loss)
# Fit the model
tf_sess.run(tf.global_variables_initializer())
for step in range(nsteps):
if verbose and (step % 1000) == 0:
# Convert back to normal parameter space
mean, variance = tf_sess.run([tf_eta1 / (-2 * tf_eta2), 1. / (-2 * tf_eta2)])
eta = tf_sess.run(tf_eta[0])
loss_kernel, loss_part, loss_mean, loss_var = tf_sess.run([tf.reduce_sum(log_kernel), tf.reduce_sum(W*log_partition), mean_penalty, var_penalty])
print('\n\n********** STEP {} **********'.format(step))
print('Data.\n\tT: {}\n\tW: {}\n\tX: {}'.format(T[0], W[0], X[0]))
print('Parameters.\n\tMean: {}\n\tVariance: {}\n\tEta: {}'.format(mean[0], variance[0], eta))
            print('Loss components.\n\tKernel: {}\n\tPartition: {}\n\tMean: {}\n\tVariance: {}'.format(loss_kernel, loss_part, loss_mean, loss_var))
print('Step size: {}'.format(learning_rate_fn(step)))
tf_sess.run(tf_train_step, feed_dict={tf_learning_rate: learning_rate_fn(step)})
# Convert back to normal parameter space
return tf_sess.run([tf_mean, tf_variance])
def density_regression(x, y, lam1=10., lam2=5.,
init_eta1=None, init_eta2_raw=None,
nsteps=10000, verbose=True):
# Convert to z-scores
y_mu = y.mean()
y_stdev = y.std()
y = (y - y_mu) / y_stdev
# Calculate the sufficient statistics under a normal likelihood
T, X, W = sufficient_statistics(x, y)
# Create the trend filtering penalty matrices
D0 = get_1d_penalty_matrix(X, k=0, sparse=False)
D1 = get_1d_penalty_matrix(X, k=1, sparse=False)
# Fit the data under a normal distribution assumption
fit_means, fit_variances = tf_fit(T, X, W, D0, D1, lam1, lam2,
learning_rate_fn=lambda s: 0.01,
nsteps=nsteps,
init_eta1=init_eta1,
init_eta2_raw=init_eta2_raw,
verbose=verbose)
# Convert back from z-scores to raw values
return X, fit_means * y_stdev + y_mu, fit_variances * y_stdev**2
def logspace_grid(min_val, max_val, npoints):
return np.exp(np.linspace(np.log(min_val), np.log(max_val), npoints))
def create_folds(n, k):
indices = np.arange(n)
np.random.shuffle(indices)
folds = []
start = 0
end = 0
for f in range(k):
start = end
end = start + len(indices) // k + (1 if (len(indices) % k) > f else 0)
folds.append(indices[start:end])
return folds
def predict(x, fit):
fit_x, means, variances = fit
pred_mean = np.interp(x, fit_x, means)
pred_var = np.interp(x, fit_x, variances)
return pred_mean, pred_var
def logprob(x, y, fit):
pred_mean, pred_var = predict(x, fit)
logprobs = norm.logpdf(y, pred_mean, np.sqrt(pred_var))
return logprobs
def density_regression_cv(x, y, nfolds=5,
min_lam1=1e-2, max_lam1=2e2, nlam1=10,
min_lam2=1e-2, max_lam2=2e2, nlam2=10):
'''Cross-validation to select the value of lambda 1 and lambda 2 based on
log probability.'''
lam1_grid = logspace_grid(min_lam1, max_lam1, nlam1)
lam2_grid = logspace_grid(min_lam2, max_lam2, nlam2)
folds = create_folds(len(x), nfolds)
cv_scores = np.zeros((nlam1, nlam2))
for fold_num, fold in enumerate(folds):
print('\tFold #{0}'.format(fold_num+1))
mask = np.ones(len(x), dtype=bool)
mask[fold] = False
x_train, y_train = x[mask], y[mask]
x_test, y_test = x[~mask], y[~mask]
prev_init_eta1, prev_init_eta2_raw = None, None
for i,lam1 in enumerate(lam1_grid):
init_eta1 = prev_init_eta1
init_eta2_raw = prev_init_eta2_raw
for j,lam2 in enumerate(lam2_grid):
print('\n\t\tlam1={} lam2={}'.format(lam1, lam2))
fold_x, fold_means, fold_variances = density_regression(x_train, y_train, lam1, lam2,
init_eta1=init_eta1,
init_eta2_raw=init_eta2_raw,
verbose=False, nsteps=3000)
score = logprob(x_test, y_test, (fold_x, fold_means, fold_variances)).sum()
print('\t\t\t score={}'.format(score))
cv_scores[i,j] += score
init_eta1 = fold_means / fold_variances
init_eta2_raw = np.exp(1./(2*fold_variances) - 1.)
if j == 0 and not np.isnan(score):
prev_init_eta1 = np.copy(init_eta1)
prev_init_eta2_raw = np.copy(init_eta2_raw)
best_idx = np.nanargmax(cv_scores)
best_lam1 = lam1_grid[int(np.floor(best_idx // nlam2))]
best_lam2 = lam2_grid[int(best_idx % nlam2)]
fit = density_regression(x, y, best_lam1, best_lam2, nsteps=20000, verbose=False)
    print('Best selected values: lam1={} lam2={}'.format(best_lam1, best_lam2))
return fit, (best_lam1, best_lam2)
if __name__ == '__main__':
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
n = 200
m = 8
offset = 7000.
    p = np.array([0.3]*(n//5) + [0.01]*(n//10) + [0.9]*(n//10) + [0.3]*(n//4) + [0.5]*(n//4) + [0.5]*(n//10))
    slopes = np.array([50.]*(n//4) + [-30]*(n//4) + [-20]*(n//4) + [40]*(n//4))
    variances = np.array([100000.]*(n//4) + [500000.]*(n//4) + [10000.]*(n//4) + [200000.]*(n//4))
means = np.zeros(n)
means[0] = offset
assert len(p) == n
assert len(slopes) == n
assert len(variances) == n
# Generate the data
m0 = np.random.poisson(m)
x = [0]*m0
y = list(np.random.normal(means[0], np.sqrt(variances[0]), size=m0))
for i in range(1,n):
means[i] = means[i-1] + slopes[i]
if p[i] < np.random.random():
nsamples = np.random.poisson(m)
if nsamples > 0:
x.extend([i] * nsamples)
y.extend(np.random.normal(means[i], np.sqrt(variances[i]), size=nsamples))
x = np.array(x)
y = np.array(y)
# Fit the model by 5-fold cross-validation
# fit_x, fit_means, fit_variances = density_regression_cv(x, y)
# Use some decent values found via cross-validation
fit_x, fit_means, fit_variances = density_regression(x, y, lam1=3.5, lam2=20.)
# Plot a comparison of the truth vs. the fit
fig, axarr = plt.subplots(1,2, sharex=True, sharey=True)
axarr[0].scatter(x, y, alpha=0.7, color='gray')
axarr[0].plot(np.arange(n), means, label='Truth')
axarr[0].fill_between(np.arange(n), means + 2*np.sqrt(variances), means - 2*np.sqrt(variances), color='blue', alpha=0.3)
axarr[0].set_title('Truth')
axarr[1].scatter(x, y, alpha=0.7, color='gray')
    axarr[1].plot(fit_x, fit_means, label='Fit')
axarr[1].fill_between(fit_x, fit_means+2*np.sqrt(fit_variances), fit_means-2*np.sqrt(fit_variances), color='blue', alpha=0.3)
axarr[1].set_title('Fit')
plt.show()
|
the-stack_0_26350
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A class containing information about beads and chains of a protein."""
from typing import List, Sequence
from .chains.side_chain import SideChain
from .chains.main_chain import MainChain
class Peptide:
"""A class containing information about beads and chains of a protein. Each bead is
characterized by a letter which encodes its residue sequence and defines the energy of
interactions with other beads (unless interactions are random). For more information about
residue types see:
https://en.wikipedia.org/wiki/Amino_acid#Table_of_standard_amino_acid_abbreviations_and_properties.
    Currently, only side chains of length 1 (i.e. with 1 bead) are supported, which is a
simplifying assumption. For more information see the description in the Protein Folding
Problems module and the paper cited there."""
def __init__(
self,
main_chain_residue_sequence: str,
side_chain_residue_sequences: List[str],
):
"""
Args:
main_chain_residue_sequence: String of characters that define residues for a main
chain. Valid residue types are [A, C, D, E, F, G, H, I,
K, L, M, N, P, Q, R, S, T, V, W, Y].
side_chain_residue_sequences: List of characters that define residues for all side
beads. Empty string if a side bead does not exist. Valid
residue types are [A, C, D, E, F, G, H, I, K, L, M, N, P,
Q, R, S, T, V, W, Y]. Side chains cannot be attached to
first or last main bead. First and last main
beads with a side chain can be modeled by elongating the
main chain with corresponding side chains.
"""
self._main_chain = MainChain(
main_chain_residue_sequence,
side_chain_residue_sequences,
)
def get_side_chains(self) -> Sequence[SideChain]:
"""
Returns the list of all side chains in a peptide.
Returns:
A list of all side chains in a peptide.
"""
side_chains = []
for main_bead in self._main_chain.beads_list:
side_chains.append(main_bead.side_chain) # type: ignore
return side_chains
def get_side_chain_hot_vector(self) -> List[bool]:
"""
Returns a one-hot encoding list for side chains in a peptide which indicates which side
chains are present.
Returns:
A one-hot encoding list for side chains in a peptide.
"""
side_chain_hot_vector = []
for main_bead in self._main_chain.beads_list:
if main_bead.side_chain is not None: # type: ignore
side_chain_hot_vector.append(True)
else:
side_chain_hot_vector.append(False)
return side_chain_hot_vector
@property
def get_main_chain(self) -> MainChain:
"""Returns the main chain."""
return self._main_chain
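# Added usage sketch (not part of the original module). The residue letters are
# an arbitrary valid choice for illustration; empty strings mean no side bead,
# and the first and last main beads carry no side chain, per the docstring above.
def _example_peptide():
    peptide = Peptide("APRLR", ["", "", "S", "", ""])
    print(peptide.get_side_chain_hot_vector())  # [False, False, True, False, False]
    print(len(peptide.get_main_chain.beads_list))  # number of main-chain beads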
|
the-stack_0_26353
|
from datetime import datetime
from django.utils.translation import ugettext as _
from dimagi.ext.couchdbkit import (
BooleanProperty,
DateTimeProperty,
Document,
StringProperty,
)
from corehq.apps.groups.models import dt_no_Z_re
from corehq.apps.products.models import Product, SQLProduct
class Program(Document):
"""
A program, e.g. "hiv" or "tb"
"""
domain = StringProperty()
name = StringProperty()
code = StringProperty()
last_modified = DateTimeProperty()
default = BooleanProperty(default=False)
is_archived = BooleanProperty(default=False)
@classmethod
def wrap(cls, data):
# If "Z" is missing because of the Aug 2014 migration, then add it.
# cf. Group class
last_modified = data.get('last_modified')
if last_modified and dt_no_Z_re.match(last_modified):
data['last_modified'] += 'Z'
return super(Program, cls).wrap(data)
def save(self, *args, **kwargs):
self.last_modified = datetime.utcnow()
super(Program, self).save(*args, **kwargs)
self.clear_caches(self.domain)
@classmethod
def by_domain(cls, domain, wrap=True):
"""
Gets all programs in a domain.
"""
kwargs = dict(
view_name='program_by_code/view',
startkey=[domain],
endkey=[domain, {}],
include_docs=True
)
if wrap:
return Program.view(**kwargs)
else:
return [row["doc"] for row in Program.view(wrap_doc=False, **kwargs)]
@classmethod
def default_for_domain(cls, domain):
programs = cls.by_domain(domain)
for p in programs:
if p.default:
return p
def delete(self):
# you cannot delete the default program
if self.default:
raise Exception(_('You cannot delete the default program'))
default = Program.default_for_domain(self.domain)
sql_products = SQLProduct.objects.filter(domain=self.domain,
program_id=self.get_id)
to_save = []
for product in sql_products.couch_products():
product['program_id'] = default._id
to_save.append(product)
# break up saving in case there are many products
if len(to_save) > 500:
Product.bulk_save(to_save)
to_save = []
Product.bulk_save(to_save)
# bulk update sqlproducts
sql_products.update(program_id=default._id)
super(Program, self).delete()
self.clear_caches(self.domain)
def unarchive(self):
"""
Unarchive a program, causing it (and its data) to show
up in Couch and SQL views again.
"""
self.is_archived = False
self.save()
def get_products_count(self):
return (SQLProduct.objects
.filter(domain=self.domain, program_id=self.get_id)
.count())
@classmethod
def clear_caches(cls, domain):
from casexml.apps.phone.utils import clear_fixture_cache
from corehq.apps.programs.fixtures import PROGRAM_FIXTURE_BUCKET
clear_fixture_cache(domain, PROGRAM_FIXTURE_BUCKET)
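# Added usage sketch (illustration only; assumes a configured CommCare HQ
# environment and an existing domain, here the placeholder "my-domain").
def _example_list_programs():
    for program in Program.by_domain("my-domain"):
        print(program.name, program.code, program.get_products_count())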
|
the-stack_0_26354
|
import logging
import os
import re
from collections import OrderedDict
import numexpr as ne
import numpy as np
import pandas as pd
import yaml
from tardis import constants
from astropy import units as u
from pyne import nucname
import tardis
from tardis.io.util import get_internal_data_path
from IPython import get_ipython
k_B_cgs = constants.k_B.cgs.value
c_cgs = constants.c.cgs.value
h_cgs = constants.h.cgs.value
m_e_cgs = constants.m_e.cgs.value
e_charge_gauss = constants.e.gauss.value
logger = logging.getLogger(__name__)
tardis_dir = os.path.realpath(tardis.__path__[0])
ATOMIC_SYMBOLS_DATA = (
pd.read_csv(
get_internal_data_path("atomic_symbols.dat"),
delim_whitespace=True,
names=["atomic_number", "symbol"],
)
.set_index("atomic_number")
.squeeze()
)
ATOMIC_NUMBER2SYMBOL = OrderedDict(ATOMIC_SYMBOLS_DATA.to_dict())
SYMBOL2ATOMIC_NUMBER = OrderedDict(
(y, x) for x, y in ATOMIC_NUMBER2SYMBOL.items()
)
synpp_default_yaml_fname = get_internal_data_path("synpp_default.yaml")
NUMERAL_MAP = tuple(
zip(
(1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1),
("M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"),
)
)
class MalformedError(Exception):
pass
class MalformedSpeciesError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return (
'Expecting a species notation (e.g. "Si 2", "Si II", "Fe IV") '
f"- supplied {self.malformed_element_symbol}"
)
class MalformedElementSymbolError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return f"Expecting an atomic symbol (e.g. Fe) - supplied {self.malformed_element_symbol}"
class MalformedQuantityError(MalformedError):
def __init__(self, malformed_quantity_string):
self.malformed_quantity_string = malformed_quantity_string
def __str__(self):
return (
            f'Expecting a quantity string (e.g. "5 km/s") for keyword '
f"- supplied {self.malformed_quantity_string}"
)
def int_to_roman(i):
"""
Convert an integer into its roman numeral representation.
Parameters
----------
i : int
Integer to be converted into roman numerals
Returns
-------
str
Returns roman numeral representation of i in str format.
"""
result = []
for integer, numeral in NUMERAL_MAP:
count = i // integer
result.append(numeral * count)
i -= integer * count
return "".join(result)
def roman_to_int(roman_string):
"""
Convert a roman numeral into its corresponding integer.
Parameters
----------
roman_string : str
Roman numeral to be converted into an integer
Returns
-------
int
Returns integer representation of roman_string
"""
NUMERALS_SET = set(list(zip(*NUMERAL_MAP))[1])
roman_string = roman_string.upper()
if len(set(list(roman_string.upper())) - NUMERALS_SET) != 0:
raise ValueError(f"{roman_string} does not seem to be a roman numeral")
i = result = 0
for integer, numeral in NUMERAL_MAP:
while roman_string[i : i + len(numeral)] == numeral:
result += integer
i += len(numeral)
if result < 1:
raise ValueError(f"Can not interpret Roman Numeral {roman_string}")
return result
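# Added sanity-check sketch for the two converters above.
def _example_roman_numerals():
    assert int_to_roman(14) == "XIV"
    assert int_to_roman(1987) == "MCMLXXXVII"
    assert roman_to_int("XIV") == 14
    assert roman_to_int("mcmlxxxvii") == 1987  # case-insensitive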
def calculate_luminosity(
spec_fname,
distance,
wavelength_column=0,
wavelength_unit=u.angstrom,
flux_column=1,
flux_unit=u.Unit("erg / (Angstrom cm2 s)"),
):
"""
Calculates luminosity of star.
Parameters
----------
spec_fname : file or str
File or file name to be read
distance : float
Distance to star
wavelength_column : int, optional(default = 0)
Column index in which the wavelength is stored
wavelength_unit : float, optional(default = u.angstrom)
Dictates units used for calculating wavelength.
flux_column : int, optional(default = 1)
Column index in which the flux is stored
flux_unit : str, optional(default = u.Unit('erg / (Angstrom cm2 s)')
Dictates units used for flux
Returns
-------
luminosity.value : float
Returned luminosity value of star.
wavelength.min() : float
Minimum value of wavelength of light
wavelength.max() : float
Maximum value of wavelength of light
"""
# BAD STYLE change to parse quantity
distance = u.Unit(distance)
wavelength, flux = np.loadtxt(
spec_fname, usecols=(wavelength_column, flux_column), unpack=True
)
flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
luminosity = (flux_density * 4 * np.pi * distance ** 2).to("erg/s")
return luminosity.value, wavelength.min(), wavelength.max()
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
"""
Create a yaml file that is readable from syn++
Parameters
----------
radial1d_mdl : Radial1DModel
Inputted object that will be read into YAML file
fname : str
File name for the synpp yaml
shell_no : int, optional(default = 0)
Number of shells
lines_db : file, optional(default = None)
Raises
------
ValueError
If the current dataset does not contain necessary reference files
"""
logger.warning("Currently only works with Si and a special setup")
    if radial1d_mdl.atom_data.synpp_refs is None:
raise ValueError(
"The current atom dataset does not contain the "
"necessary reference files (please contact the authors)"
)
radial1d_mdl.atom_data.synpp_refs["ref_log_tau"] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
try:
radial1d_mdl.atom_data.synpp_refs["ref_log_tau"].loc[
key
] = np.log10(
radial1d_mdl.plasma.tau_sobolevs[0].loc[value["line_id"]]
)
except KeyError:
pass
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[
radial1d_mdl.atom_data.synpp_refs["ref_log_tau"] > -50
]
with open(synpp_default_yaml_fname) as stream:
yaml_reference = yaml.load(stream, Loader=yaml.CLoader)
if lines_db is not None:
yaml_reference["opacity"]["line_dir"] = os.path.join(lines_db, "lines")
yaml_reference["opacity"]["line_dir"] = os.path.join(
lines_db, "refs.dat"
)
yaml_reference["output"]["min_wl"] = float(
radial1d_mdl.runner.spectrum.wavelength.to("angstrom").value.min()
)
yaml_reference["output"]["max_wl"] = float(
radial1d_mdl.runner.spectrum.wavelength.to("angstrom").value.max()
)
# raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference["opacity"]["v_ref"] = float(
(
radial1d_mdl.tardis_config.structure.v_inner[0].to("km/s")
/ (1000.0 * u.km / u.s)
).value
)
yaml_reference["grid"]["v_outer_max"] = float(
(
radial1d_mdl.tardis_config.structure.v_outer[-1].to("km/s")
/ (1000.0 * u.km / u.s)
).value
)
# pdb.set_trace()
yaml_setup = yaml_reference["setups"][0]
yaml_setup["ions"] = []
yaml_setup["log_tau"] = []
yaml_setup["active"] = []
yaml_setup["temp"] = []
yaml_setup["v_min"] = []
yaml_setup["v_max"] = []
yaml_setup["aux"] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup["ions"].append(100 * species[0] + species[1])
yaml_setup["log_tau"].append(float(synpp_ref["ref_log_tau"]))
yaml_setup["active"].append(True)
yaml_setup["temp"].append(yaml_setup["t_phot"])
yaml_setup["v_min"].append(yaml_reference["opacity"]["v_ref"])
yaml_setup["v_max"].append(yaml_reference["grid"]["v_outer_max"])
yaml_setup["aux"].append(1e200)
with open(fname, "w") as f:
yaml.dump(yaml_reference, stream=f, explicit_start=True)
def intensity_black_body(nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}
{e^{h\\nu \\beta_\\textrm{rad}} - 1}
Parameters
----------
nu : float
Frequency of light
T : float
Temperature in kelvin
Returns
-------
Intensity : float
Returns the intensity of the black body
"""
beta_rad = 1 / (k_B_cgs * T)
coefficient = 2 * h_cgs / c_cgs ** 2
    intensity = ne.evaluate(
        "coefficient * nu**3 / (exp(h_cgs * nu * beta_rad) - 1)"
    )
return intensity
def species_tuple_to_string(species_tuple, roman_numerals=True):
"""
Convert a species tuple to its corresponding string representation.
Parameters
----------
species_tuple : tuple
        Tuple of 2 values indicating the atomic number and the number of
        electrons missing
    roman_numerals : bool, optional(default = True)
Indicates whether the returned ion number is in roman numerals
Returns
-------
element_symbol, roman_ion_number : str
Returns corresponding string representation of given tuple
"""
atomic_number, ion_number = species_tuple
element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number + 1)
return f"{str(element_symbol)} {roman_ion_number}"
else:
return f"{element_symbol} {ion_number:d}"
def species_string_to_tuple(species_string):
"""
Convert a species string to its corresponding tuple representation
Parameters
----------
species_string : str
String containing species symbol (e.g. Si II, Fe III)
Returns
-------
atomic_number, ion_number : tuple
Returns tuple of length 2 indicating atomic number and ion number
Raises
------
MalformedSpeciesError
If the inputted string does not match the species format
"""
try:
element_symbol, ion_number_string = re.match(
r"^(\w+)\s*(\d+)", species_string
).groups()
except AttributeError:
try:
element_symbol, ion_number_string = species_string.split()
except ValueError:
raise MalformedSpeciesError(
f'Species string "{species_string}" is not of format <element_symbol><number>'
f" (e.g. Fe 2, Fe2, ..)"
)
atomic_number = element_symbol2atomic_number(element_symbol)
try:
ion_number = roman_to_int(ion_number_string)
except ValueError:
try:
ion_number = int(ion_number_string)
except ValueError:
raise MalformedSpeciesError(
f"Given ion number ('{ion_number_string}') could not be parsed"
)
if ion_number > atomic_number:
raise ValueError(
"Species given does not exist: ion number > atomic number"
)
return atomic_number, ion_number - 1
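# Added round-trip sketch for the species helpers above; it assumes the bundled
# atomic_symbols.dat loaded correctly so that "Si" maps to atomic number 14.
def _example_species_round_trip():
    assert species_string_to_tuple("Si II") == (14, 1)
    assert species_string_to_tuple("si 2") == (14, 1)
    assert species_tuple_to_string((14, 1)) == "Si II"
    assert species_tuple_to_string((14, 1), roman_numerals=False) == "Si 1"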
def parse_quantity(quantity_string):
"""
    Changes a string into its corresponding astropy.Quantity object.
Parameters
----------
quantity_string : str
String to be converted into astropy.Quantity
Returns
-------
q : u.Quantity
Corresponding astropy.Quantity object for passed string
Raises
------
MalformedQuantityError
If string is not properly formatted for Astropy Quantity
"""
if not isinstance(quantity_string, str):
raise MalformedQuantityError(quantity_string)
try:
value_string, unit_string = quantity_string.split()
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
value = float(value_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
q = u.Quantity(value, unit_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
return q
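# Added sketch of the quantity helpers: parse velocity strings and build an
# evenly spaced velocity grid from them (quantity_linspace is defined below).
def _example_quantities():
    v_inner = parse_quantity("11000 km/s")
    v_outer = parse_quantity("20000 km/s")
    grid = quantity_linspace(v_inner, v_outer, num=10)
    print(grid.unit, grid[0], grid[-1])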
def element_symbol2atomic_number(element_string):
"""
Takes an element symbol and returns its corresponding atomic number
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
int
Returned atomic number
"""
reformatted_element_string = reformat_element_symbol(element_string)
if reformatted_element_string not in SYMBOL2ATOMIC_NUMBER:
raise MalformedElementSymbolError(element_string)
return SYMBOL2ATOMIC_NUMBER[reformatted_element_string]
def atomic_number2element_symbol(atomic_number):
"""
Convert atomic number to string
Parameters
----------
atomic_number : int
Inputted atomic number
Returns
-------
str
Returned corresponding element symbol
"""
return ATOMIC_NUMBER2SYMBOL[atomic_number]
def reformat_element_symbol(element_string):
"""
Reformat the string so the first letter is uppercase and all subsequent
letters lowercase.
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
str
Returned reformatted element symbol
"""
return element_string[0].upper() + element_string[1:].lower()
def quantity_linspace(start, stop, num, **kwargs):
"""
Essentially the same input parameters as linspace, but
calculated for an astropy quantity start and stop.
Parameters
----------
start : astropy.Quantity
Starting value of the sequence
stop : astropy.Quantity
End value of the sequence
num : int
Number of samples to generate
Returns
-------
astropy.Quantity
        Returns num evenly spaced values of type astropy.Quantity
Raises
------
ValueError
If start and stop values have no unit attribute.
"""
if not (hasattr(start, "unit") and hasattr(stop, "unit")):
raise ValueError(
"Both start and stop need to be quantities with a " "unit attribute"
)
return (
np.linspace(start.value, stop.to(start.unit).value, num, **kwargs)
* start.unit
)
def convert_abundances_format(fname, delimiter=r"\s+"):
"""
Changes format of file containing abundances into data frame
Parameters
----------
fname : file, str
File or file name that contains abundance info
delimiter : str, optional(default = '\\s+')
Determines the separator for splitting file
Returns
-------
DataFrame
Corresponding data frame
"""
df = pd.read_csv(fname, delimiter=delimiter, comment="#", header=None)
# Drop shell index column
df.drop(df.columns[0], axis=1, inplace=True)
# Assign header row
df.columns = [nucname.name(i) for i in range(1, df.shape[1] + 1)]
return df
def is_notebook():
"""
    Check whether the shell environment where the simulation is run is Jupyter based.
    Returns
    -------
    bool
        True if the shell environment is IPython/Jupyter notebook based,
        False if it is a terminal shell or anything else.
"""
try:
# Trying to import the ZMQInteractiveShell for Jupyter based environments
from ipykernel.zmqshell import ZMQInteractiveShell
    except ImportError:
        # If ipykernel cannot be imported, this is not a Jupyter environment,
        # so return False.
return False
try:
# Trying to import Interactive Terminal based IPython shell
from IPython.core.interactiveshell import InteractiveShell
    except ImportError:
        # If the terminal IPython shell class cannot be imported, return False.
return False
try:
# Trying to get the value of the shell via the get_ipython() method
shell = get_ipython()
except NameError:
# Returns False if the shell name cannot be inferred correctly
return False
# Checking if the shell instance is Jupyter based & if True, returning True
if isinstance(shell, ZMQInteractiveShell):
return True
# Checking if the shell instance is Terminal IPython based & if True, returning False
elif isinstance(shell, InteractiveShell):
return False
# All other shell instances are returned False
else:
return False
|
the-stack_0_26355
|
#!/usr/bin/python3
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#|R|a|s|p|b|e|r|r|y|P|i|.|c|o|m|.|t|w|
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# Copyright (c) 2021, raspberrypi.com.tw
# All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# led_blink_warning.py
# Blinking led with warning
#
# Author : sosorry
# Date : 06/22/2014
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
LED_PIN = 12
GPIO.setup(LED_PIN, GPIO.OUT)
try:
    while True:
        print("LED is on")
        GPIO.output(LED_PIN, GPIO.HIGH)
        time.sleep(1)
        print("LED is off")
        GPIO.output(LED_PIN, GPIO.LOW)
        time.sleep(1)
except KeyboardInterrupt:
    # Ctrl+C stops the blinking loop
    pass
finally:
    GPIO.cleanup()  # release the GPIO pin so it can be reused
|
the-stack_0_26358
|
from cms.api import get_page_draft
from cms.cms_toolbars import PAGE_MENU_SECOND_BREAK
from cms.toolbar.items import Break
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils.i18n import get_language_list, get_language_object
from cms.utils.permissions import has_page_permission
from django.urls.base import reverse
from django.urls.exceptions import NoReverseMatch
from django.utils.translation import ugettext_lazy as _
from .models import PageMeta, TitleMeta
try:
from cms.utils import get_cms_setting
except ImportError: # pragma: no cover
from cms.utils.conf import get_cms_setting
PAGE_META_MENU_TITLE = _('Meta-information')
PAGE_META_ITEM_TITLE = _('Common')
@toolbar_pool.register
class PageToolbarMeta(CMSToolbar):
def populate(self):
# always use draft if we have a page
self.page = get_page_draft(self.request.current_page)
if not self.page:
# Nothing to do
return
# check global permissions if CMS_PERMISSIONS is active
if get_cms_setting('PERMISSION'):
has_global_current_page_change_permission = has_page_permission(
self.request.user, self.request.current_page, 'change'
)
else:
has_global_current_page_change_permission = False
# check if user has page edit permission
can_change = (
self.request.current_page and
self.request.current_page.has_change_permission(self.request.user)
)
if has_global_current_page_change_permission or can_change:
not_edit_mode = not self.toolbar.edit_mode_active
current_page_menu = self.toolbar.get_or_create_menu('page')
super_item = current_page_menu.find_first(
Break, identifier=PAGE_MENU_SECOND_BREAK) + 1
meta_menu = current_page_menu.get_or_create_menu(
'pagemeta', PAGE_META_MENU_TITLE, position=super_item)
position = 0
# Page tags
try:
page_extension = PageMeta.objects.get(extended_object_id=self.page.pk)
except PageMeta.DoesNotExist:
page_extension = None
try:
if page_extension:
url = reverse('admin:djangocms_page_meta_pagemeta_change',
args=(page_extension.pk,))
else:
url = '%s?extended_object=%s' % (
reverse('admin:djangocms_page_meta_pagemeta_add'),
self.page.pk)
except NoReverseMatch:
# not in urls
pass
else:
meta_menu.add_modal_item(PAGE_META_ITEM_TITLE, url=url, disabled=not_edit_mode,
position=position)
# Title tags
try:
site_id = self.page.node.site_id
except AttributeError: # CMS_3_4
site_id = self.page.site_id
for title in self.page.title_set.filter(
language__in=get_language_list(site_id)
):
try:
title_extension = TitleMeta.objects.get(extended_object_id=title.pk)
except TitleMeta.DoesNotExist:
title_extension = None
try:
if title_extension:
url = reverse('admin:djangocms_page_meta_titlemeta_change',
args=(title_extension.pk,))
else:
url = '%s?extended_object=%s' % (
reverse('admin:djangocms_page_meta_titlemeta_add'),
title.pk
)
except NoReverseMatch:
# not in urls
pass
else:
position += 1
language = get_language_object(title.language)
meta_menu.add_modal_item(language['name'], url=url, disabled=not_edit_mode,
position=position)
|
the-stack_0_26360
|
"""
Module with all the individual handlers, which execute git commands and return the results to the frontend.
"""
import json
from notebook.utils import url_path_join as ujoin
from notebook.base.handlers import APIHandler
class GitHandler(APIHandler):
"""
Top-level parent class.
"""
@property
def git(self):
return self.settings["git"]
class GitCloneHandler(GitHandler):
def post(self):
"""
Handler for the `git clone`
Input format:
{
'current_path': 'current_file_browser_path',
'repo_url': 'https://github.com/path/to/myrepo'
}
"""
data = json.loads(self.request.body.decode('utf-8'))
response = self.git.clone(data['current_path'], data['clone_url'])
self.finish(json.dumps(response))
class GitAllHistoryHandler(GitHandler):
"""
Parent handler for all four history/status git commands:
1. git show_top_level
2. git branch
3. git log
4. git status
Called on refresh of extension's widget
"""
def post(self):
"""
POST request handler, calls individual handlers for
'git show_top_level', 'git branch', 'git log', and 'git status'
"""
current_path = self.get_json_body()["current_path"]
show_top_level = self.git.show_top_level(current_path)
if show_top_level["code"] != 0:
self.finish(json.dumps(show_top_level))
else:
branch = self.git.branch(current_path)
log = self.git.log(current_path)
status = self.git.status(current_path)
result = {
"code": show_top_level["code"],
"data": {
"show_top_level": show_top_level,
"branch": branch,
"log": log,
"status": status,
},
}
self.finish(json.dumps(result))
class GitShowTopLevelHandler(GitHandler):
"""
Handler for 'git rev-parse --show-toplevel'.
Displays the git root directory inside a repository.
"""
def post(self):
"""
POST request handler, displays the git root directory inside a repository.
"""
current_path = self.get_json_body()["current_path"]
result = self.git.show_top_level(current_path)
self.finish(json.dumps(result))
class GitShowPrefixHandler(GitHandler):
"""
Handler for 'git rev-parse --show-prefix'.
Displays the prefix path of a directory in a repository,
with respect to the root directory.
"""
def post(self):
"""
POST request handler, displays the prefix path of a directory in a repository,
with respect to the root directory.
"""
current_path = self.get_json_body()["current_path"]
result = self.git.show_prefix(current_path)
self.finish(json.dumps(result))
class GitStatusHandler(GitHandler):
"""
Handler for 'git status --porcelain', fetches the git status.
"""
def get(self):
"""
GET request handler, shows file status, used in refresh method.
"""
self.finish(
json.dumps(
{"add_all": "check", "filename": "filename", "top_repo_path": "path"}
)
)
def post(self):
"""
POST request handler, fetches the git status.
"""
current_path = self.get_json_body()["current_path"]
result = self.git.status(current_path)
self.finish(json.dumps(result))
class GitLogHandler(GitHandler):
"""
Handler for 'git log --pretty=format:%H-%an-%ar-%s'.
Fetches Commit SHA, Author Name, Commit Date & Commit Message.
"""
def post(self):
"""
POST request handler,
fetches Commit SHA, Author Name, Commit Date & Commit Message.
"""
current_path = self.get_json_body()["current_path"]
result = self.git.log(current_path)
self.finish(json.dumps(result))
class GitDetailedLogHandler(GitHandler):
"""
Handler for 'git log -1 --stat --numstat --oneline' command.
Fetches file names of committed files, Number of insertions &
deletions in that commit.
"""
def post(self):
"""
POST request handler, fetches file names of committed files, Number of
insertions & deletions in that commit.
"""
data = self.get_json_body()
selected_hash = data["selected_hash"]
current_path = data["current_path"]
result = self.git.detailed_log(selected_hash, current_path)
self.finish(json.dumps(result))
class GitDiffHandler(GitHandler):
"""
Handler for 'git diff --numstat'. Fetches changes between commits & working tree.
"""
def post(self):
"""
POST request handler, fetches differences between commits & current working
tree.
"""
top_repo_path = self.get_json_body()["top_repo_path"]
my_output = self.git.diff(top_repo_path)
self.finish(my_output)
print("GIT DIFF")
print(my_output)
class GitBranchHandler(GitHandler):
"""
Handler for 'git branch -a'. Fetches list of all branches in current repository
"""
def post(self):
"""
POST request handler, fetches all branches in current repository.
"""
current_path = self.get_json_body()["current_path"]
result = self.git.branch(current_path)
self.finish(json.dumps(result))
class GitAddHandler(GitHandler):
"""
    Handler for 'git add <filename>'.
Adds one or all files into to the staging area.
"""
def get(self):
"""
GET request handler, adds files in the staging area.
"""
self.finish(
json.dumps(
{"add_all": "check", "filename": "filename", "top_repo_path": "path"}
)
)
def post(self):
"""
POST request handler, adds one or all files into the staging area.
"""
data = self.get_json_body()
top_repo_path = data["top_repo_path"]
if data["add_all"]:
my_output = self.git.add_all(top_repo_path)
else:
filename = data["filename"]
my_output = self.git.add(filename, top_repo_path)
self.finish(my_output)
class GitResetHandler(GitHandler):
"""
Handler for 'git reset <filename>'.
Moves one or all files from the staged to the unstaged area.
"""
def post(self):
"""
POST request handler,
moves one or all files from the staged to the unstaged area.
"""
data = self.get_json_body()
top_repo_path = data["top_repo_path"]
if data["reset_all"]:
my_output = self.git.reset_all(top_repo_path)
else:
filename = data["filename"]
my_output = self.git.reset(filename, top_repo_path)
self.finish(my_output)
class GitDeleteCommitHandler(GitHandler):
"""
Handler for 'git revert --no-commit <SHA>'.
Deletes the specified commit from the repository, leaving history intact.
"""
def post(self):
data = self.get_json_body()
top_repo_path = data["top_repo_path"]
commit_id = data["commit_id"]
output = self.git.delete_commit(commit_id, top_repo_path)
self.finish(output)
class GitResetToCommitHandler(GitHandler):
"""
Handler for 'git reset --hard <SHA>'.
Deletes all commits from head to the specified commit, making the specified commit the new head.
"""
def post(self):
data = self.get_json_body()
top_repo_path = data["top_repo_path"]
commit_id = data["commit_id"]
output = self.git.reset_to_commit(commit_id, top_repo_path)
self.finish(output)
class GitCheckoutHandler(GitHandler):
"""
Handler for 'git checkout <branchname>'. Changes the current working branch.
"""
def post(self):
"""
POST request handler, changes between branches.
"""
data = self.get_json_body()
top_repo_path = data["top_repo_path"]
if data["checkout_branch"]:
if data["new_check"]:
print("to create a new branch")
my_output = self.git.checkout_new_branch(
data["branchname"], top_repo_path
)
else:
print("switch to an old branch")
my_output = self.git.checkout_branch(
data["branchname"], top_repo_path
)
elif data["checkout_all"]:
my_output = self.git.checkout_all(top_repo_path)
else:
my_output = self.git.checkout(data["filename"], top_repo_path)
self.finish(my_output)
class GitCommitHandler(GitHandler):
"""
Handler for 'git commit -m <message>'. Commits files.
"""
def post(self):
"""
POST request handler, commits files.
"""
data = self.get_json_body()
top_repo_path = data["top_repo_path"]
commit_msg = data["commit_msg"]
my_output = self.git.commit(commit_msg, top_repo_path)
self.finish(my_output)
class GitPullHandler(GitHandler):
"""
Handler for 'git pull <first-branch> <second-branch>'. Pulls files from a remote branch.
"""
def post(self):
"""
POST request handler, pulls files from a remote branch to your current branch.
"""
data = self.get_json_body()
origin = data["origin"]
master = data["master"]
curr_fb_path = data["curr_fb_path"]
my_output = self.git.pull(origin, master, curr_fb_path)
self.finish(my_output)
print("You Pulled")
class GitPushHandler(GitHandler):
"""
Handler for 'git push <first-branch> <second-branch>.
Pushes committed files to a remote branch.
"""
def post(self):
"""
POST request handler,
        pushes committed files from your current branch to a remote branch
"""
data = self.get_json_body()
origin = data["origin"]
master = data["master"]
curr_fb_path = data["curr_fb_path"]
my_output = self.git.push(origin, master, curr_fb_path)
self.finish(my_output)
print("You Pushed")
class GitInitHandler(GitHandler):
"""
Handler for 'git init'. Initializes a repository.
"""
def post(self):
"""
POST request handler, initializes a repository.
"""
current_path = self.get_json_body()["current_path"]
my_output = self.git.init(current_path)
self.finish(my_output)
class GitAddAllUntrackedHandler(GitHandler):
"""
Handler for 'echo "a\n*\nq\n" | git add -i'. Adds ONLY all untracked files.
"""
def post(self):
"""
POST request handler, adds all the untracked files.
"""
top_repo_path = self.get_json_body()["top_repo_path"]
my_output = self.git.add_all_untracked(top_repo_path)
print(my_output)
self.finish(my_output)
def setup_handlers(web_app):
"""
Setups all of the git command handlers.
Every handler is defined here, to be used in git.py file.
"""
git_handlers = [
("/git/show_top_level", GitShowTopLevelHandler),
("/git/show_prefix", GitShowPrefixHandler),
("/git/add", GitAddHandler),
("/git/status", GitStatusHandler),
("/git/branch", GitBranchHandler),
("/git/reset", GitResetHandler),
("/git/delete_commit", GitDeleteCommitHandler),
("/git/reset_to_commit", GitResetToCommitHandler),
("/git/checkout", GitCheckoutHandler),
("/git/commit", GitCommitHandler),
("/git/pull", GitPullHandler),
("/git/push", GitPushHandler),
("/git/diff", GitDiffHandler),
("/git/log", GitLogHandler),
("/git/detailed_log", GitDetailedLogHandler),
("/git/init", GitInitHandler),
("/git/all_history", GitAllHistoryHandler),
("/git/add_all_untracked", GitAddAllUntrackedHandler),
("/git/clone", GitCloneHandler)
]
# add the baseurl to our paths
base_url = web_app.settings["base_url"]
git_handlers = [(ujoin(base_url, x[0]), x[1]) for x in git_handlers]
web_app.add_handlers(".*", git_handlers)
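# Hedged sketch (not part of the original module) of how setup_handlers is
# typically wired into a classic notebook-server extension. It assumes the Git
# helper object used by GitHandler.git is stored in the Tornado settings under
# the "git" key elsewhere in the package.
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the server extension is loaded; registers the git handlers.
    """
    web_app = nb_server_app.web_app
    # web_app.settings["git"] = Git()  # hypothetical: the concrete Git helper
    setup_handlers(web_app)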
|
the-stack_0_26363
|
#!/usr/bin/env python3
from pyth_utils import *
import random
import sys
import threading
import time
# Accept connections from readiness probe
def publisher_readiness():
run_or_die(["nc", "-k", "-l", "-p", READINESS_PORT])
# Update the specified price with random values
def publisher_random_update(price_pubkey):
value = random.randrange(1024)
confidence = 1
pyth_run_or_die("upd_price_val", args=[price_pubkey, str(value), str(confidence), "trading"])
print("Price updated!")
# Fund the publisher
sol_run_or_die("airdrop", [str(SOL_AIRDROP_AMT),
"--keypair", PYTH_PUBLISHER_KEYPAIR,
"--commitment", "finalized",
])
# Create a mapping
pyth_run_or_die("init_mapping")
# Add a product
prod_pubkey = pyth_run_or_die("add_product", capture_output=True).stdout.strip()
print(f"Added product {prod_pubkey}")
# Add a price
price_pubkey = pyth_run_or_die(
"add_price",
args=[prod_pubkey, "price"],
confirm=False,
capture_output=True
).stdout.strip()
print(f"Added price {price_pubkey}")
publisher_pubkey = sol_run_or_die("address", args=["--keypair", PYTH_PUBLISHER_KEYPAIR], capture_output=True).stdout.strip()
# Become a publisher
pyth_run_or_die("add_publisher", args=[publisher_pubkey, price_pubkey], confirm=False, debug=True, capture_output=True)
print(f"Added publisher {publisher_pubkey}")
# Update the price as the newly added publisher
publisher_random_update(price_pubkey)
print(f"Updated price {price_pubkey}. Mock updates ready to roll. Updating every {str(PYTH_PUBLISHER_INTERVAL)} seconds")
# Spin off the readiness probe endpoint into a separate thread
readiness_thread = threading.Thread(target=publisher_readiness)
readiness_thread.start()
while True:
print(f"Updating price {price_pubkey}")
publisher_random_update(price_pubkey)
time.sleep(PYTH_PUBLISHER_INTERVAL)
sys.stdout.flush()
readiness_thread.join()
|
the-stack_0_26364
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteLinksOperations:
"""ExpressRouteLinksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
express_route_port_name: str,
link_name: str,
**kwargs: Any
) -> "_models.ExpressRouteLink":
"""Retrieves the specified ExpressRouteLink resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:param link_name: The name of the ExpressRouteLink resource.
:type link_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteLink, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_02_01.models.ExpressRouteLink
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteLink"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
'linkName': self._serialize.url("link_name", link_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteLink', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}/links/{linkName}'} # type: ignore
def list(
self,
resource_group_name: str,
express_route_port_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRouteLinkListResult"]:
"""Retrieve the ExpressRouteLink sub-resources of the specified ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteLinkListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_02_01.models.ExpressRouteLinkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteLinkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteLinkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}/links'} # type: ignore
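# Usage sketch (not part of the generated SDK; assumes an authenticated
# azure.mgmt.network.aio.NetworkManagementClient instance named `client`):
# the pager returned by list() is consumed with `async for`, e.g.
#     async for link in client.express_route_links.list(resource_group_name,
#                                                        express_route_port_name):
#         print(link.name)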
|
the-stack_0_26365
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.enums",
marshal="google.ads.googleads.v10",
manifest={"ChangeEventResourceTypeEnum",},
)
class ChangeEventResourceTypeEnum(proto.Message):
r"""Container for enum describing supported resource types for
the ChangeEvent resource.
"""
class ChangeEventResourceType(proto.Enum):
r"""Enum listing the resource types support by the ChangeEvent
resource.
"""
UNSPECIFIED = 0
UNKNOWN = 1
AD = 2
AD_GROUP = 3
AD_GROUP_CRITERION = 4
CAMPAIGN = 5
CAMPAIGN_BUDGET = 6
AD_GROUP_BID_MODIFIER = 7
CAMPAIGN_CRITERION = 8
FEED = 9
FEED_ITEM = 10
CAMPAIGN_FEED = 11
AD_GROUP_FEED = 12
AD_GROUP_AD = 13
ASSET = 14
CUSTOMER_ASSET = 15
CAMPAIGN_ASSET = 16
AD_GROUP_ASSET = 17
__all__ = tuple(sorted(__protobuf__.manifest))
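# Usage note (a minimal sketch, not part of the generated file): the nested
# proto.Enum behaves like an IntEnum, so
#     ChangeEventResourceTypeEnum.ChangeEventResourceType.CAMPAIGN
# evaluates to 5, and its .name attribute recovers the label "CAMPAIGN".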
|
the-stack_0_26369
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' git submodules implementation
"""
# Copyright (C) 2013 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import bb
from bb import data
from bb.fetch2.git import Git
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
class GitSM(Git):
def supports(self, ud, d):
"""
Check to see if a given url can be fetched with git.
"""
return ud.type in ['gitsm']
def uses_submodules(self, ud, d):
for name in ud.names:
try:
runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True)
return True
except bb.fetch.FetchError:
pass
return False
def _set_relative_paths(self, repopath):
"""
Fix submodule paths to be relative instead of absolute,
so that when we move the repo it doesn't break
(In Git 1.7.10+ this is done automatically)
"""
submodules = []
with open(os.path.join(repopath, '.gitmodules'), 'r') as f:
for line in f.readlines():
if line.startswith('[submodule'):
submodules.append(line.split('"')[1])
for module in submodules:
repo_conf = os.path.join(repopath, module, '.git')
if os.path.exists(repo_conf):
with open(repo_conf, 'r') as f:
lines = f.readlines()
newpath = ''
for i, line in enumerate(lines):
if line.startswith('gitdir:'):
oldpath = line.split(': ')[-1].rstrip()
if oldpath.startswith('/'):
newpath = '../' * (module.count('/') + 1) + '.git/modules/' + module
lines[i] = 'gitdir: %s\n' % newpath
break
if newpath:
with open(repo_conf, 'w') as f:
for line in lines:
f.write(line)
repo_conf2 = os.path.join(repopath, '.git', 'modules', module, 'config')
if os.path.exists(repo_conf2):
with open(repo_conf2, 'r') as f:
lines = f.readlines()
newpath = ''
for i, line in enumerate(lines):
if line.lstrip().startswith('worktree = '):
oldpath = line.split(' = ')[-1].rstrip()
if oldpath.startswith('/'):
newpath = '../' * (module.count('/') + 3) + module
lines[i] = '\tworktree = %s\n' % newpath
break
if newpath:
with open(repo_conf2, 'w') as f:
for line in lines:
f.write(line)
def update_submodules(self, ud, d):
# We have to convert bare -> full repo, do the submodule bit, then convert back
tmpclonedir = ud.clonedir + ".tmp"
gitdir = tmpclonedir + os.sep + ".git"
bb.utils.remove(tmpclonedir, True)
os.mkdir(tmpclonedir)
os.rename(ud.clonedir, gitdir)
runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*true/bare = false/'", d)
os.chdir(tmpclonedir)
runfetchcmd(ud.basecmd + " reset --hard", d)
runfetchcmd(ud.basecmd + " submodule init", d)
runfetchcmd(ud.basecmd + " submodule update", d)
self._set_relative_paths(tmpclonedir)
runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*false/bare = true/'", d)
os.rename(gitdir, ud.clonedir,)
bb.utils.remove(tmpclonedir, True)
def download(self, ud, d):
Git.download(self, ud, d)
os.chdir(ud.clonedir)
submodules = self.uses_submodules(ud, d)
if submodules:
self.update_submodules(ud, d)
def unpack(self, ud, destdir, d):
Git.unpack(self, ud, destdir, d)
os.chdir(ud.destdir)
submodules = self.uses_submodules(ud, d)
if submodules:
runfetchcmd("cp -r " + ud.clonedir + "/modules " + ud.destdir + "/.git/", d)
runfetchcmd(ud.basecmd + " submodule init", d)
runfetchcmd(ud.basecmd + " submodule update", d)
|
the-stack_0_26372
|
# -*- coding: utf-8 -*-
"""
Tests to ensure tmm package was coded correctly. Use run_all() to
run them all in order.
"""
from __future__ import division, print_function, absolute_import
from .tmm_core import (coh_tmm, inc_tmm, ellips, position_resolved,
absorp_in_each_layer, snell, absorp_analytic_fn,
interface_r, inc_absorp_in_each_layer,
interface_R, interface_T, power_entering_from_r)
from numpy import pi, linspace, inf, exp, cos, average, array, vstack, imag
# "5 * degree" is 5 degrees expressed in radians
# "1.2 / degree" is 1.2 radians expressed in degrees
degree = pi/180
def run_all():
basic_test()
position_resolved_test()
position_resolved_test2()
absorp_analytic_fn_test()
incoherent_test()
RT_test()
coh_overflow_test()
inc_overflow_test()
def df(a, b): #difference fraction
return abs(a-b)/max(abs(a), abs(b))
def basic_test():
"""
Compare with program I wrote previously in Mathematica. Also confirms
that I don't accidentally mess up the program by editing.
"""
n_list = [1, 2+4j, 3+0.3j, 1+0.1j]
d_list = [inf, 2, 3, inf]
th_0 = 0.1
lam_vac = 100
print('The following should all be zero (within rounding errors):')
s_data = coh_tmm('s', n_list, d_list, th_0, lam_vac)
print(df(s_data['r'], -0.60331226568845775-0.093522181653632019j))
print(df(s_data['t'], 0.44429533471192989+0.16921936169383078j))
print(df(s_data['R'], 0.37273208839139516))
print(df(s_data['T'], 0.22604491247079261))
p_data = coh_tmm('p', n_list, d_list, th_0, lam_vac)
print(df(p_data['r'], 0.60102654255772481+0.094489146845323682j))
print(df(p_data['t'], 0.4461816467503148+0.17061408427088917j))
print(df(p_data['R'], 0.37016110373044969))
print(df(p_data['T'], 0.22824374314132009))
ellips_data = ellips(n_list, d_list, th_0, lam_vac)
print(df(ellips_data['psi'], 0.78366777347038352))
print(df(ellips_data['Delta'], 0.0021460774404193292))
return
def position_resolved_test():
"""
Compare with program I wrote previously in Mathematica. Also, various
consistency checks.
"""
d_list = [inf, 100, 300, inf] #in nm
n_list = [1, 2.2+0.2j, 3.3+0.3j, 1]
th_0 = pi/4
lam_vac = 400
layer = 1
dist = 37
print('The following should all be zero (within rounding errors):')
pol = 'p'
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
print(df(coh_tmm_data['kz_list'][1],
0.0327410685922732+0.003315885921866465j))
data = position_resolved(layer, dist, coh_tmm_data)
print(df(data['poyn'], 0.7094950598055798))
print(df(data['absor'], 0.005135049118053356))
print(df(1, sum(absorp_in_each_layer(coh_tmm_data))))
pol = 's'
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
print(df(coh_tmm_data['kz_list'][1],
0.0327410685922732+0.003315885921866465j))
data = position_resolved(layer, dist, coh_tmm_data)
print(df(data['poyn'], 0.5422594735025152))
print(df(data['absor'], 0.004041912286816303))
print(df(1, sum(absorp_in_each_layer(coh_tmm_data))))
#Poynting vector derivative should equal absorption
for pol in ['s', 'p']:
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
data1 = position_resolved(layer, dist, coh_tmm_data)
data2 = position_resolved(layer, dist+0.001, coh_tmm_data)
print('Finite difference should approximate derivative. Difference is '
+ str(df((data1['absor']+data2['absor'])/2,
(data1['poyn']-data2['poyn'])/0.001)))
#Poynting vector at end should equal T
layer = 2
dist = 300
for pol in ['s', 'p']:
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
data = position_resolved(layer, dist, coh_tmm_data)
print(df(data['poyn'], coh_tmm_data['T']))
#Poynting vector at start should equal power_entering
layer = 1
dist = 0
for pol in ['s', 'p']:
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
data = position_resolved(layer, dist, coh_tmm_data)
print(df(data['poyn'], coh_tmm_data['power_entering']))
#Poynting vector should be continuous
for pol in ['s', 'p']:
layer = 1
dist = 100
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
data = position_resolved(layer, dist, coh_tmm_data)
poyn1 = data['poyn']
layer = 2
dist = 0
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
data = position_resolved(layer, dist, coh_tmm_data)
poyn2 = data['poyn']
print(df(poyn1, poyn2))
return
def position_resolved_test2():
"""
Similar to position_resolved_test(), but with initial and final medium
having a complex refractive index.
"""
d_list = [inf, 100, 300, inf] #in nm
# "00" is before the 0'th layer. This is easy way to generate th0, ensuring
#that n0*sin(th0) is real.
n00 = 1
th00 = pi/4
n0 = 1+0.1j
th_0 = snell(n00, n0, th00)
n_list = [n0, 2.2+0.2j, 3.3+0.3j, 1+0.4j]
lam_vac = 400
layer = 1
dist = 37
print('The following should all be zero (within rounding errors):')
for pol in ['s', 'p']:
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
data = position_resolved(layer, dist, coh_tmm_data)
print(df(1, sum(absorp_in_each_layer(coh_tmm_data))))
#Poynting vector derivative should equal absorption
for pol in ['s', 'p']:
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
data1 = position_resolved(layer, dist, coh_tmm_data)
data2 = position_resolved(layer, dist+0.001, coh_tmm_data)
print('Finite difference should approximate derivative. Difference is '
+ str(df((data1['absor']+data2['absor'])/2,
(data1['poyn']-data2['poyn'])/0.001)))
#Poynting vector at end should equal T
layer = 2
dist = 300
for pol in ['s', 'p']:
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
data = position_resolved(layer, dist, coh_tmm_data)
print(df(data['poyn'], coh_tmm_data['T']))
#Poynting vector at start should equal power_entering
layer = 1
dist = 0
for pol in ['s', 'p']:
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
data = position_resolved(layer, dist, coh_tmm_data)
print(df(data['poyn'], coh_tmm_data['power_entering']))
#Poynting vector should be continuous
for pol in ['s', 'p']:
layer = 1
dist = 100
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
data = position_resolved(layer, dist, coh_tmm_data)
poyn1 = data['poyn']
layer = 2
dist = 0
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
data = position_resolved(layer, dist, coh_tmm_data)
poyn2 = data['poyn']
print(df(poyn1, poyn2))
return
def absorp_analytic_fn_test():
"""
Test absorp_analytic_fn functions
"""
d_list = [inf, 100, 300, inf] #in nm
n_list = [1, 2.2+0.2j, 3.3+0.3j, 1]
th_0 = pi/4
lam_vac = 400
layer = 1
d = d_list[layer]
dist = 37
print('The following should all be zero (within rounding errors):')
for pol in ['s', 'p']:
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
expected_absorp = position_resolved(layer, dist, coh_tmm_data)['absor']
absorp_fn = absorp_analytic_fn()
absorp_fn.fill_in(coh_tmm_data, layer)
print(df(absorp_fn.run(dist), expected_absorp))
absorp_fn2 = absorp_fn.copy().flip()
dist_from_other_side = d - dist
print(df(absorp_fn2.run(dist_from_other_side), expected_absorp))
return
def incoherent_test():
"""
test inc_tmm(). To do: Add more tests.
"""
print('The following should all be zero (within rounding errors):')
#3-incoherent-layer test, real refractive indices (so that R and T are the
#same in both directions)
n0 = 1
n1 = 2
n2 = 3
n_list = [n0, n1, n2]
d_list = [inf, 567, inf]
c_list = ['i', 'i', 'i']
th0 = pi/3
th1 = snell(n0, n1, th0)
th2 = snell(n0, n2, th0)
lam_vac = 400
for pol in ['s', 'p']:
inc_data = inc_tmm(pol, n_list, d_list, c_list, th0, lam_vac)
R0 = abs(interface_r(pol, n0, n1, th0, th1)**2)
R1 = abs(interface_r(pol, n1, n2, th1, th2)**2)
T0 = 1-R0
RR = R0 + R1*T0**2/(1-R0*R1)
print(df(inc_data['R'], RR))
print(df(inc_data['R']+inc_data['T'], 1))
#One finite layer with incoherent layers on both sides. Should agree with
#coherent program
n0 = 1+0.1j
n1 = 2+0.2j
n2 = 3+0.4j
n_list = [n0, n1, n2]
d_list = [inf, 100, inf]
c_list = ['i', 'c', 'i']
n00 = 1
th00 = pi/3
th0 = snell(n00, n0, th00)
lam_vac = 400
for pol in ['s', 'p']:
inc_data = inc_tmm(pol, n_list, d_list, c_list, th0, lam_vac)
coh_data = coh_tmm(pol, n_list, d_list, th0, lam_vac)
print(df(inc_data['R'], coh_data['R']))
print(df(inc_data['T'], coh_data['T']))
print(df(1, sum(inc_absorp_in_each_layer(inc_data))))
#One finite layer with three incoherent layers. Should agree with
#manual calculation + coherent program
n0 = 1+0.1j
n1 = 2+0.2j
n2 = 3+0.004j
n3 = 4+0.2j
d1 = 100
d2 = 10000
n_list = [n0, n1, n2, n3]
d_list = [inf, d1, d2, inf]
c_list = ['i', 'c', 'i', 'i']
n00 = 1
th00 = pi/3
th0 = snell(n00, n0, th00)
lam_vac = 400
for pol in ['s', 'p']:
inc_data = inc_tmm(pol, n_list, d_list, c_list, th0, lam_vac)
coh_data = coh_tmm(pol, [n0, n1, n2], [inf, d1, inf], th0, lam_vac)
th2 = snell(n0, n2, th0)
th3 = snell(n0, n3, th0)
coh_bdata = coh_tmm(pol, [n2, n1, n0], [inf, d1, inf], th2, lam_vac)
R02 = coh_data['R']
R20 = coh_bdata['R']
T02 = coh_data['T']
T20 = coh_bdata['T']
P2 = exp(-4 * pi * d2
* (n2 * cos(th2)).imag / lam_vac) #fraction passing through
R23 = interface_R(pol, n2, n3, th2, th3)
T23 = interface_T(pol, n2, n3, th2, th3)
#T = T02 * P2 * T23 + T02 * P2 * R23 * P2 * R20 * P2 * T23 + ...
T = T02 * P2 * T23 /(1 - R23 * P2 * R20 * P2)
#R = R02
# + T02 * P2 * R23 * P2 * T20
# + T02 * P2 * R23 * P2 * R20 * P2 * R23 * P2 * T20 + ...
R = R02 + T02 * P2 * R23 * P2 * T20 /(1 - R20 * P2 * R23 * P2)
print(df(inc_data['T'], T))
print(df(inc_data['R'], R))
#The coherent program with a thick but randomly-varying-thickness substrate
#should agree with the incoherent program.
nair = 1+0.1j
nfilm = 2+0.2j
nsub = 3
nf = 3+0.4j
n_list = [nair, nfilm, nsub, nf]
n00 = 1
th00 = pi/3
th0 = snell(n00, nair, th00)  # nair is this block's incident index (same value as the earlier n0)
lam_vac = 400
for pol in ['s', 'p']:
d_list_inc = [inf, 100, 1, inf] #sub thickness doesn't matter here
c_list = ['i', 'c', 'i', 'i']
inc_data = inc_tmm(pol, n_list, d_list_inc, c_list, th0, lam_vac)
coh_Rs = []
coh_Ts = []
for dsub in linspace(10000, 30000, 357):
d_list = [inf, 100, dsub, inf]
coh_data = coh_tmm(pol, n_list, d_list, th0, lam_vac)
coh_Rs.append(coh_data['R'])
coh_Ts.append(coh_data['T'])
print('Coherent with random thickness should agree with incoherent. '
+ 'Discrepancy is: ' + str(df(average(coh_Rs), inc_data['R'])))
print('Coherent with random thickness should agree with incoherent. '
+ 'Discrepancy is: ' + str(df(average(coh_Ts), inc_data['T'])))
#The coherent program with a thick substrate and randomly-varying wavelength
#should agree with the incoherent program.
n0 = 1+0.0j
n_list = [n0, 2+0.0002j, 3+0.0001j, 3+0.4j]
n00 = 1
th00 = pi/3
th0 = snell(n00, n0, th00)
d_list = [inf, 10000, 10200, inf]
c_list = ['i', 'i', 'i', 'i']
for pol in ['s', 'p']:
inc_absorp = array([0., 0., 0., 0.])
coh_absorp = array([0., 0., 0., 0.])
num_pts = 234
for lam_vac in linspace(40, 50, num_pts):
inc_data = inc_tmm(pol, n_list, d_list, c_list, th0, lam_vac)
inc_absorp += array(inc_absorp_in_each_layer(inc_data))
coh_data = coh_tmm(pol, n_list, d_list, th0, lam_vac)
coh_absorp += array(absorp_in_each_layer(coh_data))
inc_absorp /= num_pts
coh_absorp /= num_pts
print('Coherent with random wavelength should agree with incoherent. '
+ 'The two rows of this array should be the same:')
print(vstack((inc_absorp, coh_absorp)))
def RT_test():
"""
Tests of formulas for R and T
"""
print('The following should all be zero (within rounding errors):')
#When ni is real [see manual], R+T should equal 1
ni = 2
nf = 3.+0.2j
thi = pi/5
thf = snell(ni, nf, thi)
for pol in ['s', 'p']:
T = interface_T(pol, ni, nf, thi, thf)
R = interface_R(pol, ni, nf, thi, thf)
print(df(1, R+T))
#For a single interface, power_entering should equal T
ni = 2+0.1j
n00 = 1
th00 = pi/5
thi = snell(n00, ni, th00)
nf = 3.+0.2j
thf = snell(ni, nf, thi)
for pol in ['s', 'p']:
r = interface_r(pol, ni, nf, thi, thf)
pe = power_entering_from_r(pol, r, ni, thi)
T = interface_T(pol, ni, nf, thi, thf)
print(df(pe, T))
return
def coh_overflow_test():
"""
Test whether very very opaque layers will break the coherent program
"""
n_list = [ 1., 2+.1j, 1+3j, 4., 5.]
d_list = [inf, 50, 1e5, 50, inf]
lam = 200
alpha_d = imag(n_list[2]) * 4 * pi * d_list[2] / lam
print('Very opaque layer: Calculation should involve e^(-', alpha_d, ')!')
data = coh_tmm('s', n_list, d_list, 0, lam)
n_list2 = n_list[0:3]
d_list2 = d_list[0:3]
d_list2[-1] = inf
data2 = coh_tmm('s', n_list2, d_list2, 0, lam)
print('First entries of the following two lists should agree:')
print(data['vw_list'])
print(data2['vw_list'])
def inc_overflow_test():
"""
Test whether very very opaque layers will break the incoherent program
"""
n_list = [1., 2., 1+3j, 4., 5.]
d_list = [inf, 50, 1e5, 50, inf]
c_list = ['i', 'i', 'i', 'i', 'i']
lam = 200
alpha_d = imag(n_list[2]) * 4 * pi * d_list[2] / lam
print('Very opaque layer: Calculation should involve e^(-', alpha_d, ')!')
data = inc_tmm('s', n_list, d_list, c_list, 0, lam)
n_list2 = n_list[0:3]
d_list2 = d_list[0:3]
d_list2[-1] = inf
c_list2 = c_list[0:3]
data2 = inc_tmm('s', n_list2, d_list2, c_list2, 0, lam)
print('First entries of the following two lists should agree:')
print(data['power_entering_list'])
print(data2['power_entering_list'])
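# How to run (a sketch, assuming the package is importable as `tmm`): every
# check above prints values that should be ~0, e.g.
#     from tmm import tests
#     tests.run_all()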
|
the-stack_0_26374
|
#!/usr/bin/env python
# vim: set expandtab tabstop=4 shiftwidth=4:
import datetime
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.db.models import Q
import django_tables2 as tables
from .models import Artist, Album, Song
class ArtistTable(tables.Table):
name = tables.LinkColumn('exordium:artist', args=[tables.A('normname')])
albums = tables.Column(
verbose_name='Albums',
orderable=False,
empty_values=(),
)
tracks = tables.Column(
verbose_name='Tracks',
orderable=False,
empty_values=(),
)
def render_albums(self, record, **kwargs):
"""
Show the number of albums this artist has.
"""
album_filter = [(Q(artist=record) |
Q(song__artist=record) |
Q(song__group=record) |
Q(song__conductor=record) |
Q(song__composer=record))]
if not self.view.get_preference('show_live'):
album_filter.append(Q(live=False))
return Album.objects.filter(*album_filter).distinct().count()
def render_tracks(self, record, **kwargs):
"""
Show the number of tracks this artist has
"""
song_filter = [(Q(artist=record) | Q(group=record) |
Q(conductor=record) | Q(composer=record))]
if not self.view.get_preference('show_live'):
song_filter.append(Q(album__live=False))
return Song.objects.filter(*song_filter).count()
class Meta:
model = Artist
attrs = {'class': 'paleblue', 'id': 'artisttable'}
fields = ['name']
def __init__(self, *args, **kwargs):
self.view = kwargs.pop('view', None)
super(ArtistTable, self).__init__(*args, **kwargs)
class AlbumTable(tables.Table):
#artist = tables.LinkColumn('exordium:artist', args=[tables.A('artist.pk')])
artist = tables.TemplateColumn(
verbose_name='Artist',
orderable=True,
order_by=('artist.name'),
template_name='exordium/album_artist_column.html',
)
name = tables.LinkColumn('exordium:album', args=[tables.A('pk')])
time_added = tables.DateTimeColumn(
verbose_name = 'Date Added',
format='F j, Y',
#format='F j, Y g:i A',
)
img = tables.TemplateColumn(
verbose_name='',
orderable=False,
template_name='exordium/album_image_list.html',
)
tracks = tables.Column(
verbose_name='Tracks',
orderable=False,
empty_values=(),
)
time = tables.Column(
verbose_name='Length',
orderable=False,
empty_values=(),
)
year = tables.Column(
verbose_name='Year',
orderable=True,
order_by=('year', 'time_added'),
)
def render_year(self, value=0, **kwargs):
"""
Custom formatting for year (ie: don't display anything if the
year is zero)
"""
if value == 0:
return ''
else:
return '%d' % (value)
def render_tracks(self, record, **kwargs):
"""
Get a count of tracks for this album
"""
return(record.song_set.count())
def render_time(self, record, **kwargs):
"""
Get a total time for this album
"""
#delta = datetime.timedelta(seconds=record.get_total_time())
length = record.get_total_time()
minutes, seconds = divmod(length, 60)
if minutes > 60:
hours, minutes = divmod(minutes, 60)
return('%dh%dm' % (hours, minutes))
else:
return('%dm' % (minutes))
class Meta:
model = Album
per_page = 50
attrs = {'class': 'paleblue', 'id': 'albumtable'}
fields = ['img', 'artist', 'name', 'tracks', 'time', 'year', 'time_added']
class SongTable(tables.Table):
#artist = tables.LinkColumn('exordium:artist', args=[tables.A('artist.pk')])
artist = tables.TemplateColumn(
verbose_name='Artist',
orderable=True,
order_by=('artist.name'),
template_name='exordium/song_artist_column.html',
)
length = tables.Column(
footer=lambda table: table.render_length(sum(x.length for x in table.data))
)
dl = tables.TemplateColumn(
verbose_name='',
orderable=False,
template_name='exordium/link_song_download.html'
)
play = tables.TemplateColumn(
verbose_name='',
orderable=False,
template_name='exordium/link_song_play.html'
)
def render_length(self, value):
(minutes, seconds) = divmod(value, 60)
if minutes > 60:
(hours, minutes) = divmod(minutes, 60)
return '%d:%02d:%02d' % (hours, minutes, seconds)
else:
return '%d:%02d' % (minutes, seconds)
# TODO: I wish I could find a better way of having "dynamic" fields than
# this, but everything else I've tried has failed.
# TODO: Also, the cardinality row at the bottom will always say "items"
# instead of "songs." I've tried various ways of fixing that, too, to
# no avail.
class SongTableWithAlbumNoTracknum(SongTable):
album = tables.LinkColumn(
'exordium:album',
verbose_name='Album',
args=[tables.A('album.pk')]
)
class Meta:
model = Song
attrs = {'class': 'paleblue', 'id': 'songtable'}
show_footer = True
fields = ['artist', 'album', 'title', 'length', 'dl']
class SongTableNoAlbum(SongTable):
class Meta:
model = Song
attrs = {'class': 'paleblue', 'id': 'songtable'}
show_footer = True
fields = ['tracknum', 'artist', 'title', 'length', 'dl']
per_page = 100
class SongTableNoAlbumNoTracknum(SongTable):
class Meta:
model = Song
attrs = {'class': 'paleblue', 'id': 'songtable'}
show_footer = True
fields = ['artist', 'title', 'length', 'dl']
per_page = 100
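# Usage sketch (hypothetical view code, not part of this module): the tables
# are fed querysets in the usual django_tables2 way, e.g.
#     from django_tables2 import RequestConfig
#     table = AlbumTable(Album.objects.all())
#     RequestConfig(request).configure(table)
# ArtistTable additionally expects the calling view so it can read user
# preferences: ArtistTable(Artist.objects.all(), view=self)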
|
the-stack_0_26377
|
from py_compile import compile
from PyInstaller.__main__ import run as pybuild
import os
import sys
import shutil
import tarfile
from zipfile import ZipFile, ZIP_DEFLATED
def compile_package(packagename):
# App Path
appth = os.path.dirname(os.path.abspath(__file__))
print(appth)
# Cleanup existing build and dist dirs
if os.path.exists('build/'):
shutil.rmtree('build/')
if os.path.exists('dist/'):
shutil.rmtree('dist/')
if os.path.exists('__pycache__/'):
shutil.rmtree('__pycache__/')
for pyfile in os.listdir(os.path.join(appth, 'MainApp/')):
if pyfile.endswith('.py'):
# print(pyfile)
compile(os.path.join(appth, 'MainApp/') + pyfile,
os.path.join(appth, 'pycs/') + pyfile + 'c')
# For importing qtmodern theme
import qtmodern
qtm_path = os.path.dirname(os.path.abspath(qtmodern.__file__))
# Set Logo File
platformid = sys.platform
if platformid == 'linux' or platformid == 'darwin':
logofile = os.path.join(appth, "logo.png")
sep = ':'
else:
logofile = os.path.join(appth, "logo.ico")
sep = ';'
pybuild([
"{}/main.py".format(appth),
"--clean",
"--log-level=INFO",
"--onedir",
"--name={}".format(packagename),
"--hidden-import=qtmodern",
"--hidden-import=xlsxwriter",
"--add-data={}/pycs{}./MainApp".format(appth, sep),
"--add-data={}/MainApp/res{}./MainApp/res".format(appth, sep),
"--add-data={}/resources{}./qtmodern/resources".format(qtm_path, sep),
"--windowed",
"--icon={}".format(logofile)
])
# Cleanup Temporary Files
shutil.rmtree(os.path.join(appth, 'pycs/'))
os.remove('{}.spec'.format(packagename))
# For testing
# platformid = 'win32'
# Go to Dist Directory
os.chdir('dist/')
# Package in tar File
target_dir = '{}'.format(packagename)
if platformid == 'linux' or platformid == 'darwin':
with tarfile.open('{}.tar.gz'.format(os.path.join(appth, packagename)), 'w:gz') as tar:
tar.add(target_dir, arcname=packagename)
# Package in Zip File
else:
with ZipFile('{}.zip'.format(target_dir), 'w', ZIP_DEFLATED) as ziph:
for root, _, files in os.walk(target_dir):
for file in files:
ziph.write(os.path.join(root, file))
if __name__ == '__main__':
compile_package('SmallBiller')
print('Code compiled successfully')
|
the-stack_0_26378
|
import os
import json
import random
import requests
try:
from nba_api.library.debug.debug import DEBUG
except ImportError:
DEBUG = False
try:
from nba_api.library.debug.debug import DEBUG_STORAGE
except ImportError:
DEBUG_STORAGE = False
try:
from nba_api.library.debug.debug import PROXY
except ImportError:
PROXY = ''
if DEBUG:
from hashlib import md5
print('DEBUG MODE')
class NBAResponse:
def __init__(self, response, url):
self._response = response
self._url = url
def get_response(self):
return self._response
def get_dict(self):
return json.loads(self._response)
def get_json(self):
return json.dumps(self.get_dict())
def valid_json(self):
try:
self.get_dict()
except ValueError:
return False
return True
def get_url(self):
return self._url
class NBAHTTP:
nba_response = NBAResponse
base_url = None
headers = None
def clean_contents(self, contents):
return contents
def send_api_request(self, endpoint, parameters, referer=None, proxy=PROXY, raise_exception_on_error=False):
if not self.base_url:
raise Exception('Cannot use send_api_request from _HTTP class.')
base_url = self.base_url.format(endpoint=endpoint)
endpoint = endpoint.lower()
headers = self.headers
if referer:
headers['Referer'] = referer
proxies = PROXY
currProxy = None
if proxy:
currProxy = random.sample(proxy, 1)[0]
proxies = {
"http": currProxy,
"https": currProxy,
}
#print(proxy, currProxy)
contents = None
url = None
if DEBUG and DEBUG_STORAGE:
print(endpoint, parameters)
directory_name = 'debug_storage'
parameter_string = '&'.join('{}={}'.format(key, val) for key, val in sorted(parameters.items())).encode('utf-8')
file_name = "{}-{}.txt".format(endpoint, md5(parameter_string).hexdigest())
file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'debug', directory_name)
if not os.path.exists(file_path):
os.makedirs(file_path)
file_path = os.path.join(file_path, file_name)
print(file_name, os.path.isfile(file_path))
if os.path.isfile(file_path):
f = open(file_path, 'r')
contents = f.read()
f.close()
url = "{}?{}".format(base_url, parameter_string)
print('loading from file...')
if not contents:
response = requests.get(url=base_url, params=parameters, headers=headers, proxies=proxies)
url = response.url
contents = response.text
contents = self.clean_contents(contents)
if DEBUG and DEBUG_STORAGE:
f = open(file_path, 'w')
f.write(contents)
f.close()
data = self.nba_response(response=contents, url=url)
if raise_exception_on_error and not data.valid_json():
raise Exception('InvalidResponse: Response is not in a valid JSON format.')
return data
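# Usage sketch (an assumption about downstream code, not part of this module):
# NBAHTTP is a base class; a concrete client subclasses it and fills in
# base_url and headers before calling send_api_request, e.g.
#     class StatsHTTP(NBAHTTP):
#         base_url = 'https://stats.nba.com/stats/{endpoint}'
#         headers = {'User-Agent': 'Mozilla/5.0'}
#     data = StatsHTTP().send_api_request('<endpoint>', {<parameters>})
#     print(data.get_dict())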
|
the-stack_0_26379
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves.urllib import parse
from heatclient._i18n import _
from heatclient.common import base
from heatclient.common import utils
from heatclient import exc
class Stack(base.Resource):
def __repr__(self):
return "<Stack %s>" % self._info
def preview(self, **fields):
return self.manager.preview(**fields)
def create(self, **fields):
return self.manager.create(self.identifier, **fields)
def update(self, **fields):
self.manager.update(self.identifier, **fields)
def delete(self):
return self.manager.delete(self.identifier)
def abandon(self):
return self.manager.abandon(self.identifier)
def snapshot(self, name=None):
return self.manager.snapshot(self.identifier, name)
def snapshot_show(self, snapshot_id):
return self.manager.snapshot_show(self.identifier, snapshot_id)
def snapshot_delete(self, snapshot_id):
return self.manager.snapshot_delete(self.identifier, snapshot_id)
def restore(self, snapshot_id):
return self.manager.restore(self.identifier, snapshot_id)
def snapshot_list(self):
return self.manager.snapshot_list(self.identifier)
def output_list(self):
return self.manager.output_list(self.identifier)
def output_show(self, output_key):
return self.manager.output_show(self.identifier, output_key)
def environment(self):
return self.manager.environment(self.identifier)
def files(self):
return self.manager.files(self.identifier)
def get(self):
# set_loaded() first ... so if we have to bail, we know we tried.
self._loaded = True
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.identifier)
if new:
self._add_details(new._info)
@property
def action(self):
s = self.stack_status
# Return everything before the first underscore
return s[:s.index('_')]
@property
def status(self):
s = self.stack_status
# Return everything after the first underscore
return s[s.index('_') + 1:]
@property
def identifier(self):
return '%s/%s' % (self.stack_name, self.id)
class StackChildManager(base.BaseManager):
@property
def api(self):
return self.client
def _resolve_stack_id(self, stack_id):
# if the id already has a slash in it,
# then it is already {stack_name}/{stack_id}
if stack_id.find('/') > 0:
return stack_id
# We want to capture the redirect, not actually get the stack,
# since all we want is the stacks:lookup response to get the
# fully qualified ID, and not all users are allowed to do the
# redirected stacks:show, so pass redirect=False
resp = self.client.get('/stacks/%s' % stack_id, redirect=False)
location = resp.headers.get('location')
if not location:
message = _("Location not returned with redirect")
raise exc.InvalidEndpoint(message=message)
return location.split('/stacks/', 1)[1]
class StackManager(StackChildManager):
resource_class = Stack
def list(self, **kwargs):
"""Get a list of stacks.
:param limit: maximum number of stacks to return
:param marker: begin returning stacks that appear later in the stack
list than that represented by this stack id
:param filters: dict of direct comparison filters that mimics the
structure of a stack object
:rtype: list of :class:`Stack`
"""
def paginate(params):
'''Paginate stacks, even if more than API limit.'''
current_limit = int(params.get('limit') or 0)
url = '/stacks?%s' % parse.urlencode(params, True)
stacks = self._list(url, 'stacks')
for stack in stacks:
yield stack
num_stacks = len(stacks)
remaining_limit = current_limit - num_stacks
if remaining_limit > 0 and num_stacks > 0:
params['limit'] = remaining_limit
params['marker'] = stack.id
for stack in paginate(params):
yield stack
params = {}
if 'filters' in kwargs:
filters = kwargs.pop('filters')
params.update(filters)
for key, value in kwargs.items():
if value:
params[key] = value
return paginate(params)
def preview(self, **kwargs):
"""Preview a stack."""
headers = self.client.credentials_headers()
resp = self.client.post('/stacks/preview',
data=kwargs, headers=headers)
body = utils.get_response_body(resp)
return Stack(self, body.get('stack'))
def create(self, **kwargs):
"""Create a stack."""
headers = self.client.credentials_headers()
resp = self.client.post('/stacks',
data=kwargs, headers=headers)
body = utils.get_response_body(resp)
return body
def update(self, stack_id, **kwargs):
"""Update a stack."""
headers = self.client.credentials_headers()
if kwargs.pop('existing', None):
self.client.patch('/stacks/%s' % stack_id, data=kwargs,
headers=headers)
else:
self.client.put('/stacks/%s' % stack_id, data=kwargs,
headers=headers)
def preview_update(self, stack_id, **kwargs):
"""Preview a stack update."""
stack_identifier = self._resolve_stack_id(stack_id)
headers = self.client.credentials_headers()
path = '/stacks/%s/preview' % stack_identifier
if kwargs.pop('show_nested', False):
path += '?show_nested=True'
if kwargs.pop('existing', None):
resp = self.client.patch(path, data=kwargs, headers=headers)
else:
resp = self.client.put(path, data=kwargs, headers=headers)
body = utils.get_response_body(resp)
return body
def delete(self, stack_id):
"""Delete a stack."""
self._delete("/stacks/%s" % stack_id)
def abandon(self, stack_id):
"""Abandon a stack."""
stack_identifier = self._resolve_stack_id(stack_id)
resp = self.client.delete('/stacks/%s/abandon' % stack_identifier)
body = utils.get_response_body(resp)
return body
def export(self, stack_id):
"""Export data of a stack."""
stack_identifier = self._resolve_stack_id(stack_id)
resp = self.client.get('/stacks/%s/export' % stack_identifier)
body = utils.get_response_body(resp)
return body
def snapshot(self, stack_id, name=None):
"""Snapshot a stack."""
stack_identifier = self._resolve_stack_id(stack_id)
data = {}
if name:
data['name'] = name
resp = self.client.post('/stacks/%s/snapshots' % stack_identifier,
data=data)
body = utils.get_response_body(resp)
return body
def snapshot_show(self, stack_id, snapshot_id):
stack_identifier = self._resolve_stack_id(stack_id)
resp = self.client.get('/stacks/%s/snapshots/%s' % (stack_identifier,
snapshot_id))
body = utils.get_response_body(resp)
return body
def snapshot_delete(self, stack_id, snapshot_id):
stack_identifier = self._resolve_stack_id(stack_id)
resp = self.client.delete('/stacks/%s/snapshots/%s' %
(stack_identifier, snapshot_id))
body = utils.get_response_body(resp)
return body
def restore(self, stack_id, snapshot_id):
stack_identifier = self._resolve_stack_id(stack_id)
resp = self.client.post('/stacks/%s/snapshots/%s/restore' %
(stack_identifier, snapshot_id))
body = utils.get_response_body(resp)
return body
def snapshot_list(self, stack_id):
stack_identifier = self._resolve_stack_id(stack_id)
resp = self.client.get('/stacks/%s/snapshots' % stack_identifier)
body = utils.get_response_body(resp)
return body
def output_list(self, stack_id):
stack_identifier = self._resolve_stack_id(stack_id)
resp = self.client.get('/stacks/%s/outputs' % stack_identifier)
body = utils.get_response_body(resp)
return body
def output_show(self, stack_id, output_key):
stack_identifier = self._resolve_stack_id(stack_id)
resp = self.client.get('/stacks/%(id)s/outputs/%(key)s' % {
'id': stack_identifier,
'key': output_key
})
body = utils.get_response_body(resp)
return body
def get(self, stack_id, resolve_outputs=True):
"""Get the metadata for a specific stack.
:param stack_id: Stack ID to lookup
:param resolve_outputs: If True, then outputs for this
stack will be resolved
"""
kwargs = {}
if not resolve_outputs:
kwargs['params'] = {"resolve_outputs": False}
resp = self.client.get('/stacks/%s' % stack_id, **kwargs)
body = utils.get_response_body(resp)
return Stack(self, body.get('stack'))
def template(self, stack_id):
"""Get template content for a specific stack as a parsed JSON object.
:param stack_id: Stack ID to get the template for
"""
resp = self.client.get('/stacks/%s/template' % stack_id)
body = utils.get_response_body(resp)
return body
def environment(self, stack_id):
"""Returns the environment for an existing stack.
:param stack_id: identifies the stack
:return:
"""
resp = self.client.get('/stacks/%s/environment' % stack_id)
body = utils.get_response_body(resp)
return body
def files(self, stack_id):
"""Returns the files for an existing stack.
:param stack_id: identifies the stack
:return:
"""
resp = self.client.get('/stacks/%s/files' % stack_id)
body = utils.get_response_body(resp)
return body
def validate(self, **kwargs):
"""Validate a stack template."""
url = '/validate'
params = {}
if kwargs.pop('show_nested', False):
params['show_nested'] = True
ignore_errors = kwargs.pop('ignore_errors', None)
if ignore_errors:
params['ignore_errors'] = ignore_errors
args = {}
if kwargs:
args['data'] = kwargs
if params:
args['params'] = params
resp = self.client.post(url, **args)
body = utils.get_response_body(resp)
return body
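# Usage sketch (assumes an authenticated heatclient Client named `heat`,
# built elsewhere): StackManager is normally reached as `heat.stacks`, e.g.
#     for stack in heat.stacks.list(filters={'stack_status': 'CREATE_COMPLETE'}):
#         print(stack.identifier)
#     heat.stacks.get('mystack')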
|
the-stack_0_26380
|
# -*- coding: utf-8 -*-
"""Utilities to scrape elements from / input text into an MX page.
Current functions:
mx_get_client_vpn_subnet,
mx_get_client_vpn_dns_mode,
mx_get_client_vpn_nameservers,
mx_get_client_vpn_wins_enabled,
mx_get_client_vpn_secret,
mx_get_client_auth_type,
mx_get_sentry_vpn_enabled,
mx_get_active_directory_enabled,
mx_get_primary_uplink,
mx_get_amp_enabled,
mx_get_ids_mode,
mx_get_ids_ruleset
"""
from . import page_utils
def mx_get_client_vpn_subnet(self):
"""Get the Client VPN subnet/cidr (string).
Location: Security appliance > Client VPN > Client VPN
This value will exist regardless of whether Client VPN is enabled.
Sample HTML:
<input autocomplete="new-password" id=
"wired_config_client_vpn_subnet" name="wired_config[
client_vpn_subnet]" size="20" type="text" value="10.0.0.0/24" />
"""
self.open_route('/configure/client_vpn_settings', "Security appliance")
return page_utils.get_input_var_value(
self.get_page(),
'wired_config_client_vpn_subnet')
def mx_get_client_vpn_dns_mode(self):
"""Get the Client VPN DNS mode (string).
Location: Security appliance > Client VPN > Client VPN
Sample HTML:
<select id="wired_config_client_vpn_dns_mode" name=
"wired_config[client_vpn_dns_mode]"><option value="google_dns"
selected="selected">Use Google Public DNS</option><option value=
"opendns">Use OpenDNS</option><option value="custom">
Specify nameservers...</option></select>
"""
route = '/configure/client_vpn_settings'
self.open_route(route)
return page_utils.get_dropdown_value(
self.get_page(),
'wired_config_client_vpn_dns_mode')
def mx_get_client_vpn_nameservers(self):
r"""Return a list of custom name servers.
Location: Security appliance > Client VPN > Client VPN
Sample HTML:
<textarea class="noresize" cols="20" id=
"wired_config_client_vpn_dns" name="wired_config[client_vpn_dns]"
rows="2">\n10.0.0.2\n10.0.0.3</textarea>
"""
self.open_route('/configure/client_vpn_settings', "Security appliance")
nameservers = page_utils.get_textarea_list(
self.get_page(),
var_id='wired_config_client_vpn_dns')
if nameservers == 'Specify nameservers...':
nameservers = None
return nameservers
def mx_get_client_vpn_wins_enabled(self):
"""Return a bool of whether Client VPN WINS is enabled.
Location: Security appliance > Client VPN > Client VPN
Sample HTML:
<select id="wired_config_client_vpn_wins_enabled" name=
"wired_config[client_vpn_wins_enabled]"><option value="true">
Specify WINS servers...</option><option value="false"
selected="selected">No WINS servers</option></select>
"""
self.open_route('/configure/client_vpn_settings', "Security appliance")
dropdown_value = page_utils.get_dropdown_value(
self.get_page(),
var_id='wired_config_client_vpn_wins_enabled')
# get_dropdown_value returns the selected option's text, which for this
# dropdown is "Specify WINS servers..." when WINS is enabled.
return dropdown_value == 'Specify WINS servers...'
def mx_get_client_vpn_secret(self):
"""Get Client VPN secret.
Location: Security appliance > Client VPN > Client VPN
Sample HTML for DDNS name:
<input autocomplete="new-password" class="jsAnalyticsExclude"
id="wired_config_client_vpn_secret" maxlength="32"
name="wired_config[client_vpn_secret]" size="25"
value="my-client-vpn-psk" type="password">
"""
self.open_route('/configure/client_vpn_settings', "Security appliance")
return page_utils.get_input_var_value(
self.get_page(),
var_id='wired_config_client_vpn_secret')
def mx_get_client_auth_type(self):
"""Get the Client VPN authentication type.
Location: Security appliance > Client VPN > Client VPN
Sample HTML:
select id="wired_config_client_vpn_auth_type"
name="wired_config[client_vpn_auth_type]">
<option value="meraki" selected="selected">Meraki cloud</option>
<option value="radius">RADIUS</option>
<option value="active_directory">Active Directory</option></select>
"""
self.open_route('/configure/client_vpn_settings', "Security appliance")
return page_utils.get_dropdown_value(
self.get_page(),
var_id='wired_config_client_vpn_auth_type')
def mx_get_sentry_vpn_enabled(self):
"""Return the bool of whether Sentry VPN is enabled.
Location: Security appliance > Client VPN > Client VPN
Sample HTML:
<select id="wired_config_client_vpn_pcc_access_enabled" name=
"wired_config[client_vpn_pcc_access_enabled]"><option value="true">
Enabled</option><option value="false" selected="selected">
Disabled</option></select>
"""
self.open_route('/configure/client_vpn_settings', "Security appliance")
dropdown_value = page_utils.get_dropdown_value(
self.get_page(),
var_id='wired_config_client_vpn_pcc_access_enabled')
return dropdown_value == 'Enabled'
def mx_get_active_directory_enabled(self):
"""Return the bool of whether Active Directory auth is enabled.
Location: Security appliance > Active Directory
Sample HTML:
<select id="active_directory_enabled_select" name=
"active_directory_enabled_select"><option value="true">Authenticate
users with Active Directory</option><option value="false" selected=
"selected">No authentication</option></select>
"""
self.open_route('/configure/active_directory', "Security appliance")
dropdown_value = page_utils.get_dropdown_value(
self.get_page(),
var_id='active_directory_enabled_select')
return dropdown_value == 'Authenticate users with Active Directory'
def mx_get_primary_uplink(self):
"""Return the MX's primary uplink of ['WAN1', 'WAN2', 'Cellular'].
Location: Security appliance > Traffic Shaping > Uplink selection
Sample HTML:
<select id="wired_config_primary_uplink" name=
"wired_config[primary_uplink]" primary_uplink=
"primary_uplink_select"><option value="0" selected="selected">WAN 1
</option><option value="1">WAN 2</option></select>
"""
self.open_route('/configure/traffic_shaping', "Security appliance")
return page_utils.get_dropdown_value(
self.get_page(),
var_id='wired_config_primary_uplink')
def mx_get_amp_enabled(self):
"""Get the bool of whether AMP is enabled.
# Should probably also check whether
Location: Security appliance > Threat Protection > AMP
Sample HTML:
<select id="scanning_enabled_select"
name="scanning_enabled_select">
<option value="true" selected="selected">Enabled</option>
<option value="false">Disabled</option></select>
"""
self.open_route('/configure/security_filtering', "Security appliance")
dropdown_value = page_utils.get_dropdown_value(
self.get_page(),
var_id='scanning_enabled_select')
return dropdown_value == 'Enabled'
def mx_get_ids_mode(self):
"""Return the ids mode of ['Disabled', 'Detection', 'Prevention'].
Location: Security appliance > Threat Protection > IDS/IPS
Sample HTML:
<select id="ids_mode_select" name="ids_mode_select">
<option value="disabled" selected="selected">Disabled</option>
<option value="detection">Detection</option>
<option value="prevention">Prevention</option></select>
"""
self.open_route('/configure/security_filtering', "Security appliance")
return page_utils.get_dropdown_value(
self.get_page(),
var_id='ids_mode_select')
def mx_get_ids_ruleset(self):
"""Return the ids mode of ['Connectivity', 'Balanced', 'Security'].
Location: Security appliance > Threat Protection > IDS/IPS
Sample HTML:
<select id="ids_ruleset_select" name="ids_ruleset_select">
<option value="high">Connectivity</option>
<option value="medium" selected="selected">Balanced</option>
<option value="low">Security</option></select>
"""
# If IDS is disabled, don't send another value, even if it is in the HTML
if self.mx_get_ids_mode() == 'Disabled':
return 'Disabled'
self.open_route('/configure/security_filtering', "Security appliance")
return page_utils.get_dropdown_value(
self.get_page(),
var_id='ids_ruleset_select')
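# Usage sketch (an assumption about the surrounding project): these functions
# are bound as methods of a dashboard-browser class that supplies open_route()
# and get_page(), so callers would invoke, e.g.
#     browser.mx_get_client_vpn_subnet()
# after selecting a network that contains a security appliance.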
|
the-stack_0_26381
|
'''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Vectors/resolve_ecoregions.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Vectors/resolve_ecoregions.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Datasets/Vectors/resolve_ecoregions.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Vectors/resolve_ecoregions.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium.
'''
# %%
import subprocess
try:
import geehydro
except ImportError:
print('geehydro package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro'])
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once.
'''
# %%
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
def set_color(f):
c = ee.String(f.get('COLOR')).slice(1)
return f \
.set('R', ee.Number.parse(c.slice(0, 2), 16)) \
.set('G', ee.Number.parse(c.slice(2, 4), 16)) \
.set('B', ee.Number.parse(c.slice(4, 6), 16))
fc = ee.FeatureCollection('RESOLVE/ECOREGIONS/2017') \
.map(lambda f: set_color(f))
base = ee.Image(0).mask(0).toInt8()
Map.addLayer(base.paint(fc, 'R')
.addBands(base.paint(fc, 'G')
.addBands(base.paint(fc, 'B'))), {'gamma': 0.3})
# # Load a FeatureCollection from a table dataset: 'RESOLVE' ecoregions.
# ecoregions = ee.FeatureCollection('RESOLVE/ECOREGIONS/2017')
# # Display as default and with a custom color.
# Map.addLayer(ecoregions, {}, 'default display', False)
# Map.addLayer(ecoregions, {'color': 'FF0000'}, 'colored', False)
# Map.addLayer(ecoregions.draw(**{'color': '006600', 'strokeWidth': 5}), {}, 'drawn', False)
# # Create an empty image into which to paint the features, cast to byte.
# empty = ee.Image().byte()
# # Paint all the polygon edges with the same number and 'width', display.
# outline = empty.paint(**{
# 'featureCollection': ecoregions,
# 'color': 1,
# 'width': 3
# })
# Map.addLayer(outline, {'palette': 'FF0000'}, 'edges')
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
|
the-stack_0_26383
|
import scrapy
class QuotesSpider(scrapy.Spider):
name = "quotes"
def start_requests(self):
urls = ['http://www.yes24.com/24/category/bestseller?CategoryNumber=001&sumgb=09&year=2008&month=1',
'http://www.yes24.com/24/category/bestseller?CategoryNumber=001&sumgb=09&year=2008&month=2',
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
page = response.url.split("/")[-2]
filename = 'quotes-%s.html' % page
with open(filename, 'wb') as f:
f.write(response.body)
self.log('Saved file %s' % filename)
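# To run (assuming this file lives in the spiders/ package of a Scrapy
# project): `scrapy crawl quotes` from the project root; each response body is
# written to a quotes-*.html file in the working directory.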
|
the-stack_0_26385
|
"""A Track in the database and any related logic."""
import datetime
import logging
from pathlib import Path
from typing import List, Tuple, Type, TypeVar
import mediafile
import sqlalchemy
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relationship
from sqlalchemy.schema import ForeignKey, Table, UniqueConstraint
from moe.library import SABase
from moe.library.album import Album
from moe.library.lib_item import LibItem, PathType
__all__ = ["Track", "TrackError"]
log = logging.getLogger("moe.track")
class TrackError(Exception):
"""Error creating a Track."""
class _Genre(SABase):
"""A track can have multiple genres."""
__tablename__ = "genre"
name: str = Column(String, nullable=False, primary_key=True)
def __init__(self, name: str):
self.name = name
track_genre = Table(
"track_genre",
SABase.metadata,
Column("genre", String, ForeignKey("genre.name")),
Column("track_id", Integer, ForeignKey("track._id")),
)
__table_args__ = ()
# Track generic, used for typing classmethod
T = TypeVar("T", bound="Track") # noqa: WPS111
class Track(LibItem, SABase):
"""A single track.
Attributes:
album (str)
albumartist (str)
album_obj (Album): Corresponding Album object.
artist (str)
date (datetime.date): Album release date.
disc (int): Disc number the track is on.
disc_total (int): Number of discs in the album.
genre (str): String of all genres concatenated with ';'.
genres (List[str]): List of all genres.
mb_album_id (str): Musicbrainz album aka release ID.
mb_track_id (str): Musicbrainz track ID.
path (Path): Filesystem path of the track file.
title (str)
track_num (int)
year (int): Album release year.
Note:
Altering any album-related property attributes, will result in changing the
album field and thus all other tracks in the album as well.
"""
__tablename__ = "track"
_id: int = Column(Integer, primary_key=True)
artist: str = Column(String, nullable=False, default="")
disc: int = Column(Integer, nullable=False, default=1)
mb_track_id: str = Column(String, nullable=False, default="")
path: Path = Column(PathType, nullable=False, unique=True)
title: str = Column(String, nullable=False, default="")
track_num: int = Column(Integer, nullable=False)
_album_id: int = Column(Integer, ForeignKey("album._id"))
album_obj: Album = relationship("Album", back_populates="tracks")
album: str = association_proxy("album_obj", "title")
albumartist: str = association_proxy("album_obj", "artist")
date: datetime.date = association_proxy("album_obj", "date")
disc_total: int = association_proxy("album_obj", "disc_total")
mb_album_id: str = association_proxy("album_obj", "mb_album_id")
year: int = association_proxy("album_obj", "year")
_genres: List[_Genre] = relationship(
"_Genre", secondary=track_genre, collection_class=list
)
genres: List[str] = association_proxy("_genres", "name")
__table_args__ = (UniqueConstraint("disc", "track_num", "_album_id"),)
def __init__(self, album: Album, track_num: int, path: Path, **kwargs):
"""Create a track.
Args:
album: Album the track belongs to.
track_num: Track number.
path: Filesystem path of the track file.
**kwargs: Any other fields to assign to the Track.
Note:
If you wish to add several tracks to the same album, ensure the album
already exists in the database, or use `session.merge()`.
"""
self.album_obj = album
self.path = path
self.track_num = track_num
# set default values
self.artist = ""
self.disc = 1
self.mb_track_id = ""
self.title = ""
for key, value in kwargs.items():
if value:
setattr(self, key, value)
@classmethod
def from_tags(cls: Type[T], path: Path, album_path: Path = None) -> T:
"""Alternate initializer that creates a Track from its tags.
Will read any tags from the given path and save them to the Track.
Args:
path: Filesystem path of the track to add.
album_path: Filesystem path of the track's album. Defaults to using the
parent of the track path.
Returns:
Track instance.
Raises:
TrackError: Missing required tags.
"""
audio_file = mediafile.MediaFile(path)
missing_tags: List[str] = []
if not audio_file.album:
missing_tags.append("album")
if not audio_file.albumartist and not audio_file.artist:
missing_tags.append("albumartist")
if not audio_file.track:
missing_tags.append("track_num")
if not audio_file.date:
missing_tags.append("date")
if missing_tags:
raise TrackError(
f"'{path}' is missing required tag(s): {', '.join(missing_tags)}"
)
# use artist as the backup for the albumartist if missing
if audio_file.albumartist:
albumartist = audio_file.albumartist
else:
log.debug(
f"'{path}' is missing an albumartist, using the artist"
f" '{audio_file.artist}' as a backup."
)
albumartist = audio_file.artist
if not album_path:
album_path = path.parent
album = Album(
artist=albumartist,
title=audio_file.album,
date=audio_file.date,
disc_total=audio_file.disctotal,
mb_album_id=audio_file.mb_albumid,
path=album_path,
)
return cls(
album=album,
path=path,
track_num=audio_file.track,
artist=audio_file.artist,
disc=audio_file.disc,
genres=audio_file.genres,
mb_track_id=audio_file.mb_releasetrackid,
title=audio_file.title,
)
@property
def genre(self) -> str:
"""Returns a string of all genres concatenated with ';'."""
return ";".join(self.genres)
@genre.setter
def genre(self, genre_str: str):
"""Sets a track's genre from a string.
Args:
genre_str: For more than one genre, they should be split with ';'.
"""
self.genres = [genre.strip() for genre in genre_str.split(";")]
def fields(self) -> Tuple[str, ...]:
"""Returns the public fields, or non-method attributes, of a Track."""
return (
"album",
"albumartist",
"album_obj",
"artist",
"date",
"disc",
"disc_total",
"genre",
"genres",
"mb_album_id",
"mb_track_id",
"path",
"title",
"track_num",
"year",
)
def __eq__(self, other) -> bool:
"""Compares a Track by it's attributes."""
if isinstance(other, Track):
if self.album_obj.is_unique(other.album_obj):
return False
for attr in self.fields():
if attr == "album_obj": # prevent cyclic comparison
continue
if getattr(self, attr) != getattr(other, attr):
return False
return True
return False
def __lt__(self, other) -> bool:
"""Sort based on album, then disc, then track number."""
if self.album_obj == other.album_obj:
if self.disc == other.disc:
return self.track_num < other.track_num
return self.disc < other.disc
return self.album_obj < other.album_obj
def __str__(self):
"""String representation of a track."""
return f"{self.artist} - {self.title}"
def __repr__(self):
"""Represents a Track using its primary key, unique fields, title and artist."""
return (
f"{self.__class__.__name__}("
f"id={repr(self._id)}, "
f"disc={repr(self.disc)}, "
f"track_num={repr(self.track_num)}, "
f"{repr(self.album_obj)}, "
f"artist={repr(self.artist)}, "
f"title={repr(self.title)}, "
f"path={repr(self.path)})"
)
@sqlalchemy.orm.validates("_genres")
def _append_genre(self, key: str, genre: _Genre) -> _Genre:
"""Prevents duplicate genres in the database by returning any existing ones."""
genre_session = sqlalchemy.orm.sessionmaker.object_session(self)
if not genre_session:
return genre
persistent_genre = genre_session.get(_Genre, genre.name)
if persistent_genre:
return persistent_genre
return genre
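# Usage sketch (assumes a tagged audio file exists at the given path):
#     from pathlib import Path
#     track = Track.from_tags(Path("01 - song.mp3"))
#     print(track.album_obj, track.track_num, track.genre)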
|
the-stack_0_26386
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch.nn.modules.utils import _pair
def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
cfg):
"""Compute mask target for positive proposals in multiple images.
Args:
pos_proposals_list (list[Tensor]): Positive proposals in multiple
images.
pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each
positive proposals.
gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of
each image.
cfg (dict): Config dict that specifies the mask size.
Returns:
list[Tensor]: Mask target of each image.
"""
cfg_list = [cfg for _ in range(len(pos_proposals_list))]
mask_targets = map(mask_target_single, pos_proposals_list,
pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
mask_targets = list(mask_targets)
if len(mask_targets) > 0:
mask_targets = torch.cat(mask_targets)
return mask_targets
def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
"""Compute mask target for each positive proposal in the image.
Args:
pos_proposals (Tensor): Positive proposals.
pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals.
gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap
or Polygon.
cfg (dict): Config dict that indicate the mask size.
Returns:
Tensor: Mask target of each positive proposals in the image.
"""
device = pos_proposals.device
mask_size = _pair(cfg.mask_size)
num_pos = pos_proposals.size(0)
if num_pos > 0:
proposals_np = pos_proposals.cpu().numpy()
maxh, maxw = gt_masks.height, gt_masks.width
proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)
proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)
pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
mask_targets = gt_masks.crop_and_resize(
proposals_np, mask_size, device=device,
inds=pos_assigned_gt_inds).to_ndarray()
mask_targets = torch.from_numpy(mask_targets).float().to(device)
else:
mask_targets = pos_proposals.new_zeros((0, ) + mask_size)
return mask_targets
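# Usage sketch: a minimal call of mask_target_single with random inputs, assuming
# mmdet's BitmapMasks and mmcv's Config are available; the shapes, the single box
# and the 28x28 mask size are illustrative only.
if __name__ == "__main__":
    from mmcv import Config
    from mmdet.core.mask import BitmapMasks
    h, w = 64, 64
    gt_masks = BitmapMasks(np.random.randint(0, 2, (2, h, w), dtype=np.uint8), h, w)
    pos_proposals = torch.tensor([[4.0, 4.0, 40.0, 40.0]])
    pos_assigned_gt_inds = torch.tensor([0])
    cfg = Config(dict(mask_size=28))
    print(mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg).shape)
    # expected: torch.Size([1, 28, 28])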
|
the-stack_0_26388
|
import torch
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, ModuleList, auto_fp16
from mmdet.models.builder import NECKS
@NECKS.register_module()
class FPNC(BaseModule):
"""FPN-like fusion module in Real-time Scene Text Detection with
Differentiable Binarization.
This was partially adapted from https://github.com/MhLiao/DB and
https://github.com/WenmuZhou/DBNet.pytorch
"""
def __init__(self,
in_channels,
lateral_channels=256,
out_channels=64,
bias_on_lateral=False,
bn_re_on_lateral=False,
bias_on_smooth=False,
bn_re_on_smooth=False,
conv_after_concat=False,
init_cfg=None):
super().__init__(init_cfg=init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.lateral_channels = lateral_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.bn_re_on_lateral = bn_re_on_lateral
self.bn_re_on_smooth = bn_re_on_smooth
self.conv_after_concat = conv_after_concat
self.lateral_convs = ModuleList()
self.smooth_convs = ModuleList()
self.num_outs = self.num_ins
for i in range(self.num_ins):
norm_cfg = None
act_cfg = None
if self.bn_re_on_lateral:
norm_cfg = dict(type='BN')
act_cfg = dict(type='ReLU')
l_conv = ConvModule(
in_channels[i],
lateral_channels,
1,
bias=bias_on_lateral,
conv_cfg=None,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
norm_cfg = None
act_cfg = None
if self.bn_re_on_smooth:
norm_cfg = dict(type='BN')
act_cfg = dict(type='ReLU')
smooth_conv = ConvModule(
lateral_channels,
out_channels,
3,
bias=bias_on_smooth,
padding=1,
conv_cfg=None,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.smooth_convs.append(smooth_conv)
if self.conv_after_concat:
norm_cfg = dict(type='BN')
act_cfg = dict(type='ReLU')
self.out_conv = ConvModule(
out_channels * self.num_outs,
out_channels * self.num_outs,
3,
padding=1,
conv_cfg=None,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
@auto_fp16()
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i])
for i, lateral_conv in enumerate(self.lateral_convs)
]
used_backbone_levels = len(laterals)
# build top-down path
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] += F.interpolate(
laterals[i], size=prev_shape, mode='nearest')
# build outputs
# part 1: from original levels
outs = [
self.smooth_convs[i](laterals[i])
for i in range(used_backbone_levels)
]
for i, out in enumerate(outs):
outs[i] = F.interpolate(
outs[i], size=outs[0].shape[2:], mode='nearest')
out = torch.cat(outs, dim=1)
if self.conv_after_concat:
out = self.out_conv(out)
return out
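# Usage sketch: pushes random multi-scale features through FPNC. The channel counts
# and the 4/8/16/32-stride spatial sizes below are illustrative, not required values.
if __name__ == "__main__":
    neck = FPNC(in_channels=[64, 128, 256, 512])
    feats = [
        torch.rand(1, 64, 160, 160),
        torch.rand(1, 128, 80, 80),
        torch.rand(1, 256, 40, 40),
        torch.rand(1, 512, 20, 20),
    ]
    print(neck(feats).shape)  # expected: torch.Size([1, 256, 160, 160])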
|
the-stack_0_26389
|
#!/usr/bin/env python
import sys
import numpy as np
import gnsstools.glonass.l3i as l3i
import gnsstools.nco as nco
import gnsstools.io as io
import gnsstools.discriminator as discriminator
class tracking_state:
def __init__(self,fs,prn,code_p,code_f,code_i,carrier_p,carrier_f,carrier_i,mode):
self.fs = fs
self.prn = prn
self.code_p = code_p
self.code_f = code_f
self.code_i = code_i
self.carrier_p = carrier_p
self.carrier_f = carrier_f
self.carrier_i = carrier_i
self.mode = mode
self.prompt1 = 0 + 0*(1j)
self.carrier_e1 = 0
self.code_e1 = 0
self.eml = 0
# tracking loops
def track(x,s):
n = len(x)
fs = s.fs
nco.mix(x,-s.carrier_f/fs, s.carrier_p)
s.carrier_p = s.carrier_p - n*s.carrier_f/fs
s.carrier_p = np.mod(s.carrier_p,1)
cf = (s.code_f+s.carrier_f/117.5)/fs
    p_early = l3i.correlate(x, s.prn, 0, s.code_p-0.5, cf, l3i.l3i_code(s.prn))
    p_prompt = l3i.correlate(x, s.prn, 0, s.code_p, cf, l3i.l3i_code(s.prn))
    p_late = l3i.correlate(x, s.prn, 0, s.code_p+0.5, cf, l3i.l3i_code(s.prn))
if s.mode=='FLL_WIDE':
fll_k = 3.0
a = p_prompt
b = s.prompt1
e = discriminator.fll_atan2(a,b)
s.carrier_f = s.carrier_f + fll_k*e
s.prompt1 = p_prompt
elif s.mode=='FLL_NARROW':
fll_k = 0.3
a = p_prompt
b = s.prompt1
e = discriminator.fll_atan2(a,b)
s.carrier_f = s.carrier_f + fll_k*e
s.prompt1 = p_prompt
elif s.mode=='PLL':
pll_k1 = 0.1
pll_k2 = 5.0
e = discriminator.pll_costas(p_prompt)
e1 = s.carrier_e1
s.carrier_f = s.carrier_f + pll_k1*e + pll_k2*(e-e1)
s.carrier_e1 = e
# code loop
dll_k1 = 0.00002
dll_k2 = 0.2
s.early = np.absolute(p_early)
s.prompt = np.absolute(p_prompt)
s.late = np.absolute(p_late)
if (s.late+s.early)==0:
e = 0
else:
e = (s.late-s.early)/(s.late+s.early)
s.eml = e
e1 = s.code_e1
s.code_f = s.code_f + dll_k1*e + dll_k2*(e-e1)
s.code_e1 = e
s.code_p = s.code_p + n*cf
s.code_p = np.mod(s.code_p,l3i.code_length)
return p_prompt,s
#
# main program
#
# parse command-line arguments
# example:
# ./track-glonass-l3i.py /dev/stdin 68873142.857 -3255000.000 30 -200.0 2296.3
filename = sys.argv[1] # input data, raw file, i/q interleaved, 8 bit signed (two's complement)
fs = float(sys.argv[2]) # sampling rate, Hz
coffset = float(sys.argv[3]) # offset to L1 carrier, Hz (positive or negative)
prn = int(sys.argv[4]) # PRN code
doppler = float(sys.argv[5]) # initial doppler estimate from acquisition
code_offset = float(sys.argv[6]) # initial code offset from acquisition
fp = open(filename,"rb")
n = int(fs*0.001*((l3i.code_length-code_offset)/l3i.code_length)) # align with 1 ms code boundary
x = io.get_samples_complex(fp,n)
code_offset += n*1000.0*l3i.code_length/fs
s = tracking_state(fs=fs, prn=prn, # initialize tracking state
code_p=code_offset, code_f=l3i.chip_rate, code_i=0,
carrier_p=0, carrier_f=doppler, carrier_i=0,
mode='PLL')
block = 0
coffset_phase = 0.0
while True:
if s.code_p<l3i.code_length/2:
n = int(fs*0.001*(l3i.code_length-s.code_p)/l3i.code_length)
else:
n = int(fs*0.001*(2*l3i.code_length-s.code_p)/l3i.code_length)
x = io.get_samples_complex(fp,n)
if x is None:
break
nco.mix(x,-coffset/fs,coffset_phase)
coffset_phase = coffset_phase - n*coffset/fs
coffset_phase = np.mod(coffset_phase,1)
p_prompt,s = track(x,s)
vars = block, np.real(p_prompt), np.imag(p_prompt), s.carrier_f, s.code_f-l3i.chip_rate, (180/np.pi)*np.angle(p_prompt), s.early, s.prompt, s.late
print('%d %f %f %f %f %f %f %f %f' % vars)
block = block + 1
# if (block%100)==0:
# sys.stderr.write("%d\n"%block)
# if block==2000:
# s.mode = 'PLL'
|
the-stack_0_26390
|
from lib.utils.renderer import opengl_utils
import numpy as np
from lib.utils.pysixd import transform
from sklearn.neighbors import NearestNeighbors
def rgbd_to_point_cloud(K, depth):
vs, us = depth.nonzero()
zs = depth[vs, us]
xs = ((us - K[0, 2]) * zs) / float(K[0, 0])
ys = ((vs - K[1, 2]) * zs) / float(K[1, 1])
pts = np.array([xs, ys, zs]).T
return pts
def nearest_neighbor(src, dst):
'''
Find the nearest (Euclidean) neighbor in dst for each point in src
Input:
src: Nxm array of points
dst: Nxm array of points
Output:
distances: Euclidean distances of the nearest neighbor
indices: dst indices of the nearest neighbor
'''
assert src.shape == dst.shape
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(dst)
distances, indices = neigh.kneighbors(src, return_distance=True)
return distances.ravel(), indices.ravel()
def best_fit_transform(A, B, depth_only=False, no_depth=False):
'''
Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
Input:
A: Nxm numpy array of corresponding points
B: Nxm numpy array of corresponding points
Returns:
T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
R: mxm rotation matrix
t: mx1 translation vector
'''
assert A.shape == B.shape
# get number of dimensions
m = A.shape[1]
# translate points to their centroids
centroid_A = np.mean(A, axis=0)
centroid_B = np.mean(B, axis=0)
AA = A - centroid_A
BB = B - centroid_B
if depth_only == True and no_depth == False:
R = np.eye(3)
t = centroid_B.T - centroid_A.T
# t = np.array([0, 0, t[2]])
else:
# rotation matrix
H = np.dot(AA.T, BB)
U, S, Vt = np.linalg.svd(H)
R = np.dot(Vt.T, U.T)
# special reflection case
if np.linalg.det(R) < 0:
Vt[m - 1, :] *= -1
R = np.dot(Vt.T, U.T)
t = centroid_B.T - np.dot(R, centroid_A.T)
if no_depth == True and depth_only == False:
t = np.array([t[0], t[1], 0])
T = np.identity(m + 1)
T[:m, :m] = R
T[:m, m] = t
return T, R, t
def icp(A, B, init_pose=None, max_iterations=200, tolerance=0.001, verbose=False, depth_only=False, no_depth=False):
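    '''
    Iterative Closest Point: iteratively refines a transformation that aligns points A to points B.
    Input:
        A: Nxm numpy array of source points
        B: Nxm numpy array of destination points
        init_pose: optional (m+1)x(m+1) homogeneous transformation applied to A before iterating
        max_iterations: maximum number of refinement iterations
        tolerance: stop when the change in mean nearest-neighbor distance falls below this value
        depth_only/no_depth: restrict each per-iteration fit as in best_fit_transform
    Output:
        T: final (m+1)x(m+1) homogeneous transformation that maps A on to B
        distances: nearest-neighbor distances from the last iteration
        i: number of iterations performed
    '''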
assert A.shape == B.shape
# get number of dimensions
m = A.shape[1]
# make points homogeneous, copy them to maintain the originals
src = np.ones((m + 1, A.shape[0]))
dst = np.ones((m + 1, B.shape[0]))
src[:m, :] = np.copy(A.T)
dst[:m, :] = np.copy(B.T)
# apply the initial pose estimation
if init_pose is not None:
src = np.dot(init_pose, src)
prev_error = 0
for i in range(max_iterations):
# find the nearest neighbors between the current source and destination points
distances, indices = nearest_neighbor(src[:m, :].T, dst[:m, :].T)
# compute the transformation between the current source and nearest destination points
T, _, _ = best_fit_transform(src[:m, :].T, dst[:m, indices].T, depth_only=depth_only, no_depth=no_depth)
# update the current source
src = np.dot(T, src)
mean_error = np.mean(distances)
# print mean_error
# check error
if np.abs(prev_error - mean_error) < tolerance:
break
prev_error = mean_error
# calculate final transformation
T, _, _ = best_fit_transform(A, src[:m, :].T, depth_only=depth_only, no_depth=no_depth)
    if verbose:
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3d projection)
        ax = plt.figure().add_subplot(111, projection='3d')
        ax.scatter(src[0, :], src[1, :], src[2, :], label='estimated', marker='.', c='red')
        plt.legend()
        plt.show()
return T, distances, i
class ICPRefiner:
def __init__(self, model, im_size):
self.renderer = opengl_utils.DepthRender(model, im_size)
self.im_size = im_size
def refine(self, depth_crop, R_est, t_est, K_test, depth_only=False, no_depth=False, max_mean_dist_factor=2.0):
depth = self.renderer.render(self.im_size, 100, 10000, K_test, R_est, t_est)
synthetic_pts = rgbd_to_point_cloud(K_test, depth)
centroid_synthetic_pts = np.mean(synthetic_pts, axis=0)
try:
max_mean_dist = np.max(np.linalg.norm(synthetic_pts - centroid_synthetic_pts, axis=1))
        except ValueError:  # synthetic_pts can be empty if the render produced no depth
return (R_est, t_est)
real_depth_pts = rgbd_to_point_cloud(K_test, depth_crop)
real_synmean_dist = np.linalg.norm(real_depth_pts - centroid_synthetic_pts, axis=1)
real_depth_pts = real_depth_pts[real_synmean_dist < max_mean_dist_factor * max_mean_dist]
if len(real_depth_pts) < len(synthetic_pts) / 20.:
print('not enough visible points')
R_refined = R_est
t_refined = t_est
else:
N = 3000
sub_idcs_real = np.random.choice(len(real_depth_pts), np.min([len(real_depth_pts), len(synthetic_pts), N]))
sub_idcs_syn = np.random.choice(len(synthetic_pts), np.min([len(real_depth_pts), len(synthetic_pts), N]))
T, distances, iterations = icp(synthetic_pts[sub_idcs_syn], real_depth_pts[sub_idcs_real], tolerance=0.0000005, depth_only=depth_only, no_depth=no_depth)
if no_depth == True:
angle, _, _ = transform.rotation_from_matrix(T)
angle_change_limit = 20 * np.pi / 180.
if np.abs(angle) > angle_change_limit:
T = np.eye(4)
H_est = np.zeros((4, 4))
# R_est, t_est is from model to camera
H_est[3, 3] = 1
H_est[:3, 3] = t_est
H_est[:3, :3] = R_est
H_est_refined = np.dot(T, H_est)
R_refined = H_est_refined[:3, :3]
t_refined = H_est_refined[:3, 3]
return R_refined, t_refined
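# Usage sketch for icp() on its own (ICPRefiner additionally needs the OpenGL depth
# renderer): recovers a known rotation/translation between two random point clouds.
# Only numpy and scikit-learn are exercised, but the module's own imports must resolve.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    src_pts = rng.rand(500, 3)
    ang = np.deg2rad(10.0)
    R_true = np.array([[np.cos(ang), -np.sin(ang), 0.0],
                       [np.sin(ang), np.cos(ang), 0.0],
                       [0.0, 0.0, 1.0]])
    t_true = np.array([0.05, -0.02, 0.10])
    dst_pts = src_pts.dot(R_true.T) + t_true
    T_est, distances, n_iter = icp(src_pts, dst_pts, tolerance=1e-8)
    print(np.round(T_est, 3), "mean residual:", distances.mean(), "iterations:", n_iter)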
|
the-stack_0_26391
|
from tkinter import *
from tkinter import messagebox
from typing import Callable
# Creates and runs the tkinter UI
class TkinterApplication(object):
__root: Tk = None
__quitListener: Callable[[], None] = None
def __init__(self, credential: bool, quitListener: Callable[[], None]):
self.__quitListener = quitListener
self.__root = Tk()
self.__root.geometry('640x480')
self.__root.title('Godville следилка')
self.__root.protocol("WM_DELETE_WINDOW", self.quit)
if credential:
from .credential.credentional import CredentionalView
self.__view = CredentionalView(self.__root)
else:
from .info.info import InfoView
self.__view = InfoView(self.__root)
    # Fully shuts down the application - also invokes the quit listener
def quit(self):
if messagebox.askokcancel('Выход', 'Вы действительно хотите выйти?'):
if self.__quitListener:
self.__quitListener()
self.destroy()
    # Shuts down only the tkinter GUI
def destroy(self):
if self.__root:
self.__root.destroy()
self.__root = None
def run(self):
if (not self.isStarted()):
self.__root.mainloop()
def isStarted(self):
return self.__root.winfo_ismapped() == 1
|
the-stack_0_26392
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 6 11:49:28 2020
@author: juanporras
"""
import pandas as pd
import numpy as np
import json
import urllib.request
from urllib.request import urlopen
config = {'displayModeBar': False}
from cleaning_datas import df
df_US=df[(df["Country_Region"]=="US")]
df_US = df_US.groupby(['Last_Update', 'Country_Region','Province_State']).sum().loc[:,['Confirmed','Recovered','Deaths']].reset_index()
df_US["Last_Update"] = pd.to_datetime(df_US["Last_Update"]).dt.strftime('%m/%d/%Y')
Raw_Capital_dict = {
'Alabama': 'Montgomery',
'Alaska': 'Juneau',
'Arizona':'Phoenix',
'Arkansas':'Little Rock',
'California': 'Sacramento',
'Colorado':'Denver',
'Connecticut':'Hartford',
'Delaware':'Dover',
'Florida': 'Tallahassee',
'Georgia': 'Atlanta',
'Hawaii': 'Honolulu',
'Idaho': 'Boise',
    'Illinois': 'Springfield',
'Indiana': 'Indianapolis',
    'Iowa': 'Des Moines',
'Kansas': 'Topeka',
'Kentucky': 'Frankfort',
'Louisiana': 'Baton Rouge',
'Maine': 'Augusta',
'Maryland': 'Annapolis',
'Massachusetts': 'Boston',
'Michigan': 'Lansing',
'Minnesota': 'St. Paul',
'Mississippi': 'Jackson',
'Missouri': 'Jefferson City',
'Montana': 'Helena',
'Nebraska': 'Lincoln',
    'Nevada': 'Carson City',
'New Hampshire': 'Concord',
'New Jersey': 'Trenton',
'New Mexico': 'Santa Fe',
'New York': 'Albany',
'North Carolina': 'Raleigh',
'North Dakota': 'Bismarck',
'Ohio': 'Columbus',
'Oklahoma': 'Oklahoma City',
'Oregon': 'Salem',
'Pennsylvania': 'Harrisburg',
    'Rhode Island': 'Providence',
'South Carolina': 'Columbia',
    'South Dakota': 'Pierre',
'Tennessee': 'Nashville',
'Texas': 'Austin',
'Utah': 'Salt Lake City',
'Vermont': 'Montpelier',
'Virginia': 'Richmond',
'Washington': 'Olympia',
'West Virginia': 'Charleston',
'Wisconsin': 'Madison',
'Wyoming': 'Cheyenne'
}
Capital_dict = dict(map(reversed, Raw_Capital_dict.items()))
df_US['State'] = df_US['Province_State'].replace(Capital_dict)
State_dict = {
'Alabama': 'AL',
'Alaska': 'AK',
'American Samoa': 'AS',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District of Columbia': 'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Guam': 'GU',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Northern Mariana Islands':'MP',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Puerto Rico': 'PR',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virgin Islands': 'VI',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY'
}
State_inverse_dict = dict(map(reversed, State_dict.items()))
list_us=df_US.loc[df_US["Country_Region"]=="US","State"].reset_index(drop=True)
for elem in range(0,len(list_us)):
if len(list_us[elem].split(", ",1))==2:
list_us[elem]=list_us[elem].split(", ",1)[1].replace(".","")[0:2]
if list_us[elem]=="US":
list_us[elem]=float("NaN")
else:
if list_us[elem].split(", ",1)[0] in State_dict:
list_us[elem]=State_dict[list_us[elem].split(", ",1)[0]]
else:
if list_us[elem].split(", ",1)[0]=="Chicago":
list_us[elem]="IL"
else:
list_us[elem]=float("NaN")
df_US['State_Code'] = list_us
### Load Json File
url_us="https://raw.githubusercontent.com/jgoodall/us-maps/master/geojson/state.geo.json"
with urlopen(url_us) as response_us:
states_us = json.load(response_us)
for i in range(0,len(states_us["features"])):
states_us["features"][i]["id"] = states_us["features"][i]["properties"]["STUSPS10"]
States = []
for i in range(0,len(states_us["features"])):
state = states_us["features"][i]["id"]
States.append(state)
S1 = set(States)
S2 = set(df_US['State_Code'].unique())
S2-S1
# Centers for Disease Control and Prevention (CDC)
# Provisional Covid19 Death Count by week, ending date and state (Deaths)
deaths = pd.read_csv("https://data.cdc.gov/api/views/r8kw-7aab/rows.csv?accessType=DOWNLOAD")
# Race and Hispanic Origin (Deaths)
Demographic = pd.read_csv("https://data.cdc.gov/api/views/pj7m-y5uh/rows.csv?accessType=DOWNLOAD")
Demographic['State Name'] = Demographic['State'].replace(State_inverse_dict)
# Three Different Datasets
cond1 = (Demographic['Indicator']=='Distribution of COVID-19 deaths (%)')
cond2 = (Demographic['Indicator']=='Weighted distribution of population (%)')
cond3 = (Demographic['Indicator']=='Unweighted distribution of population (%)')
Deaths_Covid = Demographic[cond1].drop(columns=['Indicator','Footnote'])
Weighted_pop = Demographic[cond2]
Unweighted_pop = Demographic[cond3]
# Tests DATASET
Current_State = pd.read_csv('https://covidtracking.com/api/v1/states/current.csv')
Current_State['state_name'] = Current_State['state'].replace(State_inverse_dict)
|
the-stack_0_26394
|
import os
import numpy as np
from PIL import Image
import tensorflow as tf
import xml.etree.ElementTree as ET
from utils.container import Container
class VOCDataset:
class_names = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
def __init__(self, data_dir, split, transform=None, target_transform=None, keep_difficult=False):
"""Dataset for VOC data.
Args:
data_dir: the root of the VOC2007 or VOC2012 dataset, the directory contains the following sub-directories:
Annotations, ImageSets, JPEGImages, SegmentationClass, SegmentationObject.
"""
self.data_dir = data_dir
self.split = split
self.transform = transform
self.target_transform = target_transform
image_sets_file = os.path.join(self.data_dir, "ImageSets", "Main", "%s.txt" % self.split)
self.ids = VOCDataset._read_image_ids(image_sets_file)
self.keep_difficult = keep_difficult
self.class_dict = {class_name: i for i, class_name in enumerate(self.class_names)}
def generate(self):
for index in range(len(self.ids)):
image_id = self.ids[index]
boxes, labels, is_difficult = self._get_annotation(image_id)
if not self.keep_difficult:
boxes = boxes[is_difficult == 0]
labels = labels[is_difficult == 0]
image = self._read_image(image_id)
if self.transform:
image, boxes, labels = self.transform(image, boxes, labels)
if self.target_transform:
boxes, labels = self.target_transform(boxes, labels)
yield image, boxes, labels, index
def get_annotation(self, index):
image_id = self.ids[index]
return image_id, self._get_annotation(image_id)
def __len__(self):
return len(self.ids)
@staticmethod
def _read_image_ids(image_sets_file):
ids = []
with open(image_sets_file) as f:
for line in f:
ids.append(line.rstrip())
return ids
def _get_annotation(self, image_id):
annotation_file = os.path.join(self.data_dir, "Annotations", "%s.xml" % image_id)
objects = ET.parse(annotation_file).findall("object")
boxes = []
labels = []
is_difficult = []
for obj in objects:
class_name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
            # VOC dataset format follows Matlab, in which indexes start from 1
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
boxes.append([x1, y1, x2, y2])
labels.append(self.class_dict[class_name])
is_difficult_str = obj.find('difficult').text
is_difficult.append(int(is_difficult_str) if is_difficult_str else 0)
return (np.array(boxes, dtype=np.float32),
np.array(labels, dtype=np.int64),
np.array(is_difficult, dtype=np.uint8))
def get_img_info(self, index):
img_id = self.ids[index]
annotation_file = os.path.join(self.data_dir, "Annotations", "%s.xml" % img_id)
anno = ET.parse(annotation_file).getroot()
size = anno.find("size")
im_info = tuple(map(int, (size.find("height").text, size.find("width").text)))
return {"height": im_info[0], "width": im_info[1]}
def _read_image(self, image_id):
image_file = os.path.join(self.data_dir, "JPEGImages", "%s.jpg" % image_id)
image = Image.open(image_file).convert("RGB")
image = np.array(image)
return image
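# Usage sketch: wraps the generator above in a tf.data pipeline. The VOC root path is
# hypothetical and must contain the usual Annotations/ImageSets/JPEGImages folders for
# the chosen split; the module's own imports must resolve.
if __name__ == "__main__":
    voc = VOCDataset(data_dir="/data/VOCdevkit/VOC2007", split="train")
    ds = tf.data.Dataset.from_generator(
        voc.generate,
        output_types=(tf.uint8, tf.float32, tf.int64, tf.int32))
    for image, boxes, labels, index in ds.take(1):
        print(image.shape, boxes.shape, labels.numpy(), int(index))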
|
the-stack_0_26395
|
from hwt.hdl.constants import DIRECTION
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.hdlType import HdlType
from hwt.hdl.types.struct import HStruct, HStructField, HStructFieldMeta
from hwt.interfaces.agents.structIntf import StructIntfAgent
from hwt.interfaces.std import Signal, VldSynced, RegCntrl, BramPort_withoutClk
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.interfaceLevel.mainBases import InterfaceBase
from hwt.synthesizer.interfaceLevel.unitImplHelpers import getSignalName
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
from hwt.doc_markers import internal
class StructIntf(Interface):
"""
Create dynamic interface based on HStruct or HUnion description
:ivar _fieldsToInterfaces: dictionary {field from HStruct template:
sub interface for it}
:ivar _structT: HStruct instance used as template for this interface
:param _instantiateFieldFn: function(FieldTemplateItem instance)
return interface instance
"""
def __init__(self, structT, instantiateFieldFn,
masterDir=DIRECTION.OUT,
loadConfig=True):
Interface.__init__(self,
masterDir=masterDir,
loadConfig=loadConfig)
self._structT = structT
self._instantiateFieldFn = instantiateFieldFn
self._fieldsToInterfaces = {}
def _declr(self):
_t = self._structT
if isinstance(_t, HStruct):
fields = _t.fields
else:
fields = _t.fields.values()
self._fieldsToInterfaces[self._structT] = self
for field in fields:
# skip padding
if field.name is not None:
# generate interface based on struct field
intf = self._instantiateFieldFn(self, field)
assert field not in self._fieldsToInterfaces
self._fieldsToInterfaces[field] = intf
setattr(self, field.name, intf)
if isinstance(intf, StructIntf):
intf._fieldsToInterfaces = self._fieldsToInterfaces
def _initSimAgent(self):
self._ag = StructIntfAgent(self)
class IntfMap(list):
"""
Container of interface map
Items can be Interface/RtlSignal or (type/interface/None/IntfMap, name).
None is used for padding.
"""
pass
@internal
def _HTypeFromIntfMap(intf):
name = getSignalName(intf)
if isinstance(intf, (RtlSignalBase, Signal)):
dtype = intf._dtype
elif isinstance(intf, VldSynced):
dtype = intf.data._dtype
elif isinstance(intf, RegCntrl):
dtype = intf.din._dtype
elif isinstance(intf, BramPort_withoutClk):
dtype = Bits(int(intf.DATA_WIDTH))[2 ** int(intf.ADDR_WIDTH)]
else:
dtype, name = intf
assert isinstance(dtype, HdlType)
assert isinstance(name, str)
return (dtype, name)
@internal
def HTypeFromIntfMapItem(interfaceMapItem):
isTerminal = False
if isinstance(interfaceMapItem, (InterfaceBase, RtlSignalBase)):
dtype, nameOrPrefix = _HTypeFromIntfMap(interfaceMapItem)
isTerminal = True
else:
typeOrListOfInterfaces, nameOrPrefix = interfaceMapItem
if isinstance(typeOrListOfInterfaces, list) and not isinstance(typeOrListOfInterfaces, IntfMap):
# list of HType instances for array
parts = []
arrayItem_t = None
for item in typeOrListOfInterfaces:
if isinstance(item, IntfMap):
t = HTypeFromIntfMap(item)
else:
t = HTypeFromIntfMapItem(item).dtype
if arrayItem_t is None:
arrayItem_t = t
else:
assert arrayItem_t == t, (
"all items in array has to have same type", arrayItem_t, t)
parts.append(t)
dtype = arrayItem_t[len(parts)]
elif isinstance(typeOrListOfInterfaces, HdlType):
dtype = typeOrListOfInterfaces
isTerminal = True
elif isinstance(typeOrListOfInterfaces,
(InterfaceBase, RtlSignalBase)):
# renamed interface, ignore original name
dtype = _HTypeFromIntfMap(typeOrListOfInterfaces)[0]
isTerminal = True
elif isinstance(typeOrListOfInterfaces, IntfMap):
dtype = HTypeFromIntfMap(typeOrListOfInterfaces)
else:
# tuple (tuple of interfaces, prefix)
assert isinstance(typeOrListOfInterfaces,
tuple), typeOrListOfInterfaces
dtype = HTypeFromIntfMap(typeOrListOfInterfaces)
assert isinstance(nameOrPrefix, str) or nameOrPrefix is None, nameOrPrefix
f = HStructField(dtype, nameOrPrefix)
if not isTerminal:
f.meta = HStructFieldMeta(split=True)
return f
def HTypeFromIntfMap(interfaceMap):
"""
    Generate a flattened register map as an HStruct
    :param interfaceMap: sequence of
        tuple (type, name) (creates a standard struct field member),
        or an interface (creates a struct field from that interface),
        or an instance of a hdl type (used as padding),
        or tuple (list of interfaces, name)
    :return: HStruct built from the generated HStructField items
"""
structFields = []
for m in interfaceMap:
f = HTypeFromIntfMapItem(m)
structFields.append(f)
return HStruct(*structFields)
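# Usage sketch: builds an HStruct from the (type, name) tuple form described in the
# docstring above; a None name stands for padding. The 32 bit widths are arbitrary and
# this assumes a working hwt installation.
if __name__ == "__main__":
    regs_t = HTypeFromIntfMap(IntfMap([
        (Bits(32), "control"),
        (Bits(32), None),
        (Bits(32), "status"),
    ]))
    print(regs_t)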
|
the-stack_0_26396
|
import os
import os.path
from pprint import pprint
from typing import Any
from aat import Strategy, Event, Order, Side, Trade
class ReceivedStrategy(Strategy):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(ReceivedStrategy, self).__init__(*args, **kwargs)
self._trade = True
self._received_count = 0
async def onStart(self, event: Event) -> None:
pprint(self.instruments())
pprint(self.positions())
for i in self.instruments():
await self.subscribe(i)
async def onTrade(self, event: Event) -> None:
pprint(event)
trade: Trade = event.target # type: ignore
if self._trade and trade.my_order is None:
await self.newOrder(
Order(
1,
trade.price,
Side.BUY,
trade.instrument,
trade.exchange,
)
)
self._trade = False
async def onReceived(self, event: Event) -> None:
pprint(event)
self._trade = True
self._received_count += 1
async def onExit(self, event: Event) -> None:
print("Finishing...")
if __name__ == "__main__":
from aat import TradingEngine, parseConfig
cfg = parseConfig(
[
"--trading_type",
"backtest",
"--load_accounts",
"--exchanges",
"aat.exchange.generic:CSV,{}".format(
os.path.join(os.path.dirname(__file__), "data", "aapl.csv")
),
"--strategies",
"aat.strategy.sample.csv.received:ReceivedStrategy",
]
)
print(cfg)
t = TradingEngine(**cfg)
t.start()
assert t.strategies[0]._received_count == 64
|
the-stack_0_26397
|
#!/usr/bin/env python3
'''
This script copies the files that are needed from scheduler-and-mapper. If more
files are needed, please add them to FILES_TO_COPY.
'''
import os, shutil
FILES_TO_COPY = (
"agency_common.py",
"agency_nyu.py",
"agency_walking.py",
"agency_walking_dynamic.py",
"agency_walking_static.py",
"common.py",
"common_nyu.py",
"common_walking_static.py",
"departure_lister.py",
"itinerary_finder.py",
"NYU.pickle",
"Stop Locations.csv",
"stops.py",
"WalkingStatic.pickle",
)
SOURCE_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"scheduler-and-mapper"
)
DESTINATION_DIR = os.path.join(
os.path.dirname(__file__),
"scheduler-and-mapper"
)
def main():
# Delete the destination folder.
try:
shutil.rmtree(DESTINATION_DIR)
except FileNotFoundError:
pass
# Create the destination folder.
os.mkdir(DESTINATION_DIR)
# Copy the files.
for filename in FILES_TO_COPY:
shutil.copyfile(
os.path.join(SOURCE_DIR, filename),
os.path.join(DESTINATION_DIR, filename)
)
if __name__ == "__main__":
main()
|
the-stack_0_26398
|
""" generic mechanism for marking and selecting python functions. """
import inspect
class MarkerError(Exception):
"""Error in use of a pytest marker/attribute."""
def pytest_namespace():
return {'mark': MarkGenerator()}
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
'-k',
action="store", dest="keyword", default='', metavar="EXPRESSION",
help="only run tests which match the given substring expression. "
"An expression is a python evaluatable expression "
"where all names are substring-matched against test names "
"and their parent classes. Example: -k 'test_method or test "
"other' matches all test functions and classes whose name "
"contains 'test_method' or 'test_other'. "
"Additionally keywords are matched to classes and functions "
"containing extra names in their 'extra_keyword_matches' set, "
"as well as functions which have names assigned directly to them."
)
group._addoption(
"-m",
action="store", dest="markexpr", default="", metavar="MARKEXPR",
help="only run tests matching given mark expression. "
"example: -m 'mark1 and not mark2'."
)
group.addoption(
"--markers", action="store_true",
help="show markers (builtin, plugin and per-project ones)."
)
parser.addini("markers", "markers for test functions", 'linelist')
def pytest_cmdline_main(config):
import _pytest.config
if config.option.markers:
config._do_configure()
tw = _pytest.config.create_terminal_writer(config)
for line in config.getini("markers"):
name, rest = line.split(":", 1)
tw.write("@pytest.mark.%s:" % name, bold=True)
tw.line(rest)
tw.line()
config._ensure_unconfigure()
return 0
pytest_cmdline_main.tryfirst = True
def pytest_collection_modifyitems(items, config):
keywordexpr = config.option.keyword
matchexpr = config.option.markexpr
if not keywordexpr and not matchexpr:
return
    # pytest used to allow "-" for negating,
    # but today we only allow "-" at the beginning; use "not" instead.
    # We will probably remove "-" altogether soon.
if keywordexpr.startswith("-"):
keywordexpr = "not " + keywordexpr[1:]
selectuntil = False
if keywordexpr[-1:] == ":":
selectuntil = True
keywordexpr = keywordexpr[:-1]
remaining = []
deselected = []
for colitem in items:
if keywordexpr and not matchkeyword(colitem, keywordexpr):
deselected.append(colitem)
else:
if selectuntil:
keywordexpr = None
if matchexpr:
if not matchmark(colitem, matchexpr):
deselected.append(colitem)
continue
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
class MarkMapping:
"""Provides a local mapping for markers where item access
resolves to True if the marker is present. """
def __init__(self, keywords):
mymarks = set()
for key, value in keywords.items():
if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator):
mymarks.add(key)
self._mymarks = mymarks
def __getitem__(self, name):
return name in self._mymarks
class KeywordMapping:
"""Provides a local mapping for keywords.
Given a list of names, map any substring of one of these names to True.
"""
def __init__(self, names):
self._names = names
def __getitem__(self, subname):
for name in self._names:
if subname in name:
return True
return False
def matchmark(colitem, markexpr):
"""Tries to match on any marker names, attached to the given colitem."""
return eval(markexpr, {}, MarkMapping(colitem.keywords))
def matchkeyword(colitem, keywordexpr):
"""Tries to match given keyword expression to given collector item.
Will match on the name of colitem, including the names of its parents.
Only matches names of items which are either a :class:`Class` or a
:class:`Function`.
Additionally, matches on names in the 'extra_keyword_matches' set of
any item, as well as names directly assigned to test functions.
"""
mapped_names = set()
# Add the names of the current item and any parent items
import pytest
for item in colitem.listchain():
if not isinstance(item, pytest.Instance):
mapped_names.add(item.name)
# Add the names added as extra keywords to current or parent items
for name in colitem.listextrakeywords():
mapped_names.add(name)
# Add the names attached to the current function through direct assignment
if hasattr(colitem, 'function'):
for name in colitem.function.__dict__:
mapped_names.add(name)
mapping = KeywordMapping(mapped_names)
if " " not in keywordexpr:
# special case to allow for simple "-k pass" and "-k 1.3"
return mapping[keywordexpr]
elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]:
return not mapping[keywordexpr[4:]]
return eval(keywordexpr, {}, mapping)
def pytest_configure(config):
import pytest
if config.option.strict:
pytest.mark._config = config
class MarkGenerator:
""" Factory for :class:`MarkDecorator` objects - exposed as
a ``pytest.mark`` singleton instance. Example::
import pytest
@pytest.mark.slowtest
def test_function():
pass
will set a 'slowtest' :class:`MarkInfo` object
on the ``test_function`` object. """
def __getattr__(self, name):
if name[0] == "_":
raise AttributeError("Marker name must NOT start with underscore")
if hasattr(self, '_config'):
self._check(name)
return MarkDecorator(name)
def _check(self, name):
try:
if name in self._markers:
return
except AttributeError:
pass
self._markers = l = set()
for line in self._config.getini("markers"):
beginning = line.split(":", 1)
x = beginning[0].split("(", 1)[0]
l.add(x)
if name not in self._markers:
raise AttributeError("%r not a registered marker" % (name,))
def istestfunc(func):
return hasattr(func, "__call__") and \
getattr(func, "__name__", "<lambda>") != "<lambda>"
class MarkDecorator:
""" A decorator for test functions and test classes. When applied
it will create :class:`MarkInfo` objects which may be
:ref:`retrieved by hooks as item keywords <excontrolskip>`.
MarkDecorator instances are often created like this::
mark1 = pytest.mark.NAME # simple MarkDecorator
mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator
and can then be applied as decorators to test functions::
@mark2
def test_function():
pass
When a MarkDecorator instance is called it does the following:
1. If called with a single class as its only positional argument and no
additional keyword arguments, it attaches itself to the class so it
gets applied automatically to all test cases found in that class.
2. If called with a single function as its only positional argument and
no additional keyword arguments, it attaches a MarkInfo object to the
function, containing all the arguments already stored internally in
the MarkDecorator.
3. When called in any other case, it performs a 'fake construction' call,
i.e. it returns a new MarkDecorator instance with the original
MarkDecorator's content updated with the arguments passed to this
call.
Note: The rules above prevent MarkDecorator objects from storing only a
single function or class reference as their positional argument with no
additional keyword or positional arguments.
"""
def __init__(self, name, args=None, kwargs=None):
self.name = name
self.args = args or ()
self.kwargs = kwargs or {}
@property
def markname(self):
return self.name # for backward-compat (2.4.1 had this attr)
def __repr__(self):
d = self.__dict__.copy()
name = d.pop('name')
return "<MarkDecorator %r %r>" % (name, d)
def __call__(self, *args, **kwargs):
""" if passed a single callable argument: decorate it with mark info.
otherwise add *args/**kwargs in-place to mark information. """
if args and not kwargs:
func = args[0]
is_class = inspect.isclass(func)
if len(args) == 1 and (istestfunc(func) or is_class):
if is_class:
if hasattr(func, 'pytestmark'):
mark_list = func.pytestmark
if not isinstance(mark_list, list):
mark_list = [mark_list]
# always work on a copy to avoid updating pytestmark
# from a superclass by accident
mark_list = mark_list + [self]
func.pytestmark = mark_list
else:
func.pytestmark = [self]
else:
holder = getattr(func, self.name, None)
if holder is None:
holder = MarkInfo(
self.name, self.args, self.kwargs
)
setattr(func, self.name, holder)
else:
holder.add(self.args, self.kwargs)
return func
kw = self.kwargs.copy()
kw.update(kwargs)
args = self.args + args
return self.__class__(self.name, args=args, kwargs=kw)
class MarkInfo:
""" Marking object created by :class:`MarkDecorator` instances. """
def __init__(self, name, args, kwargs):
#: name of attribute
self.name = name
#: positional argument list, empty if none specified
self.args = args
#: keyword argument dictionary, empty if nothing specified
self.kwargs = kwargs.copy()
self._arglist = [(args, kwargs.copy())]
def __repr__(self):
return "<MarkInfo %r args=%r kwargs=%r>" % (
self.name, self.args, self.kwargs
)
def add(self, args, kwargs):
""" add a MarkInfo with the given args and kwargs. """
self._arglist.append((args, kwargs))
self.args += args
self.kwargs.update(kwargs)
def __iter__(self):
""" yield MarkInfo objects each relating to a marking-call. """
for args, kwargs in self._arglist:
yield MarkInfo(self.name, args, kwargs)
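# Usage sketch: how a "-k" expression is evaluated against collected names through
# KeywordMapping, the same eval() path that matchkeyword() uses; the names are made up.
if __name__ == "__main__":
    mapping = KeywordMapping({"TestClass", "test_method_one", "test_other"})
    print(eval("test_method or test_other", {}, mapping))       # True
    print(eval("test_method and not test_other", {}, mapping))  # False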
|
the-stack_0_26399
|
from __future__ import absolute_import
import os
import re
import subprocess
XROOTD_URL_RE = re.compile(r'^(?P<redirector>root://[^/]+)//(?P<path>.*)$')
def parse_xrootd_url(url):
"""Return the redirector and path from an XRootD url."""
match = XROOTD_URL_RE.match(url)
return match.group('redirector'), match.group('path')
def xrdfs_makedirs(url):
"""Recursively create an xrdfs directory."""
redirector, path = parse_xrootd_url(url)
subprocess.check_call(['xrdfs', redirector, 'mkdir', '-p', path])
def xrdfs_isdir(url):
"""Return True if the url is a directory."""
redirector, path = parse_xrootd_url(url)
try:
# Redirect stderr messages such as "[ERROR] Query response negative" to /dev/null.
with open(os.devnull, 'w') as devnull:
subprocess.check_output(['xrdfs', redirector, 'stat', '-q', 'IsDir', path], stderr=devnull)
except subprocess.CalledProcessError as e:
if e.returncode == 55:
return False
else:
raise
else:
return True
def xrdfs_locate_root_files(url):
"""Recurse into a directory and return the urls of all ROOT files encountered.
If the url points to a ROOT file, then return the url.
"""
if xrdfs_isdir(url):
redirector, path = parse_xrootd_url(url)
urls = []
output = subprocess.check_output(['xrdfs', redirector, 'ls', path]).splitlines()
for path in output:
url = '//'.join([redirector, path])
if os.path.splitext(path)[1] == '.root':
urls.append(url)
elif xrdfs_isdir(url):
urls.extend(xrdfs_locate_root_files(url))
return urls
else:
return [url]
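# Usage sketch: parse_xrootd_url needs no XRootD installation, so it can be checked
# directly; the URL is illustrative. The other helpers shell out to the xrdfs CLI and
# need a reachable redirector.
if __name__ == "__main__":
    redirector, path = parse_xrootd_url(
        'root://cmsxrootd.fnal.gov//store/user/example/file.root')
    print(redirector)  # root://cmsxrootd.fnal.gov
    print(path)        # store/user/example/file.root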
|
the-stack_0_26400
|
import os
import matplotlib.pyplot as plt
import json
import sys
sys.path.insert(0, '../')
from forest3D import lidar_IO,treeDetector,ground_removal
# size limitation: 120m x 120m
output_dir = '/home/lloyd/Documents/projects/forest_3d_app/outputs'
# configure raster
config_dict = {'raster_layers': ['vertical_density', 'canopy_height', 'mean_colour1'], 'support_window': [1, 1, 1],
'normalisation': 'rescale+histeq','doHisteq':[True,True,True],'res': 0.2, 'gridSize': [600, 600, 1000]}
rasterMaker = treeDetector.RasterDetector(**config_dict)
for i in [1,3,4,5,6,8,10,12,13,15,16,17,20,21,22,25,26]:
path = '/home/lloyd/Documents/datasets/lidar/forestry_usyd/extracts/plots_for_training/V1_Scanner1_161011_220153_crop%03d.asc'%(i)
xyz_data = lidar_IO.XYZreadFromCSV(path,delimiter=' ',x=0,y=1,z=2,returns=8)
MAX_RETURN_INTENSITY = 50000
xyz_data[:,3] /= MAX_RETURN_INTENSITY
offset = [0,0,0]
# remove ground, output pcd to help with labelling
xyz_data_gr,returns_gr = ground_removal.removeGround(xyz_data[:,:3],offset,returns=xyz_data[:,3],thresh=2.0,
proc_path=output_dir, name='')
ground_pts = ground_removal.load_ground_surface(os.path.join(output_dir,'_ground_surface.ply'))
# create raster
raster = rasterMaker.rasterise(xyz_data_gr,colour_data=returns_gr,ground_pts=ground_pts)
# save rasters
filename = os.path.split(path)[1].split('.')[0]
plt.imsave(os.path.join(output_dir,'raster_'+filename+'.jpg'), raster)
with open(os.path.join(output_dir,'raster_config.json'), 'w') as outfile:
json.dump(config_dict, outfile)
|
the-stack_0_26401
|
"""
A setuptools setup script for the Data Profiler application and Python libraries
"""
# To use a consistent encoding
from codecs import open
import os
from os import path
from datetime import datetime
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# Load package version
from dataprofiler.version import __version__
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
# Get the install_requirements from requirements.txt
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
required_packages = f.read().splitlines()
# Get the ML extra requirements from requirements-ml.txt
with open(path.join(here, 'requirements-ml.txt'), encoding='utf-8') as f:
ml_packages = f.read().splitlines()
resource_dir = 'resources/'
default_labeler_files = [(d, [os.path.join(d, f) for f in files])
for d, folders, files in os.walk(resource_dir)]
DESCRIPTION = "What is in your data? Detect schema, statistics and entities in almost any file."
setup(
name='DataProfiler',
version=__version__,
python_requires='>=3.6',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
# The project's main homepage.
url='https://github.com/capitalone/data-profiler',
# Author details
author='Jeremy Goodsitt, Austin Walters, Anh Truong, Grant Eden',
# Choose your license
license='Apache License, Version 2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Topic :: Education',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Security',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 3 or both.
'Programming Language :: Python :: 3',
],
# What does your project relate to?
keywords='Data Investigation',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# packages=find_packages(exclude=['src/test', 'src/sample']),
packages=find_packages(exclude=["tests", "examples"]),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=required_packages,
# List of run-time dependencies for the labeler. These will be installed
# by pip when someone installs the project[<label>].
extras_require={ 'ml': ml_packages },
# # If there are data files included in your packages that need to be
# # installed, specify them here. If using Python 2.6 or less, then these
# # have to be included in MANIFEST.in as well.
# package_data={
# 'data': [],
# },
#
# # Although 'package_data' is the preferred approach, in some case you may
# # need to place data files outside of your packages. See:
# # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=default_labeler_files,
include_package_data=True,
)
print("find_packages():", find_packages())
|
the-stack_0_26402
|
import os
import torch
import numpy as np
from torchvision import datasets, transforms, models
import torch.optim as optim
import facenet_pytorch
from facenet_pytorch import MTCNN
from tqdm import tqdm
import torch.nn as nn
import random
import argparse
from collections import OrderedDict
from loadOpenFace import prepareOpenFace
### PARAMETERS
data_dir = 'dataset/cropped_images'
n_epochs = 10
chk_path = 'models/AvengersClassifier.pth' # Default checkpoint path
###
### Parse Arguments
parser = argparse.ArgumentParser(
description='Trains the model and saves the model')
parser.add_argument('-p', '--path', default=chk_path, help='Checkpoint path')
parser.add_argument('-d', '--dataset', default=data_dir, help='Dataset path')
parser.add_argument('-e', '--epochs', type=int, default=n_epochs, help='Number of Epochs')
args = parser.parse_args()
chk_path = args.path
n_epochs = args.epochs
data_dir = args.dataset
### Check if CUDA GPU is available
useCuda = torch.cuda.is_available()
if useCuda:
    print('CUDA is available')
device = torch.device('cuda:0')
else:
    print('CUDA is not available')
device = torch.device('cpu')
### Use MTCNN to Crop and Align Images
import warnings
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
mtcnn = MTCNN(
image_size=160, margin=0, min_face_size=20,
thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
device=device
)
aligned_data_dir = data_dir + '_aligned'
dataset = datasets.ImageFolder(data_dir, transform=transforms.Resize((512, 512)))
dataset.idx_to_class = {i:c for c, i in dataset.class_to_idx.items()}
# Replace the class label with the new path for storing aligned data
dataset.samples = [(p, p.replace(data_dir, aligned_data_dir)) for p, _ in dataset.samples]
batch_size = 32
num_workers = 0 if os.name == 'nt' else 8
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
collate_fn=facenet_pytorch.training.collate_pil)
# Run MTCNN for all the images and save them in new directory
for i, (image, path) in enumerate(tqdm(dataloader, desc="Converting")):
mtcnn(image, save_path=path)
# Delete to save memory
del mtcnn
del dataloader
print()
#### Augmenting the Dataset
class AugmentDataset(datasets.ImageFolder):
def __init__(self, root, transform = None):
super().__init__(root, transform)
self.all_labels = [int(x[1]) for x in self.imgs]
self.horizontalTransform = transforms.RandomHorizontalFlip(1)
def __len__(self):
return 2 * super().__len__()
def __getitem__(self, item):
if item < super().__len__():
image, label = super().__getitem__(item)
else:
item -= super().__len__()
image, label = super().__getitem__(item)
image = self.horizontalTransform(image)
return image, label
transform = transforms.Compose([transforms.Resize(96),
transforms.ToTensor()])
dataset = AugmentDataset(aligned_data_dir, transform=transform)
idx_to_class = {i:c for c, i in dataset.class_to_idx.items()}
total_count = len(dataset)
train_count = int(0.8 * total_count)
test_count = total_count - train_count
train_dataset, test_dataset = torch.utils.data.random_split(dataset,
[train_count, test_count])
print('Total Images : ', total_count)
print('Num of Train Images : ', len(train_dataset))
print('Num of Test Images : ', len(test_dataset))
print()
batch_size = 64
num_workers = 0 if os.name == 'nt' else 8
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
num_workers=num_workers, shuffle=True)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,
num_workers=num_workers, shuffle=True)
### Generate triplets Function
def generate_triplets(images, labels):
positive_images = []
negative_images = []
batch_size = len(labels)
for i in range(batch_size):
anchor_label = labels[i]
positive_list = []
negative_list = []
for j in range(batch_size):
if j != i:
if labels[j] == anchor_label:
positive_list.append(j)
else:
negative_list.append(j)
positive_images.append(images[random.choice(positive_list)])
negative_images.append(images[random.choice(negative_list)])
positive_images = torch.stack(positive_images)
negative_images = torch.stack(negative_images)
return positive_images, negative_images
### Define Triplet Loss
class TripletLoss(nn.Module):
def __init__(self, alpha=0.2):
super(TripletLoss, self).__init__()
self.alpha = alpha
def calc_euclidean(self, x1, x2):
return (x1 - x2).pow(2).sum(1)
def forward(self, anchor, positive, negative): # (batch_size , emb_size)
distance_positive = self.calc_euclidean(anchor, positive)
distance_negative = self.calc_euclidean(anchor, negative)
losses = torch.relu(distance_positive - distance_negative + self.alpha)
return losses.mean()
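# Sanity-check sketch for TripletLoss on throwaway random embeddings: with identical
# anchor/positive/negative both distances are zero and the loss collapses to alpha,
# while a far-away negative drives it to zero.
_demo_emb = torch.randn(4, 128)
print('TripletLoss on identical triplets:', TripletLoss(alpha=0.2)(_demo_emb, _demo_emb, _demo_emb).item())
print('TripletLoss with an easy negative:', TripletLoss(alpha=0.2)(_demo_emb, _demo_emb, _demo_emb + 10.0).item())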
# Load inception model
model = prepareOpenFace(useCuda)
model.eval()
print("Inception Model Loaded")
# Define optimizer and loss for inception model
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_fn = TripletLoss()
# Training the inception model
for epoch in range(n_epochs):
train_loss = 0
count = 0
## Training Loop
model.train()
for batch, (images, labels) in enumerate(tqdm(train_dataloader, \
desc="Training", leave=False)):
positives , negatives = generate_triplets(images, labels)
# Move tensor to device
images, labels = images.to(device), labels.to(device)
positives, negatives = positives.to(device), negatives.to(device)
optimizer.zero_grad()
        # Siamese network: the same model embeds anchor, positive and negative
anchor_out = model(images)
positive_out = model(positives)
negative_out = model(negatives)
# Get the loss
loss = loss_fn(anchor_out, positive_out, negative_out)
loss.backward()
optimizer.step()
train_loss += loss.detach().item()
count = len(labels)
print('Epoch : %d/%d - Loss: %0.4f' %
(epoch+1, n_epochs, train_loss / count))
train_loss = 0.0
model.eval()
print("Inception Model : Training Done\n")
### Transfer Learning the classifier
n_classes = len(dataset.class_to_idx)
# Define the classifier model
classifier_model = nn.Sequential(OrderedDict([
("nn4_small_v2", model),
("fc", nn.Linear(736, n_classes))
]))
classifier_model = classifier_model.to(device)
# Freeze the parameters in the nn4_small_v2 layer
for param in classifier_model.parameters():
param.requires_grad = False
for param in classifier_model.fc.parameters():
param.requires_grad = True
# Define optimizer and loss for classifier model
optimizer = optim.Adam(classifier_model.fc.parameters(), lr=0.01)
loss_fn = torch.nn.CrossEntropyLoss()
### Training the Classifier
print("Training Classifier")
def train(n_epochs, dataloader, model, optimizer, loss_fn):
    '''Returns the trained classifier model.'''
for epoch in range(n_epochs):
train_loss = 0.0
count = 0
# Training loop
model.train()
for batch, (images, labels) in enumerate(tqdm(dataloader, \
desc="Training", leave=False)):
# Move Tensor to appropriate device
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
out = model(images)
# Get the loss
loss = loss_fn(out, labels)
loss.backward()
optimizer.step()
train_loss += loss.detach().item()
count = len(labels)
print('Epoch : %d/%d - Loss: %0.4f' %
(epoch+1, n_epochs, train_loss / count))
train_loss = 0.0
model.eval()
print("Classifier Model : Training Done\n")
return model
# call the train function
classifier_model = train(10 , train_dataloader, classifier_model, optimizer, loss_fn)
### Testing the classifier
def test(dataloader, model, loss_fn):
test_loss = 0.0
total = 0
correct = 0
# Testing loop
model.eval()
for batch, (images, labels) in enumerate(tqdm(dataloader, \
desc="Testing")):
# Move Tensor to appropriate device
images, labels = images.to(device), labels.to(device)
with torch.no_grad():
out = model(images)
loss = loss_fn(out, labels)
test_loss += loss.detach().item()
# Get the class with max probability
pred = out.data.max(1, keepdim=True)[1]
# Compare predictions with true label
correct += np.sum(np.squeeze(pred.eq(labels.view_as(pred))).cpu().numpy())
total += labels.size(0)
print('Test Loss: {:.6f}\n'.format(test_loss/total))
print('Test Accuracy : %d%% (%d/%d)' % (
100 * correct / total, correct, total))
print()
return(float(correct / total))
# call the test function
current_accuracy = test(test_dataloader, classifier_model, loss_fn)
### Define Function to save model
def save_model(model, chk_path, idx_to_class, current_accuracy=1.0):
    '''Saves the model only if no previous checkpoint exists or
    the new accuracy is at least as good as the saved one.'''
try:
checkpoint = torch.load(chk_path, map_location=torch.device('cpu'))
if(current_accuracy < checkpoint['accuracy']):
print("Not Saving, Previous model was better")
return
except FileNotFoundError:
print("Previous model not found")
torch.save({
'model_state_dict' : model.state_dict(),
'accuracy' : current_accuracy,
'idx_to_class': idx_to_class
}, chk_path)
print("Model Saved : %s" % chk_path)
save_model(classifier_model, chk_path, idx_to_class, current_accuracy)
|
the-stack_0_26403
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import tir, script
from tvm.script import tir as T
from tvm.tir import stmt_functor
from tvm.tir.usmp import utils as usmp_utils
from tvm.target import Target
def _replace_stmt_with_buf_var_names(buffer_info_map):
"""helper to replace tir.allocates with buffer names"""
new_buffer_info_map = dict()
for k, v in buffer_info_map.items():
new_buffer_info_map[v.buffer_var.name] = k
return new_buffer_info_map
def _verify_conflicts(main_buf_name, conflicting_buf_names, buffer_info_map):
"""helper to check expected liveness conflicts"""
buf_info = buffer_info_map[main_buf_name]
for conflict in buf_info.conflicts:
assert conflict.name_hint in conflicting_buf_names
def _get_allocates(primfunc):
"""helper to extract all allocate nodes by name"""
allocates = dict()
def get_allocate(stmt):
if isinstance(stmt, tvm.tir.Allocate):
allocates[str(stmt.buffer_var.name)] = stmt
stmt_functor.post_order_visit(primfunc.body, get_allocate)
return allocates
def _assign_poolinfos_to_allocates_in_primfunc(primfunc, pool_infos):
"""helper to assing poolinfos to allocate nodes in a tir.PrimFunc"""
def set_poolinfos(stmt):
if isinstance(stmt, tvm.tir.Allocate):
return tvm.tir.Allocate(
buffer_var=stmt.buffer_var,
dtype=stmt.dtype,
extents=stmt.extents,
condition=stmt.condition,
body=stmt.body,
annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: pool_infos},
)
return primfunc.with_body(stmt_functor.ir_transform(primfunc.body, None, set_poolinfos))
def _assign_poolinfos_to_allocates_in_irmodule(mod, pool_infos):
"""helper to assing poolinfos to allocate nodes in a IRModule"""
ret = tvm.IRModule()
for global_var, basefunc in mod.functions.items():
if isinstance(basefunc, tvm.tir.PrimFunc):
ret[global_var] = _assign_poolinfos_to_allocates_in_primfunc(basefunc, pool_infos)
return ret
def _assign_targets_to_primfuncs_irmodule(mod, target):
"""helper to assign target for PrimFunc in a IRModule"""
ret = tvm.IRModule()
for global_var, basefunc in mod.functions.items():
if isinstance(basefunc, tvm.tir.PrimFunc):
ret[global_var] = basefunc.with_attr("target", target)
return ret
def _check_max_workspace_size(buffer_pool_allocations, pool_info, size):
max_workspace_size = 0
for buffer_info, pool_allocation in buffer_pool_allocations.items():
if pool_allocation.pool_info == pool_info:
size_candidate = pool_allocation.byte_offset + buffer_info.size_bytes
if size_candidate > max_workspace_size:
max_workspace_size = size_candidate
assert max_workspace_size == size
def test_no_pool_error():
target = Target("c")
tiny_workspace_pool = usmp_utils.PoolInfo(
pool_name="tiny_workspace",
target_access={target: usmp_utils.PoolInfo.READ_WRITE_ACCESS},
size_hint_bytes=10,
)
bi_a = usmp_utils.BufferInfo(
name_hint="bi_a", size_bytes=10, pool_candidates=[tiny_workspace_pool]
)
bi_b = usmp_utils.BufferInfo(
name_hint="bi_b", size_bytes=10, pool_candidates=[tiny_workspace_pool]
)
bi_c = usmp_utils.BufferInfo(
name_hint="bi_c", size_bytes=10, pool_candidates=[tiny_workspace_pool]
)
bi_a.set_conflicts([bi_b])
bi_b.set_conflicts([bi_c])
bi_c.set_conflicts([bi_a])
buffer_info_arr = [bi_a, bi_b, bi_c]
    fusmp_algo = tvm.get_global_func("tir.usmp.algo.greedy_by_size")
with pytest.raises(
tvm.TVMError, match="TVM USMP Error: the space available in the provided pools exceeded"
):
buffer_pool_allocations = fusmp_algo(buffer_info_arr, 0)
@pytest.mark.parametrize("algorithm", ["greedy_by_size", "greedy_by_conflicts"])
def test_name_based_ordering(algorithm):
""" This checks when the size and conlicts are same a stable result is generated"""
def _test():
target = Target("c")
global_workspace_pool = usmp_utils.PoolInfo(
pool_name="global_workspace",
target_access={target: usmp_utils.PoolInfo.READ_WRITE_ACCESS},
)
bi_a = usmp_utils.BufferInfo(
name_hint="bi_a", size_bytes=10, pool_candidates=[global_workspace_pool]
)
bi_b = usmp_utils.BufferInfo(
name_hint="bi_b", size_bytes=10, pool_candidates=[global_workspace_pool]
)
bi_c = usmp_utils.BufferInfo(
name_hint="bi_c", size_bytes=10, pool_candidates=[global_workspace_pool]
)
bi_a.set_conflicts([bi_b])
bi_b.set_conflicts([bi_c])
bi_c.set_conflicts([bi_a])
buffer_info_arr = [bi_a, bi_b, bi_c]
fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
buffer_pool_allocations = fusmp_algo(buffer_info_arr, 0)
assert buffer_pool_allocations[bi_a].byte_offset == 20
assert buffer_pool_allocations[bi_b].byte_offset == 10
assert buffer_pool_allocations[bi_c].byte_offset == 0
    # This is tested several times to check stability
for x in range(0, 10):
_test()
@pytest.mark.parametrize(
["algorithm", "workspace_size"],
[("greedy_by_size", 140), ("greedy_by_conflicts", 140)],
)
def test_linear(algorithm, workspace_size):
"""
The test case here represent BufferInfo objects
that could get generated for a linear sequence
such as :
(Op A)
|
bi_a
|
(Op B)
|
bi_b
|
.
.
.
(Op F)
|
bi_f
"""
target = Target("c")
global_workspace_pool = usmp_utils.PoolInfo(
pool_name="global_workspace",
target_access={target: usmp_utils.PoolInfo.READ_WRITE_ACCESS},
)
bi_a = usmp_utils.BufferInfo(
name_hint="bi_a", size_bytes=10, pool_candidates=[global_workspace_pool]
)
bi_b = usmp_utils.BufferInfo(
name_hint="bi_b", size_bytes=20, pool_candidates=[global_workspace_pool]
)
bi_c = usmp_utils.BufferInfo(
name_hint="bi_c", size_bytes=100, pool_candidates=[global_workspace_pool]
)
bi_d = usmp_utils.BufferInfo(
name_hint="bi_d", size_bytes=40, pool_candidates=[global_workspace_pool]
)
bi_e = usmp_utils.BufferInfo(
name_hint="bi_e", size_bytes=50, pool_candidates=[global_workspace_pool]
)
bi_f = usmp_utils.BufferInfo(
name_hint="bi_f", size_bytes=50, pool_candidates=[global_workspace_pool]
)
# Creating conflicts for a linear graph
bi_a.set_conflicts([bi_b])
bi_b.set_conflicts([bi_a, bi_c])
bi_c.set_conflicts([bi_b, bi_d])
bi_d.set_conflicts([bi_c, bi_e])
bi_e.set_conflicts([bi_d, bi_f])
bi_f.set_conflicts([bi_e])
buffer_info_arr = [bi_a, bi_b, bi_c, bi_d, bi_e, bi_f]
fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
buffer_pool_allocations = fusmp_algo(buffer_info_arr, 0)
_check_max_workspace_size(buffer_pool_allocations, global_workspace_pool, workspace_size)
@pytest.mark.parametrize(
["algorithm", "workspace_size"],
[("greedy_by_size", 190), ("greedy_by_conflicts", 320)],
)
def test_fanout(algorithm, workspace_size):
"""
The test case here represent BufferInfo objects
that could get generated for a fanout topology
such as :
(Op A)
|
bi_a ---------
| |
(Op B) (Op C)
| |
bi_b bi_c
| |
(Op D) (Op E)
| |
bi_d bi_e
| |
(Op F) ------
|
bi_f
|
(Op G)
|
bi_g
"""
target = Target("c")
global_workspace_pool = usmp_utils.PoolInfo(
pool_name="global_workspace",
target_access={target: usmp_utils.PoolInfo.READ_WRITE_ACCESS},
)
bi_a = usmp_utils.BufferInfo(
name_hint="bi_a", size_bytes=10, pool_candidates=[global_workspace_pool]
)
bi_b = usmp_utils.BufferInfo(
name_hint="bi_b", size_bytes=20, pool_candidates=[global_workspace_pool]
)
bi_c = usmp_utils.BufferInfo(
name_hint="bi_c", size_bytes=100, pool_candidates=[global_workspace_pool]
)
bi_d = usmp_utils.BufferInfo(
name_hint="bi_d", size_bytes=40, pool_candidates=[global_workspace_pool]
)
bi_e = usmp_utils.BufferInfo(
name_hint="bi_e", size_bytes=50, pool_candidates=[global_workspace_pool]
)
bi_f = usmp_utils.BufferInfo(
name_hint="bi_f", size_bytes=60, pool_candidates=[global_workspace_pool]
)
bi_g = usmp_utils.BufferInfo(
name_hint="bi_g", size_bytes=70, pool_candidates=[global_workspace_pool]
)
    # Creating conflicts for a fanout graph
bi_a.set_conflicts([bi_b, bi_c])
bi_b.set_conflicts([bi_a, bi_c, bi_e])
bi_c.set_conflicts([bi_e, bi_a, bi_b, bi_d])
bi_d.set_conflicts([bi_b, bi_f, bi_c, bi_e])
bi_e.set_conflicts([bi_c, bi_f, bi_b, bi_d])
bi_f.set_conflicts([bi_d, bi_e, bi_f])
bi_g.set_conflicts([bi_f])
buffer_info_arr = [bi_a, bi_b, bi_c, bi_d, bi_e, bi_f, bi_g]
fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
buffer_pool_allocations = fusmp_algo(buffer_info_arr, 0)
_check_max_workspace_size(buffer_pool_allocations, global_workspace_pool, workspace_size)
# fmt: off
@tvm.script.ir_module
class MobilenetStructure:
@T.prim_func
def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
placeholder_4 = T.match_buffer(placeholder_2, [1, 224, 224, 3], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
placeholder_5 = T.match_buffer(placeholder_3, [], dtype="int16", elem_offset=0, align=128, offset_factor=1)
T_subtract_1 = T.match_buffer(T_subtract, [1, 224, 224, 3], dtype="int16", elem_offset=0, align=128, offset_factor=1)
# body
for ax0_ax1_fused_1 in T.serial(0, 224):
for ax2_1, ax3_inner_1 in T.grid(224, 3):
T.store(T_subtract_1.data, (((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1), (T.cast(T.load("uint8", placeholder_4.data, (((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)), "int16") - T.load("int16", placeholder_5.data, 0)), True)
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", "tir.noalias": True})
placeholder_65 = T.match_buffer(placeholder_62, [1, 224, 224, 3], dtype="int16", elem_offset=0, align=128, offset_factor=1)
placeholder_66 = T.match_buffer(placeholder_63, [7, 7, 3, 64], dtype="int16", elem_offset=0, align=128, offset_factor=1)
placeholder_67 = T.match_buffer(placeholder_64, [1, 1, 1, 64], dtype="int32", elem_offset=0, align=128, offset_factor=1)
T_cast_21 = T.match_buffer(T_cast_20, [1, 112, 112, 64], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
# body
PaddedInput_7 = T.allocate([157323], "int16", "global")
for i0_i1_fused_7 in T.serial(0, 229):
for i2_7, i3_7 in T.grid(229, 3):
T.store(PaddedInput_7, (((i0_i1_fused_7*687) + (i2_7*3)) + i3_7), T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), T.load("int16", placeholder_65.data, ((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)), T.int16(0), dtype="int16"), True)
for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
Conv2dOutput_7 = T.allocate([64], "int32", "global")
for ff_3 in T.serial(0, 64):
T.store(Conv2dOutput_7, ff_3, 0, True)
for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
T.store(Conv2dOutput_7, ff_3, (T.load("int32", Conv2dOutput_7, ff_3) + (T.cast(T.load("int16", PaddedInput_7, (((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)), "int32")*T.cast(T.load("int16", placeholder_66.data, ((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)), "int32"))), True)
for ax3_inner_7 in T.serial(0, 64):
T.store(T_cast_21.data, ((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7), T.cast(T.max(T.min(T.q_multiply_shift((T.load("int32", Conv2dOutput_7, ax3_inner_7) + T.load("int32", placeholder_67.data, ax3_inner_7)), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8"), True)
@T.prim_func
def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
placeholder_29 = T.match_buffer(placeholder_28, [1, 112, 112, 64], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
T_cast_7 = T.match_buffer(T_cast_6, [1, 56, 56, 64], dtype="int16", elem_offset=0, align=128, offset_factor=1)
# body
tensor_2 = T.allocate([200704], "uint8", "global")
for ax0_ax1_fused_4 in T.serial(0, 56):
for ax2_4 in T.serial(0, 56):
for ax3_init in T.serial(0, 64):
T.store(tensor_2, (((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init), T.uint8(0), True)
for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
T.store(tensor_2, (((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2), T.max(T.load("uint8", tensor_2, (((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)), T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), T.load("uint8", placeholder_29.data, (((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)), T.uint8(0), dtype="uint8")), True)
for ax0_ax1_fused_5 in T.serial(0, 56):
for ax2_5, ax3_3 in T.grid(56, 64):
T.store(T_cast_7.data, (((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3), T.cast(T.load("uint8", tensor_2, (((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)), "int16"), True)
@T.prim_func
def run_model(input: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
# body
T.attr("default", "device_id", 0)
T.attr("default", "device_type", 1)
sid_9 = T.allocate([301056], "int8", "global")
sid_8 = T.allocate([802816], "int8", "global")
T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32"))
__tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize(
["algorithm", "fast_memory_size", "slow_memory_size"],
[("greedy_by_size", 200704, 1418528), ("greedy_by_conflicts", 200704, 1418528)],
)
def test_mobilenet_subgraph(algorithm, fast_memory_size, slow_memory_size):
target = Target("c")
fast_memory_pool = usmp_utils.PoolInfo(
pool_name="fast_memory",
target_access={target: usmp_utils.PoolInfo.READ_WRITE_ACCESS},
size_hint_bytes=200704,
)
slow_memory_pool = usmp_utils.PoolInfo(
pool_name="slow_memory", target_access={target: usmp_utils.PoolInfo.READ_WRITE_ACCESS}
)
tir_mod = MobilenetStructure
tir_mod = _assign_targets_to_primfuncs_irmodule(tir_mod, target)
tir_mod = _assign_poolinfos_to_allocates_in_irmodule(
tir_mod, [fast_memory_pool, slow_memory_pool]
)
main_func = tir_mod["run_model"]
buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(main_func, tir_mod)
assert buffer_info_analysis.memory_pressure == 1117718
fcreate_array_bi = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")
buffer_info_arr = fcreate_array_bi(buffer_info_analysis.buffer_info_stmts)
fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
buffer_pool_allocations = fusmp_algo(buffer_info_arr, buffer_info_analysis.memory_pressure)
buffer_info_map_names = dict()
for buf_info in buffer_info_arr:
buffer_info_map_names[buf_info.name_hint] = buf_info
# check conflicts
_verify_conflicts("PaddedInput_7", ["sid_9", "sid_8", "Conv2dOutput_7"], buffer_info_map_names)
_verify_conflicts("tensor_2", ["sid_8"], buffer_info_map_names)
_verify_conflicts("sid_9", ["PaddedInput_7"], buffer_info_map_names)
_verify_conflicts(
"sid_8", ["PaddedInput_7", "Conv2dOutput_7", "tensor_2"], buffer_info_map_names
)
_verify_conflicts("Conv2dOutput_7", ["sid_8", "PaddedInput_7"], buffer_info_map_names)
_check_max_workspace_size(buffer_pool_allocations, slow_memory_pool, slow_memory_size)
_check_max_workspace_size(buffer_pool_allocations, fast_memory_pool, fast_memory_size)
# fmt: off
@tvm.script.ir_module
class ResnetStructure:
@T.prim_func
def tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast(placeholder: T.handle, placeholder_1: T.handle, T_cast: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [1, 75, 75, 64], dtype="uint8")
placeholder_3 = T.match_buffer(placeholder_1, [64], dtype="int32")
T_cast_1 = T.match_buffer(T_cast, [1, 75, 75, 64], dtype="int16")
# body
for ax0_ax1_fused, ax2, ax3_outer, ax3_inner in T.grid(75, 75, 4, 16):
T.store(T_cast_1.data, ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner, T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.load("uint8", placeholder_2.data, ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner), "int32") - 94, 1843157232, 31, 1, dtype="int32") + T.load("int32", placeholder_3.data, ax3_outer * 16 + ax3_inner), 255), 0), "uint8"), "int16"), True)
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1(placeholder_10: T.handle, placeholder_11: T.handle, placeholder_12: T.handle, T_cast_4: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", "tir.noalias": True})
placeholder_13 = T.match_buffer(placeholder_10, [1, 75, 75, 64], dtype="int16")
placeholder_14 = T.match_buffer(placeholder_11, [3, 3, 64, 64], dtype="int16")
placeholder_15 = T.match_buffer(placeholder_12, [1, 1, 1, 64], dtype="int32")
T_cast_5 = T.match_buffer(T_cast_4, [1, 75, 75, 64], dtype="int16")
# body
PaddedInput_1 = T.allocate([379456], "int16", "global")
for i0_i1_fused_1, i2_1, i3_1 in T.grid(77, 77, 64):
T.store(PaddedInput_1, i0_i1_fused_1 * 4928 + i2_1 * 64 + i3_1, T.if_then_else(1 <= i0_i1_fused_1 and i0_i1_fused_1 < 76 and 1 <= i2_1 and i2_1 < 76, T.load("int16", placeholder_13.data, i0_i1_fused_1 * 4800 + i2_1 * 64 + i3_1 - 4864), T.int16(0), dtype="int16"), True)
for ax0_ax1_fused_ax2_fused_1 in T.serial(0, 5625):
Conv2dOutput_1 = T.allocate([64], "int32", "global")
for ff_1 in T.serial(0, 64):
T.store(Conv2dOutput_1, ff_1, 0, True)
for ry, rx, rc_1 in T.grid(3, 3, 64):
T.store(Conv2dOutput_1, ff_1, T.load("int32", Conv2dOutput_1, ff_1) + T.cast(T.load("int16", PaddedInput_1, T.floordiv(ax0_ax1_fused_ax2_fused_1, 75) * 4928 + ry * 4928 + rx * 64 + T.floormod(ax0_ax1_fused_ax2_fused_1, 75) * 64 + rc_1), "int32") * T.cast(T.load("int16", placeholder_14.data, ry * 12288 + rx * 4096 + rc_1 * 64 + ff_1), "int32"), True)
for ax3_inner_2 in T.serial(0, 64):
T.store(T_cast_5.data, ax0_ax1_fused_ax2_fused_1 * 64 + ax3_inner_2, T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.load("int32", Conv2dOutput_1, ax3_inner_2) + T.load("int32", placeholder_15.data, ax3_inner_2), 1608879842, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16"), True)
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_(placeholder_16: T.handle, placeholder_17: T.handle, placeholder_18: T.handle, T_add: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", "tir.noalias": True})
placeholder_19 = T.match_buffer(placeholder_16, [1, 75, 75, 64], dtype="int16")
placeholder_20 = T.match_buffer(placeholder_17, [1, 1, 64, 256], dtype="int16")
placeholder_21 = T.match_buffer(placeholder_18, [1, 1, 1, 256], dtype="int32")
T_add_1 = T.match_buffer(T_add, [1, 75, 75, 256], dtype="int32")
# body
PaddedInput_2 = T.allocate([360000], "int16", "global")
for i0_i1_fused_2, i2_2, i3_2 in T.grid(75, 75, 64):
T.store(PaddedInput_2, i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2, T.load("int16", placeholder_19.data, i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2), True)
for ax0_ax1_fused_ax2_fused_2 in T.serial(0, 5625):
Conv2dOutput_2 = T.allocate([64], "int32", "global")
for ax3_outer_1 in T.serial(0, 4):
for ff_2 in T.serial(0, 64):
T.store(Conv2dOutput_2, ff_2, 0, True)
for rc_2 in T.serial(0, 64):
T.store(Conv2dOutput_2, ff_2, T.load("int32", Conv2dOutput_2, ff_2) + T.cast(T.load("int16", PaddedInput_2, ax0_ax1_fused_ax2_fused_2 * 64 + rc_2), "int32") * T.cast(T.load("int16", placeholder_20.data, rc_2 * 256 + ax3_outer_1 * 64 + ff_2), "int32"), True)
for ax3_inner_3 in T.serial(0, 64):
T.store(T_add_1.data, ax0_ax1_fused_ax2_fused_2 * 256 + ax3_outer_1 * 64 + ax3_inner_3, T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.load("int32", Conv2dOutput_2, ax3_inner_3) + T.load("int32", placeholder_21.data, ax3_outer_1 * 64 + ax3_inner_3), 1711626602, 31, -8, dtype="int32") + 132, 255), 0), "uint8"), "int32") - 132, 2094289803, 31, -2, dtype="int32") + 136, True)
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_(placeholder_22: T.handle, placeholder_23: T.handle, placeholder_24: T.handle, placeholder_25: T.handle, T_cast_6: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", "tir.noalias": True})
placeholder_29 = T.match_buffer(placeholder_22, [1, 75, 75, 64], dtype="int16")
placeholder_27 = T.match_buffer(placeholder_23, [1, 1, 64, 256], dtype="int16")
placeholder_26 = T.match_buffer(placeholder_24, [1, 1, 1, 256], dtype="int32")
placeholder_28 = T.match_buffer(placeholder_25, [1, 75, 75, 256], dtype="int32")
T_cast_7 = T.match_buffer(T_cast_6, [1, 75, 75, 256], dtype="uint8")
# body
PaddedInput_3 = T.allocate([360000], "int16", "global")
for i0_i1_fused_3, i2_3, i3_3 in T.grid(75, 75, 64):
T.store(PaddedInput_3, i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3, T.load("int16", placeholder_29.data, i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3), True)
for ax0_ax1_fused_ax2_fused_3 in T.serial(0, 5625):
Conv2dOutput_3 = T.allocate([64], "int32", "global")
for ax3_outer_2 in T.serial(0, 4):
for ff_3 in T.serial(0, 64):
T.store(Conv2dOutput_3, ff_3, 0, True)
for rc_3 in T.serial(0, 64):
T.store(Conv2dOutput_3, ff_3, T.load("int32", Conv2dOutput_3, ff_3) + T.cast(T.load("int16", PaddedInput_3, ax0_ax1_fused_ax2_fused_3 * 64 + rc_3), "int32") * T.cast(T.load("int16", placeholder_27.data, rc_3 * 256 + ax3_outer_2 * 64 + ff_3), "int32"), True)
for ax3_inner_4 in T.serial(0, 64):
T.store(T_cast_7.data, ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4, T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.load("int32", Conv2dOutput_3, ax3_inner_4) + T.load("int32", placeholder_26.data, ax3_outer_2 * 64 + ax3_inner_4), 1343014664, 31, -8, dtype="int32") + 136, 255), 0), "uint8"), "int32") - 136, 1073903788, 31, 1, dtype="int32") + T.load("int32", placeholder_28.data, ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4), 255), 0), "uint8"), True)
@T.prim_func
def tvmgen_default_run_model(input: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
# body
T.attr("default", "device_id", 0)
T.attr("default", "device_type", 1)
sid_2 = T.allocate([720000], "int8", "global")
sid_6 = T.allocate([5760000], "int8", "global")
sid_7 = T.allocate([720000], "int8", "global")
sid_8 = T.allocate([720000], "int8", "global")
T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", input, T.lookup_param("p0", dtype="handle"), sid_2, dtype="int32"))
T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", sid_2, T.lookup_param("p3", dtype="handle"), T.lookup_param("p4", dtype="handle"), sid_8, dtype="int32"))
T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", sid_8, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), sid_7, dtype="int32"))
T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", sid_7, T.lookup_param("p7", dtype="handle"), T.lookup_param("p8", dtype="handle"), sid_6, dtype="int32"))
T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", sid_2, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_6, output, dtype="int32"))
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast(placeholder_4: T.handle, placeholder_5: T.handle, placeholder_6: T.handle, T_cast_2: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", "tir.noalias": True})
placeholder_7 = T.match_buffer(placeholder_4, [1, 75, 75, 64], dtype="int16")
placeholder_8 = T.match_buffer(placeholder_5, [1, 1, 64, 64], dtype="int16")
placeholder_9 = T.match_buffer(placeholder_6, [1, 1, 1, 64], dtype="int32")
T_cast_3 = T.match_buffer(T_cast_2, [1, 75, 75, 64], dtype="int16")
# body
PaddedInput = T.allocate([360000], "int16", "global")
for i0_i1_fused, i2, i3 in T.grid(75, 75, 64):
T.store(PaddedInput, i0_i1_fused * 4800 + i2 * 64 + i3, T.load("int16", placeholder_7.data, i0_i1_fused * 4800 + i2 * 64 + i3), True)
for ax0_ax1_fused_ax2_fused in T.serial(0, 5625):
Conv2dOutput = T.allocate([64], "int32", "global")
for ff in T.serial(0, 64):
T.store(Conv2dOutput, ff, 0, True)
for rc in T.serial(0, 64):
T.store(Conv2dOutput, ff, T.load("int32", Conv2dOutput, ff) + T.cast(T.load("int16", PaddedInput, ax0_ax1_fused_ax2_fused * 64 + rc), "int32") * T.cast(T.load("int16", placeholder_8.data, rc * 64 + ff), "int32"), True)
for ax3_inner_1 in T.serial(0, 64):
T.store(T_cast_3.data, ax0_ax1_fused_ax2_fused * 64 + ax3_inner_1, T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.load("int32", Conv2dOutput, ax3_inner_1) + T.load("int32", placeholder_9.data, ax3_inner_1), 1843106743, 31, -6, dtype="int32"), 255), 0), "uint8"), "int16"), True)
__tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize(
["algorithm", "workspace_size"], [("greedy_by_size", 7920256), ("greedy_by_conflicts", 7200256)]
)
def test_resnet_subgraph(algorithm, workspace_size):
target = Target("c")
global_workspace_pool = usmp_utils.PoolInfo(
pool_name="global_workspace",
target_access={target: usmp_utils.PoolInfo.READ_WRITE_ACCESS},
)
tir_mod = ResnetStructure
tir_mod = _assign_targets_to_primfuncs_irmodule(tir_mod, target)
tir_mod = _assign_poolinfos_to_allocates_in_irmodule(tir_mod, [global_workspace_pool])
main_func = tir_mod["tvmgen_default_run_model"]
buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(main_func, tir_mod)
assert buffer_info_analysis.memory_pressure == 7200256
fcreate_array_bi = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")
buffer_info_arr = fcreate_array_bi(buffer_info_analysis.buffer_info_stmts)
fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
buffer_pool_allocations = fusmp_algo(buffer_info_arr, buffer_info_analysis.memory_pressure)
buffer_info_map_names = dict()
for buf_info in buffer_info_arr:
buffer_info_map_names[buf_info.name_hint] = buf_info
# check conflicts
_verify_conflicts(
"sid_7",
[
"PaddedInput_1",
"sid_2",
"Conv2dOutput_1",
"PaddedInput_2",
],
buffer_info_map_names,
)
_verify_conflicts(
"Conv2dOutput_3",
[
"PaddedInput_3",
"sid_6",
],
buffer_info_map_names,
)
_verify_conflicts(
"sid_6",
[
"Conv2dOutput_2",
"PaddedInput_2",
"sid_2",
"PaddedInput_3",
"Conv2dOutput_3",
],
buffer_info_map_names,
)
_verify_conflicts(
"Conv2dOutput",
[
"sid_8",
"sid_2",
"PaddedInput",
],
buffer_info_map_names,
)
_verify_conflicts(
"PaddedInput_3",
[
"sid_6",
"sid_2",
"Conv2dOutput_3",
],
buffer_info_map_names,
)
_verify_conflicts(
"Conv2dOutput_2",
[
"PaddedInput_2",
"sid_2",
"sid_6",
],
buffer_info_map_names,
)
_verify_conflicts(
"PaddedInput_1",
[
"sid_8",
"sid_2",
"sid_7",
"Conv2dOutput_1",
],
buffer_info_map_names,
)
_verify_conflicts(
"Conv2dOutput_1",
[
"sid_7",
"PaddedInput_1",
"sid_2",
],
buffer_info_map_names,
)
_verify_conflicts(
"PaddedInput",
[
"sid_2",
"sid_8",
"Conv2dOutput",
],
buffer_info_map_names,
)
_verify_conflicts(
"sid_8",
[
"PaddedInput",
"sid_2",
"Conv2dOutput",
"PaddedInput_1",
],
buffer_info_map_names,
)
_verify_conflicts(
"sid_2",
[
"PaddedInput",
"sid_8",
"Conv2dOutput",
"PaddedInput_1",
"sid_7",
"Conv2dOutput_1",
"PaddedInput_2",
"Conv2dOutput_2",
"sid_6",
"PaddedInput_3",
],
buffer_info_map_names,
)
_verify_conflicts(
"PaddedInput_2",
[
"sid_7",
"sid_2",
"Conv2dOutput_2",
"sid_6",
],
buffer_info_map_names,
)
_check_max_workspace_size(buffer_pool_allocations, global_workspace_pool, workspace_size)
|
the-stack_0_26404
|
import datetime
import logging
from pathlib import Path
import dateutil.tz
import regex
from bs4 import BeautifulSoup
from covid_berlin_scraper.model import Dashboard, DashboardStore
from covid_berlin_scraper.utils.http_utils import http_get
logger = logging.getLogger(__name__)
def download_dashboard(
url: str,
date_selector: str,
date_regex: regex.Regex,
date_regex_group: str,
default_tz: datetime.tzinfo,
**http_kwargs,
) -> Dashboard:
content = http_get(url, **http_kwargs)
soup = BeautifulSoup(content, 'lxml')
date_line = soup.select(date_selector)[0].contents[0]
m = date_regex.search(date_line)
date_str = m.group(date_regex_group)
return Dashboard(
timestamp=datetime.datetime.strptime(date_str, '%d.%m.%Y').replace(
tzinfo=default_tz
),
content=content,
)
def save_dashboard(dashboard: Dashboard, db_path: Path):
dashboard_store = DashboardStore(db_path)
dashboard_store.append(dashboard)
def main(cache_path: Path, config: dict):
default_tz = dateutil.tz.gettz(config['download_feed']['default_tz'])
if not default_tz:
raise Exception('Invalid time zone')
if 'url' in config['download_dashboard']:
urls = [config['download_dashboard']['url']]
else:
urls = config['download_dashboard']['urls']
for url in urls:
dashboard = download_dashboard(
url=url,
date_selector=config['parse_dashboard']['date_selector'],
date_regex=regex.compile(config['parse_dashboard']['date_regex']),
date_regex_group=config['parse_dashboard']['date_regex_group'],
default_tz=default_tz,
timeout=int(config['http']['timeout']),
user_agent=config['http']['user_agent'],
)
save_dashboard(dashboard, db_path=cache_path / 'db.sqlite3')
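# Illustrative sketch (added, not part of the original module): the config
# structure that main() expects, inferred from the keys accessed above.
# All concrete values below are placeholders, not the project's real settings.
EXAMPLE_CONFIG = {
    'download_feed': {'default_tz': 'Europe/Berlin'},
    'download_dashboard': {'url': 'https://example.org/corona-dashboard'},
    'parse_dashboard': {
        'date_selector': '.dashboard-date',
        'date_regex': r'(?P<date>\d{2}\.\d{2}\.\d{4})',
        'date_regex_group': 'date',
    },
    'http': {'timeout': '30', 'user_agent': 'covid-berlin-scraper-example'},
}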
|
the-stack_0_26405
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerLoadBalancingRulesOperations(object):
"""LoadBalancerLoadBalancingRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LoadBalancerLoadBalancingRuleListResult"]
"""Gets all the load balancing rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerLoadBalancingRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_06_01.models.LoadBalancerLoadBalancingRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerLoadBalancingRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerLoadBalancingRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
load_balancing_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.LoadBalancingRule"
"""Gets the specified load balancer load balancing rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param load_balancing_rule_name: The name of the load balancing rule.
:type load_balancing_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LoadBalancingRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_06_01.models.LoadBalancingRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancingRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'loadBalancingRuleName': self._serialize.url("load_balancing_rule_name", load_balancing_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LoadBalancingRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules/{loadBalancingRuleName}'} # type: ignore
|
the-stack_0_26406
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='rezero',
version='0.1.0',
author="Thomas Bachlechner, Bodhisattwa Prasad Majumder, Huanru Henry Mao, Garrison W. Cottrell, Julian McAuley",
author_email="[email protected]",
description="ReZero networks",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/majumderb/rezero",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
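# Illustrative note (added, not part of the original file): a setup.py like the
# one above is typically consumed with standard packaging tooling, e.g.
#   pip install .                        # install from the project root
#   python setup.py sdist bdist_wheel    # build distributable archives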
|
the-stack_0_26409
|
from __future__ import annotations
import re
import traceback
from typing import Any, Callable, Union
import revolt
from .command import Command
from .context import Context
from .errors import CheckError, CommandNotFound
from .view import StringView
__all__ = (
"CommandsMeta",
"CommandsClient"
)
quote_regex = re.compile(r"[\"']")
chunk_regex = re.compile(r"\S+")
class CommandsMeta(type):
_commands: list[Command]
def __new__(cls, name: str, bases: tuple[type, ...], attrs: dict[str, Any]):
commands: list[Command] = []
self = super().__new__(cls, name, bases, attrs)
for base in reversed(self.__mro__):
for value in base.__dict__.values():
if isinstance(value, Command):
commands.append(value)
self._commands = commands
return self
class CommandsClient(revolt.Client, metaclass=CommandsMeta):
"""Main class that adds commands, this class should be subclassed along with `revolt.Client`."""
_commands: list[Command]
def __init__(self, *args, **kwargs):
self.all_commands: dict[str, Command] = {}
for command in self._commands:
self.all_commands[command.name] = command
for alias in command.aliases:
self.all_commands[alias] = command
super().__init__(*args, **kwargs)
@property
def commands(self) -> list[Command]:
return list(set(self.all_commands.values()))
async def get_prefix(self, message: revolt.Message) -> Union[str, list[str]]:
"""Overwrite this function to set the prefix used for commands, this function is called for every message.
Parameters
-----------
message: :class:`Message`
The message that was sent
Returns
--------
Union[:class:`str`, list[:class:`str`]]
The prefix(s) for the commands
"""
raise NotImplementedError
def get_command(self, name: str) -> Command:
"""Gets a command.
Parameters
-----------
name: :class:`str`
The name or alias of the command
Returns
--------
:class:`Command`
The command with the name
"""
return self.all_commands[name]
def add_command(self, name: str, command: Command):
"""Adds a command, this is typically only used for dynamic commands, you should use the `commands.command` decorator for most usecases.
Parameters
-----------
name: :class:`str`
The name or alias of the command
command: :class:`Command`
The command to be added
"""
self.all_commands[name] = command
def get_view(self, message: revolt.Message) -> type[StringView]:
return StringView
def get_context(self, message: revolt.Message) -> type[Context]:
return Context
async def process_commands(self, message: revolt.Message) -> Any:
"""Processes commands, if you overwrite `Client.on_message` you should manually call this function inside the event.
Parameters
-----------
message: :class:`Message`
The message to process commands on
Returns
--------
Any
The return of the command, if any
"""
content = message.content
if not isinstance(content, str):
return
prefixes = await self.get_prefix(message)
if isinstance(prefixes, str):
prefixes = [prefixes]
for prefix in prefixes:
if content.startswith(prefix):
content = content[len(prefix):]
break
else:
return
if not content:
return
view = self.get_view(message)(content)
try:
command_name = view.get_next_word()
except StopIteration:
return
context_cls = self.get_context(message)
try:
command = self.get_command(command_name)
except KeyError:
context = context_cls(None, command_name, view, message, self)
return self.dispatch("command_error", context, CommandNotFound(command_name))
context = context_cls(command, command_name, view, message, self)
try:
self.dispatch("command", context)
if not await self.bot_check(context):
raise CheckError(f"the global check for the command failed")
if not await context.can_run():
raise CheckError(f"the check(s) for the command failed")
output = await context.invoke()
self.dispatch("after_command_invoke", context, output)
return output
except Exception as e:
self.dispatch("command_error", context, e)
@staticmethod
async def on_command_error(ctx: Context, error: Exception):
traceback.print_exception(type(error), error, error.__traceback__)
on_message = process_commands
async def bot_check(self, context: Context) -> bool:
"""A global check for the bot that stops commands from running on certain criteria.
Parameters
-----------
context: :class:`Context`
The context for the invokation of the command
Returns
--------
:class:`bool` represents if the command should run or not
"""
return True
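# Illustrative sketch (added, not part of the original module): a minimal
# subclass showing how `get_prefix` is typically overridden, as described in
# its docstring. The fixed "!" prefix is an arbitrary example value.
class _ExamplePrefixClient(CommandsClient):
    async def get_prefix(self, message: revolt.Message) -> Union[str, list[str]]:
        # A constant prefix; a real bot might look this up per server.
        return "!"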
|
the-stack_0_26411
|
import math
import os
import pdb # noqa
from typing import Any, Dict, List
import psutil
import torch
from torch import Tensor
import torch.nn as nn
from contactnets.utils import tensor_utils
import contactnets.utils.quaternion as quat
def elements_identical(li: List) -> bool:
"""Return true iff all elements of li are identical."""
if len(li) == 0:
return True
return li.count(li[0]) == len(li)
def filter_none(li: List) -> List:
"""Remove all None elements from li."""
return [i for i in li if (i is not None)]
def list_dict_swap(v: List[Dict[Any, Any]]) -> Dict[Any, List[Any]]:
"""Convert list of dicts to a dict of lists.
>>> list_dict_swap([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) == {'a': [1, 3], 'b': [2, 4]}
"""
return {k: [dic[k] for dic in v] for k in v[0]}
def transpose_lists(li: List[List[Any]]) -> List[List[Any]]:
"""Transpose list of lists as if it were a matrix."""
return list(map(list, zip(*li)))
def process_memory() -> float:
"""Return process memory usage in megabytes."""
process = psutil.Process(os.getpid())
return process.memory_info().rss * 1e-6
def compute_quadratic_loss(A: Tensor, b: Tensor, c: Tensor, x: Tensor) -> Tensor:
"""Compute a quadratic with specified coefficients and variable."""
return x.transpose(1, 2).bmm(A).bmm(x) + b.bmm(x) + c
def generate_normalizing_layer(data: Tensor) -> nn.Linear:
"""Create a linear layer which normalizes the input data Tensor.
Args:
data: batch_n x n
Returns:
A linear layer which normalizes each element [0, ..., n-1] along the batch_n dimension.
Namely, layer(data).mean(dim=0) will be all zeros, and layer(data).std(dim=0) will be
all ones. The only corner case is if all elements along a certain index are zero; i.e.
data[i, j] = 0 for all i. Then layer(data) will have zero mean and standard deviation
in that index. Note that layer(data).shape = data.shape.
"""
means = data.mean(dim=0)
stds = data.std(dim=0)
stds_recip = 1 / stds
stds_recip[stds_recip == float('Inf')] = 0
layer = nn.Linear(data.shape[1], data.shape[1], bias=True)
layer.weight = nn.Parameter(torch.diag(stds_recip), requires_grad=False)
layer.bias = nn.Parameter(-means * stds_recip, requires_grad=False)
return layer
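def _generate_normalizing_layer_example() -> Tensor:
    """Illustrative sketch (added, not original): after applying the layer, each
    column of the toy batch has approximately zero mean and unit std."""
    data = torch.randn(128, 4) * 3.0 + 1.0  # arbitrary toy batch
    layer = generate_normalizing_layer(data)
    return layer(data)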
def rot2d(theta: Tensor) -> Tensor:
"""Generate a batch of 2d rotation matrices from a batch of rotation angles.
Args:
theta: batch_n
Returns:
A tensor of the shape batch_n x 2 x 2.
"""
assert theta.dim() == 1
c, s = torch.cos(theta).reshape(-1, 1, 1), torch.sin(theta).reshape(-1, 1, 1)
r1 = torch.cat((c, -s), dim=2)
r2 = torch.cat((s, c), dim=2)
rots = torch.cat((r1, r2), dim=1)
return rots
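def _rot2d_example() -> Tensor:
    """Illustrative sketch (added, not original): rotation matrices for a batch
    of two angles; the result has shape (2, 2, 2)."""
    return rot2d(torch.tensor([0.0, math.pi / 2]))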
################################################################################
# 2D Transformations #
################################################################################
def transform_vertices_2d(configuration: Tensor, vertices: Tensor) -> Tensor:
"""Transform vertices by the state in configuration.
Args:
configuration: batch_n x 3 x 1. Second dimension represents x, y, theta.
Last dimension just makes each batch entry a column vector.
vertices: batch_n x vert_n x 2 OR vert_n x 2. If the latter the same vertices are used
for every batch entry.
Returns:
A tensor of the shape batch_n x vert_n x 2.
"""
batch_n = configuration.shape[0]
if vertices.dim() == 2: vertices = vertices.unsqueeze(0).repeat(batch_n, 1, 1)
assert vertices.shape[2] == 2
vert_n = vertices.shape[1]
rot = rot2d(configuration[:, 2, 0])
trans = configuration[:, 0:2, :].repeat(1, 1, vert_n)
vertices = torch.bmm(rot, vertices.transpose(1, 2)) + trans
return vertices.transpose(1, 2)
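def _transform_vertices_2d_example() -> Tensor:
    """Illustrative sketch (added, not original): translate a unit square by
    (1, 2) with zero rotation; the result has shape (1, 4, 2)."""
    configuration = torch.tensor([[[1.0], [2.0], [0.0]]])  # batch_n x 3 x 1
    vertices = torch.tensor([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    return transform_vertices_2d(configuration, vertices)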
def transform_and_project_2d(configuration: Tensor, vertices: Tensor,
projections: Tensor) -> Tensor:
"""Transform vertices by the configuration.
Args:
configuration: batch_n x 3 x 1. The second dimension represents x, y, theta.
Last dimension just makes each batch entry a column vector.
vertices: vert_n x 2.
projections: proj_n x 2. Transformed vertices are projected along these vectors.
Returns:
A tensor of the shape batch_n x (vert_n * proj_n) x 1. Projections are interleaved
along the second dimension. Meaning that we first stack proj_n projections for the
first vertex, then proj_n projections for the second vertex, etc.
"""
assert vertices.dim() == 2 and vertices.shape[1] == 2
assert projections.dim() == 2 and projections.shape[1] == 2
batch_n = configuration.shape[0]
projections = projections.unsqueeze(0).repeat(batch_n, 1, 1)
vertices = transform_vertices_2d(configuration, vertices)
dists = projections.bmm(vertices.transpose(1, 2)).transpose(1, 2)
return dists
def _compute_corner_jacobians(configuration: Tensor, vertices: Tensor) -> List[Tensor]:
"""Compute the jacobians of each corner position w.r.t. configuration."""
batch_n = configuration.shape[0]
corner_angles = -torch.atan2(vertices[:, 0], vertices[:, 1]) + math.pi / 2
body_rot = configuration[:, 2:3, :].transpose(1, 2)
Js = []
for i, corner_angle in enumerate(corner_angles):
corner_angle_rep = corner_angle.repeat(batch_n).reshape(-1, 1, 1)
corner_rot = body_rot + corner_angle_rep
angle_jacobian = torch.cat((-torch.sin(corner_rot), torch.cos(corner_rot)), dim=1)
dist = torch.norm(vertices[i, :], 2)
angle_jacobian = (angle_jacobian * dist).transpose(1, 2)
Id = torch.eye(2).unsqueeze(0).repeat(batch_n, 1, 1)
J = torch.cat((Id, angle_jacobian), dim=1)
Js.append(J)
return Js
def transform_and_project_2d_jacobian(configuration: Tensor, vertices: Tensor,
projections: Tensor) -> Tensor:
"""Compute the Jacobian of the 2d transformation and projection.
Args:
configuration: batch_n x 3 x 1. The second dimension represents x, y, theta.
Last dimension just makes each batch entry a column vector.
vertices: vert_n x 2.
projections: proj_n x 2. Transformed vertices are projected along these vectors.
Returns:
A tensor of the shape batch_n x (vert_n * proj_n) x 3. Projections are interleaved
along the second dimension. Meaning that we first stack proj_n projection gradients
for the first vertex, then proj_n project gradients for the second vertex, etc.
"""
assert vertices.dim() == 2 and vertices.shape[1] == 2
assert projections.dim() == 2 and projections.shape[1] == 2
batch_n = configuration.shape[0]
projections = projections.unsqueeze(0).repeat(batch_n, 1, 1)
Js = _compute_corner_jacobians(configuration, vertices)
projected_Js = [projections.bmm(J.transpose(1, 2)).transpose(1, 2) for J in Js]
return torch.cat(tuple(projected_Js), dim=2).transpose(1, 2)
################################################################################
# 3D Transformations #
################################################################################
def transform_vertices_3d(configuration: Tensor, vertices: Tensor) -> Tensor:
"""Transform vertices by the configuration.
Args:
configuration: batch_n x 7 x 1. The second dimension represents x, y, z, quaternion.
Last dimension just makes each batch entry a column vector.
vertices: batch_n x vert_n x 3 OR vert_n x 3. If the latter the same vertices are used
for every batch entry.
Returns:
A tensor of the shape batch_n x vert_n x 3.
"""
batch_n = configuration.shape[0]
if vertices.dim() == 2: vertices = vertices.unsqueeze(0).repeat(batch_n, 1, 1)
assert vertices.shape[2] == 3
vertices = vertices.transpose(1, 2)
vert_n = vertices.shape[2]
vert_quat = torch.cat((torch.zeros(batch_n, vert_n, 1), vertices.transpose(1, 2)), dim=2)
rot_quat = configuration[:, 3:7, :].squeeze(2)
vert_quat = vert_quat.reshape(vert_n * batch_n, -1)
rot_quat = rot_quat.repeat(1, vert_n).reshape(vert_n * batch_n, -1)
vert_rot = quat.qmul(quat.qmul(rot_quat, vert_quat), quat.qinv(rot_quat))
vert_rot = vert_rot.reshape(batch_n, vert_n, 4)
vert_rot = vert_rot[:, :, 1:4]
pos_shift = configuration[:, 0:3, :].transpose(1, 2)
return vert_rot + pos_shift
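def _transform_vertices_3d_example() -> Tensor:
    """Illustrative sketch (added, not original): translate two points by
    (1, 2, 3) with an identity rotation, assuming the scalar-first quaternion
    convention suggested by the code above; the result has shape (1, 2, 3)."""
    configuration = torch.tensor([[[1.0], [2.0], [3.0], [1.0], [0.0], [0.0], [0.0]]])
    vertices = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    return transform_vertices_3d(configuration, vertices)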
def transform_and_project_3d(configuration: Tensor, vertices: Tensor,
projections: Tensor) -> Tensor:
"""Transform vertices by the configuration, then project along projections.
Args:
configuration: batch_n x 7 x 1. The second dimension represents x, y, z, quaternion.
Last dimension just makes each batch entry a column vector.
vertices: vert_n x 3.
projections: proj_n x 3. Transformed vertices are projected along these vectors.
Returns:
A tensor of the shape batch_n x (vert_n * proj_n) x 1. Projections are interleaved
along the second dimension. Meaning that we first stack proj_n projections for the
first vertex, then proj_n projections for the second vertex, etc.
"""
assert vertices.dim() == 2 and vertices.shape[1] == 3
assert projections.dim() == 2 and projections.shape[1] == 3
batch_n, vert_n, proj_n = configuration.shape[0], vertices.shape[0], projections.shape[0]
projections = projections.unsqueeze(0).repeat(batch_n, 1, 1)
vertices = transform_vertices_3d(configuration, vertices)
dists = vertices.bmm(projections.transpose(1, 2))
# Interleave the projections; should do nothing if proj_n = 1
dists = dists.reshape(batch_n, proj_n * vert_n, 1)
return dists
def transform_and_project_3d_jacobian(configuration: Tensor, vertices: Tensor,
projections: Tensor, vertex_jac=False) -> Tensor:
"""Compute the Jacobian of the 3d transformation and projection w.r.t configuration.
Args:
configuration: batch_n x 7 x 1. The second dimension represents x, y, z, quaternion.
Last dimension just makes each batch entry a column vector.
vertices: vert_n x 3.
projections: proj_n x 3. Transformed vertices are projected along these vectors.
vertex_jac: indicates whether or not the Jacobian w.r.t. vertices should be added.
Returns:
For vertex_jac = False:
A tensor of the shape batch_n x (vert_n * proj_n) x 7. Projections are interleaved
along the second dimension. Meaning that we first stack proj_n projection gradients
for the first vertex, then proj_n project gradients for the second vertex, etc.
For vertex_jac = True:
A tensor of the shape batch_n x (vert_n * proj_n) x (7 + 3 * vert_n). The original
matrix is augmented with the jacobian of each projection w.r.t. vertex coordinates
before transformation.
"""
assert vertices.dim() == 2 and vertices.shape[1] == 3
assert projections.dim() == 2 and projections.shape[1] == 3
batch_n, vert_n, proj_n = configuration.shape[0], vertices.shape[0], projections.shape[0]
projections = projections.unsqueeze(0).repeat(vert_n * batch_n, 1, 1)
qrot = configuration[:, 3:7, 0]
qverts = torch.cat((torch.zeros(vert_n, 1), vertices), dim=1)
qrot = qrot.repeat(1, vert_n).reshape(vert_n * batch_n, -1)
qverts = qverts.repeat(batch_n, 1)
qjac = quat.qjac(qrot, qverts)
qjac = qjac.reshape(-1, qjac.shape[1], qjac.shape[2])
rot_jac_dist = projections.bmm(qjac.transpose(1, 2)).reshape(batch_n, vert_n * proj_n, -1)
pos_jac_dist = projections.reshape(batch_n, vert_n * proj_n, -1)
jac = torch.cat((pos_jac_dist, rot_jac_dist), dim=2)
if vertex_jac:
vertjac = quat.quaternion_to_rotmat(qrot)
vertjac_dist = projections.bmm(vertjac).transpose(1, 2)
vertjac_dist = tensor_utils.block_diag(vertjac_dist) \
.t().unsqueeze(0).repeat(batch_n, 1, 1)
jac = torch.cat((jac, vertjac_dist), dim=2)
return jac
|
the-stack_0_26412
|
"""This class provides often needed analysis functions, for analysis that is done with python.
"""
from __future__ import division
import logging
import os
import errno
import numpy as np
import numexpr as ne
import numba
from numba import njit
from scipy.interpolate import splrep, sproot
from scipy import stats
from scipy import optimize
from scipy.optimize import curve_fit
from scipy.integrate import quad
from scipy.sparse import csr_matrix
from scipy.spatial import Voronoi
from scipy.special import erf
from scipy.ndimage import distance_transform_edt
import requests
from tqdm import tqdm
from beam_telescope_analysis import analysis_functions
# A public secret representing public, read only owncloud folder
SCIBO_PUBLIC_FOLDER = 'lfCJ6Z3bFBaL094'
@njit
def merge_on_event_number(data_1, data_2):
"""
    Merges data_2 with data_1 on an event basis, including all permutations.
    That means: merge all hits of every event in data_2 with all hits of the same event in data_1.
    Does the same as the merge of the pandas package:
df = data_1.merge(data_2, how='left', on='event_number')
df.dropna(inplace=True)
    But results in roughly 4x faster code.
    Parameters
    ----------
data_1, data_2: np.recarray with event_number column
Returns
-------
Tuple np.recarray, np.recarray
        The data_1 and data_2 arrays extended by the permutations.
"""
result_array_size = 0
event_index_data_2 = 0
# Loop to determine the needed result array size
for index_data_1 in range(data_1.shape[0]):
while event_index_data_2 < data_2.shape[0] and data_2[event_index_data_2]['event_number'] < data_1[index_data_1]['event_number']:
event_index_data_2 += 1
for index_data_2 in range(event_index_data_2, data_2.shape[0]):
if data_1[index_data_1]['event_number'] == data_2[index_data_2]['event_number']:
result_array_size += 1
else:
break
# Create result array with correct size
result_1 = np.zeros(shape=(result_array_size,), dtype=data_1.dtype)
result_2 = np.zeros(shape=(result_array_size,), dtype=data_2.dtype)
result_index_1 = 0
result_index_2 = 0
event_index_data_2 = 0
for index_data_1 in range(data_1.shape[0]):
while event_index_data_2 < data_2.shape[0] and data_2[event_index_data_2]['event_number'] < data_1[index_data_1]['event_number']: # Catch up with outer loop
event_index_data_2 += 1
for index_data_2 in range(event_index_data_2, data_2.shape[0]):
if data_1[index_data_1]['event_number'] == data_2[index_data_2]['event_number']:
result_1[result_index_1] = data_1[index_data_1]
result_2[result_index_2] = data_2[index_data_2]
result_index_1 += 1
result_index_2 += 1
else:
break
return result_1, result_2
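def _merge_on_event_number_example():
    """Illustrative sketch (added, not original): merge two toy hit tables.
    Event 1 occurs twice in data_1 and once in data_2, so the merged result
    contains two rows (all permutations of that event's hits)."""
    dtype = np.dtype([('event_number', np.int64), ('charge', np.float64)])
    data_1 = np.array([(0, 1.0), (1, 2.0), (1, 3.0)], dtype=dtype)
    data_2 = np.array([(1, 10.0), (2, 20.0)], dtype=dtype)
    return merge_on_event_number(data_1, data_2)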
@njit
def correlate_position_on_event_number(ref_event_numbers, dut_event_numbers, ref_x_indices, ref_y_indices, dut_x_indices, dut_y_indices, x_corr_hist, y_corr_hist):
"""Correlating the hit/cluster positions on event basis including all permutations.
The hit/cluster positions are used to fill the X and Y correlation histograms.
Does the same than the merge of the pandas package:
df = data_1.merge(data_2, how='left', on='event_number')
df.dropna(inplace=True)
correlation_column = np.hist2d(df[column_mean_dut_0], df[column_mean_dut_x])
correlation_row = np.hist2d(df[row_mean_dut_0], df[row_mean_dut_x])
The following code is > 10x faster than the above code.
Parameters
----------
ref_event_numbers: array
Event number array of the reference DUT.
dut_event_numbers: array
Event number array of the second DUT.
ref_x_indices: array
        X position indices of the reference DUT.
ref_y_indices: array
        Y position indices of the reference DUT.
dut_x_indices: array
X position indices of the second DUT.
dut_y_indices: array
Y position indices of the second DUT.
x_corr_hist: array
X correlation array (2D).
y_corr_hist: array
Y correlation array (2D).
"""
dut_index = 0
    # Loop over the reference hits and correlate them with DUT hits of the same event
for ref_index in range(ref_event_numbers.shape[0]):
while dut_index < dut_event_numbers.shape[0] and dut_event_numbers[dut_index] < ref_event_numbers[ref_index]: # Catch up with outer loop
dut_index += 1
for curr_dut_index in range(dut_index, dut_event_numbers.shape[0]):
if ref_event_numbers[ref_index] == dut_event_numbers[curr_dut_index]:
x_index_ref = ref_x_indices[ref_index]
y_index_ref = ref_y_indices[ref_index]
x_index_dut = dut_x_indices[curr_dut_index]
y_index_dut = dut_y_indices[curr_dut_index]
# Add correlation to histogram
x_corr_hist[x_index_dut, x_index_ref] += 1
y_corr_hist[y_index_dut, y_index_ref] += 1
else:
break
@njit(locals={'curr_event_number': numba.int64, 'last_event_number': numba.int64, 'curr_index': numba.int64, 'corr_index': numba.int64})
def correlate_hits_on_event_range(event_numbers, x_indices, y_indices, x_corr_hist, y_corr_hist, event_range):
"""Correlating the hit indices of different events in a certain range.
For unambiguous event building no correlation should be seen.
Parameters
----------
event_numbers: array
Event number array.
x_indices: array
X position indices.
y_indices: array
Y position indices.
x_corr_hist: array
X correlation array (2D).
y_corr_hist: array
Y correlation array (2D).
event_range : uint
The number of events to use for correlation,
e.g., event_range = 1 correlates the hits of the preceding event with the hits of the current event.
"""
last_event_number = -1
# Loop over hits, outer loop
for curr_index in range(event_numbers.shape[0]):
curr_event_number = event_numbers[curr_index]
# calculate new start index for inner loop if new event occurs
if curr_event_number != last_event_number:
corr_start_event_number = curr_event_number - event_range
corr_start_index = np.searchsorted(event_numbers, corr_start_event_number)
# set correlation index
corr_index = corr_start_index
# Iterate until current event number
while event_numbers[corr_index] < curr_event_number:
x_corr_hist[x_indices[corr_index], x_indices[curr_index]] += 1
y_corr_hist[y_indices[corr_index], y_indices[curr_index]] += 1
corr_index += 1
last_event_number = curr_event_number
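# Illustrative usage sketch (added for clarity, not part of the original module).
# Correlates each event's hits with the hits of the preceding event (event_range = 1);
# the toy data below is an assumption.
def _example_correlate_hits_on_event_range():
    import numpy as np
    event_numbers = np.array([0, 0, 1, 1, 2], dtype=np.int64)
    x_indices = np.array([0, 1, 1, 2, 2], dtype=np.uint32)
    y_indices = np.array([0, 1, 1, 2, 2], dtype=np.uint32)
    x_corr_hist = np.zeros((3, 3), dtype=np.uint32)
    y_corr_hist = np.zeros((3, 3), dtype=np.uint32)
    correlate_hits_on_event_range(event_numbers, x_indices, y_indices, x_corr_hist, y_corr_hist, 1)
    return x_corr_hist, y_corr_hist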
def in1d_events(ar1, ar2):
"""
Does the same as np.in1d, but exploits the fact that ar1 and ar2 are sorted and uses the C++ library. It is therefore much faster.
"""
ar1 = np.ascontiguousarray(ar1) # change memory alignment for c++ library
ar2 = np.ascontiguousarray(ar2) # change memory alignment for c++ library
tmp = np.empty_like(ar1, dtype=np.uint8) # temporary result array filled by c++ library, bool type is not supported with cython/numpy
return analysis_functions.get_in1d_sorted(ar1, ar2, tmp)
def hist_quantiles(hist, prob=(0.05, 0.95), return_indices=False, copy=True):
'''Calculate quantiles from a histogram and cut off the histogram below and above the given quantiles. This function will not cut off more than the given values.
Parameters
----------
hist : array_like, iterable
Input histogram with dimension at most 1.
prob : float, list, tuple
List of quantiles to compute. Upper and lower limit. From 0 to 1. Default is 0.05 and 0.95.
return_indices : bool, optional
If true, return the indices of the hist.
copy : bool, optional
Whether to copy the input data (True), or to use a reference instead. Default is True.
Returns
-------
masked_hist : masked_array
Hist with masked elements.
masked_hist : masked_array, tuple
Hist with masked elements and indices.
'''
# make np array
hist_t = np.array(hist)
# calculate cumulative distribution
cdf = np.cumsum(hist_t)
# copy, convert and normalize
if cdf[-1] == 0:
normcdf = cdf.astype('float')
else:
normcdf = cdf.astype('float') / cdf[-1]
# calculate unique values from cumulative distribution and their indices
unormcdf, indices = np.unique(normcdf, return_index=True)
# calculate limits
try:
hp = np.where(unormcdf > prob[1])[0][0]
lp = np.where(unormcdf >= prob[0])[0][0]
except IndexError:
hp_index = hist_t.shape[0]
lp_index = 0
else:
hp_index = indices[hp]
lp_index = indices[lp]
# copy and create ma
masked_hist = np.ma.array(hist, copy=copy, mask=True)
masked_hist.mask[lp_index:hp_index + 1] = False
if return_indices:
return masked_hist, list(range(lp_index, hp_index + 1))
else:
return masked_hist
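# Illustrative usage sketch (added for clarity, not part of the original module).
# Masks the histogram tails outside the 5% / 95% quantiles; the toy histogram is an assumption.
def _example_hist_quantiles():
    import numpy as np
    hist = np.array([1, 2, 10, 30, 10, 2, 1])
    masked_hist, indices = hist_quantiles(hist, prob=(0.05, 0.95), return_indices=True)
    return masked_hist, indices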
def get_max_events_in_both_arrays(events_one, events_two):
"""
Calculates the maximum count of events that exist in both arrays.
"""
events_one = np.ascontiguousarray(events_one) # change memory alignement for c++ library
events_two = np.ascontiguousarray(events_two) # change memory alignement for c++ library
event_result = np.empty(shape=(events_one.shape[0] + events_two.shape[0],), dtype=events_one.dtype)
count = analysis_functions.get_max_events_in_both_arrays(events_one, events_two, event_result)
return event_result[:count]
@njit()
def map_cluster(event_numbers, clusters, mapped_clusters):
'''
Maps the clusters onto events. Events without a cluster have all values set to 0 and column/row/charge set to NaN.
If there is more than one cluster per event, only the first one is kept; the others are omitted and lost!
Parameters
----------
event_numbers : numpy array
One dimensional event number array with increasing event numbers.
clusters : np.recarray
Recarray with cluster info. The event number is increasing.
mapped_clusters : np.recarray
Recarray of the same length as event_numbers and same dtype as clusters with values initialized to NaN/0.
Example
-------
event_numbers = [ 0 1 1 2 3 3 ]
clusters.event_number = [ 0 1 2 2 3 4 ]
gives mapped_clusters.event_number = [ 0 1 0 2 3 0 ]
'''
i = 0
j = 0
while i < event_numbers.shape[0]:
# Find first cluster with a matching event number
while j < clusters.shape[0] and clusters['event_number'][j] < event_numbers[i]: # Catch up to actual event number events[i]
j += 1
if j < clusters.shape[0]:
if clusters['event_number'][j] == event_numbers[i]:
mapped_clusters[i] = clusters[j]
j += 1
else:
return
i += 1
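# Illustrative usage sketch (added for clarity, not part of the original module).
# Reproduces the docstring example; the 'charge' field is an assumption to show that
# whole cluster records are copied.
def _example_map_cluster():
    import numpy as np
    event_numbers = np.array([0, 1, 1, 2, 3, 3], dtype=np.int64)
    clusters = np.zeros((6,), dtype=[('event_number', np.int64), ('charge', np.float64)])
    clusters['event_number'] = [0, 1, 2, 2, 3, 4]
    clusters['charge'] = np.arange(6, dtype=np.float64)
    mapped_clusters = np.zeros(event_numbers.shape[0], dtype=clusters.dtype)
    map_cluster(event_numbers, clusters, mapped_clusters)
    return mapped_clusters['event_number']  # expected: [0 1 0 2 3 0]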
def get_events_in_both_arrays(events_one, events_two):
"""
Calculates the events that exist in both arrays.
"""
events_one = np.ascontiguousarray(events_one) # change memory alignement for c++ library
events_two = np.ascontiguousarray(events_two) # change memory alignement for c++ library
event_result = np.empty_like(events_one)
count = analysis_functions.get_events_in_both_arrays(events_one, events_two, event_result)
return event_result[:count]
def hist_1d_index(x, shape):
"""
Fast 1d histogram of 1D indices with C++ inner loop optimization.
Is more than 2 orders of magnitude faster than np.histogram().
The indices are given in coordinates and have to fit into a histogram of the dimensions shape.
Parameters
----------
x : array like
shape : tuple
tuple with x dimensions: (x,)
Returns
-------
np.ndarray with given shape
"""
if len(shape) != 1:
raise NotImplementedError('The shape has to describe a 1-d histogram')
# change memory alignment for c++ library
x = np.ascontiguousarray(x.astype(np.int32))
result = np.zeros(shape=shape, dtype=np.uint32)
analysis_functions.hist_1d(x, shape[0], result)
return result
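# Illustrative usage sketch (added for clarity, not part of the original module);
# the toy indices are assumptions.
def _example_hist_1d_index():
    import numpy as np
    x = np.array([0, 1, 1, 3])
    return hist_1d_index(x, shape=(4,))  # expected: [1, 2, 0, 1]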
def hist_2d_index(x, y, shape):
"""
Fast 2d histogram of 2D indices with C++ inner loop optimization.
Is more than 2 orders of magnitude faster than np.histogram2d().
The indices are given in x, y coordinates and have to fit into a histogram of the dimensions shape.
Parameters
----------
x : array like
y : array like
shape : tuple
tuple with x,y dimensions: (x, y)
Returns
-------
np.ndarray with given shape
"""
if len(shape) != 2:
raise NotImplementedError('The shape has to describe a 2-d histogram')
if x.shape != y.shape:
raise ValueError('The dimensions in x / y have to match')
# change memory alignment for c++ library
x = np.ascontiguousarray(x.astype(np.int32))
y = np.ascontiguousarray(y.astype(np.int32))
result = np.zeros(shape=shape, dtype=np.uint32).ravel() # ravel hist in c-style, 2D --> 1D
analysis_functions.hist_2d(x, y, shape[0], shape[1], result)
return np.reshape(result, shape) # rebuild 2D hist from 1D hist
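# Illustrative usage sketch (added for clarity, not part of the original module);
# the toy indices are assumptions.
def _example_hist_2d_index():
    import numpy as np
    x = np.array([0, 0, 1])
    y = np.array([0, 0, 2])
    return hist_2d_index(x, y, shape=(2, 3))  # expected: entry [0, 0] == 2 and entry [1, 2] == 1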
def hist_3d_index(x, y, z, shape):
"""
Fast 3d histogram of 3D indices with C++ inner loop optimization.
Is more than 2 orders of magnitude faster than np.histogramdd().
The indices are given in x, y, z coordinates and have to fit into a histogram of the dimensions shape.
Parameters
----------
x : array like
y : array like
z : array like
shape : tuple
tuple with x,y,z dimensions: (x, y, z)
Returns
-------
np.ndarray with given shape
"""
if len(shape) != 3:
raise NotImplementedError('The shape has to describe a 3-d histogram')
if x.shape != y.shape or x.shape != z.shape:
raise ValueError('The dimensions in x / y / z have to match')
# change memory alignment for c++ library
x = np.ascontiguousarray(x.astype(np.int32))
y = np.ascontiguousarray(y.astype(np.int32))
z = np.ascontiguousarray(z.astype(np.int32))
result = np.zeros(shape=shape, dtype=np.uint16).ravel() # ravel hist in c-style, 3D --> 1D
analysis_functions.hist_3d(x, y, z, shape[0], shape[1], shape[2], result)
return np.reshape(result, shape) # rebuild 3D hist from 1D hist
def get_data_in_event_range(array, event_start=None, event_stop=None, assume_sorted=True):
'''Selects the data (rows of a table) that occurred in the given event range [event_start, event_stop[
Parameters
----------
array : numpy.array
event_start : int, None
event_stop : int, None
assume_sorted : bool
Set to true if the hits are sorted by the event_number. Increases speed.
Returns
-------
numpy.array
hit array with the hits in the event range.
'''
event_number = array['event_number']
if not np.any(event_number): # No events in selection
return np.array([])
if assume_sorted:
data_event_start = event_number[0]
data_event_stop = event_number[-1]
if (event_start is not None and event_stop is not None) and (data_event_stop < event_start or data_event_start > event_stop or event_start == event_stop): # special case, no intersection at all
return array[0:0]
# get min/max indices with values that are also in the other array
if event_start is None:
min_index_data = 0
else:
if event_number[0] > event_start:
min_index_data = 0
else:
min_index_data = np.argmin(event_number < event_start)
if event_stop is None:
max_index_data = event_number.shape[0]
else:
if event_number[-1] < event_stop:
max_index_data = event_number.shape[0]
else:
max_index_data = np.argmax(event_number >= event_stop)
if min_index_data < 0:
min_index_data = 0
return array[min_index_data:max_index_data]
else:
return array[ne.evaluate('(event_number >= event_start) & (event_number < event_stop)')]
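# Illustrative usage sketch (added for clarity, not part of the original module).
# Selects the rows with 1 <= event_number < 3 from a sorted toy hit table (assumed dtype).
def _example_get_data_in_event_range():
    import numpy as np
    hits = np.zeros((5,), dtype=[('event_number', np.int64), ('charge', np.float64)])
    hits['event_number'] = [0, 1, 1, 2, 3]
    return get_data_in_event_range(hits, event_start=1, event_stop=3)  # rows of events 1 and 2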
def data_aligned_at_events(table, start_event_number=None, stop_event_number=None, start_index=None, stop_index=None, chunk_size=1000000, try_speedup=False, first_event_aligned=True, fail_on_missing_events=True):
'''Takes a table with an event_number column and returns chunks with a size of up to chunk_size. The chunks are chosen such that events are not split.
Additional parameters can be set to increase the readout speed. Events between a certain range can be selected.
Also the start and the stop indices limiting the table size can be specified to improve performance.
The event_number column must be sorted.
In case of try_speedup is True, it is important to create an index of event_number column with pytables before using this function. Otherwise the queries are slowed down.
Parameters
----------
table : pytables.table
The data.
start_event_number : int
The returned data contains events with event number >= start_event_number. If None, no limit is set.
stop_event_number : int
The returned data contains events with event number < stop_event_number. If None, no limit is set.
start_index : int
Start index of data. If None, no limit is set.
stop_index : int
Stop index of data. If None, no limit is set.
chunk_size : int
Maximum chunk size per read.
try_speedup : bool
If True, try to reduce the index range to read by searching for the indices of start and stop event number. If these event numbers are usually
not in the data this speedup can even slow down the function!
The following parameters are not used when try_speedup is True:
first_event_aligned : bool
If True, assuming that the first event is aligned to the data chunk and will be added. If False, the lowest event number of the first chunk will not be read out.
fail_on_missing_events : bool
If True, an error is given when start_event_number or stop_event_number is not part of the data.
Returns
-------
Iterator of tuples
Data of the actual data chunk and start index for the next chunk.
Example
-------
start_index = 0
for scan_parameter in scan_parameter_range:
start_event_number, stop_event_number = event_select_function(scan_parameter)
for data, start_index in data_aligned_at_events(table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=start_index):
do_something(data)
for data, index in data_aligned_at_events(table):
do_something(data)
'''
# initialize variables
start_index_known = False
stop_index_known = False
start_index = 0 if start_index is None else start_index
stop_index = table.nrows if stop_index is None else stop_index
if stop_index < start_index:
raise ValueError('Invalid start/stop index')
table_max_rows = table.nrows
if stop_event_number is not None and start_event_number is not None and stop_event_number < start_event_number:
raise ValueError('Invalid start/stop event number')
# set start stop indices from the event numbers for fast read if possible; not possible if the given event number does not exist in the data stream
if try_speedup and table.colindexed["event_number"]:
if start_event_number is not None:
start_condition = 'event_number==' + str(start_event_number)
start_indices = table.get_where_list(start_condition, start=start_index, stop=stop_index)
if start_indices.shape[0] != 0: # set start index if possible
start_index = start_indices[0]
start_index_known = True
if stop_event_number is not None:
stop_condition = 'event_number==' + str(stop_event_number)
stop_indices = table.get_where_list(stop_condition, start=start_index, stop=stop_index)
if stop_indices.shape[0] != 0: # set the stop index if possible, stop index is excluded
stop_index = stop_indices[0]
stop_index_known = True
if start_index_known and stop_index_known and start_index + chunk_size >= stop_index: # special case, one read is enough, data not bigger than one chunk and the indices are known
yield table.read(start=start_index, stop=stop_index), stop_index
else: # read data in chunks, chunks do not divide events, abort if stop_event_number is reached
# search for begin
current_start_index = start_index
if start_event_number is not None:
while current_start_index < stop_index:
current_stop_index = min(current_start_index + chunk_size, stop_index)
array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive
last_event_in_chunk = array_chunk["event_number"][-1]
if last_event_in_chunk < start_event_number:
current_start_index = current_start_index + chunk_size # not there yet, continue to next read (assuming sorted events)
else:
first_event_in_chunk = array_chunk["event_number"][0]
# if stop_event_number is not None and first_event_in_chunk >= stop_event_number and start_index != 0 and start_index == current_start_index:
# raise ValueError('The stop event %d is missing. Change stop_event_number.' % stop_event_number)
if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk:
raise ValueError('Chunk size too small. Increase chunk size to fit full event.')
if not first_event_aligned and first_event_in_chunk == start_event_number and start_index != 0 and start_index == current_start_index: # first event in first chunk not aligned at index 0, so take next event
if fail_on_missing_events:
raise ValueError('The start event %d is missing. Change start_event_number.' % start_event_number)
chunk_start_index = np.searchsorted(array_chunk["event_number"], start_event_number + 1, side='left')
elif fail_on_missing_events and first_event_in_chunk > start_event_number and start_index == current_start_index:
raise ValueError('The start event %d is missing. Change start_event_number.' % start_event_number)
elif first_event_aligned and first_event_in_chunk == start_event_number and start_index == current_start_index:
chunk_start_index = 0
else:
chunk_start_index = np.searchsorted(array_chunk["event_number"], start_event_number, side='left')
if fail_on_missing_events and array_chunk["event_number"][chunk_start_index] != start_event_number and start_index == current_start_index:
raise ValueError('The start event %d is missing. Change start_event_number.' % start_event_number)
# if fail_on_missing_events and ((start_index == current_start_index and chunk_start_index == 0 and start_index != 0 and not first_event_aligned) or array_chunk["event_number"][chunk_start_index] != start_event_number):
# raise ValueError('The start event %d is missing. Change start_event_number.' % start_event_number)
current_start_index = current_start_index + chunk_start_index # calculate index for next loop
break
elif not first_event_aligned and start_index != 0:
while current_start_index < stop_index:
current_stop_index = min(current_start_index + chunk_size, stop_index)
array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive
first_event_in_chunk = array_chunk["event_number"][0]
last_event_in_chunk = array_chunk["event_number"][-1]
if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk:
raise ValueError('Chunk size too small. Increase chunk size to fit full event.')
chunk_start_index = np.searchsorted(array_chunk["event_number"], first_event_in_chunk + 1, side='left')
current_start_index = current_start_index + chunk_start_index
if not first_event_in_chunk == last_event_in_chunk:
break
# data loop
while current_start_index < stop_index:
current_stop_index = min(current_start_index + chunk_size, stop_index)
array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive
first_event_in_chunk = array_chunk["event_number"][0]
last_event_in_chunk = array_chunk["event_number"][-1]
chunk_start_index = 0
if stop_event_number is None:
if current_stop_index == table_max_rows:
chunk_stop_index = array_chunk.shape[0]
else:
chunk_stop_index = np.searchsorted(array_chunk["event_number"], last_event_in_chunk, side='left')
else:
if last_event_in_chunk >= stop_event_number:
chunk_stop_index = np.searchsorted(array_chunk["event_number"], stop_event_number, side='left')
elif current_stop_index == table_max_rows: # this will also add the last event of the table
chunk_stop_index = array_chunk.shape[0]
else:
chunk_stop_index = np.searchsorted(array_chunk["event_number"], last_event_in_chunk, side='left')
nrows = chunk_stop_index - chunk_start_index
if nrows == 0:
if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk:
raise ValueError('Chunk size too small to fit event. Data corruption possible. Increase chunk size to read full event.')
elif chunk_start_index == 0: # not increasing current_start_index
return
elif stop_event_number is not None and last_event_in_chunk >= stop_event_number:
return
else:
yield array_chunk[chunk_start_index:chunk_stop_index], current_start_index + nrows + chunk_start_index
current_start_index = current_start_index + nrows + chunk_start_index # events fully read, increase start index and continue reading
def find_closest(arr, values):
'''Returns a list of indices with values closest to arr values.
Parameters
----------
arr : iterable
Iterable of numbers. Arr must be sorted.
values : iterable
Iterable of numbers.
Returns
-------
A list of indices with values closest to arr values.
See also: http://stackoverflow.com/questions/8914491/finding-the-nearest-value-and-return-the-index-of-array-in-python
'''
idx = arr.searchsorted(values)
idx = np.clip(idx, 1, len(arr) - 1)
left = arr[idx - 1]
right = arr[idx]
idx -= values - left < right - values
return idx
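# Illustrative usage sketch (added for clarity, not part of the original module);
# the toy values are assumptions.
def _example_find_closest():
    import numpy as np
    arr = np.array([0.0, 1.0, 2.0, 3.0])
    values = np.array([0.1, 1.6, 2.4])
    return find_closest(arr, values)  # expected indices: [0, 2, 2]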
def linear(x, c0, c1):
return c0 + c1 * x
def gauss(x, *p):
A, mu, sigma = p
return A * np.exp(-(x - mu) ** 2.0 / (2.0 * sigma ** 2.0))
def gauss2(x, *p):
mu, sigma = p
return (sigma * np.sqrt(2.0 * np.pi))**-1.0 * np.exp(-0.5 * ((x - mu) / sigma)**2.0)
def gauss_offset_slope(x, *p):
A, mu, sigma, offset, slope = p
return gauss(x, A, mu, sigma) + offset + x * slope
def gauss_offset(x, *p):
A, mu, sigma, offset = p
return gauss(x, A, mu, sigma) + offset
def double_gauss(x, *p):
A_1, mu_1, sigma_1, A_2, mu_2, sigma_2 = p
return gauss(x, A_1, mu_1, sigma_1) + gauss(x, A_2, mu_2, sigma_2)
def double_gauss_offset(x, *p):
A_1, mu_1, sigma_1, A_2, mu_2, sigma_2, offset = p
return gauss(x, A_1, mu_1, sigma_1) + gauss(x, A_2, mu_2, sigma_2) + offset
def gauss_box_non_vec(x, *p):
'''Convolution of a Gaussian and a rectangle is a Gaussian integral.
Parameters
----------
A, mu, sigma, a (width of the rectangle) : float
See also:
- http://stackoverflow.com/questions/24230233/fit-gaussian-integral-function-to-data
- https://stackoverflow.com/questions/24386931/how-to-convolve-two-distirbutions-from-scipy-library
'''
A, mu, sigma, a = p
return quad(lambda t: gauss(x - t, A, mu, sigma) / (np.sqrt(2.0 * np.pi) * sigma), -a / 2.0, a / 2.0)[0]
# Vectorize function to use with np.arrays
gauss_box = np.vectorize(gauss_box_non_vec, excluded=["*p"])
def gauss_box_erf(x, *p):
''' Identical to gauss_box().
'''
A, mu, sigma, width = p
return 0.5 * A * erf((x - mu + width * 0.5) / (np.sqrt(2) * sigma)) + 0.5 * A * erf((mu + width * 0.5 - x) / (np.sqrt(2) * sigma))
def get_mean_from_histogram(counts, bin_positions):
return np.dot(counts, np.array(bin_positions)) / np.sum(counts).astype(np.float64)
def get_rms_from_histogram(counts, bin_positions):
return np.std(np.repeat(bin_positions, counts))
def get_median_from_histogram(counts, bin_positions):
return np.median(np.repeat(bin_positions, counts))
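# Illustrative usage sketch (added for clarity, not part of the original module).
# For a symmetric toy histogram the mean, RMS and median can be checked by hand.
def _example_histogram_statistics():
    import numpy as np
    counts = np.array([1, 3, 5, 3, 1])
    bin_positions = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
    mean = get_mean_from_histogram(counts, bin_positions)
    rms = get_rms_from_histogram(counts, bin_positions)
    median = get_median_from_histogram(counts, bin_positions)
    return mean, rms, median  # mean == median == 2.0 for this symmetric histogram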
def get_mean_efficiency(array_pass, array_total, interval=0.68):
''' Calculates the mean efficiency with statistical errors
Parameters
----------
array_pass, array_total : numpy array
interval: float
Confidence interval for error calculation
Returns
-------
Tuple with: Mean efficiency and positive negative confidence interval limits
'''
def get_eff_pdf(eff, k, N):
''' Returns the probability density function for the efficiency
estimator eff = k/N, where k are the passing events and N the
total number of events.
http://lss.fnal.gov/archive/test-tm/2000/fermilab-tm-2286-cd.pdf
page 5
This function reproduces plot 1 of the paper when multiplied by Gamma(N + 1).
'''
# The paper has the function defined by gamma functions. These explode quickly
# leading to numerical instabilities. The beta function does the same...
# np.float(gamma(N + 2)) / np.float((gamma(k + 1) * gamma(N - k + 1))) * eff**k * (1. - eff)**(N - k)
return stats.beta.pdf(eff, k + 1, N - k + 1)
def get_eff_prop_int(eff, k, N):
''' C.d.f. of beta function = P.d.f. integral -infty..eff '''
return stats.beta.cdf(eff, k + 1, N - k + 1)
def interval_integ(a, b, k, N):
''' Return the integral of the efficiency pdf using limits [a, b]:
'''
return get_eff_prop_int(b, k, N) - get_eff_prop_int(a, k, N)
def find_inter(k, N, interval):
''' Calculates the limits [err-, err+] such that Integral(pdf(k, N)) from err- to err+ = interval
and the width |err+ - err-| is minimal.
'''
def minimizeMe(x):
a, b = x[0], x[1]
return b - a
def get_start_values(k, N):
# Discretize issue for numerical calculation
eff = np.linspace(0.8 * float(k) / N, 1.2 * float(k) / N, 1000000)
eff = eff[eff <= 1.]
# Normalize by discretization
p = get_eff_pdf(eff, k, N=N)
max_i = np.argmin(np.abs(eff - float(k) / N))
for y in np.linspace(p[max_i] * 0.9, 0, 1000):
if max_i > 0:
idx_l = np.abs(y - p[:max_i]).argmin()
else:
idx_l = 0
if max_i < p.shape[0]:
idx_r = np.abs(y - p[max_i:]).argmin() + max_i
else:
idx_r = p.shape[0] - 1
if p[idx_l:idx_r].sum() * np.diff(eff)[0] > interval:
break
return eff[idx_l], eff[idx_r]
# Quick determination of start value to enhance convergence
max_a = float(k) / N # a < maximum eff
min_b = float(k) / N # b > maximum eff
a0, b0 = get_start_values(k, N)
cons = ({'type': 'eq', 'fun': lambda x: np.abs(interval_integ(x[0], x[1], k, N) - interval)})
# Find b
res = optimize.minimize(fun=minimizeMe, method='SLSQP', x0=(a0, b0),
bounds=((0., max_a), (min_b, 1.)),
constraints=cons)
return res.x
k = array_pass.sum()
N = array_total.sum()
eff = k.astype(np.float64) / N
lim_e_m, lim_e_p = find_inter(k, N, interval)
return eff, lim_e_m - eff, lim_e_p - eff
def fwhm(x, y):
"""
Determine the full width at half maximum (FWHM) of a peaked set of points, x and y.
Assumes that there is only one peak present in the dataset. The function
uses a spline interpolation of order 3.
See also http://stackoverflow.com/questions/10582795/finding-the-full-width-half-maximum-of-a-peak
"""
half_max = np.max(y) / 2.0
spl = splrep(x, y - half_max)
roots = sproot(spl)
if len(roots) != 2: # multiple peaks or no peaks
raise RuntimeError("Cannot determine FWHM")
else:
return roots[0], roots[1]
def peak_detect(x, y):
try:
fwhm_left_right = fwhm(x=x, y=y)
except (RuntimeError, TypeError):
raise RuntimeError("Cannot determine peak")
fwhm_value = fwhm_left_right[-1] - fwhm_left_right[0]
max_position = x[np.argmax(y)]
center = (fwhm_left_right[0] + fwhm_left_right[-1]) / 2.0
return max_position, center, fwhm_value, fwhm_left_right
def simple_peak_detect(x, y):
y = np.array(y)
half_maximum = np.max(y) * 0.5
greater = (y > half_maximum)
change_indices = np.where(greater[:-1] != greater[1:])[0]
if not np.any(greater) or greater[0] or greater[-1]:  # no value above half maximum or peak touching the array edges
raise RuntimeError("Cannot determine peak")
x = np.array(x)
# get center of indices for higher precision peak position and FWHM
x_center = (x[1:] + x[:-1]) / 2.0
try:
fwhm_left_right = (x_center[change_indices[0]], x_center[change_indices[-1]])
except IndexError:
raise RuntimeError("Cannot determine peak")
fwhm_value = fwhm_left_right[-1] - fwhm_left_right[0]
max_position = x[np.argmax(y)]
center = (fwhm_left_right[0] + fwhm_left_right[-1]) / 2.0
if fwhm_value == 0:
raise RuntimeError("Cannot determine peak")
return max_position, center, fwhm_value, fwhm_left_right
def fit_residuals(hist, edges):
bin_center = (edges[1:] + edges[:-1]) / 2.0
hist_mean = get_mean_from_histogram(hist, bin_center)
hist_std = get_rms_from_histogram(hist, bin_center)
if hist_std == 0:
fit, cov = [np.amax(hist), hist_mean, hist_std], np.full((3, 3), np.nan)
else:
try:
fit, cov = curve_fit(gauss, bin_center, hist, p0=[np.amax(hist), hist_mean, hist_std])
except (RuntimeError, TypeError):
fit, cov = [np.amax(hist), hist_mean, hist_std], np.full((3, 3), np.nan)
return fit, cov
def fit_residuals_vs_position(hist, xedges, yedges, mean, count, limit=None):
xcenter = (xedges[1:] + xedges[:-1]) / 2.0
select = (count > 0)
if limit is None:
n_hits_threshold = np.percentile(count[select], 100.0 - 68.0)
select &= (count > n_hits_threshold)
else:
limit_left = limit[0]
limit_right = limit[1]
if np.isfinite(limit_left):
try:
select_left = np.where(xcenter >= limit_left)[0][0]
except IndexError:
select_left = 0
else:
select_left = 0
if np.isfinite(limit_right):
try:
select_right = np.where(xcenter <= limit_right)[0][-1] + 1
except IndexError:
select_right = select.shape[0]
else:
select_right = select.shape[0]
select_range = np.zeros_like(select)
select_range[select_left:select_right] = 1
select &= select_range
# n_hits_threshold = np.median(count[select]) * 0.1
# select &= (count > n_hits_threshold)
mean_fit = []
for index in range(hist.shape[0]):
if np.sum(hist[index, :]) == 0:
mean_fit.append(np.nan)
else:
mean_fit.append(fit_residuals(hist[index, :].astype(np.int32), yedges)[0][1])
select &= np.isfinite(mean_fit)
mean_fit = np.ma.masked_invalid(mean_fit)
if np.count_nonzero(select) > 1:
y_rel_err = np.sum(count[select]) / count[select]
fit, cov = curve_fit(linear, xcenter[select], mean_fit[select], p0=[0.0, 0.0], sigma=y_rel_err, absolute_sigma=False)
else:
fit, cov = [np.nan, np.nan], [[np.nan, np.nan], [np.nan, np.nan]]
return fit, cov, select, mean_fit
def hough_transform(img, theta_res=1.0, rho_res=1.0, return_edges=False):
thetas = np.linspace(-90.0, 0.0, int(np.ceil(90.0 / theta_res)) + 1)
thetas = np.concatenate((thetas, -thetas[len(thetas) - 2::-1]))
thetas = np.deg2rad(thetas)
width, height = img.shape
diag_len = np.sqrt((width - 1)**2 + (height - 1)**2)
q = np.ceil(diag_len / rho_res)
nrhos = 2 * q + 1
rhos = np.linspace(-q * rho_res, q * rho_res, int(nrhos))
cos_t = np.cos(thetas)
sin_t = np.sin(thetas)
accumulator = np.zeros((rhos.size, thetas.size), dtype=np.int32)
y_idxs, x_idxs = np.nonzero(img)
@njit
def loop(accumulator, x_idxs, y_idxs, thetas, rhos, sin_t, cos_t):
for i in range(len(x_idxs)):
x = x_idxs[i]
y = y_idxs[i]
for theta_idx in range(thetas.size):
# rho_idx = np.around(x * cos_t[theta_idx] + y * sin_t[theta_idx]) + diag_len
rhoVal = x * cos_t[theta_idx] + y * sin_t[theta_idx]
rho_idx = (np.abs(rhos - rhoVal)).argmin()
accumulator[rho_idx, theta_idx] += 1
loop(accumulator, x_idxs, y_idxs, thetas, rhos, sin_t, cos_t)
if return_edges:
thetas_diff = thetas[1] - thetas[0]
thetas_edges = (thetas[1:] + thetas[:-1]) / 2.0
theta_edges = np.r_[thetas_edges[0] - thetas_diff, thetas_edges, thetas_edges[-1] + thetas_diff]
rho_diff = rhos[1] - rhos[0]
rho_edges = (rhos[1:] + rhos[:-1]) / 2.0
rho_edges = np.r_[rho_edges[0] - rho_diff, rho_edges, rho_edges[-1] + rho_diff]
return accumulator, thetas, rhos, theta_edges, rho_edges # return histogram, bin centers, edges
else:
return accumulator, thetas, rhos # return histogram and bin centers
def binned_statistic(x, values, func, nbins, range):
'''The usage is approximately the same as the scipy one.
See: https://stackoverflow.com/questions/26783719/efficiently-get-indices-of-histogram-bins-in-python
'''
N = len(values)
r0, r1 = range
digitized = (float(nbins) / (r1 - r0) * (x - r0)).astype(int)
S = csr_matrix((values, [digitized, np.arange(N)]), shape=(nbins, N))
return [func(group) for group in np.split(S.data, S.indptr[1:-1])]
@njit(numba.uint64(numba.uint32, numba.uint32))
def xy2d_morton(x, y):
'''Tuple to number.
See: https://stackoverflow.com/questions/30539347/2d-morton-code-encode-decode-64bits
'''
x = (x | (x << 16)) & 0x0000FFFF0000FFFF
x = (x | (x << 8)) & 0x00FF00FF00FF00FF
x = (x | (x << 4)) & 0x0F0F0F0F0F0F0F0F
x = (x | (x << 2)) & 0x3333333333333333
x = (x | (x << 1)) & 0x5555555555555555
y = (y | (y << 16)) & 0x0000FFFF0000FFFF
y = (y | (y << 8)) & 0x00FF00FF00FF00FF
y = (y | (y << 4)) & 0x0F0F0F0F0F0F0F0F
y = (y | (y << 2)) & 0x3333333333333333
y = (y | (y << 1)) & 0x5555555555555555
return x | (y << 1)
@njit(numba.uint64(numba.uint64,))
def morton_1(x):
x = x & 0x5555555555555555
x = (x | (x >> 1)) & 0x3333333333333333
x = (x | (x >> 2)) & 0x0F0F0F0F0F0F0F0F
x = (x | (x >> 4)) & 0x00FF00FF00FF00FF
x = (x | (x >> 8)) & 0x0000FFFF0000FFFF
x = (x | (x >> 16)) & 0xFFFFFFFFFFFFFFFF # TODO: 0x00000000FFFFFFFF
return x
@njit((numba.uint64,))
def d2xy_morton(d):
'''Number to tuple.
See: https://stackoverflow.com/questions/30539347/2d-morton-code-encode-decode-64bits
'''
return morton_1(d), morton_1(d >> 1)
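# Illustrative usage sketch (added for clarity, not part of the original module).
# Encoding a coordinate pair and decoding it again returns the original tuple.
def _example_morton_roundtrip():
    d = xy2d_morton(3, 5)
    return d2xy_morton(d)  # expected: (3, 5)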
@njit(locals={'cluster_shape': numba.uint64})
def calculate_cluster_shape(cluster_array):
'''Boolean 8x8 array to number.
'''
cluster_shape = 0
indices_x, indices_y = np.nonzero(cluster_array)
for index in np.arange(indices_x.size):
cluster_shape += 2**xy2d_morton(indices_x[index], indices_y[index])
return cluster_shape
@njit((numba.uint64,), locals={'val': numba.uint64})
def calculate_cluster_array(cluster_shape):
'''Number to boolean 8x8 array.
'''
cluster_array = np.zeros((8, 8), dtype=np.bool_)
for i in np.arange(63, -1, -1):
val = 2**i
if val <= cluster_shape:
x, y = d2xy_morton(i)
cluster_array[x, y] = 1
cluster_shape -= val
return cluster_array
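# Illustrative usage sketch (added for clarity, not part of the original module).
# A small L-shaped cluster is encoded into a number and decoded back into the same 8x8 array.
def _example_cluster_shape_roundtrip():
    import numpy as np
    cluster_array = np.zeros((8, 8), dtype=np.bool_)
    cluster_array[0, 0] = cluster_array[1, 0] = cluster_array[1, 1] = True
    cluster_shape = calculate_cluster_shape(cluster_array)
    return cluster_shape, calculate_cluster_array(cluster_shape)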
def number_of_set_bits(val):
'''Calculate the number of set bits
See:
- https://stackoverflow.com/questions/9829578/fast-way-of-counting-non-zero-bits-in-positive-integer
- https://stackoverflow.com/questions/109023/how-to-count-the-number-of-set-bits-in-a-32-bit-integer#109025
'''
val = val - ((val >> 1) & 0x55555555)
val = (val & 0x33333333) + ((val >> 2) & 0x33333333)
return (((val + (val >> 4) & 0xF0F0F0F) * 0x1010101) & 0xffffffff) >> 24
def voronoi_finite_polygons_2d(points, dut_extent=None):
"""
Reconstruct infinite voronoi regions in a 2D diagram to finite
regions.
Useful links:
- https://stackoverflow.com/questions/34968838/python-finite-boundary-voronoi-cells
- https://stackoverflow.com/questions/20515554/colorize-voronoi-diagram
Parameters
----------
points : ndarray of floats
Coordinates of points to construct a convex hull from.
dut_extent : list
Boundary of the Voronoi diagram.
If None, remove all Voronoi infinite regions.
Returns
-------
points : ndarray of floats
Coordinates of points to construct a convex hull from (including mirrored points).
regions : list of list of ints
Indices of vertices in each revised Voronoi regions.
ridge_vertices : list of list of ints
Indices of vertices of each revised Voronoi ridge.
vertices : ndarray of doubles
Coordinates for revised Voronoi vertices.
"""
if len(points.shape) != 2 or points.shape[1] != 2:
raise ValueError("Requires 2D voronoi data")
vor = Voronoi(points, incremental=True)
n_points = vor.points.shape[0]
# select Voronoi regions with invalid ("infinite") vertices and vertices outside the boundary
vertices_outside_sel = ((vor.vertices[:, 0] >= max(dut_extent[:2])) & ~np.isclose(vor.vertices[:, 0], max(dut_extent[:2])))
vertices_outside_sel |= ((vor.vertices[:, 0] <= min(dut_extent[:2])) & ~np.isclose(vor.vertices[:, 0], min(dut_extent[:2])))
vertices_outside_sel |= ((vor.vertices[:, 1] <= min(dut_extent[2:])) & ~np.isclose(vor.vertices[:, 1], min(dut_extent[2:])))
vertices_outside_sel |= ((vor.vertices[:, 1] >= max(dut_extent[2:])) & ~np.isclose(vor.vertices[:, 1], max(dut_extent[2:])))
vertices_indices_outside = np.where(vertices_outside_sel)
length = len(sorted(vor.regions, key=len, reverse=True)[0])
vor_regions = np.array([arr + [vor.vertices.shape[0]] * (length - len(arr)) for arr in vor.regions])
regions_with_vertex_outside_sel = np.any(vor_regions == -1, axis=1)
regions_with_vertex_outside_sel |= np.any(np.isin(vor_regions, vertices_indices_outside), axis=1)
regions_indices_with_vertex_outside = np.where(regions_with_vertex_outside_sel)[0]
points_indices_with_vertex_outside = np.in1d(vor.point_region, regions_indices_with_vertex_outside)
points_with_vertex_outside = vor.points[points_indices_with_vertex_outside]
# generate mirrored points at the boundary
points_left = points_with_vertex_outside.copy()
points_left[:, 0] -= (max(dut_extent[:2]) + min(dut_extent[:2])) / 2.0
points_left[:, 0] *= -1
points_left[:, 0] += (max(dut_extent[:2]) + min(dut_extent[:2])) / 2.0 - np.ptp(dut_extent[:2])
points_right = points_with_vertex_outside.copy()
points_right[:, 0] -= (max(dut_extent[:2]) + min(dut_extent[:2])) / 2.0
points_right[:, 0] *= -1
points_right[:, 0] += (max(dut_extent[:2]) + min(dut_extent[:2])) / 2.0 + np.ptp(dut_extent[:2])
points_up = points_with_vertex_outside.copy()
points_up[:, 1] -= (max(dut_extent[2:]) + min(dut_extent[2:])) / 2.0
points_up[:, 1] *= -1
points_up[:, 1] += (max(dut_extent[2:]) + min(dut_extent[2:])) / 2.0 + np.ptp(dut_extent[2:])
points_down = points_with_vertex_outside.copy()
points_down[:, 1] -= (max(dut_extent[2:]) + min(dut_extent[2:])) / 2.0
points_down[:, 1] *= -1
points_down[:, 1] += (max(dut_extent[2:]) + min(dut_extent[2:])) / 2.0 - np.ptp(dut_extent[2:])
# adding the points and generate new Voronoi regions
mirrored_points = np.concatenate((points_up, points_down, points_left, points_right))
vor.add_points(mirrored_points)
new_regions_indices = vor.point_region[:n_points]
# select Voronoi regions with valid vertices and vertices inside the boundary
vertices_inside_sel = ((vor.vertices[:, 0] <= max(dut_extent[:2])) | np.isclose(vor.vertices[:, 0], max(dut_extent[:2])))
vertices_inside_sel &= ((vor.vertices[:, 0] >= min(dut_extent[:2])) | np.isclose(vor.vertices[:, 0], min(dut_extent[:2])))
vertices_inside_sel &= ((vor.vertices[:, 1] >= min(dut_extent[2:])) | np.isclose(vor.vertices[:, 1], min(dut_extent[2:])))
vertices_inside_sel &= ((vor.vertices[:, 1] <= max(dut_extent[2:])) | np.isclose(vor.vertices[:, 1], max(dut_extent[2:])))
vertices_indices_inside = np.where(vertices_inside_sel)
vor_ridge_vertices = np.array(vor.ridge_vertices)
ridge_vertices_with_vertex_inside_sel = np.all(vor_ridge_vertices != -1.0, axis=1)
ridge_vertices_with_vertex_inside_sel &= np.all(np.isin(vor_ridge_vertices, vertices_indices_inside), axis=1)
ridge_vertices_indices_with_vertex_inside = np.where(ridge_vertices_with_vertex_inside_sel)[0]
# TODO: remove not used vertices and update indices lists
return vor.points, np.array(vor.regions)[new_regions_indices].tolist(), vor_ridge_vertices[ridge_vertices_indices_with_vertex_inside].tolist(), vor.vertices
def polygon_area(x, y):
''' Calculate the polygon area using the shoelace formula (Gaussian trapezoid formula, "Gaußsche Trapezformel").
Parameters
----------
x : list
X coordinates of the polygon.
y : list
Y coordinates of the polygon.
Returns
-------
Area of the polygon.
Note: Points must be provided in clockwise/counter clockwise order.
See: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
'''
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
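# Illustrative usage sketch (added for clarity, not part of the original module).
# The unit square, given in counter-clockwise order, has area 1.
def _example_polygon_area():
    import numpy as np
    x = np.array([0.0, 1.0, 1.0, 0.0])
    y = np.array([0.0, 0.0, 1.0, 1.0])
    return polygon_area(x, y)  # expected: 1.0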
def polygon_area_multi(x, y):
''' Calculate the area of multiple polygons.
'''
return 0.5 * np.abs(np.einsum('ik,ik->i', x, np.roll(y, 1, axis=1)) - np.einsum('ik,ik->i', y, np.roll(x, 1, axis=1)))
def in1d_index(ar1, ar2, fill_invalid=None, assume_sorted=False):
''' Return indices of ar1 that overlap with ar2 and the indices of ar2 that occur in ar1.
alternative implementation (only works if both ar1 and ar2 are unique!):
def overlap(ar1, ar2):
bool_ar1 = np.in1d(ar1, ar2)
ind_ar1 = np.arange(len(ar1))
ind_ar1 = ind_ar1[bool_ar1]
ind_ar2 = np.array([np.argwhere(ar2 == ar1[x]) for x in ind_ar1]).flatten()
return ind_ar1, ind_ar2
Parameters
----------
ar1 : array_like
Input array.
ar2 : array_like
The values against which to test each value of ar1.
fill_invalid : int
If a value is given, in1d_index has the same length as ar1
and invalid positions are filled with the given value.
assume_sorted : bool
If True, assume sorted ar2.
Returns
-------
in1d_valid : ndarray
The indices of ar1 overlapping with ar2.
in1d_index : ndarray
The indices of ar2 that occur in ar1.
'''
if assume_sorted:
ar2_index = np.searchsorted(ar2, ar1)
else:
ar2_sorter = np.argsort(ar2)
ar2_sorted = ar2[ar2_sorter]
ar2_sorted_index = np.searchsorted(ar2_sorted, ar1)
# Remove invalid indices
ar2_sorted_index[ar2_sorted_index >= ar2.shape[0]] = ar2.shape[0] - 1
# Go back into the original index
ar2_index = ar2_sorter[ar2_sorted_index]
if fill_invalid is None:
valid = ar2.take(ar2_index, mode='clip') == ar1
return np.where(valid)[0], ar2_index[valid]
else:
invalid = ar2.take(ar2_index, mode='clip') != ar1
ar2_index[invalid] = fill_invalid
return np.where(~invalid)[0], ar2_index
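# Illustrative usage sketch (added for clarity, not part of the original module);
# the toy arrays are assumptions.
def _example_in1d_index():
    import numpy as np
    ar1 = np.array([1, 3, 5, 7])
    ar2 = np.array([3, 4, 7, 9])
    return in1d_index(ar1, ar2, assume_sorted=True)  # expected: (array([1, 3]), array([0, 2]))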
@njit
def unique_loop(mask, N, A, p, count):
for j in range(N):
mask[:] = True
mask[A[0, j]] = False
c = 1
for i in range(1, p):
if mask[A[i, j]]:
c += 1
mask[A[i, j]] = False
count[j] = c
return count
def unique_in_array(arr):
''' Return number of unique values along axis 0.
See:
- https://stackoverflow.com/questions/46893369/count-unique-elements-along-an-axis-of-a-numpy-array
- https://stackoverflow.com/questions/5551286/filling-gaps-in-a-numpy-array
'''
p, m, n = arr.shape
arr.shape = (-1, m * n)
maxn = arr.max() + 1
N = arr.shape[1]
mask = np.empty(maxn, dtype=bool)
count = np.empty(N, dtype=int)
arr_out = unique_loop(mask, N, arr, p, count).reshape(m, n)
arr.shape = (-1, m, n)
return arr_out
def count_unique_neighbors(arr, structure=None):
''' Return number of unique neighbors (vertical, horizontal and diagonal) in a given 2D array.
Parameters
----------
data : ndarray
2D array.
structure : ndarray
A 2D structuring element that defines the neighbors. The structure
must be symmetric.
If None, the structuring element is [[1,1,1], [1,1,1], [1,1,1]].
Returns
-------
Array with number of unique neighbor elements.
See:
- https://stackoverflow.com/questions/48248773/numpy-counting-unique-neighbours-in-2d-array
- https://stackoverflow.com/questions/25169997/how-to-count-adjacent-elements-in-a-3d-numpy-array-efficiently
- https://stackoverflow.com/questions/41550979/fill-holes-with-majority-of-surrounding-values-python
'''
if structure is None:
structure = np.ones((3, 3), dtype=np.bool)
else:
structure = np.array(structure, dtype=np.bool)
if len(structure.shape) != 2:
raise ValueError('Structure must be a 2D array')
if structure.shape[0] != structure.shape[1]:
raise ValueError('Structure must be symmetrical')
if structure.shape[0] % 2 == 0:
raise ValueError('Structure shape must be odd')
selected_indices = np.column_stack(np.where(structure))
extent = int(structure.shape[0] / 2)
selected_indices -= extent
a = np.pad(arr, (extent, extent), mode='reflect')
selected_arrays = []
for selected_index in selected_indices:
selected_arrays.append(a[extent + selected_index[0]:a.shape[0] - extent + selected_index[0], extent + selected_index[1]:a.shape[1] - extent + selected_index[1]])
return unique_in_array(np.array(selected_arrays))
def fill(arr, invalid=None):
'''
Replace the value of invalid data cells by the value of the nearest valid data cell.
See: https://stackoverflow.com/questions/3662361/fill-in-missing-values-with-nearest-neighbour-in-python-numpy-masked-arrays
Parameters
----------
arr : ndarray
Array of any dimension.
invalid : ndarray
Boolean array of the same shape as data where True indicates data cells which are to be replaced.
If None (default), generate the array by assuming invalid values to be nan.
Returns
-------
Array with filled data cells.
'''
if invalid is None:
invalid = np.isnan(arr)
else:
invalid = np.array(invalid, dtype=np.bool)
fill_indices = distance_transform_edt(invalid, return_distances=False, return_indices=True)
return arr[tuple(fill_indices)]
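# Illustrative usage sketch (added for clarity, not part of the original module).
# NaN cells are replaced by the value of the nearest valid cell.
def _example_fill():
    import numpy as np
    arr = np.array([1.0, np.nan, np.nan, 4.0])
    return fill(arr)  # expected: [1., 1., 4., 4.]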
def get_data(path, output=None, fail_on_overwrite=False):
''' Downloads data (e.g. for examples, fixtures).
Uses data in a public Sciebo folder. If you want
write access, contact the maintainer.
Parameters
----------
path : string
File path with name. Location on online folder.
output : string, None
File path with name. Location where to store data.
If None, the path variable is used.
fail_on_overwrite : Bool
If the file already exists, the download is skipped.
If fail_on_overwrite is True, a RuntimeError is raised instead.
'''
def download_scibo(public_secret, path, filename):
folder = os.path.dirname(path)
name = os.path.basename(path)
url = "https://uni-bonn.sciebo.de/index.php/s/"
url += public_secret + '/download?path=%2F'
url += folder + '&files='
url += name
logging.info('Downloading %s' % name)
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-Length'])
with open(filename, 'wb') as f:
pbar = tqdm(total=file_size, ncols=80)
for i, chunk in enumerate(r.iter_content(32 * 1024)):
f.write(chunk)
pbar.update(len(chunk))
pbar.close()
if not output:
output = os.path.basename(path)
output_path = os.path.dirname(os.path.realpath(path))
else:
output_path = os.path.dirname(os.path.realpath(output))
if not os.path.isfile(os.path.join(output_path, output)):
# Create output folder
if not os.path.exists(output_path):
try:
os.makedirs(output_path)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
download_scibo(public_secret=SCIBO_PUBLIC_FOLDER,
path=path,
filename=os.path.join(output_path, output))
elif fail_on_overwrite:
raise RuntimeError('The file %s already exists' % output)
return os.path.join(output_path, output)
|
the-stack_0_26413
|
A, K = map(int, input().split())
n = 2 * 10 ** 12
if K == 0:
print(n - A)
else:
result = 0
t = A
while t < n:
t += 1 + K * t
result += 1
print(result)
|
the-stack_0_26414
|
# -*- coding: utf-8 -*-
"""Pype terminal animation."""
import blessed
from pathlib import Path
from time import sleep
NO_TERMINAL = False
try:
term = blessed.Terminal()
except AttributeError:
# this happens when blessed cannot find proper terminal.
# If so, skip printing ascii art animation.
NO_TERMINAL = True
def play_animation():
"""Play ASCII art Pype animation."""
if NO_TERMINAL:
return
print(term.home + term.clear)
frame_size = 7
splash_file = Path(__file__).parent / "splash.txt"
with splash_file.open("r") as sf:
animation = sf.readlines()
animation_length = int(len(animation) / frame_size)
current_frame = 0
for _ in range(animation_length):
frame = "".join(
scanline
for y, scanline in enumerate(
animation[current_frame : current_frame + frame_size]
)
)
with term.location(0, 0):
# term.aquamarine3_bold(frame)
print(f"{term.bold}{term.aquamarine3}{frame}{term.normal}")
sleep(0.02)
current_frame += frame_size
print(term.move_y(7))
|
the-stack_0_26415
|
from tests.abstrac_api_test import AbstractTestApiDocReader
class TestApiDocReader(AbstractTestApiDocReader):
def __check_content(self, tables):
self.assertEqual(1, len(tables))
table1 = tables[0]
rows1 = table1["cells"]
self.assertEqual("1", rows1[0][0])
self.assertEqual("2", rows1[0][1])
self.assertEqual('3', rows1[0][2])
self.assertEqual("2", rows1[1][0])
self.assertEqual("1", rows1[1][1])
self.assertEqual("5", rows1[1][2])
self.assertEqual("5", rows1[2][0])
self.assertEqual("3", rows1[2][1])
self.assertEqual("1", rows1[2][2])
def test_csv_books2(self):
file_name = "books_2.csv"
result = self._send_request(file_name)
tables = result["content"]["tables"]
table = tables[0]["cells"]
self.assertListEqual(['0553573403', 'book', "A Game of Throne, kings and other stuff",
'7.99', 'True', 'George R.R. Martin', 'A Song of Ice and Fire', '1', 'fantasy'],
table[1])
self.assertListEqual(
["0553579908", "book", 'A Clash of "Kings"', '7.99', 'True', 'George R.R. Martin',
'A Song of Ice and Fire', '2', 'fantasy'], table[2])
def test_csv(self):
file_name = "csv_coma.csv"
result = self._send_request(file_name)
tables = result["content"]["tables"]
self.__check_content(tables)
def test_csv_semicolon(self):
file_name = "csv_semicolon.csv"
result = self._send_request(file_name, dict(delimiter=";"))
tables = result["content"]["tables"]
self.__check_content(tables)
|
the-stack_0_26416
|
import asyncio
import sys
import thriftpy2 as thriftpy
from aiothrift.server import create_server
pingpong_thrift = thriftpy.load("tests/test.thrift", module_name="test_thrift")
@asyncio.coroutine
def _add(a, b):
yield from asyncio.sleep(0)
return a + b
class Dispatcher:
def ping(self):
return "pong"
@asyncio.coroutine
def add(self, a, b):
result = yield from _add(a, b)
return result
def address(self, name):
return "address " + name
loop = asyncio.get_event_loop()
server = loop.run_until_complete(
create_server(
pingpong_thrift.Test, Dispatcher(), ("127.0.0.1", 6000), loop=loop, timeout=10
)
)
print("server is listening on host {} and port {}".format("127.0.0.1", 6000))
sys.stdout.flush()
try:
loop.run_forever()
except KeyboardInterrupt:
pass
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
|
the-stack_0_26417
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getblockfilter RPC."""
from test_framework.test_framework import BitcoinpaythroughTestFramework
from test_framework.util import (
assert_equal, assert_is_hex_string, assert_raises_rpc_error,
connect_nodes, disconnect_nodes, sync_blocks
)
FILTER_TYPES = ["basic"]
class GetBlockFilterTest(BitcoinpaythroughTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-blockfilterindex"], []]
def run_test(self):
# Create two chains by disconnecting nodes 0 & 1, mining, then reconnecting
disconnect_nodes(self.nodes[0], 1)
self.nodes[0].generate(3)
self.nodes[1].generate(4)
assert_equal(self.nodes[0].getblockcount(), 3)
chain0_hashes = [self.nodes[0].getblockhash(block_height) for block_height in range(4)]
# Reorg node 0 to a new chain
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getblockcount(), 4)
chain1_hashes = [self.nodes[0].getblockhash(block_height) for block_height in range(4)]
# Test getblockfilter returns a filter for all blocks and filter types on active chain
for block_hash in chain1_hashes:
for filter_type in FILTER_TYPES:
result = self.nodes[0].getblockfilter(block_hash, filter_type)
assert_is_hex_string(result['filter'])
# Test getblockfilter returns a filter for all blocks and filter types on stale chain
for block_hash in chain0_hashes:
for filter_type in FILTER_TYPES:
result = self.nodes[0].getblockfilter(block_hash, filter_type)
assert_is_hex_string(result['filter'])
# Test getblockfilter with unknown block
bad_block_hash = "0123456789abcdef" * 4
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockfilter, bad_block_hash, "basic")
# Test getblockfilter with undefined filter type
genesis_hash = self.nodes[0].getblockhash(0)
assert_raises_rpc_error(-5, "Unknown filtertype", self.nodes[0].getblockfilter, genesis_hash, "unknown")
if __name__ == '__main__':
GetBlockFilterTest().main()
|
the-stack_0_26423
|
# Handbrake processing of dvd/bluray
import sys
import os
import logging
import subprocess
import re
import shlex
from arm.ripper import utils
# from arm.config.config import cfg
from arm.models.models import Track # noqa: E402
from arm.ui import app, db # noqa E402
# flake8: noqa: W605
def handbrake_mainfeature(srcpath, basepath, logfile, job):
"""process dvd with mainfeature enabled.\n
srcpath = Path to source for HB (dvd or files)\n
basepath = Path where HB will save trancoded files\n
logfile = Logfile for HB to redirect output to\n
job = Job object\n
Returns nothing
"""
logging.info("Starting DVD Movie Mainfeature processing")
logging.debug("Handbrake starting: " + str(job))
filename = os.path.join(basepath, job.title + "." + job.config.DEST_EXT)
filepathname = os.path.join(basepath, filename)
logging.info("Ripping title Mainfeature to " + shlex.quote(filepathname))
get_track_info(srcpath, job)
track = job.tracks.filter_by(main_feature=True).first()
if track is None:
msg = "No main feature found by Handbrake. Turn MAINFEATURE to false in arm.yml and try again."
logging.error(msg)
raise RuntimeError(msg)
track.filename = track.orig_filename = filename
db.session.commit()
if job.disctype == "dvd":
hb_args = job.config.HB_ARGS_DVD
hb_preset = job.config.HB_PRESET_DVD
elif job.disctype == "bluray":
hb_args = job.config.HB_ARGS_BD
hb_preset = job.config.HB_PRESET_BD
cmd = 'nice {0} -i {1} -o {2} --main-feature --preset "{3}" {4} >> {5} 2>&1'.format(
job.config.HANDBRAKE_CLI,
shlex.quote(srcpath),
shlex.quote(filepathname),
hb_preset,
hb_args,
logfile
)
logging.debug("Sending command: %s", (cmd))
try:
subprocess.check_output(
cmd,
shell=True
).decode("utf-8")
logging.info("Handbrake call successful")
track.status = "success"
except subprocess.CalledProcessError as hb_error:
err = "Call to handbrake failed with code: " + str(hb_error.returncode) + "(" + str(hb_error.output) + ")"
logging.error(err)
track.status = "fail"
track.error = err
sys.exit(err)
logging.info("Handbrake processing complete")
logging.debug(str(job))
track.ripped = True
db.session.commit()
return
def handbrake_all(srcpath, basepath, logfile, job):
"""Process all titles on the dvd\n
srcpath = Path to source for HB (dvd or files)\n
basepath = Path where HB will save transcoded files\n
logfile = Logfile for HB to redirect output to\n
job = Disc object\n
Returns nothing
"""
logging.info("Starting BluRay/DVD transcoding - All titles")
if job.disctype == "dvd":
hb_args = job.config.HB_ARGS_DVD
hb_preset = job.config.HB_PRESET_DVD
elif job.disctype == "bluray":
hb_args = job.config.HB_ARGS_BD
hb_preset = job.config.HB_PRESET_BD
get_track_info(srcpath, job)
logging.debug("Total number of tracks is " + str(job.no_of_titles))
for track in job.tracks:
if track.length < int(job.config.MINLENGTH):
# too short
logging.info("Track #" + str(track.track_number) + " of " + str(job.no_of_titles) + ". Length (" + str(track.length) +
") is less than minimum length (" + job.config.MINLENGTH + "). Skipping")
elif track.length > int(job.config.MAXLENGTH):
# too long
logging.info("Track #" + str(track.track_number) + " of " + str(job.no_of_titles) + ". Length (" + str(track.length) +
") is greater than maximum length (" + job.config.MAXLENGTH + "). Skipping")
else:
# just right
logging.info("Processing track #" + str(track.track_number) + " of " + str(job.no_of_titles) + ". Length is " + str(track.length) + " seconds.")
filename = "title_" + str.zfill(str(track.track_number), 2) + "." + job.config.DEST_EXT
filepathname = os.path.join(basepath, filename)
logging.info("Transcoding title " + str(track.track_number) + " to " + shlex.quote(filepathname))
track.filename = track.orig_filename = filename
db.session.commit()
cmd = 'nice {0} -i {1} -o {2} --preset "{3}" -t {4} {5}>> {6} 2>&1'.format(
job.config.HANDBRAKE_CLI,
shlex.quote(srcpath),
shlex.quote(filepathname),
hb_preset,
str(track.track_number),
hb_args,
logfile
)
logging.debug("Sending command: %s", (cmd))
try:
hb = subprocess.check_output(
cmd,
shell=True
).decode("utf-8")
logging.debug("Handbrake exit code: " + hb)
track.status = "success"
except subprocess.CalledProcessError as hb_error:
err = "Handbrake encoding of title " + str(track.track_number) + " failed with code: " + str(hb_error.returncode) + "(" + str(hb_error.output) + ")" # noqa E501
logging.error(err)
track.status = "fail"
track.error = err
# return
# sys.exit(err)
track.ripped = True
db.session.commit()
logging.info("Handbrake processing complete")
logging.debug(str(job))
return
def handbrake_mkv(srcpath, basepath, logfile, job):
"""process all mkv files in a directory.\n
srcpath = Path to source for HB (dvd or files)\n
basepath = Path where HB will save transcoded files\n
logfile = Logfile for HB to redirect output to\n
job = Disc object\n
Returns nothing
"""
if job.disctype == "dvd":
hb_args = job.config.HB_ARGS_DVD
hb_preset = job.config.HB_PRESET_DVD
elif job.disctype == "bluray":
hb_args = job.config.HB_ARGS_BD
hb_preset = job.config.HB_PRESET_BD
for f in os.listdir(srcpath):
srcpathname = os.path.join(srcpath, f)
destfile = os.path.splitext(f)[0]
filename = os.path.join(basepath, destfile + "." + job.config.DEST_EXT)
filepathname = os.path.join(basepath, filename)
logging.info("Transcoding file " + shlex.quote(f) + " to " + shlex.quote(filepathname))
cmd = 'nice {0} -i {1} -o {2} --preset "{3}" {4}>> {5} 2>&1'.format(
job.config.HANDBRAKE_CLI,
shlex.quote(srcpathname),
shlex.quote(filepathname),
hb_preset,
hb_args,
logfile
)
logging.debug("Sending command: %s", (cmd))
try:
hb = subprocess.check_output(
cmd,
shell=True
).decode("utf-8")
logging.debug("Handbrake exit code: " + hb)
except subprocess.CalledProcessError as hb_error:
err = "Handbrake encoding of file " + shlex.quote(f) + " failed with code: " + str(hb_error.returncode) + "(" + str(hb_error.output) + ")"
logging.error(err)
# job.errors.append(f)
logging.info("Handbrake processing complete")
logging.debug(str(job))
return
def get_track_info(srcpath, job):
"""Use HandBrake to get track info and updatte Track class\n
srcpath = Path to disc\n
job = Job instance\n
"""
logging.info("Using HandBrake to get information on all the tracks on the disc. This will take a few minutes...")
cmd = '{0} -i {1} -t 0 --scan'.format(
job.config.HANDBRAKE_CLI,
shlex.quote(srcpath)
)
logging.debug("Sending command: %s", (cmd))
try:
hb = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
shell=True
).decode('cp437').splitlines()
except subprocess.CalledProcessError as hb_error:
logging.error("Couldn't find a valid track. Try running the command manually to see more specific errors.")
logging.error("Specifid error is: " + str(hb_error.returncode) + "(" + str(hb_error.output) + ")")
return(-1)
# sys.exit(err)
t_pattern = re.compile(r'.*\+ title *')
pattern = re.compile(r'.*duration\:.*')
seconds = 0
t_no = 0
fps = float(0)
aspect = 0
result = None
mainfeature = False
for line in hb:
# get number of titles
if result is None:
if job.disctype == "bluray":
result = re.search('scan: BD has (.*) title\(s\)', line)
else:
result = re.search('scan: DVD has (.*) title\(s\)', line)
if result:
titles = result.group(1)
titles = titles.strip()
logging.debug("Line found is: " + line)
logging.info("Found " + titles + " titles")
job.no_of_titles = titles
db.session.commit()
if(re.search(t_pattern, line)) is not None:
if t_no == 0:
pass
else:
utils.put_track(job, t_no, seconds, aspect, fps, mainfeature, "handbrake")
mainfeature = False
t_no = line.rsplit(' ', 1)[-1]
t_no = t_no.replace(":", "")
if(re.search(pattern, line)) is not None:
t = line.split()
h, m, s = t[2].split(':')
seconds = int(h) * 3600 + int(m) * 60 + int(s)
if(re.search("Main Feature", line)) is not None:
mainfeature = True
if(re.search(" fps", line)) is not None:
fps = line.rsplit(' ', 2)[-2]
aspect = line.rsplit(' ', 3)[-3]
aspect = str(aspect).replace(",", "")
utils.put_track(job, t_no, seconds, aspect, fps, mainfeature, "handbrake")
|
the-stack_0_26425
|
#!/usr/bin/env python
from __future__ import print_function
from tlslite import HTTPTLSConnection, HandshakeSettings
settings = HandshakeSettings()
settings.useExperimentalTackExtension = True
h = HTTPTLSConnection("localhost", 4443, settings=settings)
h.request("GET", "/index.html")
r = h.getresponse()
print(r.read())
|
the-stack_0_26428
|
import time
import numpy as np
import struct
from empyric.adapters import *
from empyric.collection.instrument import *
class SRSRGA(Instrument):
"""
SRS Residual Gas Analyzer, a quadrupole mass spectrometer with mass ranges from 100 to 300 amu
"""
name = 'SRS-RGA'
supported_adapters = (
(Serial, {'baud_rate': 28800, 'timeout': 300, 'read_termination': '\n\r'}),
)
knobs = (
        'initialize',
'filament current',
'mass',
'masses',
'mass range',
'ppsf', # partial pressure sensitivity factor
'tpsf' # total pressure sensitivity factor
)
presets = {
'filament current': 1
}
postsets = {
'filament current': 0
}
meters = {
'filament current',
'single',
'spectrum',
'total pressure'
}
def __init__(self, *args, **kwargs):
Instrument.__init__(self, *args, **kwargs)
@setter
def set_filament_current(self, current):
# current is in mA
if current >= 3.5:
self.query('FL3.5')
elif current <= 0:
self.query('FL0')
else:
self.query('FL'+f'{np.round(float(current), 2)}')
time.sleep(5)
@measurer
def measure_filament_current(self):
return float(self.query('FL?'))
@setter
def set_mass(self, mass):
self.write('ML' + f'{int(mass)}')
@setter
def set_masses(self, masses):
initial_mass, final_mass = masses[0], masses[-1]
self.write('MI' + f'{int(initial_mass)}')
self.write('MF' + f'{int(final_mass)}')
self.mass_range = [initial_mass, final_mass]
@getter
def get_masses(self):
initial_mass = int(self.query('MI?'))
final_mass = int(self.query('MF?'))
self.mass_range = [initial_mass, final_mass]
return np.arange(initial_mass, final_mass + 1)
@setter
def set_mass_range(self, mass_range):
initial_mass, final_mass = mass_range
self.write('MI' + f'{int(initial_mass)}')
self.write('MF' + f'{int(final_mass)}')
self.masses = np.arange(initial_mass, final_mass + 1)
@getter
def get_mass_range(self):
initial_mass = int(self.query('MI?'))
final_mass = int(self.query('MF?'))
self.masses = np.arange(initial_mass, final_mass + 1)
return [initial_mass, final_mass]
@setter
def set_ppsf(self, value):
self.write(f'SP{value}')
@getter
def get_ppsf(self):
return float(self.query('SP?'))
@setter
def set_tpsf(self, value):
self.write(f'ST{value}')
@getter
def get_tpsf(self):
return float(self.query('ST?'))
@measurer
def measure_spectrum(self):
response = self.query('HS1', num_bytes=4*(len(self.masses)+1), decode=False)
return np.array(struct.unpack('<'+'i'*(len(self.masses)+1), response))[:-1] * 1.0e-16 / self.ppsf * 1000
@measurer
def measure_single(self):
response = self.query('MR'+f'{int(self.mass)}', num_bytes=4, decode=False)
return struct.unpack('<i', response)[0] * 1.0e-16 / self.ppsf * 1000
@measurer
def measure_total_pressure(self):
response = self.query('TP?', num_bytes=4, decode=False)
return struct.unpack('<i', response)[0] * 1.0e-16 / self.tpsf * 1000
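# Hypothetical usage sketch. The constructor/address syntax follows empyric's
# generic Instrument + Serial adapter conventions and is an assumption, not
# something defined in this file; the method calls are the ones defined above.
#   rga = SRSRGA('COM3')              # assumed serial-port style address
#   rga.set_filament_current(1.0)     # filament on at ~1 mA emission
#   rga.set_mass_range([1, 50])       # scan masses 1..50 amu
#   spectrum = rga.measure_spectrum() # one reading per mass in rga.masses
#   rga.set_filament_current(0)       # filament off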
|
the-stack_0_26431
|
magic_number = 64
DATAPARAM={
"output_dim" : 2,
#Ns
#"zsAVG" : [0.3, 0.8628, 0.95],
#"zsSTD" : [0.02853, 0.04887, 0.028]
#H0
#"zsAVG" : [0.3, 0.8628, 0.701],
#"zsSTD" : [0.02853, 0.04887, 0.05691]
"zsAVG": [2.995679839999998983e-01,8.610806619999996636e-01],
"zsSTD": [2.905168635566176411e-02,4.023372385668218254e-02]
}
Input = {
"BATCH_SIZE" : 1, #mini-batch size for training and validation
"NUM_THREADS" : 2, #number of threads to read data
"CAPACITY" : 0,
"MIN_AFTER_DEQUEUE" : 200 #the minimum number in the queue after dequeue (Min_after_dequeue and capacity together determines the shuffling of input data)
}
Input["CAPACITY"] = Input["BATCH_SIZE"]*4 + Input["MIN_AFTER_DEQUEUE"]
Input_Test = {
"BATCH_SIZE" : 64, #mini-batch size for test data
"NUM_THREADS" : 2, #number of threads to read data
"CAPACITY" : 0,
"MIN_AFTER_DEQUEUE" : 64
}
Input_Test["CAPACITY"] = Input_Test["BATCH_SIZE"]*4 + Input_Test["MIN_AFTER_DEQUEUE"]
Model = {
"REG_RATE": 0., #regularization of weights: currently set to 0 since batch_normalization has the same effect of regularization
"LEAK_PARAMETER": 0.01, #leaky parameter for leaky relu
"LEARNING_RATE" : 0.0001, #adam_optimizer to do the update.
"DROP_OUT": 0.5 #apply drop out in fully connected layer. this value gives the probabilty of keep the node.
}
RUNPARAM={
"num_epoch": 3, #each epoch means a fully pass over the data. The program might stop before running num_epoch (see next line).
"require_improvement": 20, #if with require_improvement, there is no improvement in validation error, then stop running.
"num_train":400, #total number of simulations for training
"num_val":50, #total number of simulations for validation
"num_test":49, #total number of simulations for testing
"batch_per_epoch":0,
"batch_per_epoch_val":0,
"iter_test":0
}
RUNPARAM["batch_per_epoch"] = 10 #RUNPARAM['num_train']*magic_number/Input['BATCH_SIZE']
RUNPARAM["batch_per_epoch_val"] = 10 # RUNPARAM['num_val']*magic_number/Input['BATCH_SIZE']
RUNPARAM['iter_test'] = 10 # RUNPARAM['num_test']*magic_number/Input_Test['BATCH_SIZE']
#target_dir = "new_data_2"
#main_dir = '/data1/jamesarnemann/cosmoNet/'
#target_dir = "new_data_3_param_1"
##### CHANGE THIS TO LOCAL DIRECTORY
#main_dir = "/data0/jamesarnemann/cosmoNet/"
#target_dir = "orig_paper"
#main_dir = '/lus/scratch/p02472/cosmoflow/'
#target_dir = 'new_data_3_param_2'
main_dir = '/global/cscratch1/sd/djbard/cosmoML/CosmoNet/'
target_dir = ''
#######
#main_dir = '/data0/jamesarnemann/cosmoNet/'
#target_dir = "new_data_3_param_n0"
#/data0/jamesarnemann/cosmoNet/new_data_3_param_n0/
#target_path =
result_dir = '/result/'
Path={
"init_data" : '.', #Path where the init data is
#"Model_path" : main_dir + target_dir + result_dir, #Path to save the best model where the validation error is the smallest. And then we use this model for test
"Model_path" : "." + result_dir,
"train_data" : main_dir + target_dir + '/data/train/', #path where the train data is
#"train_result" : main_dir + target_dir + result_dir, #path to store the train result
"train_result": "." + result_dir,
"val_data" : main_dir + target_dir + '/data/train/', #path where the validation data is
#"val_result" : main_dir + target_dir + result_dir, #path to st/data0/jamesarnemann/cosmoNet/' + target_dir + '/result/'ore the validation result
"val_result": "." + result_dir,
"test_data" : main_dir + target_dir + '/data/train/', #path where the test data is
#"test_result" : main_dir + target_dir + result_dir, #path to store the test result
"test_result": "." + result_dir
}
|
the-stack_0_26437
|
"""
Various utilities around voxel grids.
"""
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from deepchem.utils.noncovalent_utils import compute_pi_stack
logger = logging.getLogger(__name__)
def convert_atom_to_voxel(coordinates: np.ndarray, atom_index: int,
box_width: float, voxel_width: float) -> np.ndarray:
"""Converts atom coordinates to an i,j,k grid index.
This function offsets molecular atom coordinates by
(box_width/2, box_width/2, box_width/2) and then divides by
voxel_width to compute the voxel indices.
Parameters
-----------
coordinates: np.ndarray
Array with coordinates of all atoms in the molecule, shape (N, 3).
atom_index: int
Index of an atom in the molecule.
box_width: float
Size of the box in Angstroms.
voxel_width: float
Size of a voxel in Angstroms
Returns
-------
indices: np.ndarray
A 1D numpy array of length 3 with `[i, j, k]`, the voxel coordinates
of specified atom.
"""
indices = np.floor(
(coordinates[atom_index] + box_width / 2.0) / voxel_width).astype(int)
if ((indices < 0) | (indices >= box_width / voxel_width)).any():
logger.warning('Coordinates are outside of the box (atom id = %s,'
' coords xyz = %s, coords in box = %s' %
(atom_index, coordinates[atom_index], indices))
return indices
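# Doctest-style sketch (hypothetical coordinates): an atom at the origin of a
# 16 Angstrom box with 1 Angstrom voxels maps to the central voxel.
# >>> coords = np.array([[0.0, 0.0, 0.0]])
# >>> convert_atom_to_voxel(coords, 0, box_width=16.0, voxel_width=1.0)
# array([8, 8, 8])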
def convert_atom_pair_to_voxel(coordinates_tuple: Tuple[np.ndarray, np.ndarray],
atom_index_pair: Tuple[int, int],
box_width: float,
voxel_width: float) -> np.ndarray:
"""Converts a pair of atoms to i,j,k grid indexes.
Parameters
----------
coordinates_tuple: Tuple[np.ndarray, np.ndarray]
A tuple containing two molecular coordinate arrays of shapes `(N, 3)` and `(M, 3)`.
atom_index_pair: Tuple[int, int]
A tuple of indices for the atoms in the two molecules.
box_width: float
Size of the box in Angstroms.
voxel_width: float
Size of a voxel in Angstroms
Returns
-------
indices_list: np.ndarray
A numpy array of shape `(2, 3)`, where `3` is `[i, j, k]` of the
voxel coordinates of specified atom.
"""
indices_list = []
for coordinates, atom_index in zip(coordinates_tuple, atom_index_pair):
indices_list.append(
convert_atom_to_voxel(coordinates, atom_index, box_width, voxel_width))
return np.array(indices_list)
def voxelize(get_voxels: Callable[..., Any],
coordinates: np.ndarray,
box_width: float = 16.0,
voxel_width: float = 1.0,
hash_function: Optional[Callable[..., Any]] = None,
feature_dict: Optional[Dict[Any, Any]] = None,
feature_list: Optional[List[Union[int, Tuple[int]]]] = None,
nb_channel: int = 16,
dtype: str = 'int') -> np.ndarray:
"""Helper function to voxelize inputs.
This helper function helps convert a hash function which
specifies spatial features of a molecular complex into a voxel
tensor. This utility is used by various featurizers that generate
voxel grids.
Parameters
----------
get_voxels: Function
Function that voxelizes inputs
coordinates: np.ndarray
Contains the 3D coordinates of a molecular system.
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid in Angstroms.
hash_function: Function
Used to map feature choices to voxel channels.
feature_dict: Dict, optional (default None)
Keys are atom indices or tuples of atom indices, the values are
computed features. If `hash_function is not None`, then the values
are hashed using the hash function into `[0, nb_channels)` and
this channel at the voxel for the given key is incremented by `1`
for each dictionary entry. If `hash_function is None`, then the
value must be a vector of size `(n_channels,)` which is added to
the existing channel values at that voxel grid.
feature_list: List, optional (default None)
List of atom indices or tuples of atom indices. This can only be
used if `nb_channel==1`. Increments the voxels corresponding to
these indices by `1` for each entry.
  nb_channel: int, optional (default 16)
The number of feature channels computed per voxel. Should
be a power of 2.
dtype: str ('int' or 'float'), optional (default 'int')
The type of the numpy ndarray created to hold features.
Returns
-------
feature_tensor: np.ndarray
The voxel of the input with the shape
`(voxels_per_edge, voxels_per_edge, voxels_per_edge, nb_channel)`.
"""
# Number of voxels per one edge of box to voxelize.
voxels_per_edge = int(box_width / voxel_width)
if dtype == "int":
feature_tensor = np.zeros(
(voxels_per_edge, voxels_per_edge, voxels_per_edge, nb_channel),
dtype=np.int8)
else:
feature_tensor = np.zeros(
(voxels_per_edge, voxels_per_edge, voxels_per_edge, nb_channel),
dtype=np.float16)
if feature_dict is not None:
for key, features in feature_dict.items():
voxels = get_voxels(coordinates, key, box_width, voxel_width)
if len(voxels.shape) == 1:
voxels = np.expand_dims(voxels, axis=0)
for voxel in voxels:
if ((voxel >= 0) & (voxel < voxels_per_edge)).all():
if hash_function is not None:
feature_tensor[voxel[0], voxel[1], voxel[2],
hash_function(features, nb_channel)] += 1.0
else:
feature_tensor[voxel[0], voxel[1], voxel[2], 0] += features
elif feature_list is not None:
for key in feature_list:
voxels = get_voxels(coordinates, key, box_width, voxel_width)
for voxel in voxels:
if ((voxel >= 0) & (voxel < voxels_per_edge)).all():
feature_tensor[voxel[0], voxel[1], voxel[2], 0] += 1.0
return feature_tensor
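# Minimal usage sketch (hypothetical coordinates and features): two atoms are
# voxelized into a single-channel grid using convert_atom_to_voxel as the mapper;
# they land in voxels [4, 4, 4] and [6, 6, 6] of an 8 x 8 x 8 x 1 tensor.
# >>> coords = np.array([[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]])
# >>> grid = voxelize(convert_atom_to_voxel, coords, box_width=8.0,
# ...                 voxel_width=1.0, feature_dict={0: 1, 1: 1}, nb_channel=1)
# >>> grid.shape
# (8, 8, 8, 1)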
def voxelize_pi_stack(prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances,
pi_stack_dist_cutoff, pi_stack_angle_cutoff, box_width,
voxel_width):
protein_pi_t, protein_pi_parallel, ligand_pi_t, ligand_pi_parallel = (
compute_pi_stack(
prot_rdk,
lig_rdk,
distances,
dist_cutoff=pi_stack_dist_cutoff,
angle_cutoff=pi_stack_angle_cutoff))
pi_parallel_tensor = voxelize(
convert_atom_to_voxel,
prot_xyz,
box_width=box_width,
voxel_width=voxel_width,
feature_dict=protein_pi_parallel,
nb_channel=1,
)
pi_parallel_tensor += voxelize(
convert_atom_to_voxel,
lig_xyz,
box_width=box_width,
voxel_width=voxel_width,
feature_dict=ligand_pi_parallel,
nb_channel=1,
)
pi_t_tensor = voxelize(
convert_atom_to_voxel,
prot_xyz,
box_width=box_width,
voxel_width=voxel_width,
feature_dict=protein_pi_t,
nb_channel=1,
)
pi_t_tensor += voxelize(
convert_atom_to_voxel,
lig_xyz,
box_width=box_width,
voxel_width=voxel_width,
feature_dict=ligand_pi_t,
nb_channel=1,
)
return [pi_parallel_tensor, pi_t_tensor]
|
the-stack_0_26438
|
import copy
from collections.abc import KeysView, ValuesView
from typing import Optional
import os
import platform
##### STATIC METHODS #####
def clean_path(uncleaned_path: str):
platformSeparator = getPlatformSeparators()
uncleaned_path = copy.deepcopy(uncleaned_path).replace('/', '\\')
if not uncleaned_path.endswith("\\"):
uncleaned_path += "\\"
struct_path = uncleaned_path.split('\\')
i = 0
_cleaner(struct_path, 0, first=True)
    while i < len(struct_path):
        if struct_path[i] == '..':
            struct_path.pop(i)
            # resolve '..' by removing the preceding path component (if any)
            if i > 0:
                struct_path.pop(i - 1)
                i -= 1
        else:
            i += 1
path = struct_path[0] if struct_path else platformSeparator
for element in struct_path[1:]:
if element != '':
path += platformSeparator + element
return path
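# Minimal usage sketch (hypothetical relative path, shown for Linux where the
# separator is '/'): '..' segments are resolved against the preceding component.
#   clean_path('conf/../data/settings.properties')  # -> 'data/settings.properties'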
def _cleaner(struct_path: list, index: int, first=False):
if index == len(struct_path):
return
if (not first) and struct_path[index] == "":
struct_path.pop(index)
return _cleaner(struct_path, index)
return _cleaner(struct_path, index + 1)
def getPlatformSeparators():
system = platform.system()
if system == 'Linux':
return '/'
elif system == 'Windows':
return '\\'
elif system == 'Darwin':
raise SystemError(f'Platform {system} (Mac OS) not supported!')
else:
raise SystemError(f'Platform {system} not supported!')
class Properties:
content: dict
comments: dict ## Key is line number
path: str ## self.path is absolute
prev_key: str
comment_char: str
separator_char: str
platformSeparator: str
def __init__(self, path: str = False, **kwargs):
""" Creates a Properties object used to manage a .properties file
:param path: string path as relative {used with a context manager}
:key separator_char: properties file separator character (default:=)
:key comment_char: properties file comment character (default:#)
:key is_absolute: boolean of whether or not the path given is absolute
"""
self.content = {}
self.comments = {}
self.path = ''
self.prev_key = ''
self.separator_char = kwargs.get('separator_char', '=')
self.comment_char = kwargs.get('comment_char', '#')
self.platformSeparator = getPlatformSeparators()
is_absolute: bool = kwargs.get('is_absolute', False)
if path:
self.load(path, self.separator_char, self.comment_char, is_absolute)
def __repr__(self):
return f"<{self.__class__}, loaded_file: '{self.path if self.path else 'None'}'>"
def getPath(self) -> str:
"""Used to get the path of the file stored
:return: str, the path of the file stored
"""
return self.path
def load(self, path: str, separator_char: str = '=', comment_char: str = '#',
is_absolute: bool = False) -> 'Properties':
""" Loads Properties and stores them in a dict and returns the dict
:param path: string path as relative {used with a context manager}
:param comment_char: comment_char (default:#)
:param separator_char: separator_char (default:=)
:param is_absolute: boolean of whether or not the path given is absolute
:return: Dict of content
"""
print(f'-> Loading file: {path}')
if not is_absolute:
path = str(os.getcwd()) + self.platformSeparator + clean_path(path)
path = clean_path(path)
self.content = {}
self.prev_key = ''
try:
with open(path, 'r') as f:
strContent = f.readlines()
for index, line in enumerate(strContent):
line = line.replace('\n', '').strip()
if not line:
continue
if self.prev_key != '':
self.content[self.prev_key] += ' ' + line
elif line[0] != comment_char:
key, key_value = line.split(separator_char)
self.content[key] = key_value
elif line[0] == comment_char:
self.comments[index] = line[1:].strip()
if line[-1] == '\\':
self.prev_key = key if self.prev_key == '' else self.prev_key
self.content[key] = self.content[key].replace('\\', '')
if line[-1] != '\\' and self.prev_key != '':
self.prev_key = ''
f.close()
self.prev_key = ''
self.path = path
self.separator_char = separator_char
self.comment_char = comment_char
except FileNotFoundError as e:
print(e)
return self
def reload(self):
"""Reloads the property file"""
if not self.path:
return print("No path given, cannot reload properties. Skipping...")
self.load(self.path, self.separator_char, self.comment_char, is_absolute=True)
def getProperty(self, key: str) -> str:
"""Returns the key
:param key: str, key of dict
:return: value of key, Undefined if not found
"""
return self.content.get(key, 'Undefined')
def replaceProperty(self, key: str, val: str, create_if_needed: bool = False):
"""Used to change the value of an existing property
:param key: str, key to change value from
:param val: str, new value
:param create_if_needed: bool, if True will create property if it doesn't exist
:return:
"""
if self.containsProperty(key) or create_if_needed:
self.content[key] = val
else:
print(f"Undefined property {key}")
def getContent(self) -> dict:
"""Returns the full content as a dict
:return: dict, content of property file
"""
return self.content
def setProperty(self, key: str, val: any):
"""Used to set properties to create file
:param key: str, key
:param val: str, value for key
:return:
"""
self.content[key] = str(val)
def clone(self) -> 'Properties':
"""Used to clone the <Properties> object
:return: copy of self
"""
return copy.deepcopy(self)
def clear(self):
"""Clears the content stored and the path of the file"""
self.path = ''
self.content = {}
def getKeySet(self) -> KeysView:
"""
:return: Set of Keys of the content
"""
return self.content.keys()
def getValuesSet(self) -> ValuesView:
"""
:return: Set of Values of the content
"""
return self.content.values()
def removeProperty(self, key: str) -> str:
"""Used to remove a property
:param key: key to be removed
:return: the value of the removed key
"""
return self.content.pop(key)
def containsProperty(self, key: str) -> bool:
"""Used to test if the property file contains a certain key
:param key: the key to test
:return: boolean, True if contains the key
"""
return self.content.__contains__(key)
def out(self, path: str = None, separator_char: str = '=', comment_char: str = '#', comments=None,
comments_pos: str = 'top', is_absolute: bool = False):
"""Used to write a properties file
:param path: string path as relative {used with a context manager}
:param comment_char: comment_char (default:#)
:param separator_char: separator_char (default:=)
:param comments: A list of strings to be written in the file as comments
:type comments: list[str]
:param comments_pos: position of the comments, must be 'top' or 'bottom'
:param is_absolute: boolean of whether or not the path given is absolute
"""
if path is None:
if self.path == '':
return print(f"No Path given nor set. Skipping...")
path = self.path
else:
if not is_absolute:
path = str(os.getcwd()) + self.platformSeparator + clean_path(path)
path = clean_path(path)
if comments is None:
comments = []
elif not hasattr(comments, '__iter__'):
return print(f"Comments type is not valid: not iterable. Given={comments.__class__} | Should be a list")
with open(path, 'w') as f:
if comments and comments_pos == 'top':
for comment in comments:
f.write(comment_char + ' ' + comment + '\n')
lineNumber, keyIndex, lastComIndex = 0, 0, 0
while keyIndex < len(self.content.keys()):
if self.comments.__contains__(lineNumber):
f.write(comment_char+' '+self.comments.get(lineNumber)+'\n')
lastComIndex += 1
else:
key = list(self.content.keys())[keyIndex]
f.write(key + separator_char + self.content[key] + '\n')
keyIndex += 1
lineNumber += 1
if len(self.comments.keys()) > lastComIndex:
for key in list(self.comments.keys())[lastComIndex:]:
f.write(comment_char + ' ' + self.comments.get(key) + '\n')
if comments and comments_pos == 'bottom':
for comment in comments:
f.write(comment_char + ' ' + comment + '\n')
f.close()
def close(self, comments=None):
"""Writes file to its path (Basically updates it) and clears the property so that it can be reused"""
self.out(self.path, self.separator_char, self.comment_char, comments, comments_pos='top', is_absolute=True)
self.clear()
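# Minimal usage sketch (hypothetical file name and keys, not part of this module):
#   props = Properties('settings.properties')
#   lang = props.getProperty('language')                      # 'Undefined' if missing
#   props.replaceProperty('language', 'en', create_if_needed=True)
#   props.out()                                               # write back to the loaded path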
class PropertiesHandler:
properties_dict: dict[str, Properties]
curr_prop: Optional[Properties]
directories_dict: dict[str, list[str]]
directories_name_dict: dict[str, str]
platformSeparator: str
def __init__(self, properties_list=None):
"""Creates a 'PropertiesHandler' object used to manage and switch easily between Properties objects,
useful for supporting languages for example
:param properties_list: A list of Properties objects if you have one
"""
if properties_list is None:
properties_list = []
self.properties_dict = {}
self.directories_dict = {}
self.directories_name_dict = {}
self.curr_prop = None
self.platformSeparator = getPlatformSeparators()
if properties_list:
for prop in properties_list:
if not isinstance(prop, Properties):
print(f"[Init Error] Object {prop} is not an instance of {Properties.__class__}, skipping...")
continue
self.addProperty(prop)
def __repr__(self):
return f"<{self.__class__.__name__} class, number_of_childs={len(self.properties_dict)}, " \
f"selected_properties_class={self.curr_prop if self.curr_prop else 'None'}, " \
f"list_of_childs={self.properties_dict}>"
def passFiles(self, files_list: list, input_order: str = 's,c') -> 'PropertiesHandler':
"""Used when creating the object or to pass multiple files at once;
Multiple lists are valid, you can do a list with only paths or replace some paths wit a lsit containing the path,
followed by the separator character then followed by the comment character. (According to the input order,
defaults to separator then comment, can be changed by setting 'input_order' to 'c,s'.
:param files_list: a list of .properties files path. Can be used as [[path, separator_char, comment_char], [path, separator_char]]
:param input_order: defines the input order in sublists, separator character first or second (default=s,c)
:return: self so that it can be used to instantiate
"""
for file in files_list:
if isinstance(file, list):
length = len(file)
if length == 1:
self.addProperty(Properties(file[0]))
elif length == 2:
if input_order == 'c,s':
self.addProperty(Properties(file[0], comment_char=file[1]))
else:
self.addProperty(Properties(file[0], separator_char=file[1]))
elif length == 3:
if input_order == 'c,s':
self.addProperty(Properties(file[0], comment_char=file[1], separator_char=file[2]))
else:
self.addProperty(Properties(file[0], separator_char=file[1], comment_char=file[2]))
else:
print(f'Invalid input file={file}')
else:
try:
self.addProperty(Properties(file))
except FileNotFoundError:
print(f"File '{file}' not found")
continue
return self
def getDirectorys(self):
return self.directories_dict
def setDirectory(self, relative_path: str, is_absolute: bool = False):
"""Used to set a single directory as the container for all .properties files,
        removes every other Properties object stored
:param relative_path: relative path to directory or absolute if is_absolute is True
:param is_absolute: boolean of whether or not the path given is absolute
"""
if not is_absolute:
absolute_path = str(os.getcwd()) + self.platformSeparator + clean_path(relative_path)
else:
absolute_path = relative_path
absolute_path = clean_path(absolute_path)
if not absolute_path.endswith(self.platformSeparator):
absolute_path += self.platformSeparator
self.directories_dict = {absolute_path: []}
self.properties_dict = {}
self.curr_prop = None
for file in os.listdir(absolute_path):
if file.endswith(".properties"):
self.directories_dict[absolute_path].append(absolute_path + file)
self.addProperty(Properties(os.path.join(absolute_path + file), is_absolute=True))
def addDirectory(self, relative_path: str, name: str = None, is_absolute: bool = False):
"""Used to add a directory to the Properties directories list
:param relative_path: relative path to directory or absolute if is_absolute is True
:param is_absolute: boolean of whether or not the path given is absolute
:param name: name to be given to the directory, if not: defaults to dir name
"""
if not is_absolute:
absolute_path = str(os.getcwd()) + self.platformSeparator + clean_path(relative_path)
else:
absolute_path = relative_path
absolute_path = clean_path(absolute_path)
if not name:
name = absolute_path.split(self.platformSeparator)[-1]
if self.directories_name_dict.__contains__(name): ##TODO: multiple directories may have the same name but are not on the same path
print(f"Directory '{name}' already loaded. Reloading...")
return self.updateDirectory(absolute_path=self.directories_name_dict.get(name))
if not absolute_path.endswith(self.platformSeparator):
absolute_path += self.platformSeparator
self.directories_name_dict[name] = absolute_path
self.directories_dict[absolute_path] = []
for file in os.listdir(absolute_path):
if file.endswith(".properties"):
self.directories_dict[absolute_path].append(absolute_path + file)
self.addProperty(Properties(os.path.join(absolute_path + file), is_absolute=True))
def removeDirectories(self):
"""Removes every directory added to the Directories list
Keeps externally added files"""
print(f'{list(self.directories_dict.values())=}')
for absolute_path in copy.deepcopy(list(self.directories_dict.keys())):
self.removeDirectory(absolute_path=absolute_path, is_absolute=True)
def removeDirectory(self, **kwargs):
"""Removes a directory and it's properties objects from the directories list
:key relative_path: relative path to directory
:key absolute_path: absolute path to directory
        :key name: name of directory (in the dict_names dictionary)
"""
relative_path = kwargs.get("relative_path", False)
absolute_path = kwargs.get("absolute_path", False)
name = kwargs.get("name", False)
if name:
if not self.directories_name_dict.__contains__(str(name)):
return print(f"No directory with name '{name}'")
absolute_path = self.directories_name_dict.pop(str(name))
elif relative_path:
absolute_path = str(os.getcwd()) + self.platformSeparator + clean_path(str(relative_path))
absolute_path = clean_path(absolute_path)
if not absolute_path.endswith(self.platformSeparator):
absolute_path += self.platformSeparator
if absolute_path not in self.directories_dict.keys():
raise AttributeError(f"Directory '{absolute_path}' isn't registered.")
for prop_path in self.directories_dict[absolute_path]:
prop = self._getPropertyByPath(prop_path)
if self.curr_prop == prop:
self.curr_prop = None
self.properties_dict.pop(list(self.properties_dict.keys())
[list(self.properties_dict.values()).index(prop)])
if len(self.properties_dict) > 0 and self.curr_prop is None:
self.curr_prop = list(self.properties_dict.values())[0]
return self.directories_dict.pop(absolute_path)
def updateDirectories(self):
"""Reloads every Properties objects contained in every stored directory
and adds new files that were created after previous loading"""
for absolute_path in self.directories_dict.keys():
self.updateDirectory(absolute_path=absolute_path)
def updateDirectory(self, **kwargs):
"""Reloads every Properties objects contained in a stored directory
and adds new files that were created after previous loading
:key relative_path: relative path to directory
:key absolute_path: absolute path to directory
:key name: name of directory (in the dict_names dictionnary)"""
relative_path = kwargs.get("relative_path", False)
absolute_path = kwargs.get("absolute_path", False)
name = kwargs.get("name", False)
if name:
if not self.directories_name_dict.__contains__(str(name)):
return print(f"No directory with name '{name}'")
absolute_path = self.directories_name_dict.pop(str(name))
elif relative_path:
absolute_path = str(os.getcwd()) + self.platformSeparator + clean_path(str(relative_path))
absolute_path = clean_path(absolute_path) + self.platformSeparator
print(f"\nUpdating files at {absolute_path}")
for file in os.listdir(absolute_path):
if file.endswith(".properties"):
fileName = absolute_path + file
if fileName not in self.directories_dict[absolute_path]:
self.addProperty(Properties(os.path.join(absolute_path + file), is_absolute=True))
self.directories_dict[absolute_path].append(absolute_path + file)
else:
self._getPropertyByPath(fileName).reload()
def reloadAll(self):
"""Reloads every Properties objects"""
for prop in self.properties_dict.values():
prop.reload()
def _getPropertyByPath(self, path: str) -> Properties:
"""Used to get a Property object using it's absolute path
:param path: str, absolute path to the property
"""
for prop in self.properties_dict.values():
if prop.getPath() == path:
return prop
def addProperty(self, prop: Properties, name: str = ''):
"""Adds a Properties object
:param prop: Properties object
:param name: name of the prop in the dict, will default to prop+a_number to fill the list
"""
if not name:
num = 1
name = 'prop' + str(num)
while self.properties_dict.__contains__(name):
num += 1
name = 'prop' + str(num)
self.properties_dict[name] = prop
if not self.curr_prop:
self.curr_prop = prop
def removeProperty(self, **kwargs) -> Optional['Properties']:
"""Used to remove a Properties object, returns the removed Properties class
:key relative_path: relative path to property
:key index: index of property
:key absolute_path: absolute path to property
:key name: name of the property
:return: the popped object
"""
name = kwargs.get('name', False)
index = kwargs.get('index', 'False')
relative_path = kwargs.get('relative_path', False)
absolute_path = kwargs.get('absolute_path', False)
prop: Optional[Properties] = None
if isinstance(name, str):
if not self.properties_dict.__contains__(name):
raise KeyError(f'Unknown key {name}')
prop = self.properties_dict.get(name)
elif isinstance(index, int):
if len(self.properties_dict) <= index or index < -len(self.properties_dict):
raise IndexError(
f"index '{index}' out of bounds: max={len(self.properties_dict) - 1}, min={-len(self.properties_dict)}")
prop = self.properties_dict.get(list(self.properties_dict.keys())[index])
elif isinstance(relative_path, str):
absolute_path = str(os.getcwd()) + self.platformSeparator + clean_path(relative_path)
absolute_path = clean_path(absolute_path)
prop = self._getPropertyByPath(absolute_path)
elif isinstance(absolute_path, str):
absolute_path = clean_path(absolute_path)
prop = self._getPropertyByPath(absolute_path)
if prop == self.curr_prop and prop is not None:
curr_index = list(self.properties_dict.values()).index(prop)
if curr_index == 0:
self.switchUp()
else:
self.switchDown()
try:
return self.properties_dict.pop(list(self.properties_dict.keys())
[list(self.properties_dict.values()).index(prop)])
except ValueError:
return print(f"Property {absolute_path.split(self.platformSeparator)[-1]} not loaded. Skipping...")
def changeProperty(self, **kwargs):
"""Used to change between Properties objects
:key relative_path: relative path to property
:key index: index of property
:key absolute_path: absolute path to property
:key name: name of the property
"""
name = kwargs.get('name', False)
index = kwargs.get('index', 'False')
relative_path = kwargs.get('relative_path', False)
absolute_path = kwargs.get('absolute_path', False)
if name:
if not self.properties_dict.__contains__(name):
raise KeyError(f'Unknown key {name}')
self.curr_prop = self.properties_dict.get(str(name), None)
elif isinstance(index, int):
if len(self.properties_dict) <= index or index < -len(self.properties_dict):
raise IndexError(
f"index '{index}' out of bounds: max={len(self.properties_dict) - 1}, min={-len(self.properties_dict)}")
self.curr_prop = self.properties_dict.get(list(self.properties_dict.keys())[index])
elif isinstance(relative_path, str):
absolute_path = str(os.getcwd()) + self.platformSeparator + clean_path(relative_path)
absolute_path = clean_path(absolute_path)
for prop in list(self.properties_dict.values()):
if prop.getPath() == absolute_path:
self.curr_prop = self.properties_dict.get(
list(self.properties_dict.keys())[list(self.properties_dict.values()).index(prop)])
break
elif isinstance(absolute_path, str):
absolute_path = clean_path(absolute_path)
for prop in list(self.properties_dict.values()):
if prop.getPath() == absolute_path:
self.curr_prop = self.properties_dict.get(
list(self.properties_dict.keys())[list(self.properties_dict.values()).index(prop)])
break
def getProperty(self, **kwargs):
"""Used to get a Properties object
:key relative_path: relative path to property
:key index: index of property
:key absolute_path: absolute path to property
:key name: name of the property
"""
name = kwargs.get('name', False)
index = kwargs.get('index', 'False')
relative_path = kwargs.get('relative_path', False)
absolute_path = kwargs.get('absolute_path', False)
if name:
if not self.properties_dict.__contains__(name):
raise KeyError(f'Unknown key {name}')
return self.properties_dict.get(str(name))
elif isinstance(index, int):
if len(self.properties_dict) <= index or index < -len(self.properties_dict):
raise IndexError(
f"index '{index}' out of bounds: max={len(self.properties_dict) - 1}, min={-len(self.properties_dict)}")
return self.properties_dict.get(list(self.properties_dict.keys())[index])
elif isinstance(relative_path, str):
absolute_path = str(os.getcwd()) + self.platformSeparator + clean_path(relative_path)
absolute_path = clean_path(absolute_path)
for prop in list(self.properties_dict.values()):
if prop.getPath() == absolute_path:
return self.properties_dict.get(
list(self.properties_dict.keys())[list(self.properties_dict.values()).index(prop)])
elif isinstance(absolute_path, str):
absolute_path = clean_path(absolute_path)
for prop in list(self.properties_dict.values()):
if prop.getPath() == absolute_path:
return self.properties_dict.get(
list(self.properties_dict.keys())[list(self.properties_dict.values()).index(prop)])
def switchUp(self):
"""Switches to the next Properties object in the internal dict (or to the first one if the last is passed)"""
curr_pos = list(self.properties_dict.values()).index(self.curr_prop)
next_pos = curr_pos + 1 if curr_pos + 1 < len(self.properties_dict) else 0
self.curr_prop = self.properties_dict.get(list(self.properties_dict.keys())[next_pos])
def switchDown(self):
"""Switches to the previous Properties object in the internal dict (or to the last one if the first is passed)"""
curr_pos = list(self.properties_dict.values()).index(self.curr_prop)
next_pos = len(self.properties_dict) - 1 if curr_pos - 1 < 0 else curr_pos - 1
self.curr_prop = self.properties_dict.get(list(self.properties_dict.keys())[next_pos])
def getProperties(self) -> ValuesView:
"""Used to get the values (Properties objects) of the internal dict
:return: ValuesView of the internal dict
"""
return self.properties_dict.values()
def getContent(self) -> dict:
"""Used to get the internal dict
:return: The internal dict
"""
return self.properties_dict
def getNames(self) -> KeysView:
"""Used to get the keys (Properties objects name's in the dict) of the internal dict
:return: KeysView of the internal dict
"""
return self.properties_dict.keys()
def get(self) -> Optional[Properties]:
"""Used to get the current Properties object
:return: Current selected Properties object
"""
return self.curr_prop
def closeProp(self, **kwargs):
"""Used to close a Properties object (basically clear it), and keep it in memory
:key relative_path: relative path to property
:key index: index of property
:key absolute_path: absolute path to property
:key name: name of the property
"""
name = kwargs.get('name', False)
index = kwargs.get('index', 'False')
relative_path = kwargs.get('relative_path', False)
absolute_path = kwargs.get('absolute_path', False)
prop: Optional[Properties] = None
if isinstance(name, str):
if not self.properties_dict.__contains__(name):
raise KeyError(f'Unknown key {name}')
prop = self.properties_dict.get(name)
elif isinstance(index, int):
if len(self.properties_dict) <= index or index < -len(self.properties_dict):
raise IndexError(
f"index '{index}' out of bounds: max={len(self.properties_dict) - 1}, min={-len(self.properties_dict)}")
prop = self.properties_dict.get(list(self.properties_dict.keys())[index])
elif isinstance(relative_path, str):
absolute_path = str(os.getcwd()) + self.platformSeparator + relative_path
absolute_path = clean_path(absolute_path)
for prop in list(self.properties_dict.values()):
if prop.getPath() == absolute_path:
prop = self.properties_dict.get(
list(self.properties_dict.keys())[list(self.properties_dict.values()).index(prop)])
break
elif isinstance(absolute_path, str):
absolute_path = clean_path(absolute_path)
for prop in list(self.properties_dict.values()):
if prop.getPath() == absolute_path:
prop = self.properties_dict.get(
list(self.properties_dict.keys())[list(self.properties_dict.values()).index(prop)])
break
if prop == self.curr_prop:
curr_index = list(self.properties_dict.values()).index(prop)
self.curr_prop = self.properties_dict.get(list(self.properties_dict.keys())
[(curr_index - 1 if curr_index == len(self.properties_dict)
else curr_index)])
prop.close()
def closeProps(self):
for prop in self.properties_dict.values():
prop.close()
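# Minimal usage sketch (hypothetical 'lang' directory, not part of this module):
#   handler = PropertiesHandler()
#   handler.addDirectory('lang', name='languages')  # loads every .properties file in ./lang
#   current = handler.get()                         # currently selected Properties object
#   handler.switchUp()                              # cycle to the next loaded file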
|
the-stack_0_26440
|
#Solve the Frog problem with DP, keeping relaxation in mind
#Python has no chmin, so define one here
#The list itself is passed as an argument (by reference)
def chmin(dp:list,i,x):
if x<dp[i]:
dp[i]=x
return True
return False
INF=1<<60
#2^60
n=int(input())
h=list(map(int,input().split()))
#dp[i] = the minimum cost to reach stone i
dp=[INF]*n
dp[0]=0
dp[1]=abs(h[1]-h[0])
for i in range(2,n):
chmin(dp,i,dp[i-1]+abs(h[i]-h[i-1]))
chmin(dp,i,dp[i-2]+abs(h[i]-h[i-2]))
print(dp[n-1])
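# Worked example (hypothetical input): n=4, h=[10, 30, 40, 20]
# dp = [0, 20, 30, 30], so the printed answer is 30
# (jump 1 -> 2 -> 4: |30-10| + |20-30| = 30)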
|
the-stack_0_26441
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 4 21:07:44 2021
@author: qagustina
"""
# 8.9: Comparing species in parks and on sidewalks
import os
import pandas as pd
directorio = '../Data'
archivo_parques = 'arbolado-en-espacios-verdes.csv'
archivo_veredas = 'arbolado-publico-lineal-2017-2018.csv'
fname_p = os.path.join(directorio, archivo_parques) # build file paths
fname_v = os.path.join(directorio, archivo_veredas)
df_parques = pd.read_csv(fname_p) # read parks csv
df_veredas = pd.read_csv(fname_v) # read sidewalks csv
# Parks
df_tipas_parques = df_parques[df_parques['nombre_cie'] == 'Tipuana Tipu']
# Columns I want to work with
cols_p = ['altura_tot', 'diametro']
df_tipas_parques = df_tipas_parques[cols_p]
# Copy of the dataframe so the original is not modified
df_tipas_parques = df_parques[df_parques['nombre_cie'] == 'Tipuana Tipu'][cols_p].copy()
# RENAME
df_tipas_parques = df_tipas_parques.rename(columns={"altura_tot": "altura_arbol", "diametro": "diametro_altura_pecho"})
# ADD COLUMN
df_tipas_parques['ambiente']='parque'
# Sidewalks
df_tipas_veredas = df_veredas[df_veredas['nombre_cientifico'] == 'Tipuana tipu']
cols_v = ['altura_arbol', 'diametro_altura_pecho']
df_tipas_veredas = df_tipas_veredas[cols_v]
# COPY
df_tipas_veredas = df_veredas[df_veredas['nombre_cientifico'] == 'Tipuana tipu'][cols_v].copy()
# ADD COLUMN
df_tipas_veredas['ambiente']='vereda'
# Combine the datasets
df_tipas = pd.concat([df_tipas_veredas, df_tipas_parques])
# Plots
# boxplot of the diameter at breast height of the tipa trees, split by environment
df_tipas.boxplot('diametro_altura_pecho',by = 'ambiente', figsize=(12,9))
# boxplot of the tipa tree heights, split by environment
df_tipas.boxplot('altura_arbol',by = 'ambiente', figsize=(12,9))
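# A quick numeric summary to accompany the plots (uses only the columns built
# above; printing these tables is an addition, not part of the original exercise).
print(df_tipas.groupby('ambiente')['diametro_altura_pecho'].describe())
print(df_tipas.groupby('ambiente')['altura_arbol'].describe())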
|
the-stack_0_26442
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
watchonly_vout = find_vout_for_address(self.nodes[0], watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
# reserveChangeKey was deprecated and is now removed
assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid litecoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################
# test a fundrawtransaction with a provided change type #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
addr3Obj = self.nodes[1].getaddressinfo(addr3)
addr4Obj = self.nodes[1].getaddressinfo(addr4)
addr5Obj = self.nodes[1].getaddressinfo(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
# Again lock the watchonly UTXO or nodes[0] may spend it, because
# lockunspent is memory-only and thus lost on restart
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node0
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create the same transaction via sendmany
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 1*min_relay_tx_fee}) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
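# result_fee_rate is the realised fee rate of the first call in BTC per 1000 bytes;
# the 2x and 10x feeRate requests below are expected to scale that rate proportionally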
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
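# the couple-of-satoshi slack presumably comes from the fee not dividing evenly across the
# three subtracted outputs, with the remainder absorbed by the first one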
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
|
the-stack_0_26443
|
from motion import motion
from mpu6886 import mpu6886
import utime  # delay/sleep functions live in utime
from driver import I2C  # peripheral driver library
import network  # library providing the Wi-Fi functionality
import ujson  # JSON string parsing library
from aliyunIoT import Device  # component used to connect to the Alibaba Cloud IoT platform
i2cObj = None
mpu6886Dev = None
motionObj = None
# flag indicating whether the IoT platform connection is up
iot_connected = False
wlan = None
# device "triple" credentials (productKey / deviceName / deviceSecret)
productKey = "Your-productKey"
deviceName = "Your-deviceName"
deviceSecret = "Your-deviceSecret"
# IoT device instance
device = None
# Wi-Fi SSID and password settings
wifiSsid = "Your-wifiSsid"
wifiPassword = "Your-wifiPassword"
# wait until the Wi-Fi has successfully connected to the router
def get_wifi_status():
global wlan
wifi_connected = False
wlan.active(True)  # activate the interface
wlan.scan()  # scan for access points
wlan.disconnect()  # disconnect from any current Wi-Fi
#print("start to connect ", wifiSsid)
# connect to the specified router (SSID: wifiSsid, password: wifiPassword)
wlan.connect(wifiSsid, wifiPassword)
while True:
wifi_connected = wlan.isconnected()  # check whether Wi-Fi is connected to the router
if wifi_connected:  # exit the while loop once Wi-Fi has connected
break
else:
utime.sleep(0.5)
print("wifi_connected:", wifi_connected)
ifconfig = wlan.ifconfig()  # get the interface's IP/netmask/gateway/DNS addresses
print(ifconfig)
utime.sleep(0.5)
# callback invoked once the connection to the IoT platform succeeds
def on_connect(data):
global iot_connected
iot_connected = True
# props event handler (called when the cloud platform pushes properties down to the device)
def on_props(request):
pass
def connect_lp(productKey, deviceName, deviceSecret):
global device, iot_connected
key_info = {
'region': 'cn-shanghai',
'productKey': productKey,
'deviceName': deviceName,
'deviceSecret': deviceSecret,
'keepaliveSec': 60
}
# hand the triple credentials to the IoT component
device = Device()
# register the connection callback; on_connect is called once the platform connection succeeds
device.on(Device.ON_CONNECT, on_connect)
# register the property-control callback; on_props is called when the platform sends a property message
device.on(Device.ON_PROPS, on_props)
# start connecting to the Alibaba Cloud IoT platform
device.connect(key_info)
# wait until the device has successfully connected to the IoT platform
while(True):
if iot_connected:
print('connected to the IoT platform')
break
else:
print('sleep for 1 s')
utime.sleep(1)
print('sleep for 2s')
utime.sleep(2)
def get_data():
acc = mpu6886Dev.acceleration
gyro = mpu6886Dev.gyro
# print(acc)
# print(gyro)
return acc, gyro  # return the acceleration and angular-velocity readings
def tap_detected():
upload_data = {'params': ujson.dumps({
'tap_count': motionObj.detectAction.tap_detect_count,
})
}
# upload the state to the IoT platform
if (iot_connected):
device.postProps(upload_data)
if __name__ == '__main__':
# network initialization
wlan = network.WLAN(network.STA_IF)  # create the WLAN object (station mode)
get_wifi_status()
connect_lp(productKey, deviceName, deviceSecret)
# hardware initialization
i2cObj = I2C()
# initialize the I2C device object from the "mpu6886" node in board.json (I2C port, slave address, bus frequency, etc.)
i2cObj.open("mpu6886")
print("mpu6886 inited!")
mpu6886Dev = mpu6886.MPU6886(i2cObj)  # initialize the MPU6886 sensor
# get the motion instance for double-tap detection
motionObj = motion.Motion("double_tap", get_data, tap_detected)
# enable action detection, passing the sensitivity parameters as a dictionary
sensitivity = {"ACCELERATION_UP_THREADHOLD": 30}
motionObj.enable(sensitivity)
# disable action detection; it can be re-enabled later with new sensitivity values
# motionObj.disable()
# i2cObj.close()  # close the I2C device object
# del mpu6886Dev
|
the-stack_0_26446
|
import yaml
import numpy as np
import sys
import matplotlib.pyplot as plt
from ir_sim.world import env_plot, mobile_robot, car_robot, obs_circle, obs_polygon
from ir_sim.env.env_robot import env_robot
from ir_sim.env.env_car import env_car
from ir_sim.env.env_obs_cir import env_obs_cir
from ir_sim.env.env_obs_line import env_obs_line
from ir_sim.env.env_obs_poly import env_obs_poly
from ir_sim.env.env_grid import env_grid
from PIL import Image
from pynput import keyboard
class env_base:
def __init__(self, world_name=None, plot=True, **kwargs):
if world_name != None:
world_name = sys.path[0] + '/' + world_name
with open(world_name) as file:
com_list = yaml.load(file, Loader=yaml.FullLoader)
world_args = com_list['world']
self.__height = world_args.get('world_height', 10)
self.__width = world_args.get('world_width', 10)
self.offset_x = world_args.get('offset_x', 0)
self.offset_y = world_args.get('offset_y', 0)
self.step_time = world_args.get('step_time', 0.1)
self.world_map = world_args.get('world_map', None)
self.xy_reso = world_args.get('xy_resolution', 1)
self.yaw_reso = world_args.get('yaw_resolution', 5)
self.offset = np.array([self.offset_x, self.offset_y])
self.robots_args = com_list.get('robots', dict())
self.robot_number = kwargs.get('robot_number', self.robots_args.get('robot_number', 0) )
self.cars_args = com_list.get('cars', dict())
self.car_number = self.cars_args.get('number', 0)
# obs_cir
self.obs_cirs_args = com_list.get('obs_cirs', dict())
self.obs_cir_number = self.obs_cirs_args.get('number', 0)
self.obs_step_mode = self.obs_cirs_args.get('obs_step_mode', 0)
# obs line
self.obs_lines_args = com_list.get('obs_lines', dict())
# obs polygons
self.obs_polygons_args = com_list.get('obs_polygons', dict())
self.vertexes_list = self.obs_polygons_args.get('vertexes_list', [])
self.obs_poly_num = self.obs_polygons_args.get('number', 0)
else:
self.__height = kwargs.get('world_height', 10)
self.__width = kwargs.get('world_width', 10)
self.step_time = kwargs.get('step_time', 0.1)
self.world_map = kwargs.get('world_map', None)
self.xy_reso = kwargs.get('xy_resolution', 1)
self.yaw_reso = kwargs.get('yaw_resolution', 5)
self.offset_x = kwargs.get('offset_x', 0)
self.offset_y = kwargs.get('offset_y', 0)
self.robot_number = kwargs.get('robot_number', 0)
self.obs_cir_number = kwargs.get('obs_cir_number', 0)
self.car_number = kwargs.get('car_number', 0)
self.robots_args = kwargs.get('robots', dict())
self.obs_cirs_args = kwargs.get('obs_cirs', dict())
self.cars_args = kwargs.get('cars', dict())
self.obs_lines_args = kwargs.get('obs_lines', dict())
self.obs_polygons_args = kwargs.get('obs_polygons', dict())
self.vertexes_list = self.obs_polygons_args.get('vertexes_list', [])
self.obs_poly_num = self.obs_polygons_args.get('number', 0)
self.plot = plot
self.components = dict()
self.init_environment(**kwargs)
if kwargs.get('teleop_key', False):
self.key_lv_max = 2
self.key_ang_max = 2
self.key_lv = 0
self.key_ang = 0
self.key_id = 1
self.alt_flag = 0
plt.rcParams['keymap.save'].remove('s')
plt.rcParams['keymap.quit'].remove('q')
self.key_vel = np.zeros(2,)
print('start to keyboard control')
print('w: forward', 's: backward', 'a: turn left', 'd: turn right',
'q: decrease linear velocity', 'e: increase linear velocity',
'z: decrease angular velocity', 'c: increase angular velocity',
'alt+num: change current control robot id')
self.listener = keyboard.Listener(on_press=self.on_press, on_release=self.on_release)
self.listener.start()
if kwargs.get('mouse', False):
pass
def init_environment(self, robot_class=mobile_robot, car_class=car_robot, obs_cir_class=obs_circle, obs_polygon_class=obs_polygon, **kwargs):
# world
px = int(self.__width / self.xy_reso)
py = int(self.__height / self.xy_reso)
if self.world_map != None:
world_map_path = sys.path[0] + '/' + self.world_map
img = Image.open(world_map_path).convert('L')
# img = Image.open(world_map_path)
img = img.resize( (px, py), Image.NEAREST)
# img = img.resize( (px, py), Image.ANTIALIAS)
# img.thumbnail( (px, py))
map_matrix = np.array(img)
map_matrix = 255 - map_matrix
map_matrix[map_matrix>255/2] = 255
map_matrix[map_matrix<255/2] = 0
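# after the inversion above, dark pixels in the source image (obstacles) end up as 255 and
# light pixels (free space) as 0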
# map_matrix[map_matrix>0] = 255
# map_matrix[map_matrix==0] = 0
self.map_matrix = np.fliplr(map_matrix.T)
else:
self.map_matrix = None
self.components['map_matrix'] = self.map_matrix
self.components['xy_reso'] = self.xy_reso
self.components['offset'] = np.array([self.offset_x, self.offset_y])
# self.components['grid_map'] = env_grid(grid_map_matrix=kwargs.get('grid_map_matrix', None))
self.components['obs_lines'] = env_obs_line(**{**self.obs_lines_args, **kwargs})
self.obs_line_states=self.components['obs_lines'].obs_line_states
self.components['obs_circles'] = env_obs_cir(obs_cir_class=obs_cir_class, obs_cir_num=self.obs_cir_number, step_time=self.step_time, components=self.components, **{**self.obs_cirs_args, **kwargs})
self.obs_cir_list = self.components['obs_circles'].obs_cir_list
self.components['obs_polygons'] = env_obs_poly(obs_poly_class=obs_polygon_class, vertex_list=self.vertexes_list, obs_poly_num=self.obs_poly_num, **{**self.obs_polygons_args, **kwargs})
self.obs_poly_list = self.components['obs_polygons'].obs_poly_list
self.components['robots'] = env_robot(robot_class=robot_class, step_time=self.step_time, components=self.components, **{**self.robots_args, **kwargs})
self.robot_list = self.components['robots'].robot_list
self.components['cars'] = env_car(car_class=car_class, car_num=self.car_number, step_time=self.step_time, **{**self.cars_args, **kwargs})
self.car_list = self.components['cars'].car_list
if self.plot:
self.world_plot = env_plot(self.__width, self.__height, self.components, offset_x=self.offset_x, offset_y=self.offset_y, **kwargs)
self.time = 0
if self.robot_number > 0:
self.robot = self.components['robots'].robot_list[0]
if self.car_number > 0:
self.car = self.components['cars'].car_list[0]
def collision_check(self):
collision = False
for robot in self.components['robots'].robot_list:
if robot.collision_check(self.components):
collision = True
for car in self.components['cars'].car_list:
if car.collision_check(self.components):
collision = True
return collision
def arrive_check(self):
arrive=True
for robot in self.components['robots'].robot_list:
if not robot.arrive_flag:
arrive = False
for car in self.components['cars'].car_list:
if not car.arrive_flag:
arrive = False
return arrive
def robot_step(self, vel_list, robot_id = None, **kwargs):
if robot_id == None:
if not isinstance(vel_list, list):
self.robot.move_forward(vel_list, **kwargs)
else:
for i, robot in enumerate(self.components['robots'].robot_list):
robot.move_forward(vel_list[i], **kwargs)
else:
self.components['robots'].robot_list[robot_id-1].move_forward(vel_list, **kwargs)
for robot in self.components['robots'].robot_list:
robot.cal_lidar_range(self.components)
def car_step(self, vel_list, car_id=None, **kwargs):
if car_id == None:
if not isinstance(vel_list, list):
self.car.move_forward(vel_list, **kwargs)
else:
for i, car in enumerate(self.components['cars'].car_list):
car.move_forward(vel_list[i], **kwargs)
else:
self.components['cars'].car_list[car_id-1].move_forward(vel_list, **kwargs)
for car in self.components['cars'].car_list:
car.cal_lidar_range(self.components)
def obs_cirs_step(self, vel_list=[], obs_id=None, **kwargs):
if self.obs_step_mode == 'default':
if obs_id == None:
for i, obs_cir in enumerate(self.components['obs_circles'].obs_cir_list):
obs_cir.move_forward(vel_list[i], **kwargs)
else:
self.components['obs_circles'].obs_cir_list[obs_id-1].move_forward(vel_list, **kwargs)
elif self.obs_step_mode == 'wander':
# rvo
self.components['obs_circles'].step_wander(**kwargs)
def render(self, time=0.05, **kwargs):
if self.plot:
self.world_plot.com_cla()
self.world_plot.draw_dyna_components(**kwargs)
self.world_plot.pause(time)
self.time = self.time + time
# def reset(self, ):
def on_press(self, key):
try:
if key.char.isdigit() and self.alt_flag:
if int(key.char) > self.robot_number:
print('out of number of robots')
else:
self.key_id = int(key.char)
if key.char == 'w':
self.key_lv = self.key_lv_max
if key.char == 's':
self.key_lv = - self.key_lv_max
if key.char == 'a':
self.key_ang = self.key_ang_max
if key.char == 'd':
self.key_ang = -self.key_ang_max
self.key_vel = np.array([self.key_lv, self.key_ang])
except AttributeError:
if key == keyboard.Key.alt:
self.alt_flag = 1
def on_release(self, key):
try:
if key.char == 'w':
self.key_lv = 0
if key.char == 's':
self.key_lv = 0
if key.char == 'a':
self.key_ang = 0
if key.char == 'd':
self.key_ang = 0
if key.char == 'q':
self.key_lv_max = self.key_lv_max - 0.2
print('current lv ', self.key_lv_max)
if key.char == 'e':
self.key_lv_max = self.key_lv_max + 0.2
print('current lv ', self.key_lv_max)
if key.char == 'z':
self.key_ang_max = self.key_ang_max - 0.2
print('current ang ', self.key_ang_max)
if key.char == 'c':
self.key_ang_max = self.key_ang_max + 0.2
print('current ang ', self.key_ang_max)
self.key_vel = np.array([self.key_lv, self.key_ang])
except AttributeError:
if key == keyboard.Key.alt:
self.alt_flag = 0
def save_fig(self, path, i):
self.world_plot.save_gif_figure(path, i)
def save_ani(self, image_path, ani_path, ani_name='animated', **kwargs):
self.world_plot.create_animate(image_path, ani_path, ani_name=ani_name, **kwargs)
def show(self, **kwargs):
self.world_plot.draw_dyna_components(**kwargs)
self.world_plot.show()
def show_ani(self):
self.world_plot.show_ani()
|
the-stack_0_26447
|
## Function declaration section ##
class TreeNode() :
def __init__ (self) :
self.left = None
self.data = None
self.right = None
## Global variable section ##
memory = []
root = None
nameAry = ['블랙핑크', '레드벨벳', '마마무', '에이핑크', '걸스데이', '트와이스' ]
## Main code section ##
node = TreeNode()
node.data = nameAry[0]
root = node
memory.append(node)
for name in nameAry[1:] :
node = TreeNode()
node.data = name
current = root
while True :
if name < current.data :
if current.left == None :
current.left = node
break
current = current.left
else :
if current.right == None :
current.right = node
break
current = current.right
memory.append(node)
findName = '마마무'
current = root
while True :
if findName == current.data:
print(findName, 'was found.')
break
elif findName < current.data :
if current.left == None :
print(findName, 'is not in the tree')
break
current = current.left
else :
if current.right == None :
print(findName, 'is not in the tree')
break
current = current.right
|
the-stack_0_26449
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Equipment.Core.IdentifiedObject import IdentifiedObject
class BaseVoltage(IdentifiedObject):
"""Defines a nominal base voltage which is referenced in the system.For the 2010 ENTSOE IOP, the BaseVoltage association for SeriesCompensator and TransformerWinding is required.
"""
def __init__(self, nominalVoltage=0.0, ConductingEquipment=None, VoltageLevel=None, *args, **kw_args):
"""Initialises a new 'BaseVoltage' instance.
@param nominalVoltage: The PowerSystemResource's base voltage. Should be a positive value - not zero.
@param ConductingEquipment: Use association to ConductingEquipment only when there is no VoltageLevel container used.
@param VoltageLevel: The VoltageLevels having this BaseVoltage.
"""
#: The PowerSystemResource's base voltage. Should be a positive value - not zero.
self.nominalVoltage = nominalVoltage
self._ConductingEquipment = []
self.ConductingEquipment = [] if ConductingEquipment is None else ConductingEquipment
self._VoltageLevel = []
self.VoltageLevel = [] if VoltageLevel is None else VoltageLevel
super(BaseVoltage, self).__init__(*args, **kw_args)
_attrs = ["nominalVoltage"]
_attr_types = {"nominalVoltage": float}
_defaults = {"nominalVoltage": 0.0}
_enums = {}
_refs = ["ConductingEquipment", "VoltageLevel"]
_many_refs = ["ConductingEquipment", "VoltageLevel"]
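# Illustrative use (the `line` object below is a hypothetical ConductingEquipment instance):
#   bv = BaseVoltage(nominalVoltage=110.0)
#   bv.addConductingEquipment(line)   # also sets line.BaseVoltage = bv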
def getConductingEquipment(self):
"""Use association to ConductingEquipment only when there is no VoltageLevel container used.
"""
return self._ConductingEquipment
def setConductingEquipment(self, value):
for x in self._ConductingEquipment:
x.BaseVoltage = None
for y in value:
y._BaseVoltage = self
self._ConductingEquipment = value
ConductingEquipment = property(getConductingEquipment, setConductingEquipment)
def addConductingEquipment(self, *ConductingEquipment):
for obj in ConductingEquipment:
obj.BaseVoltage = self
def removeConductingEquipment(self, *ConductingEquipment):
for obj in ConductingEquipment:
obj.BaseVoltage = None
def getVoltageLevel(self):
"""The VoltageLevels having this BaseVoltage.
"""
return self._VoltageLevel
def setVoltageLevel(self, value):
for x in self._VoltageLevel:
x.BaseVoltage = None
for y in value:
y._BaseVoltage = self
self._VoltageLevel = value
VoltageLevel = property(getVoltageLevel, setVoltageLevel)
def addVoltageLevel(self, *VoltageLevel):
for obj in VoltageLevel:
obj.BaseVoltage = self
def removeVoltageLevel(self, *VoltageLevel):
for obj in VoltageLevel:
obj.BaseVoltage = None
|
the-stack_0_26452
|
from datetime import datetime
import minimalmodbus
import math
import time
# sample period (in seconds)
Ts = 60
# initialize data averaging variables
N = 0
p = 0
T = 0
# open serial device
instrument = minimalmodbus.Instrument('/dev/ttyUSB0',1)
instrument.serial.baudrate = 9600
# data collection (infinite) loop
while True:
# get the current time, time of current sample, and time to stop sampling
t = time.time()
t_center = (math.floor(t / Ts) + 1) * Ts
t_stop = t_center + 0.5 * Ts
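# samples gathered until t_stop are averaged and stamped at t_center, so each averaging
# window is (roughly) centred on the next multiple of Ts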
# loop over one sample period
while (t < t_stop):
# get pressure and temperature and add to running sum
p += instrument.read_float(2,3,2)
T += instrument.read_float(8,3,2)
# increment loop counter and update current time
N += 1
t = time.time()
# compute average pressure and temperature
p = 1E5*p/N
T = T/N
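# the 1E5 factor in the pressure average presumably converts the instrument's reading
# from bar to pascals (1 bar = 1e5 Pa)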
# create filename
YYYY = datetime.fromtimestamp(t_center).strftime('%Y')
JJJ = datetime.fromtimestamp(t_center).strftime('%j')
fn = '/home/sdewolf/Data/%s.%s.txt'%(YYYY,JJJ)
# open file, create and write output string to file, and close file
out_file = open(fn,'a')
out_string = '%s,%0.6f,%0.6f\n'%(datetime.fromtimestamp(t_center).strftime('%Y-%m-%dT%H:%M:%S.%f'),p,T)
out_file.write(out_string)
out_file.close()
# display output to command line
dt_string = datetime.fromtimestamp(t_center).strftime('%Y:%j:%H:%M:%S.%f')
print('%s N = %i p = %0.6f T = %0.6f'%(dt_string,N,p,T))
# reset loop variables
N = 0
p = 0
T = 0
|
the-stack_0_26453
|
from contextlib import suppress
from aiogram import types
from aiogram.dispatcher.filters.filters import OrFilter
from aiogram.utils.exceptions import MessageCantBeDeleted, MessageNotModified
from loguru import logger
from app.middlewares.i18n import i18n
from app.misc import bot, dp
from app.models.chat import Chat
from app.models.user import User
from app.utils.chat_settings import cb_user_settings, get_user_settings_markup
_ = i18n.gettext
@dp.message_handler(commands=["settings"])
async def cmd_settings(message: types.Message, chat: Chat, user: User):
logger.info(
"User {user} wants to configure chat {chat}", user=user.id, chat=chat.id
)
with suppress(MessageCantBeDeleted):
await message.delete()
text, markup = get_user_settings_markup(chat, user)
await bot.send_message(chat_id=user.id, text=text, reply_markup=markup)
@dp.callback_query_handler(cb_user_settings.filter(property="language", value="change"))
async def cq_language(query: types.CallbackQuery, callback_data: dict):
logger.info(
"User {user} wants to change language", user=query.from_user.id,
)
callback_factory = cb_user_settings.new
markup = types.InlineKeyboardMarkup()
for code, language in i18n.AVAILABLE_LANGUAGES.items():
markup.add(
types.InlineKeyboardButton(
language.label,
callback_data=callback_factory(property="language", value=code),
)
)
await query.answer(_("Choose chat language"))
await query.message.edit_reply_markup(markup)
@dp.callback_query_handler(
OrFilter(
*[
cb_user_settings.filter(property="language", value=code)
for code in i18n.AVAILABLE_LANGUAGES
]
)
)
async def cq_choose_language(
query: types.CallbackQuery, chat: Chat, user: User, callback_data: dict
):
target_language = callback_data["value"]
logger.info(
"User {user} set language in chat {chat} to '{language}'",
user=query.from_user.id,
chat=chat.id,
language=target_language,
)
i18n.ctx_locale.set(target_language)
await chat.update(language=target_language).apply()
text, markup = get_user_settings_markup(chat, user)
await query.answer(
_("Language changed to {new_language}").format(
new_language=i18n.AVAILABLE_LANGUAGES[target_language].title
)
)
await query.message.edit_text(text, reply_markup=markup)
@dp.callback_query_handler(
cb_user_settings.filter(property="do_not_disturb", value="switch")
)
async def cq_do_not_disturb(query: types.CallbackQuery, user: User, chat: Chat):
logger.info("User {user} switched DND mode", user=query.from_user.id)
await query.answer(
_("Do not disturb mode {mode}").format(
mode=_("switched on") if not user.do_not_disturb else _("switched off")
)
)
await user.update(do_not_disturb=not user.do_not_disturb).apply()
text, markup = get_user_settings_markup(chat, user)
with suppress(MessageNotModified):
await query.message.edit_text(text, reply_markup=markup)
@dp.callback_query_handler(cb_user_settings.filter(property="done", value="true"))
async def cq_done(query: types.CallbackQuery):
logger.info(
"User {user} close settings menu", user=query.from_user.id,
)
await query.answer(_("Settings saved"))
await query.message.delete()
|
the-stack_0_26455
|
import numpy
seq = [165, 170, 165, 175, 155, 165, 165, 150, 155, 160, 160, 165, 170, 165, 170]
N = len(seq)
dic = {}
dic[150] = 0
dic[155] = 1
dic[160] = 2
dic[165] = 3
dic[170] = 4
dic[175] = 5
rdic = dict(zip(dic.values(), dic.keys()))
nstate = len(set(seq))
t = numpy.zeros((nstate, nstate))
for i in range(len(seq) - 1):
t[dic[seq[i]], dic[seq[i + 1]]] = 1
print(t)
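# note: each observed transition is recorded as a 0/1 indicator rather than a count,
# so repeated transitions carry no extra weight in the matrix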
for i in range(nstate):
s = sum(t[i, :])
t[i, :] = t[i, :] / s
print(t)
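# each row of t now sums to 1, so t[i, j] acts as P(next state j | current state i)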
p0 = numpy.array([0, 0, 0, 1, 0, 0])
r = p0
for i in range(N):
p = numpy.zeros((1, nstate))
p[0, dic[seq[i]]] = 1
print('start p:', p)
for j in range(N - i):
p = p.dot(t)  # propagate one step: p0 * t^(N-i) as stated in the print below (matrix product, not elementwise)
print('Prediction of Year 2020 from ' + str(2005 + i) + ' is: y=p0*p^' + str(N - i))
x = numpy.argmax(p)
a = x // nstate
b = x % nstate
print('Prediction of Year 2020 from ' + str(2005 + i) + ' result score is:' + str(rdic[b]))
|
the-stack_0_26459
|
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from .views import CustomUserViewSet, get_confirmation_code, get_jwt_token
router_1 = DefaultRouter()
router_1.register(r'', CustomUserViewSet, basename='users')
urlpatterns = [
path('email/', get_confirmation_code, name='token_obtain_pair'),
path('token/', get_jwt_token, name='token'),
path('', include(router_1.urls)),
]
|
the-stack_0_26462
|
from AoCUtils import *
result = 0
partNumber = "1"
writeToLog = False
if writeToLog:
logFile = open("log" + partNumber + ".txt", "w")
else:
logFile = "stdout"
printLog = printLogFactory(logFile)
tape = []
with open("input.txt", "r") as inputFile:
lines = inputFile.read().strip().split("\n")
for line in lines:
line = line.strip()
tape.append(int(line))
prev = inf
for i in tape:
if prev < i:
result += 1
prev = i
with open("output" + partNumber + ".txt", "w") as outputFile:
outputFile.write(str(result))
print(str(result))
if writeToLog:
cast(TextIOWrapper, logFile).close()
|
the-stack_0_26464
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nnabla as nn
import nnabla.functions as F
from .backward_function import BackwardFunction
class ImageAugmentationBackward(BackwardFunction):
def name(self):
return 'ImageAugmentationBackward'
def _create_forward_inputs_and_outputs(self, inputs, outputs):
# Inputs on the forward graph
inputs_fwd = []
for i in range(self._num_inputs_fwd):
need_grad = self.forward_func.inputs[i].need_grad
v = nn.Variable(inputs[i].shape, need_grad=need_grad)
v.data = inputs[i].data
v.grad = outputs[i].data
inputs_fwd += [v]
# Outputs on the forward graph
outputs_fwd = []
for i in range(self._num_outputs_fwd):
inp = inputs[self._num_inputs_fwd + i]
v = nn.Variable(inp.shape)
v.grad = inp.data
outputs_fwd += [v]
return inputs_fwd, outputs_fwd
def backward_impl(self, inputs, outputs, prop_down, accum):
# inputs: [inputs_fwd_graph] + [inputs_bwd_graph] or
# [inputs_fwd_graph] + [outputs_fwd_graph] + [inputs_bwd_graph]
raise NotImplementedError(
"The backward method of ImageAugmentationBackward class is not implemented.")
|
the-stack_0_26465
|
import copy
from vmaf.tools.decorator import deprecated, override
__copyright__ = "Copyright 2016-2020, Netflix, Inc."
__license__ = "BSD+Patent"
import os
from vmaf.core.mixin import WorkdirEnabled
from vmaf.tools.misc import get_file_name_without_extension, \
get_file_name_with_extension, get_unique_str_from_recursive_dict
from vmaf.config import VmafConfig
from vmaf.core.proc_func import proc_func_dict
class Asset(WorkdirEnabled):
"""
An Asset is the most basic unit with sufficient information to perform an
execution task. It includes basic information about a distorted video and
its undistorted reference video, as well as the frame range on which to
extract features/calculate quality results (*dis_start_end_frame* and
*ref_start_end_frame*), and at what resolution to perform such feature
extraction (each video frame is upscaled to the resolution specified by
*quality_width_hight* before processing).
Asset extends WorkdirEnabled mixin, which comes with a thread-safe working
directory to facilitate parallel execution.
The ref_path/dis_path points to the reference/distorted video files. For now,
it supports YUV video files (yuvxxx), or encoded video files (notyuv) that
can be decoded by ffmpeg.
"""
SUPPORTED_YUV_TYPES = ['yuv420p', 'yuv422p', 'yuv444p',
'yuv420p10le', 'yuv422p10le', 'yuv444p10le',
'notyuv']
DEFAULT_YUV_TYPE = 'yuv420p'
SUPPORTED_RESAMPLING_TYPES = ['bilinear', 'bicubic', 'lanczos']
DEFAULT_RESAMPLING_TYPE = 'bicubic'
ORDERED_FILTER_LIST = ['crop', 'pad', 'gblur', 'eq', 'lutyuv']
# ==== constructor ====
def __init__(self, dataset, content_id, asset_id,
ref_path, dis_path,
asset_dict,
workdir_root=VmafConfig.workdir_path()):
"""
:param dataset
:param content_id: ID of content the asset correspond to within dataset
:param asset_id: ID of asset
:param ref_path: path to reference video
:param dis_path: path to distorted video
:param asset_dict: dictionary with additional asset properties
:param workdir_root:
:return:
"""
WorkdirEnabled.__init__(self, workdir_root)
self.dataset = dataset
self.content_id = content_id
self.asset_id = asset_id
self.ref_path = ref_path
self.dis_path = dis_path
self.asset_dict = asset_dict
self._assert()
def _assert(self):
# validate yuv types
assert self.ref_yuv_type in self.SUPPORTED_YUV_TYPES
assert self.dis_yuv_type in self.SUPPORTED_YUV_TYPES
assert self.workfile_yuv_type in self.SUPPORTED_YUV_TYPES
# if YUV is notyuv, then ref/dis width and height should not be given,
# since it must be encoded video and the information should be already
# in included in the header of the video files
if self.ref_yuv_type == 'notyuv':
assert self.ref_width_height is None, 'For ref_yuv_type nonyuv, ref_width_height must NOT be specified.'
if self.dis_yuv_type == 'notyuv':
assert self.dis_width_height is None, 'For dis_yuv_type nonyuv, dis_width_height must NOT be specified.'
# validate asset_dict
self._assert_asset_dict()
def _assert_asset_dict(self):
# perform necessary assertions on asset properties of asset dict
if 'fps' in self.asset_dict:
assert self.asset_dict['fps'] > 0.0, 'Frame rate has to be positive.'
if 'rebuf_indices' in self.asset_dict:
assert isinstance(self.asset_dict['rebuf_indices'], list), 'Rebuffering indices need to be in a list.'
# check for negative rebuffering indices
assert len(list(filter(lambda x: x < 0, self.asset_dict['rebuf_indices']))) == 0, 'All rebuffering indices have to be >= 0.'
def copy(self, **kwargs):
new_asset_dict = copy.deepcopy(self.asset_dict)
# reset the following arguments:
if 'use_path_as_workpath' in new_asset_dict:
del new_asset_dict['use_path_as_workpath']
if 'use_workpath_as_procpath' in new_asset_dict:
del new_asset_dict['use_workpath_as_procpath']
dataset = kwargs['dataset'] if 'dataset' in kwargs else self.dataset
content_id = kwargs['content_id'] if 'content_id' in kwargs else self.content_id
asset_id = kwargs['asset_id'] if 'asset_id' in kwargs else self.asset_id
ref_path = kwargs['ref_path'] if 'ref_path' in kwargs else self.ref_path
dis_path = kwargs['dis_path'] if 'dis_path' in kwargs else self.dis_path
workdir_root = kwargs['workdir_root'] if 'workdir_root' in kwargs else self.workdir_root
new_asset = self.__class__(dataset, content_id, asset_id,
ref_path, dis_path, new_asset_dict,
workdir_root)
return new_asset
@staticmethod
def from_repr(rp):
"""
Reconstruct Asset from repr string.
:return:
"""
import ast
d = ast.literal_eval(rp)
assert 'dataset' in d
assert 'content_id' in d
assert 'asset_id' in d
assert 'ref_path' in d
assert 'dis_path' in d
assert 'asset_dict' in d
return Asset(dataset=d['dataset'],
content_id=d['content_id'],
asset_id=d['asset_id'],
ref_path=d['ref_path'],
dis_path=d['dis_path'],
asset_dict=d['asset_dict']
)
# ==== groundtruth ====
@property
def groundtruth(self):
"""
Ground truth score, e.g. MOS, DMOS
:return:
"""
if 'groundtruth' in self.asset_dict:
return self.asset_dict['groundtruth']
else:
return None
@property
def groundtruth_std(self):
if 'groundtruth_std' in self.asset_dict:
return self.asset_dict['groundtruth_std']
else:
return None
@property
def raw_groundtruth(self):
"""
Raw ground truth scores, e.g. opinion score (OS)
:return:
"""
if 'raw_groundtruth' in self.asset_dict:
return self.asset_dict['raw_groundtruth']
else:
return None
# ==== width and height ====
@property
def ref_width_height(self):
"""
Width and height of reference video.
:return: width and height of reference video. If None, it signals that
width and height should be figured out in other means (e.g. FFMPEG).
"""
if 'ref_width' in self.asset_dict and 'ref_height' in self.asset_dict:
return self.asset_dict['ref_width'], self.asset_dict['ref_height']
elif 'width' in self.asset_dict and 'height' in self.asset_dict:
return self.asset_dict['width'], self.asset_dict['height']
else:
return None
@property
def dis_width_height(self):
"""
Width and height of distorted video.
:return: width and height of distorted video. If None, it signals that
width and height should be figured out in other means (e.g. FFMPEG)
"""
if 'dis_width' in self.asset_dict and 'dis_height' in self.asset_dict:
return self.asset_dict['dis_width'], self.asset_dict['dis_height']
elif 'width' in self.asset_dict and 'height' in self.asset_dict:
return self.asset_dict['width'], self.asset_dict['height']
else:
return None
def clear_up_width_height(self):
if 'width' in self.asset_dict:
del self.asset_dict['width']
if 'height' in self.asset_dict:
del self.asset_dict['height']
if 'ref_width' in self.asset_dict:
del self.asset_dict['ref_width']
if 'ref_height' in self.asset_dict:
del self.asset_dict['ref_height']
if 'dis_width' in self.asset_dict:
del self.asset_dict['dis_width']
if 'dis_height' in self.asset_dict:
del self.asset_dict['dis_height']
@property
def quality_width_height(self):
"""
Width and height to scale distorted video to before quality calculation.
:return: width and height at which the quality is measured at. either
'quality_width' and 'quality_height' have to present in asset_dict;
or ref and dis's width and height must be equal, which will be used
as the default quality width and height; or either of ref/dis is type
'notyuv', in which case the other's width/height (could also be None)
"""
if 'quality_width' in self.asset_dict and 'quality_height' in self.asset_dict:
return self.asset_dict['quality_width'], self.asset_dict['quality_height']
elif self.ref_yuv_type == 'notyuv':
return self.dis_width_height
elif self.dis_yuv_type == 'notyuv':
return self.ref_width_height
else:
assert self.ref_width_height == self.dis_width_height
return self.ref_width_height
# ==== start and end frame ====
@property
def ref_start_end_frame(self):
"""
Start and end frame of reference video for quality calculation.
:return: reference video's start frame and end frame for processing
(inclusive). If None, it signals that the entire video should be
processed.
"""
if 'ref_start_frame' in self.asset_dict and 'ref_end_frame' in self.asset_dict:
return self.asset_dict['ref_start_frame'], self.asset_dict['ref_end_frame']
elif 'start_frame' in self.asset_dict and 'end_frame' in self.asset_dict:
return self.asset_dict['start_frame'], self.asset_dict['end_frame']
elif 'start_sec' in self.asset_dict and 'end_sec' in self.asset_dict and 'fps' in self.asset_dict:
start_frame = int(round(self.asset_dict['start_sec'] * self.asset_dict['fps']))
end_frame = int(round(self.asset_dict['end_sec'] * self.asset_dict['fps'])) - 1
return start_frame, end_frame
elif 'duration_sec' in self.asset_dict and 'fps' in self.asset_dict:
start_frame = 0
end_frame = int(round(self.asset_dict['duration_sec'] * self.asset_dict['fps'])) - 1
return start_frame, end_frame
else:
return None
@property
def dis_start_end_frame(self):
"""
Start and end frame of distorted video for quality calculation.
:return: distorted video's start frame and end frame for processing
(inclusive). If None, it signals that the entire video should be
processed.
"""
if 'dis_start_frame' in self.asset_dict and 'dis_end_frame' in self.asset_dict:
return self.asset_dict['dis_start_frame'], self.asset_dict['dis_end_frame']
elif 'start_frame' in self.asset_dict and 'end_frame' in self.asset_dict:
return self.asset_dict['start_frame'], self.asset_dict['end_frame']
elif 'start_sec' in self.asset_dict and 'end_sec' in self.asset_dict and 'fps' in self.asset_dict:
start_frame = int(round(self.asset_dict['start_sec'] * self.asset_dict['fps']))
end_frame = int(round(self.asset_dict['end_sec'] * self.asset_dict['fps'])) - 1
return start_frame, end_frame
elif 'duration_sec' in self.asset_dict and 'fps' in self.asset_dict:
start_frame = 0
end_frame = int(round(self.asset_dict['duration_sec'] * self.asset_dict['fps'])) - 1
return start_frame, end_frame
else:
return None
def clear_up_start_end_frame(self):
if 'start_frame' in self.asset_dict:
del self.asset_dict['start_frame']
if 'end_frame' in self.asset_dict:
del self.asset_dict['end_frame']
if 'ref_start_frame' in self.asset_dict:
del self.asset_dict['ref_start_frame']
if 'dis_start_frame' in self.asset_dict:
del self.asset_dict['dis_start_frame']
if 'start_sec' in self.asset_dict:
del self.asset_dict['start_sec']
if 'end_sec' in self.asset_dict:
del self.asset_dict['end_sec']
if 'duration_sec' in self.asset_dict:
del self.asset_dict['duration_sec']
# ==== duration and start time====
@property
def ref_duration_sec(self):
"""
Reference video's duration in second used in quality calculation.
:return:
"""
if 'duration_sec' in self.asset_dict:
return self.asset_dict['duration_sec']
elif 'start_sec' in self.asset_dict \
and 'end_sec' in self.asset_dict:
return self.asset_dict['end_sec'] - self.asset_dict['start_sec']
else:
ref_start_end_frame = self.ref_start_end_frame
if ref_start_end_frame and 'fps' in self.asset_dict:
s, e = ref_start_end_frame
return (e - s + 1) / float(self.asset_dict['fps'])
else:
return None
@property
def dis_duration_sec(self):
"""
Distorted video's duration in second used in quality calculation.
:return:
"""
if 'duration_sec' in self.asset_dict:
return self.asset_dict['duration_sec']
elif 'start_sec' in self.asset_dict \
and 'end_sec' in self.asset_dict:
return self.asset_dict['end_sec'] - self.asset_dict['start_sec']
else:
dis_start_end_frame = self.dis_start_end_frame
if dis_start_end_frame \
and 'fps' in self.asset_dict:
start, end = dis_start_end_frame
return (end - start + 1) / float(self.asset_dict['fps'])
else:
return None
@property
def ref_start_sec(self):
if self.ref_start_end_frame is None or self.fps is None:
return None
else:
ref_start_frame, ref_end_frame = self.ref_start_end_frame
fps = self.fps
return float(ref_start_frame) / fps
@property
def dis_start_sec(self):
if self.dis_start_end_frame is None or self.fps is None:
return None
else:
dis_start_frame, dis_end_frame = self.dis_start_end_frame
fps = self.fps
return float(dis_start_frame) / fps
@property
def fps(self):
if 'fps' in self.asset_dict:
assert self.asset_dict['fps'] > 0.0, 'Frame rate has to be positive.'
return self.asset_dict['fps']
else:
return None
@property
def rebuf_indices(self):
if 'rebuf_indices' in self.asset_dict:
assert isinstance(self.asset_dict['rebuf_indices'], list), 'Rebuffering indices need to be in a list.'
# check for negative rebuffering indices
assert len(list(filter(lambda x: x < 0, self.asset_dict['rebuf_indices']))) == 0, 'All rebuffering indices have to be >= 0.'
return self.asset_dict['rebuf_indices']
else:
return None
# ==== str ====
@property
def ref_str(self):
"""
String representation for reference video.
:return:
"""
s = ""
path = get_file_name_without_extension(self.ref_path)
s += "{path}".format(path=path)
if self.ref_width_height:
w, h = self.ref_width_height
s += "_{w}x{h}".format(w=w, h=h)
if self.ref_yuv_type != self.DEFAULT_YUV_TYPE:
s += "_{}".format(self.ref_yuv_type)
if self.ref_start_end_frame:
start, end = self.ref_start_end_frame
s += "_{start}to{end}".format(start=start, end=end)
for key in self.ORDERED_FILTER_LIST:
if self.get_filter_cmd(key, 'ref') is not None:
if s != "":
s += "_"
s += "{}{}".format(key, self.get_filter_cmd(key, 'ref'))
if self.ref_proc_callback_str:
s += f'_{self.ref_proc_callback_str}'
return s
@property
def dis_str(self):
"""
String representation for distorted video.
:return:
"""
s = ""
path = get_file_name_without_extension(self.dis_path)
s += "{path}".format(path=path)
if self.dis_width_height:
w, h = self.dis_width_height
s += "_{w}x{h}".format(w=w, h=h)
if self.dis_yuv_type != self.DEFAULT_YUV_TYPE:
s += "_{}".format(self.dis_yuv_type)
if self.dis_start_end_frame:
start, end = self.dis_start_end_frame
s += "_{start}to{end}".format(start=start, end=end)
for key in self.ORDERED_FILTER_LIST:
if self.get_filter_cmd(key, 'dis') is not None:
if s != "":
s += "_"
s += "{}{}".format(key, self.get_filter_cmd(key, 'dis'))
if self.dis_proc_callback_str:
s += f'_{self.dis_proc_callback_str}'
return s
@property
def quality_str(self):
"""
String representation for quality-related information
:return:
"""
s = ""
if self.quality_width_height:
w, h = self.quality_width_height
if s != "":
s += "_"
s += "{w}x{h}".format(w=w, h=h)
# if resolutions are consistent, no resampling is taking place, so
# specificying resampling type should be ignored
if self.resampling_type != self.DEFAULT_RESAMPLING_TYPE and \
not (self.ref_width_height == self.quality_width_height
and self.dis_width_height == self.quality_width_height):
if s != "":
s += "_"
s += "{}".format(self.resampling_type)
return s
def to_string(self):
"""
The compact string representation of asset, used by __str__.
:return:
"""
s = "{dataset}_{content_id}_{asset_id}_{ref_str}_vs_{dis_str}".\
format(dataset=self.dataset,
content_id=self.content_id,
asset_id=self.asset_id,
ref_str=self.ref_str,
dis_str=self.dis_str)
quality_str = self.quality_str
if quality_str:
s += "_q_{quality_str}".format(quality_str=quality_str)
return s
def to_normalized_dict(self):
"""
Similar to self.__dict__ except for excluding workdir (which is random)
and dir part of ref_path/dis_path.
:return:
"""
d = {}
for key in self.__dict__:
if key == 'workdir':
d[key] = ""
elif key == 'ref_path' or key == 'dis_path':
d[key] = get_file_name_with_extension(self.__dict__[key])
else:
d[key] = self.__dict__[key]
return d
def __str__(self):
"""
Use str(asset) for compact but unique description of asset, for example
use in file names
:return:
"""
return self.to_string()
def __repr__(self):
"""
Use repr(asset) for serialization of asset (to be recovered later on)
:return:
"""
return self.to_normalized_repr()
def to_full_repr(self):
return get_unique_str_from_recursive_dict(self.__dict__)
def to_normalized_repr(self):
return get_unique_str_from_recursive_dict(self.to_normalized_dict())
def __hash__(self):
return hash(self.to_normalized_repr())
def __eq__(self, other):
return self.to_normalized_repr() == other.to_normalized_repr()
def __ne__(self, other):
return not self.__eq__(other)
# ==== workfile ====
@property
def ref_workfile_path(self):
if self.use_path_as_workpath:
return self.ref_path
else:
return os.path.join(self.workdir, f"ref_{str(self)}")
@property
def dis_workfile_path(self):
if self.use_path_as_workpath:
return self.dis_path
else:
return os.path.join(self.workdir, f"dis_{str(self)}")
# ==== procfile ====
@property
def ref_procfile_path(self):
if self.use_workpath_as_procpath:
return self.ref_workfile_path
else:
return os.path.join(self.workdir, f"refp_{str(self)}")
@property
def dis_procfile_path(self):
if self.use_workpath_as_procpath:
return self.dis_workfile_path
else:
return os.path.join(self.workdir, f"disp_{str(self)}")
# ==== bitrate ====
@property
def ref_bitrate_kbps_for_entire_file(self):
"""
:return: the bitrate in Kbps for the entire reference video file. Must
make sure ref_duration_sec covers the entire file.
"""
try:
return os.path.getsize(self.ref_path) / self.ref_duration_sec * 8.0 / 1000.0
except:
return None
@property
def dis_bitrate_kbps_for_entire_file(self):
"""
:return: the bitrate in Kbps for the entire distorted video file. Must
make sure dis_duration_sec covers the entire file.
"""
try:
return os.path.getsize(self.dis_path) / self.dis_duration_sec * 8.0 / 1000.0
except:
return None
# ==== yuv format ====
@property
def ref_yuv_type(self):
if 'ref_yuv_type' in self.asset_dict:
if self.asset_dict['ref_yuv_type'] in self.SUPPORTED_YUV_TYPES:
return self.asset_dict['ref_yuv_type']
else:
assert False, "Unsupported YUV type: {}".format(
self.asset_dict['ref_yuv_type'])
elif 'yuv_type' in self.asset_dict:
if self.asset_dict['yuv_type'] in self.SUPPORTED_YUV_TYPES:
return self.asset_dict['yuv_type']
else:
assert False, "Unsupported YUV type: {}".format(
self.asset_dict['yuv_type'])
else:
return self.DEFAULT_YUV_TYPE
@property
def dis_yuv_type(self):
if 'dis_yuv_type' in self.asset_dict:
if self.asset_dict['dis_yuv_type'] in self.SUPPORTED_YUV_TYPES:
return self.asset_dict['dis_yuv_type']
else:
assert False, "Unsupported YUV type: {}".format(
self.asset_dict['dis_yuv_type'])
elif 'yuv_type' in self.asset_dict:
if self.asset_dict['yuv_type'] in self.SUPPORTED_YUV_TYPES:
return self.asset_dict['yuv_type']
else:
assert False, "Unsupported YUV type: {}".format(
self.asset_dict['yuv_type'])
else:
return self.DEFAULT_YUV_TYPE
@property
def workfile_yuv_type(self):
"""
For notyuv assets, we want to allow the decoded yuv format to be set by the user.
This is highly relevant to image decoding, where we would like to select yuv444p.
This property tries to read workfile_yuv_type from asset_dict; if present it is used,
otherwise it falls back to DEFAULT_YUV_TYPE.
"""
supported_yuv_types = list(set(Asset.SUPPORTED_YUV_TYPES) - {'notyuv'})
if 'workfile_yuv_type' in self.asset_dict:
workfile_yuv_type = self.asset_dict['workfile_yuv_type']
assert workfile_yuv_type in supported_yuv_types, "Workfile YUV format {} is not valid, pick: {}".format(
workfile_yuv_type, str(supported_yuv_types))
return workfile_yuv_type
else:
return self.DEFAULT_YUV_TYPE
@property
@deprecated
def yuv_type(self):
""" For backward-compatibility """
return self.dis_yuv_type
def clear_up_yuv_type(self):
if 'yuv_type' in self.asset_dict:
del self.asset_dict['yuv_type']
if 'ref_yuv_type' in self.asset_dict:
del self.asset_dict['ref_yuv_type']
if 'dis_yuv_type' in self.asset_dict:
del self.asset_dict['dis_yuv_type']
@property
def resampling_type(self):
if 'resampling_type' in self.asset_dict:
if self.asset_dict['resampling_type'] in self.SUPPORTED_RESAMPLING_TYPES:
return self.asset_dict['resampling_type']
else:
assert False, "Unsupported resampling type: {}".format(
self.asset_dict['resampling_type'])
else:
return self.DEFAULT_RESAMPLING_TYPE
@property
def use_path_as_workpath(self):
"""
If True, use ref_path as ref_workfile_path, and dis_path as
dis_workfile_path.
"""
if 'use_path_as_workpath' in self.asset_dict:
if self.asset_dict['use_path_as_workpath'] == 1:
return True
elif self.asset_dict['use_path_as_workpath'] == 0:
return False
else:
assert False
else:
return False
@use_path_as_workpath.setter
def use_path_as_workpath(self, bool_value):
# cannot just assign True/False for ResultStore reason:
# df = pd.DataFrame.from_dict(ast.literal_eval(result_file.read()))
# cannot read true/false
if bool_value is True:
self.asset_dict['use_path_as_workpath'] = 1
else:
self.asset_dict['use_path_as_workpath'] = 0
@property
def use_workpath_as_procpath(self):
"""
If True, use ref_workfile_path as ref_procfile_path, and dis_workfile_path
as dis_procfile_path.
"""
if 'use_workpath_as_procpath' in self.asset_dict:
if self.asset_dict['use_workpath_as_procpath'] == 1:
return True
elif self.asset_dict['use_workpath_as_procpath'] == 0:
return False
else:
assert False
else:
return False
@use_workpath_as_procpath.setter
def use_workpath_as_procpath(self, bool_value):
        # Cannot just assign True/False, for a ResultStore reason:
        # df = pd.DataFrame.from_dict(ast.literal_eval(result_file.read()))
        # cannot read true/false, so store 1/0 instead.
if bool_value is True:
self.asset_dict['use_workpath_as_procpath'] = 1
else:
self.asset_dict['use_workpath_as_procpath'] = 0
@property
def crop_cmd(self):
return self.get_filter_cmd('crop', None)
@property
def ref_crop_cmd(self):
return self.get_filter_cmd('crop', 'ref')
@property
def dis_crop_cmd(self):
return self.get_filter_cmd('crop', 'dis')
@property
def pad_cmd(self):
return self.get_filter_cmd('pad', None)
@property
def ref_pad_cmd(self):
return self.get_filter_cmd('pad', 'ref')
@property
def dis_pad_cmd(self):
return self.get_filter_cmd('pad', 'dis')
def get_filter_cmd(self, key, target=None):
        assert key in self.ORDERED_FILTER_LIST, 'key {key} is not in ORDERED_FILTER_LIST'.format(key=key)
assert target == 'ref' or target == 'dis' or target is None, \
'target is {}, which is not supported'.format(target)
if target is None:
cmd = key + '_cmd'
if cmd in self.asset_dict:
return self.asset_dict[cmd]
else:
return None
if target == 'ref' or target == 'dis':
cmd = target + '_' + key + '_cmd'
cmd2 = key + '_cmd'
if cmd in self.asset_dict:
return self.asset_dict[cmd]
elif cmd2 in self.asset_dict:
return self.asset_dict[cmd2]
else:
return None
else:
assert False
@property
def ref_proc_callback_str(self):
if 'ref_proc_callback' in self.asset_dict:
if self.asset_dict['ref_proc_callback'] in proc_func_dict:
return self.asset_dict['ref_proc_callback']
else:
assert False, "Unsupported ref_proc_callback: {}".format(
self.asset_dict['ref_proc_callback'])
elif 'proc_callback' in self.asset_dict:
if self.asset_dict['proc_callback'] in proc_func_dict:
return self.asset_dict['proc_callback']
else:
assert False, "Unsupported proc_callback: {}".format(
self.asset_dict['proc_callback'])
else:
return None
@property
def ref_proc_callback(self):
if self.ref_proc_callback_str is None:
return None
else:
return proc_func_dict[self.ref_proc_callback_str]
@property
def dis_proc_callback_str(self):
if 'dis_proc_callback' in self.asset_dict:
if self.asset_dict['dis_proc_callback'] in proc_func_dict:
return self.asset_dict['dis_proc_callback']
else:
assert False, "Unsupported dis_proc_callback: {}".format(
self.asset_dict['dis_proc_callback'])
elif 'proc_callback' in self.asset_dict:
if self.asset_dict['proc_callback'] in proc_func_dict:
return self.asset_dict['proc_callback']
else:
assert False, "Unsupported proc_callback: {}".format(
self.asset_dict['proc_callback'])
else:
return None
@property
def dis_proc_callback(self):
if self.dis_proc_callback_str is None:
return None
else:
return proc_func_dict[self.dis_proc_callback_str]
class NorefAsset(Asset):
"""
NorefAsset is similar to Asset except that it does not have a reference
video path ref_path.
"""
# ==== constructor ====
def __init__(self, dataset, content_id, asset_id,
dis_path,
asset_dict,
workdir_root=VmafConfig.workdir_path()):
"""
:param dataset
        :param content_id: ID of the content the asset corresponds to within the dataset
:param asset_id: ID of asset
:param dis_path: path to distorted video
:param asset_dict: dictionary with additional asset properties
:param workdir_root:
:return:
"""
super(NorefAsset, self).__init__(
dataset,
content_id,
asset_id,
dis_path, # repeat dis_path for both ref_path and dis_path
dis_path,
asset_dict,
workdir_root
)
@override(Asset)
def copy(self, **kwargs):
new_asset_dict = copy.deepcopy(self.asset_dict)
# reset the following arguments:
if 'use_path_as_workpath' in new_asset_dict:
del new_asset_dict['use_path_as_workpath']
if 'use_workpath_as_procpath' in new_asset_dict:
del new_asset_dict['use_workpath_as_procpath']
dataset = kwargs['dataset'] if 'dataset' in kwargs else self.dataset
content_id = kwargs['content_id'] if 'content_id' in kwargs else self.content_id
asset_id = kwargs['asset_id'] if 'asset_id' in kwargs else self.asset_id
dis_path = kwargs['dis_path'] if 'dis_path' in kwargs else self.dis_path
workdir_root = kwargs['workdir_root'] if 'workdir_root' in kwargs else self.workdir_root
new_asset = self.__class__(dataset, content_id, asset_id,
dis_path, new_asset_dict,
workdir_root)
return new_asset
def copy_as_asset(self, **kwargs):
""" similar to Noref.copy, except that the returned object is of
(super)class Asset. """
new_asset = self.copy()
new_asset.__class__ = Asset
return new_asset.copy(**kwargs)
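# Minimal usage sketch (added for illustration, not part of the original
# module). The dataset name, path, and asset_dict values are hypothetical:
#
#     asset = NorefAsset(
#         dataset='example', content_id=0, asset_id=0,
#         dis_path='checkerboard_1920_1080.yuv',
#         asset_dict={'width': 1920, 'height': 1080, 'yuv_type': 'yuv420p'})
#     assert asset.dis_yuv_type == 'yuv420p'
#     plain_asset = asset.copy_as_asset()  # same content, returned as an Asset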
|
the-stack_0_26466
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import os
import tempfile
import textwrap
import unittest
from mock import patch
import yaml
from buildtool import (
DEFAULT_BUILD_NUMBER,
BranchSourceCodeManager,
GitRepositorySpec,
RepositorySummary,
SourceInfo)
import buildtool
import buildtool.__main__ as bomtool_main
import buildtool.bom_commands
from buildtool.bom_commands import (
BomBuilder, BuildBomCommand)
from test_util import (
ALL_STANDARD_TEST_BOM_REPO_NAMES,
PATCH_BRANCH,
PATCH_VERSION_NUMBER,
NORMAL_REPO,
NORMAL_SERVICE,
OUTLIER_REPO,
OUTLIER_SERVICE,
BaseGitRepoTestFixture,
init_runtime)
def load_default_bom_dependencies():
path = os.path.join(os.path.dirname(__file__),
'../../dev/buildtool/bom_dependencies.yml')
with open(path, 'r') as stream:
return yaml.load(stream.read())
def make_default_options(options):
options.git_branch = 'OptionBranch'
options.github_owner = 'test-user'
options.bom_dependencies_path = None
options.build_number = 'OptionBuildNumber'
options.bintray_org = 'test-bintray-org'
options.bintray_debian_repository = 'test-debian-repo'
options.docker_registry = 'test-docker-registry'
options.publish_gce_image_project = 'test-image-project-name'
return options
class TestBuildBomCommand(BaseGitRepoTestFixture):
def setUp(self):
super(TestBuildBomCommand, self).setUp()
self.parser = argparse.ArgumentParser()
self.subparsers = self.parser.add_subparsers()
def make_test_options(self):
options = super(TestBuildBomCommand, self).make_test_options()
return make_default_options(options)
def test_default_bom_options(self):
registry = {}
buildtool.bom_commands.register_commands(registry, self.subparsers, {})
self.assertTrue('build_bom' in registry)
self.assertTrue('publish_bom' in registry)
options = self.parser.parse_args(['build_bom'])
option_dict = vars(options)
self.assertEquals(DEFAULT_BUILD_NUMBER, options.build_number)
for key in ['bom_path', 'github_owner']:
self.assertIsNone(option_dict[key])
def test_bom_option_default_overrides(self):
defaults = {'not_used': False}
defaults.update(vars(self.options))
registry = {}
buildtool.bom_commands.register_commands(
registry, self.subparsers, defaults)
parsed_options = self.parser.parse_args(['build_bom'])
parsed_option_dict = vars(parsed_options)
self.assertTrue('not_used' not in parsed_option_dict)
for key, value in defaults.items():
if key in ['not_used', 'command', 'input_dir', 'output_dir']:
continue
self.assertEquals(value, parsed_option_dict[key])
def test_bom_command(self):
"""Make sure when we run "build_bom" we actually get what we meant."""
defaults = vars(make_default_options(self.options))
defaults.update({'bom_path': 'MY PATH',
'github_owner': 'TestOwner',
'input_dir': 'TestInputRoot'})
defaults.update({'bintray_org': 'TestBintrayOrg',
'bintray_debian_repository': 'TestDebianRepo',
'docker_registry': 'TestDockerRegistry',
'publish_gce_image_project': 'TestGceProject'})
del defaults['github_filesystem_root']
parser = argparse.ArgumentParser()
registry = bomtool_main.make_registry([buildtool.bom_commands],
parser, defaults)
bomtool_main.add_standard_parser_args(parser, defaults)
options = parser.parse_args(['build_bom'])
prefix = 'http://test-domain.com/test-owner'
make_fake = self.patch_method
# When asked to filter the normal bom repos to determine source_repositories
# we'll return our own fake repository as if we configured the original
    # command for it. This also makes it easier to test just the one
    # repo rather than all of them, and to check that no hidden assumptions remain.
mock_filter = make_fake(BuildBomCommand, 'filter_repositories')
test_repository = GitRepositorySpec('clouddriver', commit_id='CommitA',
origin=prefix + '/TestRepoA')
mock_filter.return_value = [test_repository]
# When the base command ensures the local repository exists, we'll
# intercept that call and do nothing rather than the git checkouts, etc.
make_fake(BranchSourceCodeManager, 'ensure_local_repository')
mock_remote = self.patch_function('buildtool.branch_scm.GitRunner'
'.query_remote_repository_commit_id')
mock_remote.return_value = 'CommitA'
# When the base command asks for the repository metadata, we'll return
    # this hardcoded info, then look for it later in the generated bom.
mock_refresh = make_fake(BranchSourceCodeManager, 'refresh_source_info')
summary = RepositorySummary('CommitA', 'TagA', '9.8.7', '44.55.66', [])
source_info = SourceInfo('MyBuildNumber', summary)
mock_refresh.return_value = source_info
# When asked to write the bom out, do nothing.
# We'll verify the bom later when looking at the mock call sequencing.
mock_write = self.patch_function('buildtool.bom_commands.write_to_path')
mock_now = self.patch_function('buildtool.bom_commands.now')
mock_now.return_value = datetime.datetime(2018, 1, 2, 3, 4, 5)
factory = registry['build_bom']
command = factory.make_command(options)
command()
# Verify source repositories were filtered
self.assertEquals([test_repository], command.source_repositories)
# Verify that the filter was called with the original bom repos,
# and these repos were coming from the configured github_owner's repo.
bom_repo_list = [
GitRepositorySpec(
name,
git_dir=os.path.join('TestInputRoot', 'build_bom', name),
origin='https://%s/TestOwner/%s' % (options.github_hostname, name),
upstream='https://github.com/spinnaker/' + name)
for name in sorted(['clouddriver', 'deck', 'echo', 'fiat', 'front50',
'gate', 'igor', 'kayenta', 'orca', 'rosco', 'spinnaker',
'spinnaker-monitoring'])
]
mock_remote.assert_called_once_with(test_repository.origin,
options.git_branch)
mock_filter.assert_called_once_with(bom_repo_list)
mock_refresh.assert_called_once_with(test_repository, 'OptionBuildNumber')
bom_text, bom_path = mock_write.call_args_list[0][0]
self.assertEquals(bom_path, 'MY PATH')
bom = yaml.load(bom_text)
golden_text = textwrap.dedent("""\
artifactSources:
debianRepository: https://dl.bintray.com/TestBintrayOrg/TestDebianRepo
dockerRegistry: TestDockerRegistry
gitPrefix: http://test-domain.com/test-owner
googleImageProject: TestGceProject
dependencies:
services:
clouddriver:
commit: CommitA
version: 9.8.7-MyBuildNumber
timestamp: '2018-01-02 03:04:05'
version: OptionBranch-OptionBuildNumber
""")
golden_bom = yaml.load(golden_text)
golden_bom['dependencies'] = load_default_bom_dependencies()
for key, value in golden_bom.items():
self.assertEquals(value, bom[key])
class TestBomBuilder(BaseGitRepoTestFixture):
def make_test_options(self):
options = super(TestBomBuilder, self).make_test_options()
return make_default_options(options)
def setUp(self):
super(TestBomBuilder, self).setUp()
self.test_root = os.path.join(self.base_temp_dir, self._testMethodName)
self.scm = BranchSourceCodeManager(self.options, self.test_root)
def test_default_build(self):
builder = BomBuilder(self.options, self.scm)
bom = builder.build()
self.assertEquals(
bom['dependencies'], load_default_bom_dependencies())
# There are no services because we never added any.
# Although the builder takes an SCM, you still need to explicitly add repos.
self.assertEquals({}, bom['services'])
def test_inject_dependencies(self):
dependencies = {
'DependencyA': {'version': 'vA'},
'DependencyB': {'version': 'vB'}
}
fd, path = tempfile.mkstemp(prefix='bomdeps')
os.close(fd)
with open(path, 'w') as stream:
yaml.dump(dependencies, stream)
options = self.options
options.bom_dependencies_path = path
try:
builder = BomBuilder(options, self.scm)
bom = builder.build()
finally:
os.remove(path)
self.assertEquals(dependencies, bom['dependencies'])
self.assertEquals({}, bom['services'])
def test_build(self):
test_root = self.test_root
options = self.options
options.git_branch = PATCH_BRANCH
options.github_owner = 'default'
options.github_disable_upstream_push = True
scm = BranchSourceCodeManager(options, test_root)
golden_bom = dict(self.golden_bom)
builder = BomBuilder.new_from_bom(options, scm, golden_bom)
source_repositories = [scm.make_repository_spec(name)
for name in ALL_STANDARD_TEST_BOM_REPO_NAMES]
for repository in source_repositories:
scm.ensure_git_path(repository)
summary = scm.git.collect_repository_summary(repository.git_dir)
source_info = SourceInfo('SourceInfoBuildNumber', summary)
builder.add_repository(repository, source_info)
with patch('buildtool.bom_commands.now') as mock_now:
mock_now.return_value = datetime.datetime(2018, 1, 2, 3, 4, 5)
bom = builder.build()
golden_bom['version'] = 'patch-OptionBuildNumber'
golden_bom['timestamp'] = '2018-01-02 03:04:05'
golden_bom['services'][NORMAL_SERVICE]['version'] = (
PATCH_VERSION_NUMBER + '-SourceInfoBuildNumber')
golden_bom['services'][OUTLIER_SERVICE]['version'] = (
PATCH_VERSION_NUMBER + '-SourceInfoBuildNumber')
golden_bom['services']['monitoring-third-party']['version'] = (
PATCH_VERSION_NUMBER + '-SourceInfoBuildNumber')
golden_bom['artifactSources'] = {
'debianRepository': 'https://dl.bintray.com/%s/%s' % (
options.bintray_org, options.bintray_debian_repository),
'dockerRegistry': options.docker_registry,
'googleImageProject': options.publish_gce_image_project,
'gitPrefix': os.path.dirname(self.repo_commit_map[NORMAL_REPO]['ORIGIN'])
}
for key, value in bom['services'].items():
self.assertEquals(value, golden_bom['services'][key])
for key, value in bom.items():
self.assertEquals(value, golden_bom[key])
self.assertEquals(golden_bom, bom)
def test_rebuild(self):
test_root = self.test_root
options = self.options
options.git_branch = 'master'
options.github_owner = 'default'
options.github_disable_upstream_push = True
options.build_number = 'UpdatedBuildNumber'
scm = BranchSourceCodeManager(options, test_root)
builder = BomBuilder.new_from_bom(options, scm, self.golden_bom)
repository = scm.make_repository_spec(OUTLIER_REPO)
scm.ensure_git_path(repository)
scm.git.check_run(repository.git_dir, 'checkout ' + PATCH_BRANCH)
summary = scm.git.collect_repository_summary(repository.git_dir)
source_info = SourceInfo('SourceInfoBuildNumber', summary)
builder.add_repository(repository, source_info)
with patch('buildtool.bom_commands.now') as mock_now:
mock_now.return_value = datetime.datetime(2018, 1, 2, 3, 4, 5)
bom = builder.build()
updated_service = bom['services'][OUTLIER_SERVICE]
self.assertEquals(updated_service, {
'commit': self.repo_commit_map[OUTLIER_REPO][PATCH_BRANCH],
'version': PATCH_VERSION_NUMBER + '-SourceInfoBuildNumber'
})
# The bom should be the same as before, but with new timestamp/version
    # and our service updated, and the artifactSources updated to our configs.
updated_bom = dict(self.golden_bom)
updated_bom['timestamp'] = '2018-01-02 03:04:05'
updated_bom['version'] = 'master-UpdatedBuildNumber'
updated_bom['services'][OUTLIER_SERVICE] = updated_service
updated_bom['artifactSources'] = {
'debianRepository': 'https://dl.bintray.com/%s/%s' % (
options.bintray_org, options.bintray_debian_repository),
'dockerRegistry': options.docker_registry,
'googleImageProject': options.publish_gce_image_project,
'gitPrefix': self.golden_bom['artifactSources']['gitPrefix']
}
for key, value in updated_bom.items():
self.assertEquals(value, bom[key])
self.assertEquals(updated_bom, bom)
def test_determine_most_common_prefix(self):
options = self.options
builder = BomBuilder(options, self.scm)
self.assertIsNone(builder.determine_most_common_prefix())
prefix = ['http://github.com/one', '/local/source/path/two']
    # Test two repos vs one from different repo prefixes.
    # Run the test twice, changing the ordering, so the desired prefix is visible.
for which in [0, 1]:
repository = GitRepositorySpec(
'RepoOne', origin=prefix[0] + '/RepoOne',
commit_id='RepoOneCommit')
summary = RepositorySummary('RepoOneCommit', 'RepoOneTag',
'1.2.3', '1.2.2', [])
source_info = SourceInfo('BuildOne', summary)
builder.add_repository(repository, source_info)
self.assertEquals(prefix[0], builder.determine_most_common_prefix())
repository = GitRepositorySpec(
'RepoTwo', origin=prefix[which] + '/RepoTwo',
commit_id='RepoTwoCommit')
summary = RepositorySummary('RepoTwoCommit', 'RepoTwoTag',
'2.2.3', '2.2.3', [])
source_info = SourceInfo('BuildTwo', summary)
builder.add_repository(repository, source_info)
repository = GitRepositorySpec(
'RepoThree', origin=prefix[1] + '/RepoThree',
commit_id='RepoThreeCommit')
summary = RepositorySummary('RepoThreeCommit', 'RepoThreeTag',
'3.2.0', '2.2.1', [])
source_info = SourceInfo('BuildThree', summary)
builder.add_repository(repository, source_info)
self.assertEquals(prefix[which], builder.determine_most_common_prefix())
if __name__ == '__main__':
init_runtime()
unittest.main(verbosity=2)
|
the-stack_0_26467
|
import torch
import torch.nn as nn
def conv_block_3d(in_dim, out_dim, activation):
return nn.Sequential(
nn.Conv3d(in_dim, out_dim, kernel_size=3, stride=1, padding=1),
nn.BatchNorm3d(out_dim),
activation,)
def conv_trans_block_3d(in_dim, out_dim, activation):
return nn.Sequential(
nn.ConvTranspose3d(in_dim, out_dim, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm3d(out_dim),
activation,)
def max_pooling_3d():
return nn.MaxPool3d(kernel_size=2, stride=2, padding=0)
def conv_block_2_3d(in_dim, out_dim, activation):
return nn.Sequential(
conv_block_3d(in_dim, out_dim, activation),
nn.Conv3d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),
nn.BatchNorm3d(out_dim),)
class UNet(nn.Module):
def __init__(self, in_dim=3, num_classes=3, num_filters=8, backbone=None, pretrained=False, checkpoint=False):
super(UNet, self).__init__()
self.in_dim = in_dim
self.out_dim = num_classes
self.num_filters = num_filters
activation = nn.LeakyReLU(0.2, inplace=True)
# Down sampling
self.down_1 = conv_block_2_3d(self.in_dim, self.num_filters, activation)
self.pool_1 = max_pooling_3d()
self.down_2 = conv_block_2_3d(self.num_filters, self.num_filters * 2, activation)
self.pool_2 = max_pooling_3d()
self.down_3 = conv_block_2_3d(self.num_filters * 2, self.num_filters * 4, activation)
self.pool_3 = max_pooling_3d()
self.down_4 = conv_block_2_3d(self.num_filters * 4, self.num_filters * 8, activation)
self.pool_4 = max_pooling_3d()
self.down_5 = conv_block_2_3d(self.num_filters * 8, self.num_filters * 16, activation)
self.pool_5 = max_pooling_3d()
# Bridge
self.bridge = conv_block_2_3d(self.num_filters * 16, self.num_filters * 32, activation)
# Up sampling
self.trans_1 = conv_trans_block_3d(self.num_filters * 32, self.num_filters * 32, activation)
self.up_1 = conv_block_2_3d(self.num_filters * 48, self.num_filters * 16, activation)
self.trans_2 = conv_trans_block_3d(self.num_filters * 16, self.num_filters * 16, activation)
self.up_2 = conv_block_2_3d(self.num_filters * 24, self.num_filters * 8, activation)
self.trans_3 = conv_trans_block_3d(self.num_filters * 8, self.num_filters * 8, activation)
self.up_3 = conv_block_2_3d(self.num_filters * 12, self.num_filters * 4, activation)
self.trans_4 = conv_trans_block_3d(self.num_filters * 4, self.num_filters * 4, activation)
self.up_4 = conv_block_2_3d(self.num_filters * 6, self.num_filters * 2, activation)
self.trans_5 = conv_trans_block_3d(self.num_filters * 2, self.num_filters * 2, activation)
self.up_5 = conv_block_2_3d(self.num_filters * 3, self.num_filters * 1, activation)
# Output
self.out = conv_block_3d(self.num_filters, self.out_dim, activation)
def forward(self, x):
# Down sampling
down_1 = self.down_1(x) # -> [1, 4, 128, 128, 128]
pool_1 = self.pool_1(down_1) # -> [1, 4, 64, 64, 64]
down_2 = self.down_2(pool_1) # -> [1, 8, 64, 64, 64]
pool_2 = self.pool_2(down_2) # -> [1, 8, 32, 32, 32]
down_3 = self.down_3(pool_2) # -> [1, 16, 32, 32, 32]
pool_3 = self.pool_3(down_3) # -> [1, 16, 16, 16, 16]
down_4 = self.down_4(pool_3) # -> [1, 32, 16, 16, 16]
pool_4 = self.pool_4(down_4) # -> [1, 32, 8, 8, 8]
down_5 = self.down_5(pool_4) # -> [1, 64, 8, 8, 8]
pool_5 = self.pool_5(down_5) # -> [1, 64, 4, 4, 4]
# Bridge
bridge = self.bridge(pool_5) # -> [1, 128, 4, 4, 4]
# Up sampling
trans_1 = self.trans_1(bridge) # -> [1, 128, 8, 8, 8]
concat_1 = torch.cat([trans_1, down_5], dim=1) # -> [1, 192, 8, 8, 8]
up_1 = self.up_1(concat_1) # -> [1, 64, 8, 8, 8]
trans_2 = self.trans_2(up_1) # -> [1, 64, 16, 16, 16]
concat_2 = torch.cat([trans_2, down_4], dim=1) # -> [1, 96, 16, 16, 16]
up_2 = self.up_2(concat_2) # -> [1, 32, 16, 16, 16]
trans_3 = self.trans_3(up_2) # -> [1, 32, 32, 32, 32]
concat_3 = torch.cat([trans_3, down_3], dim=1) # -> [1, 48, 32, 32, 32]
up_3 = self.up_3(concat_3) # -> [1, 16, 32, 32, 32]
trans_4 = self.trans_4(up_3) # -> [1, 16, 64, 64, 64]
concat_4 = torch.cat([trans_4, down_2], dim=1) # -> [1, 24, 64, 64, 64]
up_4 = self.up_4(concat_4) # -> [1, 8, 64, 64, 64]
trans_5 = self.trans_5(up_4) # -> [1, 8, 128, 128, 128]
concat_5 = torch.cat([trans_5, down_1], dim=1) # -> [1, 12, 128, 128, 128]
up_5 = self.up_5(concat_5) # -> [1, 4, 128, 128, 128]
# Output
out = self.out(up_5) # -> [1, 3, 128, 128, 128]
return out
'''
if __name__ == "__main__":
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
image_size = 128
x = torch.Tensor(1, 3, image_size, image_size, image_size)
    x = x.to(device)
    print("x size: {}".format(x.size()))
    model = UNet(in_dim=3, num_classes=3, num_filters=4).to(device)
    out = model(x)
print("out size: {}".format(out.size()))
'''
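# Added note (not in the original file): the encoder applies five stride-2
# poolings, so each spatial dimension of the input must be divisible by
# 2**5 = 32 (e.g. 64, 96, 128) for the skip connections to line up.
# A quick shape check under that assumption:
#
#     model = UNet(in_dim=3, num_classes=3, num_filters=4)
#     x = torch.zeros(1, 3, 64, 64, 64)
#     assert model(x).shape == (1, 3, 64, 64, 64)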
|
the-stack_0_26468
|
from __future__ import print_function
import numpy as np
from numpy.linalg import norm
import numpy.testing as npt
from dipy.testing.memory import get_type_refcount
from nose.tools import assert_true, assert_equal, assert_almost_equal
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises, run_module_suite)
from dipy.tracking.streamline import (set_number_of_points,
length as ds_length,
relist_streamlines,
unlist_streamlines,
center_streamlines,
transform_streamlines,
select_random_set_of_streamlines,
compress_streamlines,
select_by_rois,
orient_by_rois)
streamline = np.array([[82.20181274, 91.36505890, 43.15737152],
[82.38442230, 91.79336548, 43.87036514],
[82.48710632, 92.27861023, 44.56298065],
[82.53310394, 92.78545380, 45.24635315],
[82.53793335, 93.26902008, 45.94785309],
[82.48797607, 93.75003815, 46.64939880],
[82.35533142, 94.25181580, 47.32533264],
[82.15484619, 94.76634216, 47.97451019],
[81.90982819, 95.28792572, 48.60244370],
[81.63336945, 95.78153229, 49.23971176],
[81.35479736, 96.24868011, 49.89558792],
[81.08713531, 96.69807434, 50.56812668],
[80.81504822, 97.14285278, 51.24193192],
[80.52591705, 97.56719971, 51.92168427],
[80.26599884, 97.98269653, 52.61848068],
[80.04635620, 98.38131714, 53.33855820],
[79.84691620, 98.77052307, 54.06955338],
[79.57667542, 99.13599396, 54.78985596],
[79.23351288, 99.43207550, 55.51065063],
[78.84815979, 99.64141846, 56.24016571],
[78.47383881, 99.77347565, 56.99299240],
[78.12837219, 99.81330872, 57.76969528],
[77.80438995, 99.85082245, 58.55574799],
[77.49439240, 99.88065338, 59.34777069],
[77.21414185, 99.85343933, 60.15090561],
[76.96416473, 99.82772827, 60.96406937],
[76.74712372, 99.80519104, 61.78676605],
[76.52263641, 99.79122162, 62.60765076],
[76.03757477, 100.08692169, 63.24152374],
[75.44867706, 100.35265350, 63.79513168],
[74.78033447, 100.57255554, 64.27278900],
[74.11605835, 100.77330780, 64.76428986],
[73.51222992, 100.98779297, 65.32373047],
[72.97387695, 101.23387146, 65.93502045],
[72.47355652, 101.49151611, 66.57343292],
[71.99834442, 101.72480774, 67.23979950],
[71.56909180, 101.98665619, 67.92664337],
[71.18083191, 102.29483795, 68.61888123],
[70.81879425, 102.63343048, 69.31127167],
[70.47422791, 102.98672485, 70.00532532],
[70.10092926, 103.28502655, 70.70999908],
[69.69512177, 103.51667023, 71.42147064],
[69.27423096, 103.71351624, 72.13452911],
[68.91260529, 103.81676483, 72.89796448],
[68.60788727, 103.81982422, 73.69258118],
[68.34162903, 103.76619720, 74.49915314],
[68.08542633, 103.70635223, 75.30856323],
[67.83590698, 103.60187531, 76.11553955],
[67.56822968, 103.44821930, 76.90870667],
[67.28399658, 103.25878906, 77.68825531],
[67.00117493, 103.03740692, 78.45989227],
[66.72718048, 102.80329895, 79.23099518],
[66.46197510, 102.54130554, 79.99622345],
[66.20803833, 102.22305298, 80.74387360],
[65.96872711, 101.88980865, 81.48987579],
[65.72864532, 101.59316254, 82.25085449],
[65.47808075, 101.33383942, 83.02194214],
[65.21841431, 101.11295319, 83.80186462],
[64.95678711, 100.94080353, 84.59326935],
[64.71759033, 100.82022095, 85.40114594],
[64.48053741, 100.73490143, 86.21411896],
[64.24304199, 100.65074158, 87.02709198],
[64.01773834, 100.55318451, 87.84204865],
[63.83801651, 100.41996765, 88.66333008],
[63.70982361, 100.25119019, 89.48779297],
[63.60707855, 100.06730652, 90.31262207],
[63.46164322, 99.91001892, 91.13648224],
[63.26287842, 99.78648376, 91.95485687],
[63.03713226, 99.68377686, 92.76905823],
[62.81192398, 99.56619263, 93.58140564],
[62.57145309, 99.42708588, 94.38592529],
[62.32259369, 99.25592804, 95.18167114],
[62.07497787, 99.05770111, 95.97154236],
[61.82253647, 98.83877563, 96.75438690],
[61.59536743, 98.59293365, 97.53706360],
[61.46530151, 98.30503845, 98.32772827],
[61.39904785, 97.97928619, 99.11172485],
[61.33279419, 97.65353394, 99.89572906],
[61.26067352, 97.30914307, 100.67123413],
[61.19459534, 96.96743011, 101.44847107],
[61.19580460, 96.63417053, 102.23215485],
[61.26572037, 96.29887390, 103.01185608],
[61.39840698, 95.96297455, 103.78307343],
[61.57207870, 95.64262390, 104.55268097],
[61.78163528, 95.35540771, 105.32629395],
[62.06700134, 95.09746552, 106.08564758],
[62.39427185, 94.85724640, 106.83369446],
[62.74076462, 94.62278748, 107.57482147],
[63.11461639, 94.40107727, 108.30641937],
[63.53397751, 94.20418549, 109.02002716],
[64.00019836, 94.03809357, 109.71183777],
[64.43580627, 93.87523651, 110.42416382],
[64.84857941, 93.69993591, 111.14715576],
[65.26740265, 93.51858521, 111.86515808],
[65.69511414, 93.36718750, 112.58474731],
[66.10470581, 93.22719574, 113.31711578],
[66.45891571, 93.06028748, 114.07256317],
[66.78582001, 92.90560913, 114.84281921],
[67.11138916, 92.79004669, 115.62040710],
[67.44729614, 92.75711823, 116.40135193],
[67.75688171, 92.98265076, 117.16111755],
[68.02041626, 93.28012848, 117.91371155],
[68.25725555, 93.53466797, 118.69052124],
[68.46047974, 93.63263702, 119.51107788],
[68.62039948, 93.62007141, 120.34690094],
[68.76782227, 93.56475067, 121.18331909],
[68.90222168, 93.46326447, 122.01765442],
[68.99872589, 93.30039978, 122.84759521],
[69.04119873, 93.05428314, 123.66156769],
[69.05086517, 92.74394989, 124.45450592],
[69.02742004, 92.40427399, 125.23509979],
[68.95466614, 92.09059143, 126.02339935],
[68.84975433, 91.79674530, 126.81564331],
[68.72673798, 91.53726196, 127.61715698],
[68.60685730, 91.30300140, 128.42681885],
[68.50636292, 91.12481689, 129.25317383],
[68.39311218, 91.01572418, 130.08976746],
[68.25946808, 90.94654083, 130.92756653]],
dtype=np.float32)
streamline_64bit = streamline.astype(np.float64)
streamlines = [streamline[[0, 10]], streamline,
streamline[::2], streamline[::3],
streamline[::5], streamline[::6]]
streamlines_64bit = [streamline_64bit[[0, 10]], streamline_64bit,
streamline_64bit[::2], streamline_64bit[::3],
streamline_64bit[::4], streamline_64bit[::5]]
heterogeneous_streamlines = [streamline_64bit,
streamline_64bit.reshape((-1, 6)),
streamline_64bit.reshape((-1, 2))]
def length_python(xyz, along=False):
xyz = np.asarray(xyz, dtype=np.float64)
if xyz.shape[0] < 2:
if along:
return np.array([0])
return 0
dists = np.sqrt((np.diff(xyz, axis=0)**2).sum(axis=1))
if along:
return np.cumsum(dists)
return np.sum(dists)
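# Quick worked example (added): two unit-length segments along x sum to 2.0,
# i.e. length_python(np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]])) == 2.0,
# and with along=True the cumulative lengths are [1.0, 2.0].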
def set_number_of_points_python(xyz, n_pols=3):
def _extrap(xyz, cumlen, distance):
''' Helper function for extrapolate '''
ind = np.where((cumlen-distance) > 0)[0][0]
len0 = cumlen[ind-1]
len1 = cumlen[ind]
Ds = distance-len0
Lambda = Ds/(len1-len0)
return Lambda*xyz[ind] + (1-Lambda)*xyz[ind-1]
cumlen = np.zeros(xyz.shape[0])
cumlen[1:] = length_python(xyz, along=True)
step = cumlen[-1] / (n_pols-1)
ar = np.arange(0, cumlen[-1], step)
if np.abs(ar[-1] - cumlen[-1]) < np.finfo('f4').eps:
ar = ar[:-1]
xyz2 = [_extrap(xyz, cumlen, distance) for distance in ar]
return np.vstack((np.array(xyz2), xyz[-1]))
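# Worked example (added): resampling the 3-point segment
# np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]], 'f4') with n_pols=5 yields five
# evenly spaced collinear points from [0, 0, 0] to [2, 2, 2], matching the
# Cython set_number_of_points exercised in the tests below.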
def test_set_number_of_points():
# Test resampling of only one streamline
nb_points = 12
modified_streamline_cython = set_number_of_points(
streamline, nb_points)
modified_streamline_python = set_number_of_points_python(
streamline, nb_points)
assert_equal(len(modified_streamline_cython), nb_points)
    # Use a 5-digit precision because the streamline is in float32.
assert_array_almost_equal(modified_streamline_cython,
modified_streamline_python, 5)
modified_streamline_cython = set_number_of_points(
streamline_64bit, nb_points)
modified_streamline_python = set_number_of_points_python(
streamline_64bit, nb_points)
assert_equal(len(modified_streamline_cython), nb_points)
assert_array_almost_equal(modified_streamline_cython,
modified_streamline_python)
res = []
simple_streamline = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]], 'f4')
for nb_points in range(2, 200):
modified_streamline_cython = set_number_of_points(
simple_streamline, nb_points)
res.append(nb_points - len(modified_streamline_cython))
assert_equal(np.sum(res), 0)
# Test resampling of multiple streamlines of different nb_points
nb_points = 12
modified_streamlines_cython = set_number_of_points(
streamlines, nb_points)
for i, s in enumerate(streamlines):
modified_streamline_python = set_number_of_points_python(s, nb_points)
        # Use a 5-digit precision because the streamline is in float32.
assert_array_almost_equal(modified_streamlines_cython[i],
modified_streamline_python, 5)
modified_streamlines_cython = set_number_of_points(
streamlines_64bit, nb_points)
for i, s in enumerate(streamlines_64bit):
modified_streamline_python = set_number_of_points_python(s, nb_points)
assert_array_almost_equal(modified_streamlines_cython[i],
modified_streamline_python)
# Test streamlines with mixed dtype
streamlines_mixed_dtype = [streamline,
streamline.astype(np.float64),
streamline.astype(np.int32),
streamline.astype(np.int64)]
nb_points_mixed_dtype = [len(s) for s in set_number_of_points(
streamlines_mixed_dtype, nb_points)]
assert_array_equal(nb_points_mixed_dtype,
[nb_points] * len(streamlines_mixed_dtype))
# Test streamlines with different shape
modified_streamlines_cython = set_number_of_points(
heterogeneous_streamlines, nb_points)
for i, s in enumerate(heterogeneous_streamlines):
modified_streamline_python = set_number_of_points_python(s, nb_points)
assert_array_almost_equal(modified_streamlines_cython[i],
modified_streamline_python)
# Test streamline with integer dtype
modified_streamline = set_number_of_points(streamline.astype(np.int32))
assert_true(modified_streamline.dtype == np.float32)
modified_streamline = set_number_of_points(streamline.astype(np.int64))
assert_true(modified_streamline.dtype == np.float64)
# Test empty list
assert_equal(set_number_of_points([]), [])
# Test streamline having only one point
assert_raises(ValueError, set_number_of_points, np.array([[1, 2, 3]]))
# We do not support list of lists, it should be numpy ndarray.
streamline_unsupported = [[1, 2, 3], [4, 5, 5], [2, 1, 3], [4, 2, 1]]
assert_raises(AttributeError, set_number_of_points, streamline_unsupported)
    # Test setting the number of points of a numpy array with flag WRITABLE=False
streamline_readonly = streamline.copy()
streamline_readonly.setflags(write=False)
assert_equal(len(set_number_of_points(streamline_readonly, nb_points=42)),
42)
    # Test resampling lists of numpy arrays with flag WRITABLE=False
streamlines_readonly = []
for s in streamlines:
streamlines_readonly.append(s.copy())
streamlines_readonly[-1].setflags(write=False)
assert_equal(len(set_number_of_points(streamlines_readonly, nb_points=42)),
len(streamlines_readonly))
streamlines_readonly = []
for s in streamlines_64bit:
streamlines_readonly.append(s.copy())
streamlines_readonly[-1].setflags(write=False)
assert_equal(len(set_number_of_points(streamlines_readonly, nb_points=42)),
len(streamlines_readonly))
def test_set_number_of_points_memory_leaks():
# Test some dtypes
dtypes = [np.float32, np.float64, np.int32, np.int64]
for dtype in dtypes:
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = [rng.randn(rng.randint(10, 100), 3).astype(dtype) for _ in range(NB_STREAMLINES)]
list_refcount_before = get_type_refcount()["list"]
rstreamlines = set_number_of_points(streamlines, nb_points=2)
list_refcount_after = get_type_refcount()["list"]
del rstreamlines # Delete `rstreamlines` because it holds a reference to `list`.
# Calling `set_number_of_points` should increase the refcount of `list` by one
# since we kept the returned value.
assert_equal(list_refcount_after, list_refcount_before+1)
# Test mixed dtypes
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = []
for i in range(NB_STREAMLINES):
dtype = dtypes[i % len(dtypes)]
streamlines.append(rng.randn(rng.randint(10, 100), 3).astype(dtype))
list_refcount_before = get_type_refcount()["list"]
rstreamlines = set_number_of_points(streamlines, nb_points=2)
list_refcount_after = get_type_refcount()["list"]
# Calling `set_number_of_points` should increase the refcount of `list` by one
# since we kept the returned value.
assert_equal(list_refcount_after, list_refcount_before+1)
def test_length():
# Test length of only one streamline
length_streamline_cython = ds_length(streamline)
length_streamline_python = length_python(streamline)
assert_almost_equal(length_streamline_cython, length_streamline_python)
length_streamline_cython = ds_length(streamline_64bit)
length_streamline_python = length_python(streamline_64bit)
assert_almost_equal(length_streamline_cython, length_streamline_python)
# Test computing length of multiple streamlines of different nb_points
length_streamlines_cython = ds_length(streamlines)
for i, s in enumerate(streamlines):
length_streamline_python = length_python(s)
assert_array_almost_equal(length_streamlines_cython[i],
length_streamline_python)
length_streamlines_cython = ds_length(streamlines_64bit)
for i, s in enumerate(streamlines_64bit):
length_streamline_python = length_python(s)
assert_array_almost_equal(length_streamlines_cython[i],
length_streamline_python)
# Test streamlines having mixed dtype
streamlines_mixed_dtype = [streamline,
streamline.astype(np.float64),
streamline.astype(np.int32),
streamline.astype(np.int64)]
lengths_mixed_dtype = [ds_length(s)
for s in streamlines_mixed_dtype]
assert_array_equal(ds_length(streamlines_mixed_dtype),
lengths_mixed_dtype)
# Test streamlines with different shape
length_streamlines_cython = ds_length(
heterogeneous_streamlines)
for i, s in enumerate(heterogeneous_streamlines):
length_streamline_python = length_python(s)
assert_array_almost_equal(length_streamlines_cython[i],
length_streamline_python)
# Test streamline having integer dtype
length_streamline = ds_length(streamline.astype('int'))
assert_true(length_streamline.dtype == np.float64)
# Test empty list
assert_equal(ds_length([]), 0.0)
# Test streamline having only one point
assert_equal(ds_length(np.array([[1, 2, 3]])), 0.0)
# We do not support list of lists, it should be numpy ndarray.
streamline_unsupported = [[1, 2, 3], [4, 5, 5], [2, 1, 3], [4, 2, 1]]
assert_raises(AttributeError, ds_length,
streamline_unsupported)
    # Test computing the length of numpy arrays with flag WRITABLE=False
streamlines_readonly = []
for s in streamlines:
streamlines_readonly.append(s.copy())
streamlines_readonly[-1].setflags(write=False)
assert_array_almost_equal(ds_length(streamlines_readonly),
[length_python(s) for s in streamlines_readonly])
streamlines_readonly = []
for s in streamlines_64bit:
streamlines_readonly.append(s.copy())
streamlines_readonly[-1].setflags(write=False)
assert_array_almost_equal(ds_length(streamlines_readonly),
[length_python(s) for s in streamlines_readonly])
def test_length_memory_leaks():
# Test some dtypes
dtypes = [np.float32, np.float64, np.int32, np.int64]
for dtype in dtypes:
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = [rng.randn(rng.randint(10, 100), 3).astype(dtype) for _ in range(NB_STREAMLINES)]
list_refcount_before = get_type_refcount()["list"]
lengths = ds_length(streamlines)
list_refcount_after = get_type_refcount()["list"]
# Calling `ds_length` shouldn't increase the refcount of `list`
# since the return value is a numpy array.
assert_equal(list_refcount_after, list_refcount_before)
# Test mixed dtypes
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = []
for i in range(NB_STREAMLINES):
dtype = dtypes[i % len(dtypes)]
streamlines.append(rng.randn(rng.randint(10, 100), 3).astype(dtype))
list_refcount_before = get_type_refcount()["list"]
lengths = ds_length(streamlines)
list_refcount_after = get_type_refcount()["list"]
# Calling `ds_length` shouldn't increase the refcount of `list`
# since the return value is a numpy array.
assert_equal(list_refcount_after, list_refcount_before)
def test_unlist_relist_streamlines():
streamlines = [np.random.rand(10, 3),
np.random.rand(20, 3),
np.random.rand(5, 3)]
points, offsets = unlist_streamlines(streamlines)
assert_equal(offsets.dtype, np.dtype('i8'))
assert_equal(points.shape, (35, 3))
assert_equal(len(offsets), len(streamlines))
streamlines2 = relist_streamlines(points, offsets)
assert_equal(len(streamlines), len(streamlines2))
for i in range(len(streamlines)):
assert_array_equal(streamlines[i], streamlines2[i])
def test_center_and_transform():
A = np.array([[1, 2, 3], [1, 2, 3.]])
streamlines = [A for i in range(10)]
streamlines2, center = center_streamlines(streamlines)
B = np.zeros((2, 3))
assert_array_equal(streamlines2[0], B)
assert_array_equal(center, A[0])
affine = np.eye(4)
affine[0, 0] = 2
affine[:3, -1] = - np.array([2, 1, 1]) * center
streamlines3 = transform_streamlines(streamlines, affine)
assert_array_equal(streamlines3[0], B)
def test_select_random_streamlines():
streamlines = [np.random.rand(10, 3),
np.random.rand(20, 3),
np.random.rand(5, 3)]
new_streamlines = select_random_set_of_streamlines(streamlines, 2)
assert_equal(len(new_streamlines), 2)
new_streamlines = select_random_set_of_streamlines(streamlines, 4)
assert_equal(len(new_streamlines), 3)
def compress_streamlines_python(streamline, tol_error=0.01,
max_segment_length=10):
"""
Python version of the FiberCompression found on
https://github.com/scilus/FiberCompression.
"""
if streamline.shape[0] <= 2:
return streamline.copy()
# Euclidean distance
def segment_length(prev, next):
return np.sqrt(((prev-next)**2).sum())
# Projection of a 3D point on a 3D line, minimal distance
def dist_to_line(prev, next, curr):
return norm(np.cross(next-prev, curr-next)) / norm(next-prev)
nb_points = 0
compressed_streamline = np.zeros_like(streamline)
# Copy first point since it is always kept.
compressed_streamline[0, :] = streamline[0, :]
nb_points += 1
prev = streamline[0]
prev_id = 0
for next_id, next in enumerate(streamline[2:], start=2):
# Euclidean distance between last added point and current point.
if segment_length(prev, next) > max_segment_length:
compressed_streamline[nb_points, :] = streamline[next_id-1, :]
nb_points += 1
prev = streamline[next_id-1]
prev_id = next_id-1
continue
# Check that each point is not offset by more than `tol_error` mm.
for o, curr in enumerate(streamline[prev_id+1:next_id],
start=prev_id+1):
dist = dist_to_line(prev, next, curr)
if np.isnan(dist) or dist > tol_error:
compressed_streamline[nb_points, :] = streamline[next_id-1, :]
nb_points += 1
prev = streamline[next_id-1]
prev_id = next_id-1
break
# Copy last point since it is always kept.
compressed_streamline[nb_points, :] = streamline[-1, :]
nb_points += 1
# Make sure the array have the correct size
return compressed_streamline[:nb_points]
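# Tiny worked example (added): for a perfectly straight streamline shorter
# than `max_segment_length`, every interior point lies on the endpoint line
# (distance 0, never above `tol_error`), so only the two endpoints are kept:
#
#     line = np.linspace(0, 5, 5 * 3).reshape((5, 3))
#     assert len(compress_streamlines_python(line)) == 2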
def test_compress_streamlines():
for compress_func in [compress_streamlines_python, compress_streamlines]:
        # Small streamlines (two points or fewer) are not compressible.
for small_streamline in [np.array([[]]),
np.array([[1, 1, 1]]),
np.array([[1, 1, 1], [2, 2, 2]])]:
c_streamline = compress_func(small_streamline)
assert_equal(len(c_streamline), len(small_streamline))
assert_array_equal(c_streamline, small_streamline)
# Compressing a straight streamline that is less than 10mm long
        # should output a two-point streamline.
linear_streamline = np.linspace(0, 5, 100*3).reshape((100, 3))
c_streamline = compress_func(linear_streamline)
assert_equal(len(c_streamline), 2)
assert_array_equal(c_streamline, [linear_streamline[0],
linear_streamline[-1]])
        # The distance between consecutive points must be less than or equal
        # to some value.
max_segment_length = 10
linear_streamline = np.linspace(0, 100, 100*3).reshape((100, 3))
linear_streamline[:, 1:] = 0.
c_streamline = compress_func(linear_streamline,
max_segment_length=max_segment_length)
segments_length = np.sqrt((np.diff(c_streamline,
axis=0)**2).sum(axis=1))
assert_true(np.all(segments_length <= max_segment_length))
assert_equal(len(c_streamline), 12)
assert_array_equal(c_streamline, linear_streamline[::9])
# A small `max_segment_length` should keep all points.
c_streamline = compress_func(linear_streamline,
max_segment_length=0.01)
assert_array_equal(c_streamline, linear_streamline)
# Test we can set `max_segment_length` to infinity
# (like the C++ version)
compress_func(streamline, max_segment_length=np.inf)
# Uncompressable streamline when `tol_error` == 1.
simple_streamline = np.array([[0, 0, 0],
[1, 1, 0],
[1.5, np.inf, 0],
[2, 2, 0],
[2.5, 20, 0],
[3, 3, 0]])
# Because of np.inf, compressing that streamline causes a warning.
with np.errstate(invalid='ignore'):
c_streamline = compress_func(simple_streamline, tol_error=1)
assert_array_equal(c_streamline, simple_streamline)
# Create a special streamline where every other point is increasingly
    # farther from a straight line formed by the streamline endpoints.
tol_errors = np.linspace(0, 10, 21)
orthogonal_line = np.array([[-np.sqrt(2)/2, np.sqrt(2)/2, 0]],
dtype=np.float32)
special_streamline = np.array([range(len(tol_errors)*2+1)] * 3,
dtype=np.float32).T
special_streamline[1::2] += orthogonal_line * tol_errors[:, None]
# # Uncomment to see the streamline.
# import pylab as plt
# plt.plot(special_streamline[:, 0], special_streamline[:, 1], '.-')
# plt.axis('equal'); plt.show()
# Test different values for `tol_error`.
for i, tol_error in enumerate(tol_errors):
cspecial_streamline = compress_streamlines(special_streamline,
tol_error=tol_error+1e-4,
max_segment_length=np.inf)
# First and last points should always be the same as the original ones.
assert_array_equal(cspecial_streamline[0], special_streamline[0])
assert_array_equal(cspecial_streamline[-1], special_streamline[-1])
assert_equal(len(cspecial_streamline),
len(special_streamline)-((i*2)+1))
# Make sure Cython and Python versions are the same.
cstreamline_python = compress_streamlines_python(
special_streamline,
tol_error=tol_error+1e-4,
max_segment_length=np.inf)
assert_equal(len(cspecial_streamline), len(cstreamline_python))
assert_array_almost_equal(cspecial_streamline, cstreamline_python)
def test_compress_streamlines_memory_leaks():
# Test some dtypes
dtypes = [np.float32, np.float64, np.int32, np.int64]
for dtype in dtypes:
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = [rng.randn(rng.randint(10, 100), 3).astype(dtype) for _ in range(NB_STREAMLINES)]
list_refcount_before = get_type_refcount()["list"]
cstreamlines = compress_streamlines(streamlines)
list_refcount_after = get_type_refcount()["list"]
del cstreamlines # Delete `cstreamlines` because it holds a reference to `list`.
# Calling `compress_streamlines` should increase the refcount of `list` by one
# since we kept the returned value.
assert_equal(list_refcount_after, list_refcount_before+1)
# Test mixed dtypes
rng = np.random.RandomState(1234)
NB_STREAMLINES = 10000
streamlines = []
for i in range(NB_STREAMLINES):
dtype = dtypes[i % len(dtypes)]
streamlines.append(rng.randn(rng.randint(10, 100), 3).astype(dtype))
list_refcount_before = get_type_refcount()["list"]
cstreamlines = compress_streamlines(streamlines)
list_refcount_after = get_type_refcount()["list"]
# Calling `compress_streamlines` should increase the refcount of `list` by one
# since we kept the returned value.
assert_equal(list_refcount_after, list_refcount_before+1)
def test_select_by_rois():
streamlines = [np.array([[0, 0., 0.9],
[1.9, 0., 0.]]),
np.array([[0.1, 0., 0],
[0, 1., 1.],
[0, 2., 2.]]),
np.array([[2, 2, 2],
[3, 3, 3]])]
# Make two ROIs:
mask1 = np.zeros((4, 4, 4), dtype=bool)
mask2 = np.zeros_like(mask1)
mask1[0, 0, 0] = True
mask2[1, 0, 0] = True
selection = select_by_rois(streamlines, [mask1], [True],
tol=1)
npt.assert_array_equal(list(selection), [streamlines[0],
streamlines[1]])
selection = select_by_rois(streamlines, [mask1, mask2], [True, True],
tol=1)
npt.assert_array_equal(list(selection), [streamlines[0],
streamlines[1]])
selection = select_by_rois(streamlines, [mask1, mask2], [True, False])
npt.assert_array_equal(list(selection), [streamlines[1]])
# Setting tolerance too low gets overridden:
selection = select_by_rois(streamlines, [mask1, mask2], [True, False],
tol=0.1)
npt.assert_array_equal(list(selection), [streamlines[1]])
selection = select_by_rois(streamlines, [mask1, mask2], [True, True],
tol=0.87)
npt.assert_array_equal(list(selection), [streamlines[1]])
mask3 = np.zeros_like(mask1)
mask3[0, 2, 2] = 1
selection = select_by_rois(streamlines, [mask1, mask2, mask3],
[True, True, False], tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0]])
# Select using only one ROI
selection = select_by_rois(streamlines, [mask1], [True], tol=0.87)
npt.assert_array_equal(list(selection), [streamlines[1]])
selection = select_by_rois(streamlines, [mask1], [True], tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0],
streamlines[1]])
# Use different modes:
selection = select_by_rois(streamlines, [mask1, mask2, mask3],
[True, True, False],
mode="all",
tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0]])
selection = select_by_rois(streamlines, [mask1, mask2, mask3],
[True, True, False],
mode="either_end",
tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0]])
selection = select_by_rois(streamlines, [mask1, mask2, mask3],
[True, True, False],
mode="both_end",
tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0]])
mask2[0, 2, 2] = True
selection = select_by_rois(streamlines, [mask1, mask2, mask3],
[True, True, False],
mode="both_end",
tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0],
streamlines[1]])
# Test with generator input:
def generate_sl(streamlines):
for sl in streamlines:
yield sl
selection = select_by_rois(generate_sl(streamlines), [mask1], [True],
tol=1.0)
npt.assert_array_equal(list(selection), [streamlines[0],
streamlines[1]])
def test_orient_by_rois():
streamlines = [np.array([[0, 0., 0],
[1, 0., 0.],
[2, 0., 0.]]),
np.array([[2, 0., 0.],
[1, 0., 0],
[0, 0, 0.]])]
# Make two ROIs:
mask1_vol = np.zeros((4, 4, 4), dtype=bool)
mask2_vol = np.zeros_like(mask1_vol)
mask1_vol[0, 0, 0] = True
mask2_vol[1, 0, 0] = True
mask1_coords = np.array(np.where(mask1_vol)).T
mask2_coords = np.array(np.where(mask2_vol)).T
# If there is an affine, we'll use it:
affine = np.eye(4)
affine[:, 3] = [-1, 100, -20, 1]
# Transform the streamlines:
x_streamlines = [sl + affine[:3, 3] for sl in streamlines]
for copy in [True, False]:
for sl, affine in zip([streamlines, x_streamlines], [None, affine]):
for mask1, mask2 in \
zip([mask1_vol, mask1_coords], [mask2_vol, mask2_coords]):
new_streamlines = orient_by_rois(sl, mask1, mask2,
affine=affine, copy=copy)
if copy:
flipped_sl = [sl[0], sl[1][::-1]]
else:
flipped_sl = [np.array([[0, 0., 0],
[1, 0., 0.],
[2, 0., 0.]]),
np.array([[0, 0., 0.],
[1, 0., 0],
[2, 0, 0.]])]
if affine is not None:
flipped_sl = [s + affine[:3, 3] for s in flipped_sl]
npt.assert_equal(new_streamlines, flipped_sl)
if __name__ == '__main__':
run_module_suite()
|
the-stack_0_26472
|
import os
import mujoco_py
import numpy as np
from gym.utils import seeding
class JacoEnv():
def __init__(self,
width,
height,
frame_skip,
rewarding_distance,
control_magnitude,
reward_continuous):
self.frame_skip = frame_skip
self.width = width
self.height = height
# Instantiate Mujoco model
model_path = "jaco.xml"
fullpath = os.path.join(
os.path.dirname(__file__), "assets", model_path)
if not os.path.exists(fullpath):
raise IOError("File %s does not exist" % fullpath)
model = mujoco_py.load_model_from_path(fullpath)
self.sim = mujoco_py.MjSim(model)
self.init_state = self.sim.get_state()
self.init_qpos = self.sim.data.qpos.ravel().copy()
self.init_qvel = self.sim.data.qvel.ravel().copy()
# Setup actuators
self.actuator_bounds = self.sim.model.actuator_ctrlrange
self.actuator_low = self.actuator_bounds[:, 0]
self.actuator_high = self.actuator_bounds[:, 1]
self.actuator_ctrlrange = self.actuator_high - self.actuator_low
self.num_actuators = len(self.actuator_low)
# init model_data_ctrl
self.null_action = np.zeros(self.num_actuators)
self.sim.data.ctrl[:] = self.null_action
self.seed()
self.sum_reward = 0
self.rewarding_distance = rewarding_distance
# Target position bounds
self.target_bounds = np.array(((0.4, 0.6), (0.1, 0.3), (0.2, 0.3)))
self.target_reset_distance = 0.2
# Setup discrete action space
self.control_values = self.actuator_ctrlrange * control_magnitude
self.num_actions = 5
self.action_space = [list(range(self.num_actions))
] * self.num_actuators
self.observation_space = ((0, ), (height, width, 3),
(height, width, 3))
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def set_qpos_qvel(self, qpos, qvel):
assert qpos.shape == (self.sim.model.nq, ) and qvel.shape == (
self.sim.model.nv, )
self.sim.data.qpos[:] = qpos
self.sim.data.qvel[:] = qvel
self.sim.forward()
def reset(self):
# Random initial position of Jaco
# qpos = self.init_qpos + np.random.randn(self.sim.nv)
# Fixed initial position of Jaco
qpos = self.init_qpos
qvel = self.init_qvel
        # randomize the target position at the start of each episode
self.reset_target()
# set initial joint positions and velocities
self.set_qpos_qvel(qpos, qvel)
return self.get_obs()
def reset_target(self):
# Randomize goal position within specified bounds
self.goal = np.random.rand(3) * (self.target_bounds[:, 1] -
self.target_bounds[:, 0]
) + self.target_bounds[:, 0]
geom_positions = self.sim.model.geom_pos.copy()
prev_goal_location = geom_positions[1]
while (np.linalg.norm(prev_goal_location - self.goal) <
self.target_reset_distance):
self.goal = np.random.rand(3) * (self.target_bounds[:, 1] -
self.target_bounds[:, 0]
) + self.target_bounds[:, 0]
geom_positions[1] = self.goal
self.sim.model.geom_pos[:] = geom_positions
def render(self, camera_name=None):
rgb = self.sim.render(
width=self.width, height=self.height, camera_name=camera_name)
return rgb
def _get_obs_joint(self):
return np.concatenate(
[self.sim.data.qpos.flat[:], self.sim.data.qvel.flat[:]])
def _get_obs_rgb_view1(self):
obs_rgb_view1 = self.render(camera_name='view1')
return obs_rgb_view1
def _get_obs_rgb_view2(self):
obs_rgb_view2 = self.render(camera_name='view2')
return obs_rgb_view2
def get_obs(self):
return (self._get_obs_joint(), self._get_obs_rgb_view1(),
self._get_obs_rgb_view2())
def do_simulation(self, ctrl):
'''Do one step of simulation, taking new control as target
Arguments:
ctrl {np.array(num_actuator)} -- new control to send to actuators
'''
ctrl = np.min((ctrl, self.actuator_high), axis=0)
ctrl = np.max((ctrl, self.actuator_low), axis=0)
self.sim.data.ctrl[:] = ctrl
for _ in range(self.frame_skip):
self.sim.step()
# @profile(immediate=True)
def step(self, a):
dist = np.zeros(3)
done = False
new_control = np.copy(self.sim.data.ctrl).flatten()
# Compute reward:
# If any finger is close enough to target => +1
dist[0] = np.linalg.norm(
self.sim.data.get_body_xpos("jaco_link_finger_1") - self.goal)
dist[1] = np.linalg.norm(
self.sim.data.get_body_xpos("jaco_link_finger_2") - self.goal)
dist[2] = np.linalg.norm(
self.sim.data.get_body_xpos("jaco_link_finger_3") - self.goal)
# if continuous reward
# reward = float((np.mean(dist)**-1)*0.1)
reward = 0
if any(d < self.rewarding_distance for d in dist):
reward = 1
self.reset_target()
# Transform discrete actions to continuous controls
for i in range(self.num_actuators):
'''
0 = 0 velocity
1 = small positive velocity
2 = large positive velocity
3 = small negative velocity
4 = large negative velocity
'''
            if a[i] == 0:
                new_control[i] = 0
            elif a[i] == 1:
                new_control[i] = self.control_values[i] / 2
            elif a[i] == 2:
                new_control[i] = self.control_values[i]
            elif a[i] == 3:
                new_control[i] = -self.control_values[i] / 2
            elif a[i] == 4:
                new_control[i] = -self.control_values[i]
# Do one step of simulation
self.do_simulation(new_control)
self.sum_reward += reward
return self.get_obs(), reward, done
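# Minimal usage sketch (added; assumes a working MuJoCo + mujoco_py install
# and the bundled assets/jaco.xml model):
#
#     env = JacoEnv(width=64, height=64, frame_skip=4, rewarding_distance=0.05,
#                   control_magnitude=0.3, reward_continuous=False)
#     joints, view1, view2 = env.reset()
#     action = [0] * env.num_actuators          # action 0 = zero velocity
#     (joints, view1, view2), reward, done = env.step(action)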
|
the-stack_0_26473
|
import requests
class TestSources:
params = {
'test_create': [
{
'data': [{
'type': 'kafka',
'name': 'kafka_source',
'config': {
'version': '2.0+',
'conf.brokerURI': 'kafka:29092',
'conf.kafkaOptions': ['key:value'],
'conf.topicList': ['test'],
'conf.numberOfThreads': 1,
'conf.kafkaAutoOffsetReset': 'EARLIEST',
'conf.dataFormat': 'JSON',
'conf.maxBatchSize': 1000,
'conf.batchWaitTime': 10,
}
}],
'er': [{"config": {"conf.batchWaitTime": 10, "conf.brokerURI": "kafka:29092", "conf.dataFormat": "JSON",
"conf.kafkaAutoOffsetReset": "EARLIEST", "conf.kafkaOptions": ["key:value"],
"conf.maxBatchSize": 1000, "conf.numberOfThreads": 1, "conf.topicList": ["test"],
"version": "2.0+"}, "name": "kafka_source", "type": "kafka"}]
},
],
'test_edit': [
{
'data': [{
'name': 'kafka_source',
'config': {
'version': '2.0+',
'conf.brokerURI': 'http://kafka:29092',
'conf.kafkaOptions': ['key1:value1'],
'conf.topicList': ['test1'],
'conf.numberOfThreads': 2,
'conf.kafkaAutoOffsetReset': 'EARLIEST',
'conf.dataFormat': 'JSON',
'conf.maxBatchSize': 500,
'conf.batchWaitTime': 5,
}
}],
'er': [{"config": {"conf.batchWaitTime": 5, "conf.brokerURI": "http://kafka:29092",
"conf.dataFormat": "JSON", "conf.kafkaAutoOffsetReset": "EARLIEST",
"conf.kafkaOptions": ["key1:value1"], "conf.maxBatchSize": 500,
"conf.numberOfThreads": 2, "conf.topicList": ["test1"], "version": "2.0+"},
"name": "kafka_source", "type": "kafka"}]
},
]
}
def test_create(self, data, er):
result = requests.post('http://localhost/sources', json=list(data))
result.raise_for_status()
assert result.json() == er
def test_edit(self, api_client, data, er):
result = api_client.put('/sources', json=list(data))
assert result.json == er
def test_get(self, api_client):
result = api_client.get('/sources')
assert result.json == ["kafka_source"]
def test_delete(self, api_client):
api_client.delete('sources/kafka_source')
assert api_client.get('/sources').json == []
|
the-stack_0_26478
|
import requests
import xmltodict
from helpers import helpers
from config import(
bucket_url,
item_key,
null_data
)
class fetch_resource(object):
"""streams the resource from s3 bucket"""
def __init__(self):
self.url = bucket_url
self.key = item_key
self.null = null_data
self.key_maps = [
("Key", helpers.null),
("LastModified", helpers.date_),
("Size", helpers.int_),
("StorageClass", helpers.null),
("ETag", helpers.str_)
]
def _get_(self):
"""fetches all the images data"""
resp = requests.get(self.url)
if resp.status_code == 200:
resp = resp.text
else:
resp = self.null
return resp
def map_keys(self, item):
"""maps values of image data to right type"""
return dict((key.lower(), func(item[key])) for key, func in self.key_maps if key in item)
def _fetch_(self):
"""entrypoint function to stream image resources"""
rdata = self._get_()
data = xmltodict.parse(rdata)
for parent in data:
for item in data[parent].get(self.key, []):
yield self.map_keys(dict(item))
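# Minimal usage sketch (added; `bucket_url`, `item_key`, `null_data` and the
# `helpers` converters come from this project's config/helpers modules and
# are assumed to point at a public S3 bucket listing):
#
#     fetcher = fetch_resource()
#     for item in fetcher._fetch_():
#         print(item.get('key'), item.get('size'), item.get('lastmodified'))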
|
the-stack_0_26479
|
"""
Tests for Discussion API forms
"""
import itertools
from unittest import TestCase
from urllib.parse import urlencode
import ddt
from django.http import QueryDict
from opaque_keys.edx.locator import CourseLocator
from lms.djangoapps.discussion.rest_api.forms import CommentListGetForm, ThreadListGetForm
from openedx.core.djangoapps.util.test_forms import FormTestMixin
class PaginationTestMixin:
"""A mixin for testing forms with pagination fields"""
def test_missing_page(self):
self.form_data.pop("page")
self.assert_field_value("page", 1)
def test_invalid_page(self):
self.form_data["page"] = "0"
self.assert_error("page", "Ensure this value is greater than or equal to 1.")
def test_missing_page_size(self):
self.form_data.pop("page_size")
self.assert_field_value("page_size", 10)
def test_zero_page_size(self):
self.form_data["page_size"] = "0"
self.assert_error("page_size", "Ensure this value is greater than or equal to 1.")
def test_excessive_page_size(self):
self.form_data["page_size"] = "101"
self.assert_field_value("page_size", 100)
@ddt.ddt
class ThreadListGetFormTest(FormTestMixin, PaginationTestMixin, TestCase):
"""Tests for ThreadListGetForm"""
FORM_CLASS = ThreadListGetForm
def setUp(self):
super().setUp()
self.form_data = QueryDict(
urlencode(
{
"course_id": "Foo/Bar/Baz",
"page": "2",
"page_size": "13",
}
),
mutable=True
)
def test_basic(self):
form = self.get_form(expected_valid=True)
assert form.cleaned_data == {
'course_id': CourseLocator.from_string('Foo/Bar/Baz'),
'page': 2,
'page_size': 13,
'count_flagged': None,
'topic_id': set(),
'text_search': '',
'following': None,
'author': '',
'thread_type': '',
'flagged': None,
'view': '',
'order_by': 'last_activity_at',
'order_direction': 'desc',
'requested_fields': set()
}
def test_topic_id(self):
self.form_data.setlist("topic_id", ["example topic_id", "example 2nd topic_id"])
form = self.get_form(expected_valid=True)
assert form.cleaned_data['topic_id'] == {'example topic_id', 'example 2nd topic_id'}
def test_text_search(self):
self.form_data["text_search"] = "test search string"
form = self.get_form(expected_valid=True)
assert form.cleaned_data['text_search'] == 'test search string'
def test_missing_course_id(self):
self.form_data.pop("course_id")
self.assert_error("course_id", "This field is required.")
def test_invalid_course_id(self):
self.form_data["course_id"] = "invalid course id"
self.assert_error("course_id", "'invalid course id' is not a valid course id")
def test_empty_topic_id(self):
self.form_data.setlist("topic_id", ["", "not empty"])
self.assert_error("topic_id", "This field cannot be empty.")
@ddt.data("discussion", "question")
def test_thread_type(self, value):
self.form_data["thread_type"] = value
self.assert_field_value("thread_type", value)
def test_thread_type_invalid(self):
self.form_data["thread_type"] = "invalid-option"
self.assert_error("thread_type", "Select a valid choice. invalid-option is not one of the available choices.")
@ddt.data("True", "true", 1, True)
def test_flagged_true(self, value):
self.form_data["flagged"] = value
self.assert_field_value("flagged", True)
@ddt.data("False", "false", 0, False)
def test_flagged_false(self, value):
self.form_data["flagged"] = value
self.assert_field_value("flagged", False)
def test_invalid_flagged(self):
self.form_data["flagged"] = "invalid-boolean"
self.assert_error("flagged", "Invalid Boolean Value.")
@ddt.data("True", "true", 1, True)
def test_following_true(self, value):
self.form_data["following"] = value
self.assert_field_value("following", True)
@ddt.data("False", "false", 0, False)
def test_following_false(self, value):
self.form_data["following"] = value
self.assert_error("following", "The value of the 'following' parameter must be true.")
def test_invalid_following(self):
self.form_data["following"] = "invalid-boolean"
self.assert_error("following", "Invalid Boolean Value.")
@ddt.data(*itertools.combinations(["topic_id", "text_search", "following"], 2))
def test_mutually_exclusive(self, params):
self.form_data.update({param: "True" for param in params})
self.assert_error(
"__all__",
"The following query parameters are mutually exclusive: topic_id, text_search, following"
)
def test_invalid_view_choice(self):
self.form_data["view"] = "not_a_valid_choice"
self.assert_error("view", "Select a valid choice. not_a_valid_choice is not one of the available choices.")
def test_invalid_sort_by_choice(self):
self.form_data["order_by"] = "not_a_valid_choice"
self.assert_error(
"order_by",
"Select a valid choice. not_a_valid_choice is not one of the available choices."
)
def test_invalid_sort_direction_choice(self):
self.form_data["order_direction"] = "not_a_valid_choice"
self.assert_error(
"order_direction",
"Select a valid choice. not_a_valid_choice is not one of the available choices."
)
@ddt.data(
("view", "unread"),
("view", "unanswered"),
("order_by", "last_activity_at"),
("order_by", "comment_count"),
("order_by", "vote_count"),
("order_direction", "desc"),
)
@ddt.unpack
def test_valid_choice_fields(self, field, value):
self.form_data[field] = value
self.assert_field_value(field, value)
def test_requested_fields(self):
self.form_data["requested_fields"] = "profile_image"
form = self.get_form(expected_valid=True)
assert form.cleaned_data['requested_fields'] == {'profile_image'}
@ddt.ddt
class CommentListGetFormTest(FormTestMixin, PaginationTestMixin, TestCase):
"""Tests for CommentListGetForm"""
FORM_CLASS = CommentListGetForm
def setUp(self):
super().setUp()
self.form_data = {
"thread_id": "deadbeef",
"endorsed": "False",
"page": "2",
"page_size": "13",
}
def test_basic(self):
form = self.get_form(expected_valid=True)
assert form.cleaned_data == {
'thread_id': 'deadbeef',
'endorsed': False,
'page': 2,
'page_size': 13,
'requested_fields': set()
}
def test_missing_thread_id(self):
self.form_data.pop("thread_id")
self.assert_error("thread_id", "This field is required.")
def test_missing_endorsed(self):
self.form_data.pop("endorsed")
self.assert_field_value("endorsed", None)
@ddt.data("True", "true", True, 1)
def test_endorsed_true(self, value):
self.form_data["endorsed"] = value
self.assert_field_value("endorsed", True)
@ddt.data("False", "false", False, 0)
def test_endorsed_false(self, value):
self.form_data["endorsed"] = value
self.assert_field_value("endorsed", False)
def test_invalid_endorsed(self):
self.form_data["endorsed"] = "invalid-boolean"
self.assert_error("endorsed", "Invalid Boolean Value.")
def test_requested_fields(self):
self.form_data["requested_fields"] = {"profile_image"}
form = self.get_form(expected_valid=True)
assert form.cleaned_data['requested_fields'] == {'profile_image'}
|
the-stack_0_26480
|
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["example.com"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
# CACHES = {
# "default": {
# "BACKEND": "django_redis.cache.RedisCache",
# "LOCATION": env("REDIS_URL"),
# "OPTIONS": {
# "CLIENT_CLASS": "django_redis.client.DefaultClient",
# # Mimicing memcache behavior.
# # http://jazzband.github.io/django-redis/latest/#_memcached_exceptions_behavior
# "IGNORE_EXCEPTIONS": True,
# },
# }
# }
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="Proffy Api <[email protected]>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[Proffy Api]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
ANYMAIL = {}
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console", "mail_admins"],
"propagate": True,
},
},
}
# Your stuff...
# ------------------------------------------------------------------------------
|
the-stack_0_26481
|
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import hashlib
from os import remove, makedirs
from os.path import exists, isdir
from functools import partial
def is_casava_v180_or_later(header_line):
"""Check if the header looks like it is Illumina software post-casava v1.8
Parameters
----------
header_line : bytes
A header line
Returns
-------
bool
        ``True`` if casava v1.8+, otherwise ``False``
Examples
--------
>>> from skbio.util import is_casava_v180_or_later
>>> print(is_casava_v180_or_later('@foo'))
False
>>> id_ = '@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
>>> print(is_casava_v180_or_later(id_))
True
"""
if not header_line.startswith(b'@'):
raise ValueError("Non-header line passed in!")
fields = header_line.split(b':')
return len(fields) == 10 and fields[7] in b'YN'
def safe_md5(open_file, block_size=2 ** 20):
"""Computes an md5 sum without loading the file into memory
Parameters
----------
open_file : file object
open file handle to the archive to compute the checksum. It
must be open as a binary file
block_size : int, optional
size of the block taken per iteration
Returns
-------
md5 : md5 object from the hashlib module
object with the loaded file
Notes
-----
This method is based on the answers given in:
http://stackoverflow.com/a/1131255/379593
Examples
--------
>>> from StringIO import StringIO
>>> from skbio.util import safe_md5
>>> fd = StringIO("foo bar baz") # open file like object
>>> x = safe_md5(fd)
>>> x.hexdigest()
'ab07acbb1e496801937adfa772424bf7'
>>> fd.close()
"""
md5 = hashlib.md5()
data = True
while data:
data = open_file.read(block_size)
if data:
md5.update(data)
return md5
def remove_files(list_of_filepaths, error_on_missing=True):
"""Remove list of filepaths, optionally raising an error if any are missing
Parameters
----------
list_of_filepaths : list of strings
list with filepaths to remove
error_on_missing : bool, optional
whether or not the function should raise an ``OSError`` if a file is
not found
Raises
------
OSError
If a filepath in the list does not exist
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> from os.path import exists
>>> from skbio.util import remove_files
>>> h = NamedTemporaryFile(delete=False)
>>> exists(h.name) # it exists
True
>>> remove_files([h.name])
>>> exists(h.name) # and now it's gone
False
"""
missing = []
for fp in list_of_filepaths:
try:
remove(fp)
except OSError:
missing.append(fp)
if error_on_missing and missing:
raise OSError("Some filepaths were not accessible: %s" %
'\t'.join(missing))
def create_dir(dir_name, fail_on_exist=False, handle_errors_externally=False):
"""Create a directory safely and fail meaningfully
Parameters
----------
dir_name: string
name of directory to create
fail_on_exist: bool, optional
if true raise an error if ``dir_name`` already exists
handle_errors_externally: bool, optional
        if True do not raise errors, but return failure codes. This allows
        errors to be handled locally, e.g. to hint the user at a
        --force_overwrite option.
Returns
-------
return_value : int
These values are only returned if no error is raised:
- ``0``: directory was safely created
- ``1``: directory already existed
- ``2``: a file with the same name exists
- ``3``: any other unspecified ``OSError``
Notes
-----
    Depending on how thorough we want to be, we could add tests, e.g. for
testing actual write permission in an existing dir.
Examples
--------
>>> from skbio.util import create_dir
>>> from os.path import exists, join
>>> from tempfile import gettempdir
>>> from os import rmdir
>>> new_dir = join(gettempdir(), 'scikitbio')
>>> create_dir(new_dir)
0
>>> exists(new_dir)
True
>>> rmdir(new_dir)
"""
error_code_lookup = _get_create_dir_error_codes()
    # pre-instantiate the error handler with the fixed arguments
ror = partial(_handle_error_codes, dir_name, handle_errors_externally)
if exists(dir_name):
if isdir(dir_name):
# dir is there
if fail_on_exist:
return ror(error_code_lookup['DIR_EXISTS'])
else:
return error_code_lookup['DIR_EXISTS']
else:
# must be file with same name
return ror(error_code_lookup['FILE_EXISTS'])
else:
# no dir there, try making it
try:
makedirs(dir_name)
except OSError:
return ror(error_code_lookup['OTHER_OS_ERROR'])
return error_code_lookup['NO_ERROR']
def flatten(items):
"""Removes one level of nesting from items
Parameters
----------
items : iterable
list of items to flatten one level
Returns
-------
flattened_items : list
list of flattened items, items can be any sequence, but flatten always
returns a list.
Examples
--------
>>> from skbio.util import flatten
>>> h = [['a', 'b', 'c', 'd'], [1, 2, 3, 4, 5], ['x', 'y'], ['foo']]
>>> print(flatten(h))
['a', 'b', 'c', 'd', 1, 2, 3, 4, 5, 'x', 'y', 'foo']
"""
result = []
for i in items:
try:
result.extend(i)
except TypeError:
result.append(i)
return result
def _get_create_dir_error_codes():
return {'NO_ERROR': 0,
'DIR_EXISTS': 1,
'FILE_EXISTS': 2,
'OTHER_OS_ERROR': 3}
def _handle_error_codes(dir_name, suppress_errors=False,
error_code=None):
"""Wrapper function for error_handling.
dir_name: name of directory that raised the error
    suppress_errors: if True, return the error_code; otherwise raise an OSError
error_code: the code for the error
"""
error_code_lookup = _get_create_dir_error_codes()
if error_code is None:
error_code = error_code_lookup['NO_ERROR']
error_strings = \
{error_code_lookup['DIR_EXISTS']:
"Directory already exists: %s" % dir_name,
error_code_lookup['FILE_EXISTS']:
"File with same name exists: %s" % dir_name,
error_code_lookup['OTHER_OS_ERROR']:
"Could not create output directory: %s. " % dir_name +
"Check the permissions."}
if error_code == error_code_lookup['NO_ERROR']:
return error_code_lookup['NO_ERROR']
if suppress_errors:
return error_code
else:
raise OSError(error_strings[error_code])
|
the-stack_0_26483
|
from solvers.car_genetic import car_genetic
from solvers.hill_climbing import hill_climbing
from solvers.rides_genetic import rides_genetic
from solvers.simulated_annealing import simulated_annealing
from solvers.greedy import greedy
from solvers.files import group
from solvers.car_genetic import print_car_genetic_info
from solvers.rides_genetic import print_rides_genetic_info
import sys
import time
def run(algorithm):
global_score = 0
# print("\n{}".format(algorithm.__name__.upper()))
if len(sys.argv) == 3:
start_time = time.time()
score = algorithm(sys.argv[2])
print(sys.argv[2].ljust(20) + "time {:.4f}s \tscore {}".
format(time.time() - start_time, group(score)))
else:
# save start time in start to count total time
start_time = time.time()
start = start_time
score = algorithm("a_example")
global_score += score
print("{} time {:08.4f}s \tscore {}".
format("a_example".ljust(20, ' '), time.time() - start_time, group(score)))
start_time = time.time()
score = algorithm("b_should_be_easy")
global_score += score
print("{} time {:08.4f}s \tscore {}".
format("b_should_be_easy".ljust(20, ' '), time.time() - start_time, group(score)))
start_time = time.time()
score = algorithm("c_no_hurry")
global_score += score
print("{} time {:08.4f}s \tscore {}".
format("c_no_hurry".ljust(20, ' '), time.time() - start_time, group(score)))
start_time = time.time()
score = algorithm("d_metropolis")
global_score += score
print("{} time {:08.4f}s \tscore {}".
format("d_metropolis".ljust(20, ' '), time.time() - start_time, group(score)))
start_time = time.time()
score = algorithm("e_high_bonus")
global_score += score
print("{} time {:08.4f}s \tscore {}".
format("e_high_bonus".ljust(20, ' '), time.time() - start_time, group(score)))
print("\nGlobal score is {}".format(group(global_score)))
print("Total runtime is {:.4f}s".format(time.time() - start))
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3:
print("python main.py <algorithm> <specific file> ---> For a specific file")
print("python main.py <algorithm> ---> For all test files\n")
print("algorithm options: car_genetic | greedy | hill_climbing | rides_genetic | simulated_annealing")
print("file options: a_example | b_should_be_easy | c_no_hurry | d_metropolis | e_high_bonus\n")
print("Try again...")
exit(1)
if sys.argv[1] == "car_genetic":
print_car_genetic_info()
run(car_genetic)
elif sys.argv[1] == "hill_climbing":
print("\nHILL CLIMBING")
run(hill_climbing)
elif sys.argv[1] == "rides_genetic":
print_rides_genetic_info()
run(rides_genetic)
elif sys.argv[1] == "simulated_annealing":
print("\nSIMULATED ANNEALING")
run(simulated_annealing)
elif sys.argv[1] == "greedy":
print("\nGREEDY")
run(greedy)
|
the-stack_0_26484
|
import airflow
import pendulum
from airflow import DAG
from airflow.exceptions import AirflowSkipException
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator, BranchPythonOperator
ERP_CHANGE_DATE = airflow.utils.dates.days_ago(1)
def _pick_erp_system(**context):
if context["execution_date"] < ERP_CHANGE_DATE:
return "fetch_sales_old"
else:
return "fetch_sales_new"
def _latest_only(**context):
now = pendulum.now("UTC")
left_window = context["dag"].following_schedule(context["execution_date"])
right_window = context["dag"].following_schedule(left_window)
if not left_window < now <= right_window:
raise AirflowSkipException()
with DAG(
dag_id="06_condition_dag",
start_date=airflow.utils.dates.days_ago(3),
schedule_interval="@daily",
tags=["chapter5"],
) as dag:
start = DummyOperator(task_id="start")
pick_erp = BranchPythonOperator(
task_id="pick_erp_system", python_callable=_pick_erp_system
)
fetch_sales_old = DummyOperator(task_id="fetch_sales_old")
clean_sales_old = DummyOperator(task_id="clean_sales_old")
fetch_sales_new = DummyOperator(task_id="fetch_sales_new")
clean_sales_new = DummyOperator(task_id="clean_sales_new")
join_erp = DummyOperator(task_id="join_erp_branch", trigger_rule="none_failed")
fetch_weather = DummyOperator(task_id="fetch_weather")
clean_weather = DummyOperator(task_id="clean_weather")
join_datasets = DummyOperator(task_id="join_datasets")
train_model = DummyOperator(task_id="train_model")
latest_only = PythonOperator(task_id="latest_only", python_callable=_latest_only)
deploy_model = DummyOperator(task_id="deploy_model")
start >> [pick_erp, fetch_weather]
pick_erp >> [fetch_sales_old, fetch_sales_new]
fetch_sales_old >> clean_sales_old
fetch_sales_new >> clean_sales_new
[clean_sales_old, clean_sales_new] >> join_erp
fetch_weather >> clean_weather
[join_erp, clean_weather] >> join_datasets
join_datasets >> train_model >> deploy_model
latest_only >> deploy_model
|
the-stack_0_26485
|
# Copyright: 2006-2011 Brian Harring <[email protected]>
# Copyright: 2006 Marien Zwart <[email protected]>
# License: BSD/GPL2
"""package merging and unmerging interface
pmerge is the main command-line utility for merging and unmerging packages on a
system. It provides an interface to install, update, and uninstall ebuilds from
source or binary packages.
"""
# more should be doc'd...
__all__ = ("AmbiguousQuery", "NoMatches")
from functools import partial
import sys
from time import time
from snakeoil.cli.exceptions import ExitException
from snakeoil.demandload import demandload
from snakeoil.sequences import iflatten_instance, stable_unique
from snakeoil.strings import pluralism
from pkgcore.ebuild import resolver, restricts
from pkgcore.ebuild.atom import atom
from pkgcore.ebuild.misc import run_sanity_checks
from pkgcore.merge import errors as merge_errors
from pkgcore.operations import observer, format
from pkgcore.repository.util import get_raw_repos
from pkgcore.resolver.util import reduce_to_failures
from pkgcore.restrictions import packages
from pkgcore.restrictions.boolean import OrRestriction
from pkgcore.util import commandline, parserestrict
demandload(
'textwrap:dedent',
'pkgcore.repository.virtual:RestrictionRepo',
)
argparser = commandline.ArgumentParser(
domain=True, description=__doc__, script=(__file__, __name__))
argparser.add_argument(
nargs='*', dest='targets', metavar='TARGET',
action=commandline.StoreTarget, use_sets='sets',
help="extended package matching",
docs=commandline.StoreTarget.__doc__.split('\n')[1:])
operation_args = argparser.add_argument_group('operations')
operation_options = operation_args.add_mutually_exclusive_group()
operation_options.add_argument(
'-u', '--upgrade', action='store_true',
help='try to upgrade installed pkgs/deps',
docs="""
Try to upgrade specified targets to the latest visible version. Note
that altered package visibility due to keywording or masking can often
hide the latest versions of packages, especially for stable
configurations.
""")
operation_options.add_argument(
'-d', '--downgrade', action='store_true',
help='try to downgrade installed pkgs/deps',
docs="""
Try to downgrade specified targets to a lower visible version
compared to what's currently installed.
Useful for reverting to the previously installed package version;
        however, note that the -O/--nodeps option is generally used with this,
        otherwise lots of downgrades will be pulled into the resolved deptree.
""")
operation_options.add_argument(
'-C', '--unmerge', action='store_true',
help='unmerge packages',
docs="""
Target packages for unmerging from the system.
WARNING: This does not ask for user confirmation for any targets so
it's possible to quickly break a system.
""")
operation_options.add_argument(
'--clean', action='store_true',
help='remove installed packages not referenced by any target pkgs/sets',
docs="""
Remove installed packages that aren't referenced by any target packages
or sets. This defaults to using the world and system sets if no targets
are specified.
Use with *caution*, this option used incorrectly can render your system
unusable. Note that this implies --deep.
""")
operation_options.add_argument(
'--list-sets', action='store_true',
help='display the list of available package sets')
resolution_options = argparser.add_argument_group("resolver options")
resolution_options.add_argument(
'-p', '--pretend', action='store_true',
help="only perform the dep resolution",
docs="""
Resolve package dependencies and display the results without performing
any merges.
""")
resolution_options.add_argument(
'-a', '--ask', action='store_true',
help="ask for user confirmation after dep resolution",
docs="""
Perform the dependency resolution, but ask for user confirmation before
beginning the fetch/build/merge process. The choice defaults to yes so
pressing the "Enter" key will trigger acceptance.
""")
resolution_options.add_argument(
'-f', '--fetchonly', action='store_true',
help="do only the fetch steps of the resolved plan",
docs="""
Only perform fetching of all targets from SRC_URI based on the current
USE configuration.
""")
resolution_options.add_argument(
'-1', '--oneshot', action='store_true',
help="do not record changes in the world file",
docs="""
Build and merge packages normally, but do not add any targets to the
world file. Note that this is forcibly enabled if a package set is
specified.
""")
resolution_options.add_argument(
'-D', '--deep', action='store_true',
help='force the resolver to verify installed deps',
docs="""
Force dependency resolution across the entire dependency tree for all
specified targets.
""")
resolution_options.add_argument(
'-N', '--newuse', action='store_true',
help="add installed pkgs with changed useflags to targets",
docs="""
Include installed packages with USE flag changes in the list of viable
targets for rebuilding.
USE flag changes include flags being added, removed, enabled, or
disabled with regards to a package. USE flag changes can occur via
ebuild alterations, profile updates, or local configuration
modifications.
Note that this option implies -1/--oneshot.
""")
resolution_options.add_argument(
'-i', '--ignore-cycles', action='store_true',
help="ignore unbreakable dep cycles",
docs="""
Ignore dependency cycles if they're found to be unbreakable; for
example: a depends on b, and b depends on a, with neither built.
""")
resolution_options.add_argument(
'--with-bdeps', action='store_true',
help="process build deps for built packages",
docs="""
Pull in build time dependencies for built packages during dependency
resolution, by default they're ignored.
""")
resolution_options.add_argument(
'-O', '--nodeps', action='store_true',
help='disable dependency resolution',
docs="""
Build and merge packages without resolving any dependencies.
""")
resolution_options.add_argument(
'-o', '--onlydeps', action='store_true',
help='only merge the deps of the specified packages',
docs="""
Build and merge only the dependencies for the packages specified.
""")
resolution_options.add_argument(
'-n', '--noreplace', action='store_false', dest='replace',
help="don't reinstall target pkgs that are already installed",
docs="""
Skip packages that are already installed. By default when running
without this option, any specified target packages will be remerged
regardless of if they are already installed.
""")
resolution_options.add_argument(
'-b', '--buildpkg', action='store_true',
help="build binpkgs",
docs="""
Force binary packages to be built for all merged packages.
""")
resolution_options.add_argument(
'-k', '--usepkg', action='store_true',
help="prefer to use binpkgs",
docs="""
Binary packages are preferred over ebuilds when performing dependency
resolution.
""")
resolution_options.add_argument(
'-K', '--usepkgonly', action='store_true',
help="use only binpkgs",
docs="""
Only binary packages are considered when performing dependency
resolution.
""")
resolution_options.add_argument(
'-S', '--source-only', action='store_true',
help="use only ebuilds, no binpkgs",
docs="""
Only ebuilds are considered when performing dependency
resolution.
""")
resolution_options.add_argument(
'-e', '--empty', action='store_true',
help="force rebuilding of all involved packages",
docs="""
Force all targets and their dependencies to be rebuilt.
""")
resolution_options.add_argument(
'-x', '--exclude', dest='excludes', metavar='TARGET[,TARGET,...]',
action=commandline.StoreTarget, separator=',',
help='inject packages into the installed set',
docs="""
Comma-separated list of targets to pretend are installed.
This supports extended package globbing, e.g. ``'dev-python/*'``
equates to faking the entire dev-python category is installed.
""")
resolution_options.add_argument(
'--ignore-failures', action='store_true',
help='ignore failures while running all types of tasks',
docs="""
Skip failures during the following phases: sanity checks
(pkg_pretend), fetching, dep resolution, and (un)merging.
""")
resolution_options.add_argument(
'--force', action='store_true',
dest='force',
help="force changes to a repo, regardless of if it's frozen",
docs="""
Force (un)merging on the livefs (vdb), regardless of if it's frozen.
""")
resolution_options.add_argument(
'--preload-vdb-state', action='store_true',
help="enable preloading of the installed packages database",
docs="""
Preload the installed package database which causes the resolver to
work with a complete graph, thus disallowing actions that conflict with
installed packages. If disabled, it's possible for the requested action
to conflict with already installed dependencies that aren't involved in
the graph of the requested operation.
""")
output_options = argparser.add_argument_group("output options")
output_options.add_argument(
'--quiet-repo-display', action='store_true',
help="use indexes instead of ::repo suffixes in dep resolution output",
docs="""
In the package merge list display, suppress ::repo output and instead
use index numbers to indicate which repos packages come from.
""")
output_options.add_argument(
'-F', '--formatter', priority=90, metavar='FORMATTER',
action=commandline.StoreConfigObject, get_default=True,
config_type='pmerge_formatter',
help='output formatter to use',
docs="""
Select an output formatter to use for text formatting of --pretend or
--ask output, currently available formatters include the following:
basic, pkgcore, portage, portage-verbose, and paludis.
The basic formatter is the nearest to simple text output and is
intended for scripting while the portage/portage-verbose formatter
closely emulates portage output and is used by default.
""")
class AmbiguousQuery(parserestrict.ParseError):
"""Exception for multiple matches where a single match is required."""
def __init__(self, token, keys):
self.token = token
self.keys = keys
def __str__(self):
return f"multiple matches for {self.token!r}: {', '.join(map(str, self.keys))}"
class NoMatches(parserestrict.ParseError):
"""Exception for no matches where at least one match is required."""
def __init__(self, token):
super().__init__(f'{token!r}: no matches')
class Failure(ValueError):
"""Raised internally to indicate an "expected" failure condition."""
def unmerge(out, err, installed_repos, targets, options, formatter, world_set=None):
"""Unmerge tokens. hackish, should be rolled back into the resolver"""
# split real and virtual repos
vdb = installed_repos.real.combined
fake_vdb = installed_repos.virtual.combined
matches = set()
fake = set()
unknown = set()
for token, restriction in targets:
# Catch restrictions matching across more than one category.
# Multiple matches in the same category are acceptable.
# The point is that matching across more than one category is
# nearly always unintentional ("pmerge -C spork" without
# realising there are sporks in more than one category), but
# matching more than one cat/pkg is impossible without
# explicit wildcards.
installed = vdb.match(restriction)
if not installed:
fake_pkgs = fake_vdb.match(restriction)
if fake_pkgs:
fake.update(fake_pkgs)
else:
unknown.add(token)
continue
categories = set(pkg.category for pkg in installed)
if len(categories) > 1:
raise parserestrict.ParseError(
"%r is in multiple categories (%s)" % (
token, ', '.join(sorted(set(pkg.key for pkg in installed)))))
matches.update(installed)
# fail out if no matches are found, otherwise just output a notification
if unknown:
unknowns = ', '.join(map(repr, unknown))
if matches:
err.write(f"Skipping unknown matches: {unknowns}\n")
else:
raise Failure(f"no matches found: {unknowns}")
if fake:
err.write('Skipping virtual pkg%s: %s' % (
            pluralism(fake),
', '.join(f'{x.versioned_atom}::{x.repo_id}' for x in fake)))
if matches:
out.write(out.bold, 'The following packages are to be unmerged:')
out.prefix = [out.bold, ' * ', out.reset]
for pkg in matches:
out.write(pkg.cpvstr)
out.prefix = []
repo_obs = observer.repo_observer(
observer.formatter_output(out), debug=options.debug)
if options.pretend:
return
if (options.ask and not formatter.ask("Would you like to unmerge these packages?")):
return
return do_unmerge(options, out, err, vdb, matches, world_set, repo_obs)
def do_unmerge(options, out, err, vdb, matches, world_set, repo_obs):
if vdb.frozen:
if options.force:
out.write(
out.fg('red'), out.bold,
'warning: vdb is frozen, overriding')
vdb.frozen = False
else:
raise Failure('vdb is frozen')
for idx, match in enumerate(matches):
out.write(f"removing {idx + 1} of {len(matches)}: {match}")
out.title(f"{idx + 1}/{len(matches)}: {match}")
op = options.domain.uninstall_pkg(match, observer=repo_obs)
ret = op.finish()
if not ret:
if not options.ignore_failures:
raise Failure(f'failed unmerging {match}')
out.write(out.fg('red'), 'failed unmerging ', match)
pkg = slotatom_if_slotted(vdb, match.versioned_atom)
update_worldset(world_set, pkg, remove=True)
out.write(f"finished; removed {len(matches)} packages")
def display_failures(out, sequence, first_level=True, debug=False):
"""when resolution fails, display a nicely formatted message"""
sequence = iter(sequence)
frame = next(sequence)
if first_level:
# pops below need to exactly match.
out.first_prefix.extend((out.fg("red"), "!!!", out.reset))
out.first_prefix.append(" ")
out.write(f"request {frame.atom}, mode {frame.mode}")
for pkg, steps in sequence:
out.write(f"trying {pkg.cpvstr}")
out.first_prefix.append(" ")
for step in steps:
if isinstance(step, list):
display_failures(out, step, False, debug=debug)
elif step[0] == 'reduce':
out.write("removing choices involving %s" %
', '.join(str(x) for x in step[1]))
elif step[0] == 'blocker':
out.write("blocker %s failed due to %s existing" % (step[1],
', '.join(str(x) for x in step[2])))
elif step[0] == 'cycle':
out.write("%s cycle on %s: %s" % (step[1].mode, step[1].atom, step[2]))
elif step[0] == 'viable' and not step[1]:
out.write("%s: failed %s" % (step[3], step[4]))
elif step[0] == 'choice':
if not step[2]:
out.write("failed due to %s" % (step[3],))
elif step[0] == "debug":
if debug:
out.write(step[1])
else:
out.write(step)
out.first_prefix.pop()
out.first_prefix.pop()
if first_level:
for x in range(3):
out.first_prefix.pop()
def slotatom_if_slotted(repos, checkatom):
"""check repos for more than one slot of given atom"""
if checkatom.slot is None or checkatom.slot[0] != "0":
return checkatom
    found_slots = set()
pkgs = repos.itermatch(checkatom, sorter=sorted)
for pkg in pkgs:
found_slots.update(pkg.slot[0])
if len(found_slots) == 1:
return atom(checkatom.key)
return checkatom
def update_worldset(world_set, pkg, remove=False):
"""record/kill given atom in worldset"""
if world_set is None:
return
if remove:
try:
world_set.remove(pkg)
except KeyError:
# nothing to remove, thus skip the flush
return
else:
world_set.add(pkg)
world_set.flush()
@argparser.bind_final_check
def _validate(parser, namespace):
# nothing to validate if listing pkgsets
if namespace.list_sets:
return
if namespace.unmerge:
if namespace.sets:
parser.error("using sets with -C probably isn't wise, aborting")
if not namespace.targets:
parser.error("you must provide at least one atom")
if namespace.clean:
if namespace.sets or namespace.targets:
parser.error(
"--clean currently cannot be used w/ any sets or targets given")
namespace.sets = ('world', 'system')
namespace.deep = True
namespace.replace = False
if namespace.usepkgonly or namespace.usepkg or namespace.source_only:
parser.error(
'--clean cannot be used with any of the following options: '
'--usepkg --usepkgonly --source-only')
elif namespace.usepkgonly and namespace.usepkg:
parser.error('--usepkg is redundant when --usepkgonly is used')
elif (namespace.usepkgonly or namespace.usepkg) and namespace.source_only:
parser.error("--source-only cannot be used with --usepkg nor --usepkgonly")
elif namespace.nodeps and namespace.onlydeps:
parser.error("-O/--nodeps cannot be used with -o/--onlydeps (it's a no-op)")
if namespace.sets:
unknown_sets = set(namespace.sets).difference(namespace.config.pkgset)
if unknown_sets:
parser.error("unknown set%s: %s (available sets: %s)" % (
pluralism(unknown_sets),
', '.join(sorted(map(repr, unknown_sets))),
', '.join(sorted(namespace.config.pkgset))))
namespace.sets = [(x, namespace.config.pkgset[x]) for x in namespace.sets]
if namespace.upgrade or namespace.downgrade:
namespace.replace = False
if not namespace.targets and not namespace.sets:
parser.error('please specify at least one atom or nonempty set')
if namespace.newuse:
namespace.oneshot = True
if namespace.upgrade:
namespace.resolver_kls = resolver.upgrade_resolver
elif namespace.downgrade:
namespace.resolver_kls = resolver.downgrade_resolver
else:
namespace.resolver_kls = resolver.min_install_resolver
def parse_target(restriction, repo, installed_repos, return_none=False):
"""Use :obj:`parserestrict.parse_match` to produce a list of matches.
This matches the restriction against a repo. If multiple pkgs match and a
simple package name was provided, then the restriction is applied against
installed repos. If multiple matches still exist then pkgs from the
'virtual' category are skipped. If multiple pkgs still match the
restriction, AmbiguousQuery is raised otherwise the matched atom is
returned. On the other hand, if a globbed match was specified, all repo
matches are returned.
:param restriction: string to convert.
:param repo: :obj:`pkgcore.repository.prototype.tree` instance to search in.
:param installed_repos: :obj:`pkgcore.config.domain.all_installed_repos`
instance to search in.
:param return_none: indicates if no matches raises or returns C{None}
:return: a list of matches or C{None}.
"""
key_matches = {x.unversioned_atom for x in repo.itermatch(restriction)}
if not key_matches:
if return_none:
return None
raise NoMatches(restriction)
elif len(key_matches) > 1:
if any(isinstance(r, restricts.PackageDep) for r in iflatten_instance([restriction])):
if len(restriction) > 1:
# drop repo specific restrictions, ebuild repos don't match installed pkgs
restriction = restriction.remove_restriction(
restriction_types=(restricts.RepositoryDep,))
# find installed package matches
matches = {x.unversioned_atom for x in installed_repos.itermatch(restriction)}
# try removing virtuals if there are multiple installed matches or none at all
if not matches:
matches = {x for x in key_matches if x.category != 'virtual'}
elif len(matches) > 1:
matches = {x for x in matches if x.category != 'virtual'}
if len(matches) == 1:
p = matches.pop()
# TODO: collapse redundant restrictions?
return [packages.KeyedAndRestriction(restriction, p, key=p.key)]
raise AmbiguousQuery(restriction, sorted(key_matches))
else:
# if a glob was specified then just return every match
return key_matches
if isinstance(restriction, atom):
# atom is guaranteed to be fine, since it's cat/pkg
return [restriction]
return [packages.KeyedAndRestriction(restriction, key=key_matches.pop().key)]
@argparser.bind_delayed_default(50, name='world')
def load_world(namespace, attr):
value = namespace.config.pkgset['world']
setattr(namespace, attr, value)
def display_pkgsets(out, options):
for name, kls in sorted(options.config.pkgset.items()):
if options.verbosity > 0:
out.write(name)
out.write('\n'.join(' ' + dedent(x) for x in kls.__doc__.split('\n')))
out.write()
if options.verbosity > 1:
out.write('\n'.join(' ' + str(pkg) for pkg in sorted(kls)))
out.write()
else:
out.write(name)
@argparser.bind_main_func
def main(options, out, err):
if options.list_sets:
display_pkgsets(out, options)
return 0
config = options.config
if options.debug:
resolver.plan.limiters.add(None)
domain = options.domain
world_set = world_list = options.world
if options.oneshot:
world_set = None
formatter = options.formatter(
out=out, err=err,
unstable_arch=domain.unstable_arch,
use_expand=domain.profile.use_expand,
use_expand_hidden=domain.profile.use_expand_hidden,
pkg_get_use=domain.get_package_use_unconfigured,
world_list=world_list,
verbosity=options.verbosity,
installed_repos=domain.all_installed_repos,
distdir=domain.fetcher.get_storage_path(),
quiet_repo_display=options.quiet_repo_display)
# This mode does not care about sets and packages so bypass all that.
if options.unmerge:
if not options.oneshot:
if world_set is None:
argparser.error("disable world updating via --oneshot, "
"or fix your configuration")
try:
unmerge(out, err, domain.installed_repos, options.targets, options, formatter, world_set)
except (parserestrict.ParseError, Failure) as e:
argparser.error(e)
return
source_repos = domain.source_repos
installed_repos = domain.installed_repos
pkg_type = 'ebuilds'
if options.usepkgonly:
source_repos = domain.binary_repos
pkg_type = 'binpkgs'
elif options.usepkg:
# binary repos are checked for matches first before ebuild repos
source_repos = domain.binary_repos + domain.ebuild_repos
pkg_type = 'ebuilds or binpkgs'
elif options.source_only:
source_repos = domain.ebuild_repos
atoms = []
for setname, pkgset in options.sets:
if pkgset is None:
return 1
l = list(pkgset)
if not l:
out.write(f"skipping set {setname!r}: set is empty, nothing to update")
else:
atoms.extend(l)
for token, restriction in options.targets:
try:
matches = parse_target(
restriction, source_repos.combined, installed_repos, return_none=True)
except parserestrict.ParseError as e:
e.token = token
argparser.error(e)
if matches is None:
if not options.ignore_failures:
error_msg = [f"no matching {pkg_type}: {token!r}"]
if token in config.pkgset:
error_msg.append(f"use '@{token}' instead for the package set")
elif options.usepkgonly:
matches = parse_target(
restriction, domain.ebuild_repos.combined,
installed_repos, return_none=True)
if matches:
error_msg.append("try re-running without -K/--usepkgonly "
"enabled to rebuild from source")
argparser.error(' -- '.join(error_msg))
else:
atoms.extend(matches)
if not atoms and not options.newuse:
err.write(f"{options.prog}: no targets specified; nothing to do")
return 1
atoms = stable_unique(atoms)
if options.clean and not options.oneshot:
if world_set is None:
argparser.error("disable world updating via --oneshot, or fix your configuration")
extra_kwargs = {}
if options.empty:
extra_kwargs['resolver_cls'] = resolver.empty_tree_merge_plan
if options.debug:
extra_kwargs['debug'] = True
# XXX: This should recurse on deep
if options.newuse:
out.write(out.bold, ' * ', out.reset, 'Scanning for changed USE...')
out.title('Scanning for changed USE...')
for inst_pkg in installed_repos.itermatch(OrRestriction(*atoms)):
src_pkgs = source_repos.match(inst_pkg.versioned_atom)
if src_pkgs:
src_pkg = max(src_pkgs)
inst_iuse = inst_pkg.iuse_stripped
src_iuse = src_pkg.iuse_stripped
inst_flags = inst_iuse.intersection(inst_pkg.use)
src_flags = src_iuse.intersection(src_pkg.use)
if inst_flags.symmetric_difference(src_flags) or \
inst_iuse.symmetric_difference(src_iuse):
atoms.append(src_pkg.unversioned_atom)
excludes = [restriction for token, restriction in options.excludes]
if options.onlydeps:
excludes.extend(atoms)
if excludes:
injected_repo = RestrictionRepo(
repo_id='injected', restrictions=excludes, frozen=True, livefs=True)
installed_repos = injected_repo + installed_repos
# left intentionally in place for ease of debugging.
# from guppy import hpy
# hp = hpy()
# hp.setrelheap()
resolver_inst = options.resolver_kls(
vdbs=installed_repos, dbs=source_repos,
verify_vdb=options.deep, nodeps=options.nodeps,
drop_cycles=options.ignore_cycles, force_replace=options.replace,
process_built_depends=options.with_bdeps, **extra_kwargs)
if options.preload_vdb_state:
out.write(out.bold, ' * ', out.reset, 'Preloading vdb... ')
vdb_time = time()
resolver_inst.load_vdb_state()
vdb_time = time() - vdb_time
else:
vdb_time = 0.0
# flush warning messages before dep resolution begins
out.flush()
err.flush()
failures = []
resolve_time = time()
if sys.stdout.isatty():
out.title('Resolving...')
out.write(out.bold, ' * ', out.reset, 'Resolving...')
out.flush()
ret = resolver_inst.add_atoms(atoms, finalize=True)
while ret:
out.error('resolution failed')
restrict = ret[0][0]
just_failures = reduce_to_failures(ret[1])
display_failures(out, just_failures, debug=options.debug)
failures.append(restrict)
if not options.ignore_failures:
break
out.write("restarting resolution")
atoms = [x for x in atoms if x != restrict]
resolver_inst.reset()
ret = resolver_inst.add_atoms(atoms, finalize=True)
resolve_time = time() - resolve_time
if failures:
out.write()
out.write('Failures encountered:')
for restrict in failures:
out.error(f"failed '{restrict}'")
out.write('potentials:')
match_count = 0
for r in get_raw_repos(source_repos):
l = r.match(restrict)
if l:
out.write(f"repo {r}: [ {', '.join(map(str, l))} ]")
match_count += len(l)
if not match_count:
out.write("No matches found")
if not options.ignore_failures:
return 1
out.write()
resolver_inst.free_caches()
if options.clean:
out.write(out.bold, ' * ', out.reset, 'Packages to be removed:')
vset = set(installed_repos.real.combined)
len_vset = len(vset)
vset.difference_update(x.pkg for x in resolver_inst.state.iter_ops(True))
wipes = sorted(x for x in vset if x.package_is_real)
for x in wipes:
out.write(f"Remove {x}")
out.write()
if wipes:
out.write("removing %i packages of %i installed, %0.2f%%." %
(len(wipes), len_vset, 100*(len(wipes)/float(len_vset))))
else:
out.write("no packages to remove")
if options.pretend:
return 0
if options.ask:
if not formatter.ask("Do you wish to proceed?", default_answer=False):
return 1
out.write()
repo_obs = observer.repo_observer(
observer.formatter_output(out), debug=options.debug)
do_unmerge(options, out, err, installed_repos.real.combined, wipes, world_set, repo_obs)
return 0
if options.debug:
out.write()
out.write(out.bold, ' * ', out.reset, 'debug: all ops')
out.first_prefix.append(" ")
plan_len = len(str(len(resolver_inst.state.plan)))
for pos, op in enumerate(resolver_inst.state.plan):
out.write(str(pos + 1).rjust(plan_len), ': ', str(op))
out.first_prefix.pop()
out.write(out.bold, ' * ', out.reset, 'debug: end all ops')
out.write()
changes = resolver_inst.state.ops(only_real=True)
build_obs = observer.phase_observer(
observer.formatter_output(out), debug=options.debug)
repo_obs = observer.repo_observer(
observer.formatter_output(out), debug=options.debug)
# show pkgs to merge in selected format
if (options.ask or options.pretend) and changes:
for op in changes:
formatter.format(op)
formatter.end()
if vdb_time:
out.write(out.bold, 'Took %.2f' % (vdb_time,), out.reset,
' seconds to preload vdb state')
if changes:
if not options.fetchonly:
# run sanity checks for pkgs -- pkg_pretend, REQUIRED_USE, etc
out.write()
out.write(out.bold, " * ", out.reset, "Running sanity checks...")
if options.debug:
start_time = time()
# flush output so bash spawned errors are shown in the correct order of events
out.flush()
sanity_failures = run_sanity_checks((x.pkg for x in changes), domain, threads=1)
if sanity_failures:
for pkg, errors in sanity_failures.items():
out.write('\n'.join(e.msg(verbosity=options.verbosity) for e in errors))
if options.verbosity > 0:
out.write()
if options.ignore_failures:
out.write(
out.fg('red'), out.bold, "!!! ",
out.reset, "Skipping failed sanity checks...")
else:
out.write(
out.fg('red'), out.bold, "!!! ",
out.reset, "Sanity checks failed, exiting...")
return 1
else:
out.write()
if options.debug:
out.write(
out.bold, " * ", out.reset,
"finished sanity checks in %.2f seconds" % (time() - start_time))
out.write()
elif options.verbosity > 0:
# show skipped virtuals
virtual_pkgs = set()
for x in atoms:
matches = installed_repos.virtual.match(x)
if matches:
virtual_pkgs.add(sorted(matches)[-1])
if virtual_pkgs:
out.write(
"Skipping virtual pkgs:\n%s\n" % '\n'.join(
str(x.versioned_atom) for x in virtual_pkgs))
out.write("Nothing to merge.")
return
if options.pretend:
if options.verbosity > 0:
out.write(
out.bold, ' * ', out.reset,
"resolver plan required %i ops (%.2f seconds)" %
(len(resolver_inst.state.plan), resolve_time))
return
action = 'merge'
if options.fetchonly:
action = 'fetch'
if (options.ask and not formatter.ask(f"Would you like to {action} these packages?")):
return
change_count = len(changes)
# left in place for ease of debugging.
cleanup = []
try:
for count, op in enumerate(changes):
for func in cleanup:
func()
cleanup = []
out.write(f"\nProcessing {count + 1} of {change_count}: "
f"{op.pkg.cpvstr}::{op.pkg.repo}")
out.title(f"{count + 1}/{change_count}: {op.pkg.cpvstr}")
if op.desc != "remove":
cleanup.append(op.pkg.release_cached_data)
if not options.fetchonly and options.debug:
out.write("Forcing a clean of workdir")
pkg_ops = domain.pkg_operations(op.pkg, observer=build_obs)
out.write(f"\n{len(op.pkg.distfiles)} file{pluralism(op.pkg.distfiles)} required-")
if not pkg_ops.run_if_supported("fetch", or_return=True):
out.error(f"fetching failed for {op.pkg.cpvstr}")
if not options.ignore_failures:
return 1
continue
if options.fetchonly:
continue
buildop = pkg_ops.run_if_supported("build", or_return=None)
pkg = op.pkg
if buildop is not None:
out.write(f"building {op.pkg.cpvstr}")
result = False
exc = None
try:
result = buildop.finalize()
except format.BuildError as e:
out.error(f"caught exception building {op.pkg.cpvstr}: {e}")
exc = e
else:
if result is False:
out.error(f"failed building {op.pkg.cpvstr}")
if result is False:
if not options.ignore_failures:
raise ExitException(1) from exc
continue
pkg = result
cleanup.append(pkg.release_cached_data)
pkg_ops = domain.pkg_operations(pkg, observer=build_obs)
cleanup.append(buildop.cleanup)
cleanup.append(partial(pkg_ops.run_if_supported, "cleanup"))
pkg = pkg_ops.run_if_supported("localize", or_return=pkg)
                # wipe this to ensure we don't inadvertently use it further down;
# we aren't resetting it after localizing, so could have the wrong
# set of ops.
del pkg_ops
out.write()
if op.desc == "replace":
if op.old_pkg == pkg:
out.write(f">>> Reinstalling {pkg.cpvstr}")
else:
out.write(f">>> Replacing {op.old_pkg.cpvstr} with {pkg.cpvstr}")
i = domain.replace_pkg(op.old_pkg, pkg, repo_obs)
cleanup.append(op.old_pkg.release_cached_data)
else:
out.write(f">>> Installing {pkg.cpvstr}")
i = domain.install_pkg(pkg, repo_obs)
# force this explicitly- can hold onto a helluva lot more
                    # than we would like.
else:
out.write(f">>> Removing {op.pkg.cpvstr}")
i = domain.uninstall_pkg(op.pkg, repo_obs)
try:
ret = i.finish()
except merge_errors.BlockModification as e:
out.error(f"Failed to merge {op.pkg}: {e}")
if not options.ignore_failures:
return 1
continue
# while this does get handled through each loop, wipe it now; we don't need
# that data, thus we punt it now to keep memory down.
# for safety sake, we let the next pass trigger a release also-
# mainly to protect against any code following triggering reloads
# basically, be protective
if world_set is not None:
if op.desc == "remove":
out.write(f'>>> Removing {op.pkg.cpvstr} from world file')
removal_pkg = slotatom_if_slotted(
source_repos.combined, op.pkg.versioned_atom)
update_worldset(world_set, removal_pkg, remove=True)
elif not options.oneshot and any(x.match(op.pkg) for x in atoms):
if not (options.upgrade or options.downgrade):
out.write(f'>>> Adding {op.pkg.cpvstr} to world file')
add_pkg = slotatom_if_slotted(
source_repos.combined, op.pkg.versioned_atom)
update_worldset(world_set, add_pkg)
# again... left in place for ease of debugging.
# except KeyboardInterrupt:
# import pdb;pdb.set_trace()
# else:
# import pdb;pdb.set_trace()
finally:
pass
# the final run from the loop above doesn't invoke cleanups;
# we could ignore it, but better to run it to ensure nothing is
    # inadvertently held on the way out of this function.
    # makes heapy (guppy) analysis easier if we're careful about it.
for func in cleanup:
func()
# and wipe the reference to the functions to allow things to fall out of
# memory.
cleanup = []
return 0
|
the-stack_0_26486
|
#
# Copyright (c) 2012-2014 by Pawel Tomulik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
__docformat__ = "restructuredText"
"""
Tests declaring variables with SConsGnu.GVar.GVarDeclU() factory method.
"""
import TestSCons
##############################################################################
# GVarDeclU(): Test 2 - declare GVar that is bound to ENV only.
##############################################################################
test = TestSCons.TestSCons()
test.dir_fixture('../../../../SConsGnu', 'site_scons/SConsGnu')
test.write('SConstruct',
"""
# SConstruct
from SConsGnu.GVars import GVarDeclU, ENV, VAR, OPT
list = []
list.append( GVarDeclU('env_x', None, None, 'env x default') )
list.append( GVarDeclU(env_key = 'env_x', default = 'env x default') )
i = 0
for v in list:
print "GVAR[%d].has_xxx_decl(ENV): %r" % (i, v.has_xxx_decl(ENV))
print "GVAR[%d].has_xxx_decl(VAR): %r" % (i, v.has_xxx_decl(VAR))
print "GVAR[%d].has_xxx_decl(OPT): %r" % (i, v.has_xxx_decl(OPT))
print "GVAR[%d].get_xxx_key(ENV): %r" % (i, v.get_xxx_key(ENV))
print "GVAR[%d].get_xxx_default(ENV): %r" % (i, v.get_xxx_default(ENV))
i += 1
""")
test.run()
lines = [
"GVAR[0].has_xxx_decl(ENV): True",
"GVAR[0].has_xxx_decl(VAR): False",
"GVAR[0].has_xxx_decl(OPT): False",
"GVAR[0].get_xxx_key(ENV): 'env_x'",
"GVAR[0].get_xxx_default(ENV): 'env x default'",
"GVAR[1].has_xxx_decl(ENV): True",
"GVAR[1].has_xxx_decl(VAR): False",
"GVAR[1].has_xxx_decl(OPT): False",
"GVAR[1].get_xxx_key(ENV): 'env_x'",
"GVAR[1].get_xxx_default(ENV): 'env x default'",
]
test.must_contain_all_lines(test.stdout(), lines)
test.pass_test()
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
|
the-stack_0_26487
|
from checkov.terraform.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_check import BaseResourceCheck
class CloudfrontDistributionEncryption(BaseResourceCheck):
def __init__(self):
name = "Ensure cloudfront distribution ViewerProtocolPolicy is set to HTTPS"
id = "CKV_AWS_34"
supported_resources = ['aws_cloudfront_distribution']
categories = [CheckCategories.ENCRYPTION]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
"""
Looks for ViewerProtocolPolicy configuration at cloudfront distributions:
https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#viewer_protocol_policy
:param conf: cloudfront configuration
:return: <CheckResult>
"""
if "default_cache_behavior" in conf.keys():
default_viewer_policy = conf["default_cache_behavior"][0]["viewer_protocol_policy"][0]
if default_viewer_policy == "allow-all":
return CheckResult.FAILED
if "ordered_cache_behavior" in conf.keys():
for behavior in conf["ordered_cache_behavior"]:
if behavior["viewer_protocol_policy"][0] == "allow-all":
return CheckResult.FAILED
return CheckResult.PASSED
check = CloudfrontDistributionEncryption()
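# Illustrative only (not part of checkov): the resource conf arrives with values
# wrapped in lists, so this check sees shapes like
#   {"default_cache_behavior": [{"viewer_protocol_policy": ["allow-all"]}]}          -> CheckResult.FAILED
#   {"default_cache_behavior": [{"viewer_protocol_policy": ["redirect-to-https"]}]}  -> CheckResult.PASSED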
|
the-stack_0_26488
|
#!/usr/bin/env python
# coding: utf-8
# <center><img src='https://www.intel.com/content/dam/develop/external/us/en/images/infosim-logo-746616.png' style="width:300px"></center>
# <h1 align="center">StableNet® WeatherMap Restore from Filesystem (XML)</h1>
# <h2>Import necessary python modules</h2>
# In[ ]:
import warnings
import requests
from requests.auth import HTTPBasicAuth
import getpass
from xml.etree import ElementTree
from pathlib import Path
import os, glob
import io
import re
import PIL.Image as Image
import shutil
# <h2>Enter server credentials and the source filesystem path to be used as base</h2>
#
# It is possible to enter either the cleartext or the hashed password for the credentials. However, using the hash is more secure.
# In[ ]:
server_ip = '10.20.20.113'
server_port = '5443'
username = 'infosim'
pw = getpass.getpass('Enter password-hash for user ' + username + ' on the server:')
path = Path.cwd() # the current path (equal to "pwd" in bash)
print("You are currently in directory " + str(path))
new_directory = "backup_" + server_ip #input("Enter the destination directory (Relative or absolute path):")
path = Path(new_directory)
if not os.path.exists(path):
raise SystemExit("The path " + str(path) + " does not exist yet. Please provide an existing folder to read in the Weathermaps from the filesystem")
else:
print("The path " + str(path) + " could be found and will be used as base for Weathermap restore")
# <h2>Get List of Weather Maps from the Filesystem and print name and id</h2>
# In[ ]:
for filename in sorted(os.listdir(path)):
if not filename.endswith('.xml'): continue
fullname = os.path.join(path, filename)
wmap = ElementTree.parse(fullname).getroot()
if 'name' in wmap.attrib:
print('WeatherMap '+wmap.get('obid')+': '+wmap.get('name'))
else:
wmap.set('name',wmap.get('obid'))
print('WeatherMap '+wmap.get('obid'))
# <h2>Restore selected Weather Map to the Server</h2>
# In[ ]:
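# Adjust the ID list below to the WeatherMaps you want to restore; 1041 is only an example ID.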
for wmapid in [1041]:
map = glob.glob(os.path.join(path,str(wmapid)+"*"+".xml"))
if len(map)>0:
print('Restoring map '+str(wmapid)+" from file "+ map[0])
wmap = ElementTree.parse(map[0]).getroot()
warnings.filterwarnings("ignore")
resp = requests.post("https://{}:{}/rest/weathermaps/add/" .format(server_ip, server_port),
verify=False,
auth=HTTPBasicAuth(username, pw),
data=ElementTree.tostring(wmap),
headers={'Content-Type': 'application/xml'}
)
|
the-stack_0_26489
|
from envirophat import motion
from time import sleep
import pantilthat
import math
import pigpio
import keyboard
def track(init_heading, i, motor, prev):
acc = motion.accelerometer()
heading = (motion.heading() + i) % 360
# handle tilt
tilt = math.floor(90 * acc[0])
if tilt > 90:
tilt = 90
elif tilt < -90:
tilt = -90
if prev[0] is None or abs(tilt-prev[0]) > 3:
motor.tilt(tilt)
else:
tilt = prev[0]
# handle pan
heading = heading - init_heading
if heading < -90:
heading = -90
elif heading > 90:
heading = 90
if prev[1] is None or abs(heading - prev[1]) > .5:
motor.pan(heading)
else:
heading = prev[1]
return (tilt, heading)
motor = None
tracking_eh = False  # tracking state shared with the keyboard handler; off until toggled
def main():
# init cam motor
global motor, tracking_eh
motor = pantilthat.PanTilt()
# set up wheel motors
ESC = 4
pi = pigpio.pi()
pi.set_servo_pulsewidth(ESC, 0)
max_val = 2000
min_val = 700
# arming the motors
print("Connect the battery and press enter")
input()
pi.set_servo_pulsewidth(ESC, 0)
sleep(1)
pi.set_servo_pulsewidth(ESC, max_val)
sleep(1)
pi.set_servo_pulsewidth(ESC, min_val)
sleep(1)
print("Arming done")
# cam control functions
def toggle_tracking(_):
global tracking_eh
tracking_eh = not tracking_eh
def move_right(_):
global motor
motor.pan((motor.get_servo_one() + 10) if motor.get_servo_one() < 80 else 90)
def move_left(_):
global motor
motor.pan((motor.get_servo_one() - 10) if motor.get_servo_one() > -80 else -90)
def move_up(_):
global motor
motor.tilt((motor.get_servo_two() + 10) if motor.get_servo_two() < 80 else 90)
def move_down(_):
global motor
motor.tilt((motor.get_servo_two() - 10) if motor.get_servo_two() > -80 else -90)
def go_fast(_):
pi.set_servo_pulsewidth(ESC, max_val)
def so_slow(_):
pi.set_servo_pulsewidth(ESC, min_val)
def stop_motors(_):
pi.set_servo_pulsewidth(ESC, 0)
# cam controls
keyboard.on_press_key("w", move_up)
keyboard.on_press_key("s", move_down)
keyboard.on_press_key("a", move_right)
keyboard.on_press_key("d", move_left)
keyboard.on_press_key(" ", toggle_tracking)
# drive controls
keyboard.on_press_key("up", go_fast)
keyboard.on_press_key("down", so_slow)
for key in ["up", "down"]:
keyboard.on_release_key(key, stop_motors)
# main bot loop
prev = (None, None)
while True:
# handle init heading
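# While tracking is off, keep re-sampling the reference heading; headings near the
# 0/360 wrap-around are shifted by 90 degrees so the pan offset stays continuous.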
if not tracking_eh:
init_heading = motion.heading()
i = 0
if init_heading > 270:
init_heading -= 90
i = -90
elif init_heading < 90:
init_heading += 90
i = 90
else:
prev = track(init_heading, i, motor, prev)
tracking_eh = False
if __name__ == "__main__":
main()
|
the-stack_0_26490
|
"""
General utility functions for machine learning.
"""
import abc
import math
import numpy as np
class ScalarSchedule(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_value(self, t):
pass
class ConstantSchedule(ScalarSchedule):
def __init__(self, value):
self._value = value
def get_value(self, t):
return self._value
class LinearSchedule(ScalarSchedule):
"""
Linearly interpolate and then stop at a final value.
"""
def __init__(
self,
init_value,
final_value,
ramp_duration,
):
self._init_value = init_value
self._final_value = final_value
self._ramp_duration = ramp_duration
def get_value(self, t):
return (
self._init_value
+ (self._final_value - self._init_value)
* min(1.0, t * 1.0 / self._ramp_duration)
)
class IntLinearSchedule(LinearSchedule):
"""
Same as RampUpSchedule but round output to an int
"""
def get_value(self, t):
return int(super().get_value(t))
class PiecewiseLinearSchedule(ScalarSchedule):
"""
Given a list of (x, t) value-time pairs, return value x at time t,
and linearly interpolate between the two
"""
def __init__(
self,
x_values,
y_values,
):
self._x_values = x_values
self._y_values = y_values
def get_value(self, t):
return np.interp(t, self._x_values, self._y_values)
class IntPiecewiseLinearSchedule(PiecewiseLinearSchedule):
def get_value(self, t):
return int(super().get_value(t))
def none_to_infty(bounds):
if bounds is None:
bounds = -math.inf, math.inf
lb, ub = bounds
if lb is None:
lb = -math.inf
if ub is None:
ub = math.inf
return lb, ub
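# Minimal usage sketch (illustrative, not part of the original module): ramp a
# scalar from 0.1 to 1.0 over 100 steps with LinearSchedule.
if __name__ == "__main__":
    demo = LinearSchedule(init_value=0.1, final_value=1.0, ramp_duration=100)
    print(demo.get_value(0), demo.get_value(50), demo.get_value(200))  # 0.1 0.55 1.0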
|
the-stack_0_26491
|
from ctypes import cdll, create_string_buffer
import pytest
import RP1210, os, configparser
from utilities import RP1210ConfigTestUtility
API_NAMES = ["PEAKRP32", "DLAUSB32", "DGDPA5MA", "NULN2R32", "CMNSI632", "CIL7R32"]
INVALID_API_NAMES = ["empty_api", "invalid_api", "extra_empty_api", "invalid_pd_api"]
# These tests are meant to be run with cwd @ repository's highest-level directory
CWD = os.getcwd()
TEST_FILES_DIRECTORY = CWD + ".\\Test\\test-files"
INI_DIRECTORY = TEST_FILES_DIRECTORY + "\\ini-files"
DLL_DIRECTORY = TEST_FILES_DIRECTORY + "\\dlls"
RP121032_PATH = TEST_FILES_DIRECTORY + "\\RP121032.ini"
# try to get Windows Server to load DLLs w/ GitHub Actions
os.add_dll_directory(DLL_DIRECTORY)
os.add_dll_directory(os.getcwd())
os.environ['PATH'] += os.pathsep + DLL_DIRECTORY
for d in os.environ['path'].split(';'): # overboard
if os.path.isdir(d):
os.add_dll_directory(d)
invalid_apis = [] + INVALID_API_NAMES
# Check which APIs are missing dependencies so they can be skipped
for api_name in API_NAMES:
valid = True
try:
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
if api_name not in invalid_apis:
valid = rp1210.getAPI().isValid()
except Exception:
valid = False
if not valid:
invalid_apis.append(api_name)
def test_cwd():
"""Make sure cwd isn't in Test folder."""
cwd = os.getcwd()
assert "RP1210" in cwd
assert "Test" not in cwd
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_api_files_exist(api_name : str):
"""Makes sure all the relevant API files are in test-files directory."""
assert os.path.exists(TEST_FILES_DIRECTORY)
assert os.path.exists(INI_DIRECTORY)
assert os.path.exists(DLL_DIRECTORY)
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
assert os.path.isfile(ini_path)
assert os.path.isfile(RP121032_PATH)
if not api_name in invalid_apis:
assert os.path.isfile(dll_path)
assert cdll.LoadLibrary(dll_path) != None
def test_getAPINames():
"""
Test the getAPINames() function with a custom directory.
Also calls getAPINames() with no argument to make sure there isn't an exception.
"""
RP1210.getAPINames()
for name in RP1210.getAPINames(RP121032_PATH):
assert name in API_NAMES
@pytest.mark.parametrize("rp121032_path", ["bork", "bork.ini", 1234, "RP121032", RP121032_PATH + "x"])
def test_getAPINames_invalid(rp121032_path):
"""
Makes sure we get an exception if we provide an invalid path for getAPINames().
"""
with pytest.raises(FileNotFoundError):
RP1210.getAPINames(rp121032_path)
@pytest.mark.parametrize("api_name", argvalues=API_NAMES + INVALID_API_NAMES)
def test_RP1210Config(api_name : str):
"""
Tests RP1210Config class with all sample files provided in test-files folder.
"""
config = configparser.ConfigParser()
utility = RP1210ConfigTestUtility(config)
config.read(INI_DIRECTORY + "\\" + api_name + ".ini")
rp1210 = RP1210.RP1210Config(api_name, DLL_DIRECTORY, INI_DIRECTORY)
assert rp1210.isValid() == True or api_name in INVALID_API_NAMES
assert rp1210.getAPIName() == api_name
utility.verifydata(rp1210.getName, "VendorInformation", "Name", fallback="(Vendor Name Missing)")
utility.verifydata(rp1210.getAddress1, "VendorInformation", "Address1")
utility.verifydata(rp1210.getAddress2, "VendorInformation", "Address2")
utility.verifydata(rp1210.getCity, "VendorInformation", "City")
utility.verifydata(rp1210.getState, "VendorInformation", "State")
utility.verifydata(rp1210.getCountry, "VendorInformation", "Country")
utility.verifydata(rp1210.getPostal, "VendorInformation", "Postal")
utility.verifydata(rp1210.getTelephone, "VendorInformation", "Telephone")
utility.verifydata(rp1210.getFax, "VendorInformation", "Fax")
utility.verifydata(rp1210.getVendorURL, "VendorInformation", "VendorURL")
utility.verifydata(rp1210.getVersion, "VendorInformation", "Version")
utility.verifydata(rp1210.autoDetectCapable, "VendorInformation", "AutoDetectCapable", fallback=False)
utility.verifydata(rp1210.getAutoDetectCapable, "VendorInformation", "AutoDetectCapable", fallback=False)
utility.verifydata(rp1210.getTimeStampWeight, "VendorInformation", "TimeStampWeight", fallback=1.0)
utility.verifydata(rp1210.getMessageString, "VendorInformation", "MessageString")
utility.verifydata(rp1210.getErrorString, "VendorInformation", "ErrorString")
utility.verifydata(rp1210.getRP1210Version, "VendorInformation", "RP1210")
utility.verifydata(rp1210.getDebugLevel, "VendorInformation", "DebugLevel", fallback=-1)
utility.verifydata(rp1210.getDebugFile, "VendorInformation", "DebugFile")
utility.verifydata(rp1210.getDebugMode, "VendorInformation", "DebugMode", fallback=-1)
utility.verifydata(rp1210.getDebugFileSize, "VendorInformation", "DebugFileSize", fallback=1024)
utility.verifydata(rp1210.getNumberOfSessions, "VendorInformation", "NumberOfRTSCTSSessions", fallback=1)
utility.verifydata(rp1210.getCANAutoBaud, "VendorInformation", "CANAutoBaud", fallback=False)
utility.verifydata(rp1210.getCANFormatsSupported, "VendorInformation", "CANFormatsSupported")
utility.verifydata(rp1210.getJ1939FormatsSupported, "VendorInformation", "J1939FormatsSupported")
utility.verifydata(rp1210.getDeviceIDs, "VendorInformation", "Devices")
utility.verifydata(rp1210.getProtocolIDs, "VendorInformation", "Protocols")
assert rp1210.getName() == rp1210.getDescription()
assert rp1210.getName() in str(rp1210)
assert rp1210.getCANAutoBaud() == rp1210.autoBaudEnabled()
assert rp1210.getProtocol() == rp1210.getProtocol("J1939")
@pytest.mark.parametrize("api_name", argvalues=API_NAMES + INVALID_API_NAMES)
def test_Devices(api_name : str):
config = configparser.ConfigParser()
utility = RP1210ConfigTestUtility(config)
config.read(INI_DIRECTORY + "\\" + api_name + ".ini")
rp1210 = RP1210.RP1210Config(api_name, DLL_DIRECTORY, INI_DIRECTORY)
deviceIDs = rp1210.getDeviceIDs()
for id in deviceIDs:
device = rp1210.getDevice(id)
utility.verifydevicedata(device.getID, id, "DeviceID", fallback=-1)
utility.verifydevicedata(device.getDescription, id, "DeviceDescription")
utility.verifydevicedata(device.getName, id, "DeviceName")
utility.verifydevicedata(device.getParams, id, "DeviceParams")
utility.verifydevicedata(device.getMultiJ1939Channels, id, "MultiJ1939Channels", fallback=0)
utility.verifydevicedata(device.getMultiCANChannels, id, "MultiCANChannels", fallback=0)
if device.getID() == -1:
assert "(Invalid Device)" in str(device)
else:
assert str(device) == str(device.getID()) + " - " + device.getDescription()
assert device in rp1210.getDevices()
with pytest.raises(TypeError):
assert device != "dingus"
@pytest.mark.parametrize("api_name", argvalues=API_NAMES + INVALID_API_NAMES)
def test_Protocols(api_name : str):
config = configparser.ConfigParser()
utility = RP1210ConfigTestUtility(config)
rp1210 = RP1210.RP1210Config(api_name, DLL_DIRECTORY, INI_DIRECTORY)
config.read(INI_DIRECTORY + "\\" + api_name + ".ini")
protocolIDs = rp1210.getProtocolIDs()
assert rp1210.getProtocol("test protocol name") is None
if not api_name in INVALID_API_NAMES:
assert protocolIDs
for id in protocolIDs:
protocol = rp1210.getProtocol(id)
name = protocol.getString()
protocolFromString = rp1210.getProtocol(name)
assert protocolFromString.getString() == name
assert name in rp1210.getProtocolNames()
assert rp1210.getProtocol(name).getString() == name
utility.verifyprotocoldata(protocol.getDescription, id, "ProtocolDescription")
utility.verifyprotocoldata(protocol.getString, id, "ProtocolString")
utility.verifyprotocoldata(protocol.getParams, id, "ProtocolParams")
utility.verifyprotocoldata(protocol.getDevices, id, "Devices")
utility.verifyprotocoldata(protocol.getSpeed, id, "ProtocolSpeed")
assert protocol in rp1210.getProtocols()
with pytest.raises(TypeError):
assert protocol != "dingus"
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_load_DLL(api_name : str):
"""Loads an API's DLL and checks for validity."""
if api_name in invalid_apis:
pytest.skip(f"Skipping {api_name} due to missing dependencies.")
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
# try to load it twice, to make sure they don't collide or something
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
assert rp1210.getAPI().isValid()
assert rp1210.api.getDLL() != None
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
assert rp1210.api.loadDLL() != None
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_disconnected_ClientConnect(api_name : str):
"""Tests whether ClientConnect follows expected behavior when disconnected from device."""
if api_name in invalid_apis:
pytest.skip(f"Skipping {api_name} due to missing dependencies.")
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
deviceID = rp1210.getProtocol("J1939").getDevices()[0]
clientID = rp1210.api.ClientConnect(deviceID, b"J1939:Baud=Auto")
assert RP1210.translateErrorCode(clientID) in RP1210.RP1210_ERRORS.values()
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_disconnected_ClientDisconnect(api_name : str):
"""Tests whether ClientDisconnect follows expected behavior when disconnected from device."""
if api_name in invalid_apis:
pytest.skip(f"Skipping {api_name} due to missing dependencies.")
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
code = rp1210.api.ClientDisconnect(0)
if code < 0:
code += 65536
if api_name == "NULN2R32": # Nexiq drivers can trick computer into thinking it's connected
assert code == 0 or code >= 128
return
assert code >= 128
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_disconnected_ReadVersion(api_name : str):
"""Test RP1210_ReadVersion while adapter is disconnected."""
if api_name in invalid_apis:
pytest.skip(f"Skipping ReadVersion test for {api_name} due to missing dependencies.")
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
buff1 = create_string_buffer(16)
buff2 = create_string_buffer(16)
buff3 = create_string_buffer(16)
buff4 = create_string_buffer(16)
rp1210.api.ReadVersion(buff1, buff2, buff3, buff4)
assert str(buff1) + str(buff2) + str(buff3) + str(buff4) != ""
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_disconnected_ReadVersionDirect(api_name : str):
"""Test ReadVersionDirect while adapter is disconnected."""
if api_name in invalid_apis:
pytest.skip(f"Skipping ReadVersionDirect test for {api_name} due to missing dependencies.")
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
for ver in rp1210.api.ReadVersionDirect():
assert ver != ""
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_disconnected_ReadDetailedVersion(api_name : str):
"""Test ReadDetailedVersion while adapter is disconnected."""
if api_name in invalid_apis:
pytest.skip(f"Skipping ReadDetailedVersion test for {api_name} due to missing dependencies.")
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
buff1 = create_string_buffer(17)
buff2 = create_string_buffer(17)
buff3 = create_string_buffer(17)
ret_val = rp1210.api.ReadDetailedVersion(0, buff1, buff2, buff3)
assert RP1210.translateErrorCode(ret_val) in ["ERR_DLL_NOT_INITIALIZED", "ERR_HARDWARE_NOT_RESPONDING", "ERR_INVALID_CLIENT_ID"]
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_disconnected_GetErrorMsg(api_name : str):
"""Test GetErrorMsg while adapter is disconnected."""
if api_name in invalid_apis:
pytest.skip(f"Skipping GetErrorMsg test for {api_name} due to missing dependencies.")
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
for code in RP1210.RP1210_ERRORS.keys():
msg = rp1210.api.GetErrorMsg(code)
assert msg != ""
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_disconnected_SendCommand(api_name : str):
"""Test SendCommand while adapter is disconnected."""
if api_name in invalid_apis:
pytest.skip(f"Skipping SendCommand test for {api_name} due to missing dependencies.")
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
for command in RP1210.RP1210_COMMANDS:
assert RP1210.translateErrorCode(rp1210.api.SendCommand(command, 0)) in RP1210.RP1210_ERRORS.values()
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_disconnected_GetHardwareStatus(api_name : str):
"""Test GetHardwareStatus while adapter is disconnected."""
if api_name in invalid_apis:
pytest.skip(f"Skipping GetHardwareStatus test for {api_name} due to missing dependencies.")
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
buffer = create_string_buffer(64)
ret_val = rp1210.api.GetHardwareStatus(0, buffer, 64)
if ret_val < 0:
ret_val += 65536
assert not buffer.value
assert ret_val in RP1210.RP1210_ERRORS
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_disconnected_GetHardwareStatusDirect(api_name : str):
"""Test GetHardwareStatusDirect while adapter is disconnected."""
if api_name in invalid_apis:
pytest.skip(f"Skipping GetHardwareStatusDirect test for {api_name} due to missing dependencies.")
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
assert not rp1210.api.GetHardwareStatusDirect(0).value
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_disconnected_RemainingFunctions(api_name : str):
"""Tests whether API functions follow expected behavior when disconnected from device."""
if api_name in invalid_apis:
pytest.skip(f"Skipping 'Remaining Functions' test for {api_name} due to missing dependencies.")
ini_path = INI_DIRECTORY + "\\" + api_name + ".ini"
dll_path = DLL_DIRECTORY + "\\" + api_name + ".dll"
rp1210 = RP1210.RP1210Config(api_name, dll_path, ini_path)
ret_val = rp1210.api.SendMessage(0, b"", 0)
assert RP1210.translateErrorCode(ret_val) in RP1210.RP1210_ERRORS.values()
ret_val = rp1210.api.SendMessage(0, b"12345678", 8)
assert RP1210.translateErrorCode(ret_val) in RP1210.RP1210_ERRORS.values()
read_array_in = create_string_buffer(256)
assert rp1210.api.ReadMessage(128, read_array_in, len(read_array_in)) <= 0
assert not read_array_in.value
read_array_in = create_string_buffer(64)
assert rp1210.api.ReadMessage(0, read_array_in) <= 0
assert not read_array_in.value
assert not rp1210.api.ReadDirect(0)
assert rp1210.api.ReadDetailedVersionDirect(0)
@pytest.mark.parametrize("api_name", argvalues=API_NAMES)
def test_disconnected_rp1210client_commands(api_name):
"""Tests RP1210Client command functions when adapter is disconnected."""
pytest.skip("Tests for RP1210Client are invalid until modular API loading is implemented.")
# TODO: this is copied from old tests & will need to be updated before use!
client = RP1210.RP1210Client()
client.setVendor(api_name)
assert client.getClientID() == 128
clientID = client.connect()
assert clientID in RP1210.RP1210_ERRORS.keys()
assert clientID == client.getClientID()
# sampling of simpler commands
assert client.resetDevice() in RP1210.RP1210_ERRORS.keys()
assert client.setAllFiltersToPass() in RP1210.RP1210_ERRORS.keys()
assert client.setAllFiltersToDiscard() in RP1210.RP1210_ERRORS.keys()
assert client.setEcho(True) in RP1210.RP1210_ERRORS.keys()
assert client.setMessageReceive(True) in RP1210.RP1210_ERRORS.keys()
assert client.releaseJ1939Address(0xEE) in RP1210.RP1210_ERRORS.keys()
assert client.setJ1939FilterType(0) in RP1210.RP1210_ERRORS.keys()
assert client.setCANFilterType(0) in RP1210.RP1210_ERRORS.keys()
assert client.setJ1939InterpacketTime(100) in RP1210.RP1210_ERRORS.keys()
assert client.setMaxErrorMsgSize(100) in RP1210.RP1210_ERRORS.keys()
assert client.disallowConnections() in RP1210.RP1210_ERRORS.keys()
assert client.setJ1939Baud(5) in RP1210.RP1210_ERRORS.keys()
assert client.setBlockingTimeout(20, 30) in RP1210.RP1210_ERRORS.keys()
assert client.flushBuffers() in RP1210.RP1210_ERRORS.keys()
assert client.setCANBaud(5) in RP1210.RP1210_ERRORS.keys()
|
the-stack_0_26492
|
import sh
from urllib.parse import urlparse
def removeDotGit(url):
""" Remove trailing `.git` from the git remote url """
if url.endswith('.git'):
return url[:-4]
return url
def get_self_urn():
""" determine the URN for the repo on github by looking at the remote named
"origin", and parsing it, or using a sensible default. this will allow
local tests on a developer's fork """
# remote is one of these:
# git@github.com:amoffat/chaos
# git@github.com:amoffat/chaos.git
# https://github.com/chaosbot/chaos
# https://github.com/chaosbot/chaos.git
remote = removeDotGit(sh.git.config("--get", "remote.origin.url").strip())
if remote:
if remote.startswith("git@"):
urn = remote.split(":")[1]
else:
parts = urlparse(remote)
urn = parts.path[1:]
# we're not in a git repo, or we have no remotes, so just assume a sensible
# default
else:
urn = "chaosbot/chaos"
return urn
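# For reference, the parsing above maps remotes to URNs like so (examples only):
#   git@github.com:amoffat/chaos.git  -> amoffat/chaos
#   https://github.com/chaosbot/chaos -> chaosbot/chaos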
|
the-stack_0_26494
|
import csv
import json
import sys
args = sys.argv[1:]
f = open(args[0], 'r')
jsonOut = open(args[1], 'w+')
reader = csv.reader(f)
keys = ("lga_name", "number_aboriginal_tsi", "non_indigenous", "not_stated", "total", "indigenous_total_percentage")
# Build one record per CSV row, pairing each column with its key name.
out = [dict(zip(keys, row)) for row in reader]
jsonOut.write(json.dumps(out))
f.close()
jsonOut.close()
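# Example with hypothetical data: a row such as
#   Albury,447,51076,1203,52726,0.85
# becomes {"lga_name": "Albury", "number_aboriginal_tsi": "447", ...} in the JSON output.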
|
the-stack_0_26497
|
import sys, platform, re, pytest
from numpy.core._multiarray_umath import __cpu_features__
def assert_features_equal(actual, desired, fname):
__tracebackhide__ = True # Hide traceback for py.test
actual, desired = str(actual), str(desired)
if actual == desired:
return
detected = str(__cpu_features__).replace("'", "")
try:
with open("/proc/cpuinfo", "r") as fd:
cpuinfo = fd.read(2048)
except Exception as err:
cpuinfo = str(err)
try:
import subprocess
auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
auxv = auxv.decode()
except Exception as err:
auxv = str(err)
import textwrap
error_report = textwrap.indent(
"""
###########################################
### Extra debugging information
###########################################
-------------------------------------------
--- NumPy Detections
-------------------------------------------
%s
-------------------------------------------
--- SYS / CPUINFO
-------------------------------------------
%s....
-------------------------------------------
--- SYS / AUXV
-------------------------------------------
%s
""" % (detected, cpuinfo, auxv), prefix='\r')
raise AssertionError((
"Failure Detection\n"
" NAME: '%s'\n"
" ACTUAL: %s\n"
" DESIRED: %s\n"
"%s"
) % (fname, actual, desired, error_report))
class AbstractTest:
features = []
features_groups = {}
features_map = {}
features_flags = set()
def load_flags(self):
# a hook
pass
def test_features(self):
self.load_flags()
for gname, features in self.features_groups.items():
test_features = [self.cpu_have(f) for f in features]
assert_features_equal(__cpu_features__.get(gname), all(test_features), gname)
for feature_name in self.features:
cpu_have = self.cpu_have(feature_name)
npy_have = __cpu_features__.get(feature_name)
assert_features_equal(npy_have, cpu_have, feature_name)
def cpu_have(self, feature_name):
map_names = self.features_map.get(feature_name, feature_name)
if isinstance(map_names, str):
return map_names in self.features_flags
for f in map_names:
if f in self.features_flags:
return True
return False
def load_flags_cpuinfo(self, magic_key):
self.features_flags = self.get_cpuinfo_item(magic_key)
def get_cpuinfo_item(self, magic_key):
values = set()
with open('/proc/cpuinfo') as fd:
for line in fd:
if not line.startswith(magic_key):
continue
flags_value = [s.strip() for s in line.split(':', 1)]
if len(flags_value) == 2:
values = values.union(flags_value[1].upper().split())
return values
def load_flags_auxv(self):
import subprocess
auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
for at in auxv.split(b'\n'):
if not at.startswith(b"AT_HWCAP"):
continue
hwcap_value = [s.strip() for s in at.split(b':', 1)]
if len(hwcap_value) == 2:
self.features_flags = self.features_flags.union(
hwcap_value[1].upper().decode().split()
)
is_linux = sys.platform.startswith('linux')
is_cygwin = sys.platform.startswith('cygwin')
machine = platform.machine()
is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
@pytest.mark.skipif(
not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
)
class Test_X86_Features(AbstractTest):
features = [
"MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
"AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
"AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
"AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
"AVX512VBMI", "AVX512VBMI2", "AVX512BITALG",
]
features_groups = dict(
AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
"AVX5124VNNIW", "AVX512VPOPCNTDQ"],
AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
"AVX512VBMI"],
AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
"AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
)
features_map = dict(
SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
)
def load_flags(self):
self.load_flags_cpuinfo("flags")
is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
class Test_POWER_Features(AbstractTest):
features = ["VSX", "VSX2", "VSX3", "VSX4"]
features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1")
def load_flags(self):
self.load_flags_auxv()
is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_zarch,
reason="Only for Linux and IBM Z")
class Test_ZARCH_Features(AbstractTest):
features = ["VX", "VXE", "VXE2"]
def load_flags(self):
self.load_flags_auxv()
is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
class Test_ARM_Features(AbstractTest):
features = [
"NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
]
features_groups = dict(
NEON_FP16 = ["NEON", "HALF"],
NEON_VFPV4 = ["NEON", "VFPV4"],
)
def load_flags(self):
self.load_flags_cpuinfo("Features")
arch = self.get_cpuinfo_item("CPU architecture")
# in case of mounting virtual filesystem of aarch64 kernel
is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0
if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
self.features_map = dict(
NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
)
else:
self.features_map = dict(
# ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32)
# doesn't provide information about ASIMD, so we assume that ASIMD is supported
# if the kernel reports any one of the following ARM8 features.
ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32")
)
|
the-stack_0_26498
|
from __future__ import unicode_literals
import json
from moto.core.responses import BaseResponse
from .models import apigateway_backends
from .exceptions import (
ApiKeyNotFoundException,
UsagePlanNotFoundException,
BadRequestException,
CrossAccountNotAllowed,
AuthorizerNotFoundException,
StageNotFoundException,
ApiKeyAlreadyExists,
DomainNameNotFound,
InvalidDomainName,
InvalidRestApiId,
InvalidModelName,
RestAPINotFound,
ModelNotFound,
)
API_KEY_SOURCES = ["AUTHORIZER", "HEADER"]
AUTHORIZER_TYPES = ["TOKEN", "REQUEST", "COGNITO_USER_POOLS"]
ENDPOINT_CONFIGURATION_TYPES = ["PRIVATE", "EDGE", "REGIONAL"]
class APIGatewayResponse(BaseResponse):
def error(self, type_, message, status=400):
return (
status,
self.response_headers,
json.dumps({"__type": type_, "message": message}),
)
def _get_param(self, key):
return json.loads(self.body).get(key) if self.body else None
def _get_param_with_default_value(self, key, default):
jsonbody = json.loads(self.body)
if key in jsonbody:
return jsonbody.get(key)
else:
return default
@property
def backend(self):
return apigateway_backends[self.region]
def restapis(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if self.method == "GET":
apis = self.backend.list_apis()
return 200, {}, json.dumps({"item": [api.to_dict() for api in apis]})
elif self.method == "POST":
name = self._get_param("name")
description = self._get_param("description")
api_key_source = self._get_param("apiKeySource")
endpoint_configuration = self._get_param("endpointConfiguration")
tags = self._get_param("tags")
policy = self._get_param("policy")
# Param validation
if api_key_source and api_key_source not in API_KEY_SOURCES:
return self.error(
"ValidationException",
(
"1 validation error detected: "
"Value '{api_key_source}' at 'createRestApiInput.apiKeySource' failed "
"to satisfy constraint: Member must satisfy enum value set: "
"[AUTHORIZER, HEADER]"
).format(api_key_source=api_key_source),
)
if endpoint_configuration and "types" in endpoint_configuration:
invalid_types = list(
set(endpoint_configuration["types"])
- set(ENDPOINT_CONFIGURATION_TYPES)
)
if invalid_types:
return self.error(
"ValidationException",
(
"1 validation error detected: Value '{endpoint_type}' "
"at 'createRestApiInput.endpointConfiguration.types' failed "
"to satisfy constraint: Member must satisfy enum value set: "
"[PRIVATE, EDGE, REGIONAL]"
).format(endpoint_type=invalid_types[0]),
)
rest_api = self.backend.create_rest_api(
name,
description,
api_key_source=api_key_source,
endpoint_configuration=endpoint_configuration,
tags=tags,
policy=policy,
)
return 200, {}, json.dumps(rest_api.to_dict())
def restapis_individual(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
function_id = self.path.replace("/restapis/", "", 1).split("/")[0]
if self.method == "GET":
rest_api = self.backend.get_rest_api(function_id)
return 200, {}, json.dumps(rest_api.to_dict())
elif self.method == "DELETE":
rest_api = self.backend.delete_rest_api(function_id)
return 200, {}, json.dumps(rest_api.to_dict())
def resources(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
function_id = self.path.replace("/restapis/", "", 1).split("/")[0]
if self.method == "GET":
resources = self.backend.list_resources(function_id)
return (
200,
{},
json.dumps({"item": [resource.to_dict() for resource in resources]}),
)
def resource_individual(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
function_id = self.path.replace("/restapis/", "", 1).split("/")[0]
resource_id = self.path.split("/")[-1]
try:
if self.method == "GET":
resource = self.backend.get_resource(function_id, resource_id)
elif self.method == "POST":
path_part = self._get_param("pathPart")
resource = self.backend.create_resource(
function_id, resource_id, path_part
)
elif self.method == "DELETE":
resource = self.backend.delete_resource(function_id, resource_id)
return 200, {}, json.dumps(resource.to_dict())
except BadRequestException as e:
return self.error(
"com.amazonaws.dynamodb.v20111205#BadRequestException", e.message
)
def resource_methods(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
resource_id = url_path_parts[4]
method_type = url_path_parts[6]
if self.method == "GET":
method = self.backend.get_method(function_id, resource_id, method_type)
return 200, {}, json.dumps(method)
elif self.method == "PUT":
authorization_type = self._get_param("authorizationType")
api_key_required = self._get_param("apiKeyRequired")
method = self.backend.create_method(
function_id,
resource_id,
method_type,
authorization_type,
api_key_required,
)
return 200, {}, json.dumps(method)
def resource_method_responses(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
resource_id = url_path_parts[4]
method_type = url_path_parts[6]
response_code = url_path_parts[8]
if self.method == "GET":
method_response = self.backend.get_method_response(
function_id, resource_id, method_type, response_code
)
elif self.method == "PUT":
method_response = self.backend.create_method_response(
function_id, resource_id, method_type, response_code
)
elif self.method == "DELETE":
method_response = self.backend.delete_method_response(
function_id, resource_id, method_type, response_code
)
return 200, {}, json.dumps(method_response)
def restapis_authorizers(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
restapi_id = url_path_parts[2]
if self.method == "POST":
name = self._get_param("name")
authorizer_type = self._get_param("type")
provider_arns = self._get_param_with_default_value("providerARNs", None)
auth_type = self._get_param_with_default_value("authType", None)
authorizer_uri = self._get_param_with_default_value("authorizerUri", None)
authorizer_credentials = self._get_param_with_default_value(
"authorizerCredentials", None
)
identity_source = self._get_param_with_default_value("identitySource", None)
identiy_validation_expression = self._get_param_with_default_value(
"identityValidationExpression", None
)
authorizer_result_ttl = self._get_param_with_default_value(
"authorizerResultTtlInSeconds", 300
)
# Param validation
if authorizer_type and authorizer_type not in AUTHORIZER_TYPES:
return self.error(
"ValidationException",
(
"1 validation error detected: "
"Value '{authorizer_type}' at 'createAuthorizerInput.type' failed "
"to satisfy constraint: Member must satisfy enum value set: "
"[TOKEN, REQUEST, COGNITO_USER_POOLS]"
).format(authorizer_type=authorizer_type),
)
authorizer_response = self.backend.create_authorizer(
restapi_id,
name,
authorizer_type,
provider_arns=provider_arns,
auth_type=auth_type,
authorizer_uri=authorizer_uri,
authorizer_credentials=authorizer_credentials,
identity_source=identity_source,
identiy_validation_expression=identiy_validation_expression,
authorizer_result_ttl=authorizer_result_ttl,
)
elif self.method == "GET":
authorizers = self.backend.get_authorizers(restapi_id)
return 200, {}, json.dumps({"item": authorizers})
return 200, {}, json.dumps(authorizer_response)
def authorizers(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
restapi_id = url_path_parts[2]
authorizer_id = url_path_parts[4]
if self.method == "GET":
try:
authorizer_response = self.backend.get_authorizer(
restapi_id, authorizer_id
)
except AuthorizerNotFoundException as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
elif self.method == "PATCH":
patch_operations = self._get_param("patchOperations")
authorizer_response = self.backend.update_authorizer(
restapi_id, authorizer_id, patch_operations
)
elif self.method == "DELETE":
self.backend.delete_authorizer(restapi_id, authorizer_id)
return 202, {}, "{}"
return 200, {}, json.dumps(authorizer_response)
def restapis_stages(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
if self.method == "POST":
stage_name = self._get_param("stageName")
deployment_id = self._get_param("deploymentId")
stage_variables = self._get_param_with_default_value("variables", {})
description = self._get_param_with_default_value("description", "")
cacheClusterEnabled = self._get_param_with_default_value(
"cacheClusterEnabled", False
)
cacheClusterSize = self._get_param_with_default_value(
"cacheClusterSize", None
)
stage_response = self.backend.create_stage(
function_id,
stage_name,
deployment_id,
variables=stage_variables,
description=description,
cacheClusterEnabled=cacheClusterEnabled,
cacheClusterSize=cacheClusterSize,
)
elif self.method == "GET":
stages = self.backend.get_stages(function_id)
return 200, {}, json.dumps({"item": stages})
return 200, {}, json.dumps(stage_response)
def stages(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
stage_name = url_path_parts[4]
if self.method == "GET":
try:
stage_response = self.backend.get_stage(function_id, stage_name)
except StageNotFoundException as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
elif self.method == "PATCH":
patch_operations = self._get_param("patchOperations")
stage_response = self.backend.update_stage(
function_id, stage_name, patch_operations
)
elif self.method == "DELETE":
self.backend.delete_stage(function_id, stage_name)
return 202, {}, "{}"
return 200, {}, json.dumps(stage_response)
def integrations(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
resource_id = url_path_parts[4]
method_type = url_path_parts[6]
try:
if self.method == "GET":
integration_response = self.backend.get_integration(
function_id, resource_id, method_type
)
elif self.method == "PUT":
integration_type = self._get_param("type")
uri = self._get_param("uri")
integration_http_method = self._get_param("httpMethod")
creds = self._get_param("credentials")
request_templates = self._get_param("requestTemplates")
integration_response = self.backend.create_integration(
function_id,
resource_id,
method_type,
integration_type,
uri,
credentials=creds,
integration_method=integration_http_method,
request_templates=request_templates,
)
elif self.method == "DELETE":
integration_response = self.backend.delete_integration(
function_id, resource_id, method_type
)
return 200, {}, json.dumps(integration_response)
except BadRequestException as e:
return self.error(
"com.amazonaws.dynamodb.v20111205#BadRequestException", e.message
)
except CrossAccountNotAllowed as e:
return self.error(
"com.amazonaws.dynamodb.v20111205#AccessDeniedException", e.message
)
def integration_responses(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
resource_id = url_path_parts[4]
method_type = url_path_parts[6]
status_code = url_path_parts[9]
try:
if self.method == "GET":
integration_response = self.backend.get_integration_response(
function_id, resource_id, method_type, status_code
)
elif self.method == "PUT":
selection_pattern = self._get_param("selectionPattern")
response_templates = self._get_param("responseTemplates")
integration_response = self.backend.create_integration_response(
function_id,
resource_id,
method_type,
status_code,
selection_pattern,
response_templates,
)
elif self.method == "DELETE":
integration_response = self.backend.delete_integration_response(
function_id, resource_id, method_type, status_code
)
return 200, {}, json.dumps(integration_response)
except BadRequestException as e:
return self.error(
"com.amazonaws.dynamodb.v20111205#BadRequestException", e.message
)
def deployments(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
function_id = self.path.replace("/restapis/", "", 1).split("/")[0]
try:
if self.method == "GET":
deployments = self.backend.get_deployments(function_id)
return 200, {}, json.dumps({"item": deployments})
elif self.method == "POST":
name = self._get_param("stageName")
description = self._get_param_with_default_value("description", "")
stage_variables = self._get_param_with_default_value("variables", {})
deployment = self.backend.create_deployment(
function_id, name, description, stage_variables
)
return 200, {}, json.dumps(deployment)
except BadRequestException as e:
return self.error(
"com.amazonaws.dynamodb.v20111205#BadRequestException", e.message
)
def individual_deployment(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
function_id = url_path_parts[2]
deployment_id = url_path_parts[4]
if self.method == "GET":
deployment = self.backend.get_deployment(function_id, deployment_id)
elif self.method == "DELETE":
deployment = self.backend.delete_deployment(function_id, deployment_id)
return 200, {}, json.dumps(deployment)
def apikeys(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if self.method == "POST":
try:
apikey_response = self.backend.create_apikey(json.loads(self.body))
except ApiKeyAlreadyExists as error:
return (
error.code,
self.headers,
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
elif self.method == "GET":
apikeys_response = self.backend.get_apikeys()
return 200, {}, json.dumps({"item": apikeys_response})
return 200, {}, json.dumps(apikey_response)
def apikey_individual(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
apikey = url_path_parts[2]
if self.method == "GET":
apikey_response = self.backend.get_apikey(apikey)
elif self.method == "PATCH":
patch_operations = self._get_param("patchOperations")
apikey_response = self.backend.update_apikey(apikey, patch_operations)
elif self.method == "DELETE":
apikey_response = self.backend.delete_apikey(apikey)
return 200, {}, json.dumps(apikey_response)
def usage_plans(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if self.method == "POST":
usage_plan_response = self.backend.create_usage_plan(json.loads(self.body))
elif self.method == "GET":
api_key_id = self.querystring.get("keyId", [None])[0]
usage_plans_response = self.backend.get_usage_plans(api_key_id=api_key_id)
return 200, {}, json.dumps({"item": usage_plans_response})
return 200, {}, json.dumps(usage_plan_response)
def usage_plan_individual(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
usage_plan = url_path_parts[2]
if self.method == "GET":
try:
usage_plan_response = self.backend.get_usage_plan(usage_plan)
except (UsagePlanNotFoundException) as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
elif self.method == "DELETE":
usage_plan_response = self.backend.delete_usage_plan(usage_plan)
return 200, {}, json.dumps(usage_plan_response)
def usage_plan_keys(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
usage_plan_id = url_path_parts[2]
if self.method == "POST":
try:
usage_plan_response = self.backend.create_usage_plan_key(
usage_plan_id, json.loads(self.body)
)
except ApiKeyNotFoundException as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
elif self.method == "GET":
usage_plans_response = self.backend.get_usage_plan_keys(usage_plan_id)
return 200, {}, json.dumps({"item": usage_plans_response})
return 200, {}, json.dumps(usage_plan_response)
def usage_plan_key_individual(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
usage_plan_id = url_path_parts[2]
key_id = url_path_parts[4]
if self.method == "GET":
try:
usage_plan_response = self.backend.get_usage_plan_key(
usage_plan_id, key_id
)
except (UsagePlanNotFoundException, ApiKeyNotFoundException) as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
elif self.method == "DELETE":
usage_plan_response = self.backend.delete_usage_plan_key(
usage_plan_id, key_id
)
return 200, {}, json.dumps(usage_plan_response)
def domain_names(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
try:
if self.method == "GET":
domain_names = self.backend.get_domain_names()
return 200, {}, json.dumps({"item": domain_names})
elif self.method == "POST":
domain_name = self._get_param("domainName")
certificate_name = self._get_param("certificateName")
tags = self._get_param("tags")
certificate_arn = self._get_param("certificateArn")
certificate_body = self._get_param("certificateBody")
certificate_private_key = self._get_param("certificatePrivateKey")
certificate_chain = self._get_param("certificateChain")
regional_certificate_name = self._get_param("regionalCertificateName")
regional_certificate_arn = self._get_param("regionalCertificateArn")
endpoint_configuration = self._get_param("endpointConfiguration")
security_policy = self._get_param("securityPolicy")
generate_cli_skeleton = self._get_param("generateCliSkeleton")
domain_name_resp = self.backend.create_domain_name(
domain_name,
certificate_name,
tags,
certificate_arn,
certificate_body,
certificate_private_key,
certificate_chain,
regional_certificate_name,
regional_certificate_arn,
endpoint_configuration,
security_policy,
generate_cli_skeleton,
)
return 200, {}, json.dumps(domain_name_resp)
except InvalidDomainName as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
def domain_name_induvidual(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
domain_name = url_path_parts[2]
domain_names = {}
try:
if self.method == "GET":
if domain_name is not None:
domain_names = self.backend.get_domain_name(domain_name)
return 200, {}, json.dumps(domain_names)
except DomainNameNotFound as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
def models(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
rest_api_id = self.path.replace("/restapis/", "", 1).split("/")[0]
try:
if self.method == "GET":
models = self.backend.get_models(rest_api_id)
return 200, {}, json.dumps({"item": models})
elif self.method == "POST":
name = self._get_param("name")
description = self._get_param("description")
schema = self._get_param("schema")
content_type = self._get_param("contentType")
cli_input_json = self._get_param("cliInputJson")
generate_cli_skeleton = self._get_param("generateCliSkeleton")
model = self.backend.create_model(
rest_api_id,
name,
content_type,
description,
schema,
cli_input_json,
generate_cli_skeleton,
)
return 200, {}, json.dumps(model)
except (InvalidRestApiId, InvalidModelName, RestAPINotFound) as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
def model_induvidual(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
rest_api_id = url_path_parts[2]
model_name = url_path_parts[4]
model_info = {}
try:
if self.method == "GET":
model_info = self.backend.get_model(rest_api_id, model_name)
return 200, {}, json.dumps(model_info)
except (
ModelNotFound,
RestAPINotFound,
InvalidRestApiId,
InvalidModelName,
) as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
|
the-stack_0_26500
|
#!/usr/bin/python
from ansible.module_utils.basic import *
from ansible.module_utils.sensu_api import *
class SensuHook:
def __init__(self, api, name, namespace):
self.api = api
self.name = name
self.namespace = namespace
self.exists = False
def get_data(self):
status_code, data = self.api.get('namespaces/{}/hooks/{}'.format(self.namespace, self.name))
if status_code == 200:
self.exists = True
return data
return {}
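# has_changed() compares the hook's server-side fields with the desired options;
# a managed field that differs, or an unmanaged field that is still set, counts as drift.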
def has_changed(self, options):
data = self.get_data()
data.pop('metadata')
for option, value in data.items():
if not option in options:
if value:
return True
elif options[option] != value:
return True
return False
def create(self, options):
options.update({
'metadata': {
'name': self.name,
'namespace': self.namespace
}
})
self.api.put(
'namespaces/{}/hooks/{}'.format(self.namespace, self.name),
options
)
def delete(self):
self.api.delete(
'namespaces/{}/hooks/{}'.format(self.namespace, self.name)
)
def main():
fields = {
'name': { 'type': 'str', 'required': True },
'namespaces': { 'type': 'list', 'default': ['default'] },
'command': { 'type': 'str', 'required': True },
'timeout': { 'type': 'int', 'default': 10 },
'api_url': { 'type': 'str', 'default': 'http://127.0.0.1:8080' },
'api_user': { 'type': 'str', 'default': 'admin' },
'api_password': { 'type': 'str', 'default': 'P@ssw0rd!', 'no_log': True },
'state': { 'type': 'str', 'default': 'present', 'choices': ['present', 'absent'] }
}
module = AnsibleModule(argument_spec=fields)
changed = False
options = {
'command': module.params['command'],
'timeout': module.params['timeout']
}
api = SensuApi(
module.params['api_url'],
module.params['api_user'],
module.params['api_password']
)
api.auth()
for namespace in module.params['namespaces']:
hook = SensuHook(
api,
module.params['name'],
namespace
)
hook.get_data()
if module.params['state'] == 'present':
if not hook.exists or hook.has_changed(options):
hook.create(options)
changed = True
elif hook.exists:
hook.delete()
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
|
the-stack_0_26507
|
import os
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
os.environ['TF_DUMP_GRAPH_PREFIX'] = '/tmp/tf_graphs/'
import numpy as np
import tensorflow as tf
inp = np.random.rand(10, 10)
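# experimental_compile=True asks TensorFlow to JIT-compile the decorated function
# with XLA; with the env vars above, the dumped graphs should land under /tmp/tf_graphs/.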
@tf.function(experimental_compile=True)
def model_fn(x):
y = tf.sigmoid(tf.matmul(x, x))
return y
print(model_fn(inp))
|
the-stack_0_26508
|
import requests_mock
from CSVFeedApiModule import *
import io
import pytest
def test_get_indicators_1():
"""Test with 1 fieldname"""
feed_url_to_config = {
'https://ipstack.com': {
'fieldnames': ['value'],
'indicator_type': 'IP'
}
}
with open('test_data/ip_ranges.txt') as ip_ranges_txt:
ip_ranges = ip_ranges_txt.read().encode('utf8')
with requests_mock.Mocker() as m:
itype = 'IP'
args = {
'indicator_type': itype,
'limit': 35
}
m.get('https://ipstack.com', content=ip_ranges)
client = Client(
url="https://ipstack.com",
feed_url_to_config=feed_url_to_config,
)
hr, indicators_ec, raw_json = get_indicators_command(client, args)
assert not indicators_ec
for ind_json in raw_json:
ind_val = ind_json.get('value')
ind_type = ind_json.get('type')
ind_rawjson = ind_json.get('rawJSON')
assert ind_val
assert ind_type == itype
assert ind_rawjson['value'] == ind_val
assert ind_rawjson['type'] == ind_type
def test_get_indicators_with_mapping():
"""Test with 1 fieldname"""
feed_url_to_config = {
'https://ipstack.com': {
'fieldnames': ['value', 'a'],
'indicator_type': 'IP',
'mapping': {
'AAA': 'a'
}
}
}
with open('test_data/ip_ranges.txt') as ip_ranges_txt:
ip_ranges = ip_ranges_txt.read()
with requests_mock.Mocker() as m:
itype = 'IP'
args = {
'indicator_type': itype,
'limit': 35
}
m.get('https://ipstack.com', content=ip_ranges.encode('utf-8'))
client = Client(
url="https://ipstack.com",
feed_url_to_config=feed_url_to_config
)
hr, indicators_ec, raw_json = get_indicators_command(client, args)
assert not indicators_ec
for ind_json in raw_json:
ind_val = ind_json.get('value')
ind_map = ind_json['fields'].get('AAA')
ind_type = ind_json.get('type')
ind_rawjson = ind_json.get('rawJSON')
assert ind_val
assert ind_type == itype
assert ind_map == 'a'
assert ind_rawjson['value'] == ind_val
assert ind_rawjson['type'] == ind_type
def test_get_indicators_2():
"""Test with 1 fieldname that's not called indicator"""
feed_url_to_config = {
'https://ipstack.com': {
'fieldnames': ['special_ind'],
'indicator_type': 'IP'
}
}
with open('test_data/ip_ranges.txt') as ip_ranges_txt:
ip_ranges = ip_ranges_txt.read().encode('utf8')
with requests_mock.Mocker() as m:
itype = 'IP'
args = {
'indicator_type': itype,
'limit': 35
}
m.get('https://ipstack.com', content=ip_ranges)
client = Client(
url="https://ipstack.com",
feed_url_to_config=feed_url_to_config,
)
hr, indicators_ec, raw_json = get_indicators_command(client, args)
assert not indicators_ec
for ind_json in raw_json:
ind_val = ind_json.get('value')
ind_type = ind_json.get('type')
ind_rawjson = ind_json.get('rawJSON')
assert ind_val
assert ind_type == itype
assert ind_rawjson['value'] == ind_val
assert ind_rawjson['type'] == ind_type
def test_get_feed_content():
"""Test that it can handle both zipped and unzipped files correctly"""
with open('test_data/ip_ranges.txt', 'rb') as ip_ranges_txt:
ip_ranges_unzipped = ip_ranges_txt.read()
with open('test_data/ip_ranges.gz', 'rb') as ip_ranges_gz:
ip_ranges_zipped = ip_ranges_gz.read()
expected_output = ip_ranges_unzipped.decode('utf8').split('\n')
feed_url_to_config = {
'https://ipstack1.com': {
'content': ip_ranges_unzipped
},
'https://ipstack2.com': {
'content': ip_ranges_unzipped,
'is_zipped_file': False,
},
'https://ipstack3.com': {
'content': ip_ranges_zipped,
'is_zipped_file': True
}
}
with requests_mock.Mocker() as m:
for url in feed_url_to_config:
client = Client(
url=url,
feed_url_to_config=feed_url_to_config,
)
m.get(url, content=feed_url_to_config.get(url).get('content'))
raw_response = requests.get(url)
assert client.get_feed_content_divided_to_lines(url, raw_response) == expected_output
@pytest.mark.parametrize('date_string,expected_result', [
("2020-02-10 13:39:14", '2020-02-10T13:39:14Z'), ("2020-02-10T13:39:14", '2020-02-10T13:39:14Z'),
("2020-02-10 13:39:14.123", '2020-02-10T13:39:14Z'), ("2020-02-10T13:39:14.123", '2020-02-10T13:39:14Z'),
("2020-02-10T13:39:14Z", '2020-02-10T13:39:14Z'), ("2020-11-01T04:16:13-04:00", '2020-11-01T08:16:13Z')])
def test_date_format_parsing(date_string, expected_result):
"""
Given
    - A string representing a date.
When
- running date_format_parsing on the date.
Then
- Ensure the datestring is converted to the ISO-8601 format.
"""
assert expected_result == date_format_parsing(date_string)
class TestTagsParam:
def test_tags_exists(self):
"""
Given:
- tags ['tag1', 'tag2'] params
When:
- Running get indicators/fetch indicators
Then:
- Validating tags key exists with given tags
"""
tags = ['tag1', 'tag2']
feed_url_to_config = {
'https://ipstack.com': {
'fieldnames': ['value'],
'indicator_type': 'IP'
}
}
with open('test_data/ip_ranges.txt') as ip_ranges_txt:
ip_ranges = ip_ranges_txt.read().encode('utf8')
with requests_mock.Mocker() as m:
itype = 'IP'
args = {
'indicator_type': itype,
'limit': 35
}
m.get('https://ipstack.com', content=ip_ranges)
client = Client(
url="https://ipstack.com",
feed_url_to_config=feed_url_to_config,
feedTags=tags
)
_, _, indicators = get_indicators_command(client, args, tags)
assert tags == indicators[0]['fields']['tags']
def test_tags_not_exists(self):
"""
Given:
- No tags param
When:
- Running get indicators/fetch indicators
Then:
- Validating tags key exists with an empty list.
"""
feed_url_to_config = {
'https://ipstack.com': {
'fieldnames': ['value'],
'indicator_type': 'IP'
}
}
with open('test_data/ip_ranges.txt') as ip_ranges_txt:
ip_ranges = ip_ranges_txt.read().encode('utf8')
with requests_mock.Mocker() as m:
itype = 'IP'
args = {
'indicator_type': itype,
'limit': 35
}
m.get('https://ipstack.com', content=ip_ranges)
client = Client(
url="https://ipstack.com",
feed_url_to_config=feed_url_to_config,
feedTags=[]
)
_, _, indicators = get_indicators_command(client, args)
assert [] == indicators[0]['fields']['tags']
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
def test_create_fields_mapping():
"""
Given:
- Raw json of the csv row extracted
When:
- Fetching indicators from csv rows
Then:
- Validate the mapping is done correctly
"""
raw_json = util_load_json("test_data/create_field_mapping_test.json")
mapping = {
'Value': ('Name', '^([A-Z]{1}[a-z]+)', None),
'Country': 'Country Name',
'Count': ('Count', lambda count: 'Low' if count < 5 else 'High')
}
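    # As exercised by this test, a mapping value is either a plain CSV column
    # name or a tuple whose first element is the source column and whose second
    # is a transform: a regex whose first capture group is kept
    # ('Name' -> 'John') or a callable applied to the raw value (the count
    # bucketing lambda); the trailing None appears to be a default value.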
result = create_fields_mapping(raw_json, mapping)
assert result == {
'Value': 'John',
'Country': 'United States',
'Count': 'Low'
}
def test_get_indicators_with_relations():
"""
Given:
- Raw json of the csv row extracted
When:
- Fetching indicators from csv rows
- create_relationships param is set to True
Then:
    - Validate that the returned indicators include relationships.
"""
feed_url_to_config = {
'https://ipstack.com': {
'fieldnames': ['value', 'a'],
'indicator_type': 'IP',
'relationship_entity_b_type': 'IP',
'relationship_name': 'resolved-from',
'mapping': {
'AAA': 'a',
'relationship_entity_b': ('a', r'.*used\s+by\s(.*?)\s', None),
}
}
}
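    # The 'relationship_entity_b' entry reuses the same mapping convention:
    # entity B ('Test' in expected_res below) is pulled out of column 'a'
    # ('Domain used by Test c&c') by the regex capture group.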
expected_res = ([{'value': 'test.com', 'type': 'IP',
'rawJSON': {'value': 'test.com', 'a': 'Domain used by Test c&c',
None: ['2021-04-22 06:03',
'https://test.com/manual/test-iplist.txt'],
'type': 'IP'},
'fields': {'AAA': 'Domain used by Test c&c', 'relationship_entity_b': 'Test',
'tags': []},
'relationships': [
{'name': 'resolved-from', 'reverseName': 'resolves-to', 'type': 'IndicatorToIndicator',
'entityA': 'test.com', 'entityAFamily': 'Indicator', 'entityAType': 'IP',
'entityB': 'Test', 'entityBFamily': 'Indicator', 'entityBType': 'IP',
'fields': {}}]}], True)
ip_ranges = 'test.com,Domain used by Test c&c,2021-04-22 06:03,https://test.com/manual/test-iplist.txt'
with requests_mock.Mocker() as m:
itype = 'IP'
m.get('https://ipstack.com', content=ip_ranges.encode('utf8'))
client = Client(
url="https://ipstack.com",
feed_url_to_config=feed_url_to_config
)
indicators = fetch_indicators_command(client, default_indicator_type=itype, auto_detect=False,
limit=35, create_relationships=True)
assert indicators == expected_res
def test_get_indicators_without_relations():
"""
Given:
- Raw json of the csv row extracted
When:
- Fetching indicators from csv rows
- create_relationships param is set to False
Then:
    - Validate that the returned indicators do not include relationships.
"""
feed_url_to_config = {
'https://ipstack.com': {
'fieldnames': ['value', 'a'],
'indicator_type': 'IP',
'relationship_entity_b_type': 'IP',
'relationship_name': 'resolved-from',
'mapping': {
'AAA': 'a',
'relationship_entity_b': ('a', r'.*used\s+by\s(.*?)\s', None),
}
}
}
expected_res = ([{'value': 'test.com', 'type': 'IP',
'rawJSON': {'value': 'test.com', 'a': 'Domain used by Test c&c',
None: ['2021-04-22 06:03',
'https://test.com/manual/test-iplist.txt'],
'type': 'IP'},
'fields': {'AAA': 'Domain used by Test c&c', 'relationship_entity_b': 'Test',
'tags': []}, 'relationships': []}], True)
ip_ranges = 'test.com,Domain used by Test c&c,2021-04-22 06:03,https://test.com/manual/test-iplist.txt'
with requests_mock.Mocker() as m:
itype = 'IP'
m.get('https://ipstack.com', content=ip_ranges.encode('utf8'))
client = Client(
url="https://ipstack.com",
feed_url_to_config=feed_url_to_config
)
indicators = fetch_indicators_command(client, default_indicator_type=itype, auto_detect=False,
limit=35, create_relationships=False)
assert indicators == expected_res
def test_get_no_update_value(mocker):
"""
Given
- response with last_modified and etag headers with the same values like in the integration context.
When
- Running get_no_update_value method.
Then
- Ensure that the response is False
"""
mocker.patch.object(demisto, 'debug')
class MockResponse:
headers = {'Last-Modified': 'Fri, 30 Jul 2021 00:24:13 GMT', # guardrails-disable-line
'ETag': 'd309ab6e51ed310cf869dab0dfd0d34b'} # guardrails-disable-line
status_code = 200
no_update = get_no_update_value(MockResponse(), 'https://test.com/manual/test-iplist.txt')
assert not no_update
assert demisto.debug.call_args[0][0] == 'New indicators fetched - the Last-Modified value has been updated,' \
' createIndicators will be executed with noUpdate=False.'
def test_build_iterator_not_modified_header(mocker):
"""
Given
- response with status code 304(Not Modified)
When
- Running build_iterator method.
Then
- Ensure that the results are empty and No_update value is True.
"""
mocker.patch.object(demisto, 'debug')
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.5.0"})
with requests_mock.Mocker() as m:
m.get('https://api.github.com/meta', status_code=304)
client = Client(
url='https://api.github.com/meta'
)
result = client.build_iterator()
assert result
assert result[0]['https://api.github.com/meta']
assert list(result[0]['https://api.github.com/meta']['result']) == []
assert result[0]['https://api.github.com/meta']['no_update']
assert demisto.debug.call_args[0][0] == 'No new indicators fetched, ' \
'createIndicators will be executed with noUpdate=True.'
def test_build_iterator_with_version_6_2_0(mocker):
"""
Given
- server version 6.2.0
When
- Running build_iterator method.
Then
- Ensure that the no_update value is True
- Request is called without headers "If-None-Match" and "If-Modified-Since"
"""
mocker.patch.object(demisto, 'debug')
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
with requests_mock.Mocker() as m:
m.get('https://api.github.com/meta', status_code=304)
client = Client(
url='https://api.github.com/meta',
headers={}
)
result = client.build_iterator()
assert result[0]['https://api.github.com/meta']['no_update']
assert 'If-None-Match' not in client.headers
assert 'If-Modified-Since' not in client.headers
def test_get_no_update_value_without_headers(mocker):
"""
Given
- response without last_modified and etag headers.
When
- Running get_no_update_value.
Then
- Ensure that the response is False.
"""
mocker.patch.object(demisto, 'debug')
class MockResponse:
headers = {}
status_code = 200
no_update = get_no_update_value(MockResponse(), 'https://test.com/manual/test-iplist.txt')
assert not no_update
assert demisto.debug.call_args[0][0] == 'Last-Modified and Etag headers are not exists,' \
'createIndicators will be executed with noUpdate=False.'
|
the-stack_0_26509
|
# encoding: utf-8
"""
Unit tests of reader.py.
"""
import os
import sys
import unittest
from pystache.reader import Reader
DATA_DIR = 'tests/data'
class ReaderTestCase(unittest.TestCase):
def _get_path(self, filename):
return os.path.join(DATA_DIR, filename)
def test_init__decode_errors(self):
# Test the default value.
reader = Reader()
self.assertEquals(reader.decode_errors, 'strict')
reader = Reader(decode_errors='replace')
self.assertEquals(reader.decode_errors, 'replace')
def test_init__encoding(self):
# Test the default value.
reader = Reader()
self.assertEquals(reader.encoding, sys.getdefaultencoding())
reader = Reader(encoding='foo')
self.assertEquals(reader.encoding, 'foo')
def test_read(self):
"""
Test read().
"""
reader = Reader()
path = self._get_path('ascii.mustache')
self.assertEquals(reader.read(path), 'ascii: abc')
def test_read__returns_unicode(self):
"""
Test that read() returns unicode strings.
"""
reader = Reader()
path = self._get_path('ascii.mustache')
contents = reader.read(path)
self.assertEqual(type(contents), unicode)
def test_read__encoding(self):
"""
Test read(): encoding attribute respected.
"""
reader = Reader()
path = self._get_path('nonascii.mustache')
self.assertRaises(UnicodeDecodeError, reader.read, path)
reader.encoding = 'utf-8'
self.assertEquals(reader.read(path), u'non-ascii: é')
def test_get__decode_errors(self):
"""
Test get(): decode_errors attribute.
"""
reader = Reader()
path = self._get_path('nonascii.mustache')
self.assertRaises(UnicodeDecodeError, reader.read, path)
reader.decode_errors = 'replace'
self.assertEquals(reader.read(path), u'non-ascii: \ufffd\ufffd')
|
the-stack_0_26510
|
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
DAY_MAPPING = {1: "Su", 2: "Mo", 3: "Tu", 4: "We", 5: "Th", 6: "Fr", 7: "Sa"}
class GiantEagleSpider(scrapy.Spider):
name = "gianteagle"
item_attributes = {"brand": "Giant Eagle"}
    allowed_domains = ["www.gianteagle.com"]
download_delay = 0.2
start_urls = (
"https://www.gianteagle.com/api/sitecore/locations/getlocationlistvm?q=&orderBy=geo.distance(storeCoordinate,%20geography%27POINT(-97.68194299999999%2030.2737366)%27)%20asc&skip=0",
)
items_per_page = 12 # api limit
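    # Pagination: parse() below re-requests the same endpoint with the `skip`
    # query parameter advanced by items_per_page until an empty page is returned.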
def parse_hours(self, hours):
o = OpeningHours()
for h in hours:
day = DAY_MAPPING[h["DayNumber"]]
open = h["Range"].get("Open")
close = h["Range"].get("Close")
if h["IsOpenedAllDay"]:
open = "0:00"
close = "23:59"
elif h["IsClosedAllDay"]:
continue
if open and close:
o.add_range(day=day, open_time=open, close_time=close)
return o.as_opening_hours()
def parse_address(self, address):
return ", ".join(
filter(
                lambda x: bool(x and x != "-"),
[address["address_no"], address["lineOne"], address["lineTwo"]],
)
)
def parse(self, response):
page_regex = re.compile(r"skip=(\d+)")
page = int(page_regex.search(response.url).group(1))
stores = json.loads(response.body_as_unicode())["Locations"] or []
for store in stores:
telephone = [
t["DisplayNumber"]
for t in store["TelephoneNumbers"]
if t["location"]["Item2"] == "Main"
]
properties = dict(
ref=store["Number"]["Value"],
name=store["Name"],
addr_full=self.parse_address(store["Address"]),
lat=store["Address"]["Coordinates"]["Latitude"],
lon=store["Address"]["Coordinates"]["Longitude"],
country="US",
city=store["Address"]["City"],
state=store["Address"]["State"]["Abbreviation"],
postcode=store["Address"]["Zip"],
phone=telephone[0] if telephone else None,
opening_hours=self.parse_hours(store["HoursOfOperation"]),
extras={
"number": store["Number"]["Value"],
"display_name": store["StoreDisplayName"],
},
)
yield GeojsonPointItem(**properties)
if stores:
page += self.items_per_page
yield scrapy.Request(
url=page_regex.sub("skip={}".format(page), response.url),
dont_filter=True,
)
|
the-stack_0_26512
|
import os
import sys
import signal
import datetime
import subprocess
import socket
import json
import platform
import getpass
import atexit
import time
import uuid
import ftrack_api
import pymongo
from openpype.lib import (
get_pype_execute_args,
OpenPypeMongoConnection,
get_openpype_version,
get_build_version,
validate_mongo_connection
)
from openpype_modules.ftrack import FTRACK_MODULE_DIR
from openpype_modules.ftrack.lib import credentials
from openpype_modules.ftrack.ftrack_server.lib import check_ftrack_url
from openpype_modules.ftrack.ftrack_server import socket_thread
class MongoPermissionsError(Exception):
    """Raised when the event server cannot access MongoDB."""
def __init__(self, message=None):
if not message:
            message = "Exiting because of an issue with access to MongoDB"
super().__init__(message)
def check_mongo_url(mongo_uri, log_error=False):
"""Checks if mongo server is responding"""
try:
validate_mongo_connection(mongo_uri)
    except (
        pymongo.errors.InvalidURI,
        pymongo.errors.ServerSelectionTimeoutError
    ) as err:
        if log_error:
            print("Can't connect to MongoDB at {} because: {}".format(
                mongo_uri, err
            ))
        return False
return True
def validate_credentials(url, user, api):
first_validation = True
if not user:
print('- Ftrack Username is not set')
first_validation = False
if not api:
print('- Ftrack API key is not set')
first_validation = False
if not first_validation:
return False
try:
session = ftrack_api.Session(
server_url=url,
api_user=user,
api_key=api
)
session.close()
except Exception as e:
print("Can't log into Ftrack with used credentials:")
ftrack_cred = {
"Ftrack server": str(url),
"Username": str(user),
"API key": str(api)
}
item_lens = [len(key) + 1 for key in ftrack_cred.keys()]
justify_len = max(*item_lens)
for key, value in ftrack_cred.items():
print("{} {}".format(
(key + ":").ljust(justify_len, " "),
value
))
return False
print('DEBUG: Credentials Username: "{}", API key: "{}" are valid.'.format(
user, api
))
return True
def legacy_server(ftrack_url):
# Current file
scripts_dir = os.path.join(FTRACK_MODULE_DIR, "scripts")
min_fail_seconds = 5
max_fail_count = 3
wait_time_after_max_fail = 10
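    # Restart policy: the subprocess is relaunched up to max_fail_count times;
    # failures less than min_fail_seconds apart count as consecutive, and once
    # the limit is hit the loop waits wait_time_after_max_fail seconds after the
    # last failure before resetting the counter.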
subproc = None
subproc_path = "{}/sub_legacy_server.py".format(scripts_dir)
subproc_last_failed = datetime.datetime.now()
subproc_failed_count = 0
ftrack_accessible = False
printed_ftrack_error = False
while True:
if not ftrack_accessible:
ftrack_accessible = check_ftrack_url(ftrack_url)
# Run threads only if Ftrack is accessible
if not ftrack_accessible and not printed_ftrack_error:
print("Can't access Ftrack {} <{}>".format(
ftrack_url, str(datetime.datetime.now())
))
if subproc is not None:
if subproc.poll() is None:
subproc.terminate()
subproc = None
printed_ftrack_error = True
time.sleep(1)
continue
printed_ftrack_error = False
if subproc is None:
if subproc_failed_count < max_fail_count:
args = get_pype_execute_args("run", subproc_path)
subproc = subprocess.Popen(
args,
stdout=subprocess.PIPE
)
elif subproc_failed_count == max_fail_count:
                print((
                    "Storer failed {} times in a row, I'll try to run again {}s later"
).format(str(max_fail_count), str(wait_time_after_max_fail)))
subproc_failed_count += 1
elif ((
datetime.datetime.now() - subproc_last_failed
).seconds > wait_time_after_max_fail):
subproc_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif subproc.poll() is not None:
subproc = None
ftrack_accessible = False
_subproc_last_failed = datetime.datetime.now()
delta_time = (_subproc_last_failed - subproc_last_failed).seconds
if delta_time < min_fail_seconds:
subproc_failed_count += 1
else:
subproc_failed_count = 0
subproc_last_failed = _subproc_last_failed
time.sleep(1)
def main_loop(ftrack_url):
    """Main loop of event handling.

    The loop manages the threads that wrap the event storer and processor
    subprocesses. When one of the threads stops, connectivity to the ftrack
    and mongo servers is re-tested, and the threads are only (re)started while
    both servers are accessible. Running threads must send a heartbeat signal
    over their socket at least once every 30 seconds, otherwise the thread is
    killed.
    """
os.environ["FTRACK_EVENT_SUB_ID"] = str(uuid.uuid1())
mongo_uri = OpenPypeMongoConnection.get_default_mongo_url()
# Current file
scripts_dir = os.path.join(FTRACK_MODULE_DIR, "scripts")
min_fail_seconds = 5
max_fail_count = 3
wait_time_after_max_fail = 10
# Threads data
storer_name = "StorerThread"
storer_port = 10001
storer_path = "{}/sub_event_storer.py".format(scripts_dir)
storer_thread = None
storer_last_failed = datetime.datetime.now()
storer_failed_count = 0
processor_name = "ProcessorThread"
processor_port = 10011
processor_path = "{}/sub_event_processor.py".format(scripts_dir)
processor_thread = None
processor_last_failed = datetime.datetime.now()
processor_failed_count = 0
    statuser_name = "StatusThread"
statuser_port = 10021
statuser_path = "{}/sub_event_status.py".format(scripts_dir)
statuser_thread = None
statuser_last_failed = datetime.datetime.now()
statuser_failed_count = 0
ftrack_accessible = False
mongo_accessible = False
printed_ftrack_error = False
printed_mongo_error = False
# stop threads on exit
# TODO check if works and args have thread objects!
def on_exit(processor_thread, storer_thread, statuser_thread):
if processor_thread is not None:
processor_thread.stop()
processor_thread.join()
processor_thread = None
if storer_thread is not None:
storer_thread.stop()
storer_thread.join()
storer_thread = None
if statuser_thread is not None:
statuser_thread.stop()
statuser_thread.join()
statuser_thread = None
atexit.register(
on_exit,
processor_thread=processor_thread,
storer_thread=storer_thread,
statuser_thread=statuser_thread
)
host_name = socket.gethostname()
main_info = [
["created_at", datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S")],
["Username", getpass.getuser()],
["Host Name", host_name],
["Host IP", socket.gethostbyname(host_name)],
["OpenPype executable", get_pype_execute_args()[-1]],
["OpenPype version", get_openpype_version() or "N/A"],
["OpenPype build version", get_build_version() or "N/A"]
]
main_info_str = json.dumps(main_info)
# Main loop
while True:
# Check if accessible Ftrack and Mongo url
if not ftrack_accessible:
ftrack_accessible = check_ftrack_url(ftrack_url)
if not mongo_accessible:
mongo_accessible = check_mongo_url(mongo_uri)
# Run threads only if Ftrack is accessible
if not ftrack_accessible or not mongo_accessible:
if not mongo_accessible and not printed_mongo_error:
print("Can't access Mongo {}".format(mongo_uri))
if not ftrack_accessible and not printed_ftrack_error:
print("Can't access Ftrack {}".format(ftrack_url))
if storer_thread is not None:
storer_thread.stop()
storer_thread.join()
storer_thread = None
if processor_thread is not None:
processor_thread.stop()
processor_thread.join()
processor_thread = None
printed_ftrack_error = True
printed_mongo_error = True
time.sleep(1)
continue
printed_ftrack_error = False
printed_mongo_error = False
# ====== STATUSER =======
if statuser_thread is None:
if statuser_failed_count < max_fail_count:
statuser_thread = socket_thread.StatusSocketThread(
statuser_name, statuser_port, statuser_path,
[main_info_str]
)
statuser_thread.start()
elif statuser_failed_count == max_fail_count:
                print((
                    "Statuser failed {} times in a row"
" I'll try to run again {}s later"
).format(str(max_fail_count), str(wait_time_after_max_fail)))
statuser_failed_count += 1
elif ((
datetime.datetime.now() - statuser_last_failed
).seconds > wait_time_after_max_fail):
statuser_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not statuser_thread.isAlive():
statuser_thread.join()
statuser_thread = None
ftrack_accessible = False
mongo_accessible = False
_processor_last_failed = datetime.datetime.now()
delta_time = (
_processor_last_failed - statuser_last_failed
).seconds
if delta_time < min_fail_seconds:
statuser_failed_count += 1
else:
statuser_failed_count = 0
statuser_last_failed = _processor_last_failed
elif statuser_thread.stop_subprocess:
print("Main process was stopped by action")
on_exit(processor_thread, storer_thread, statuser_thread)
os.kill(os.getpid(), signal.SIGTERM)
return 1
# ====== STORER =======
        # Run the backup (storer) thread, which does not require mongo to work
if storer_thread is None:
if storer_failed_count < max_fail_count:
storer_thread = socket_thread.SocketThread(
storer_name, storer_port, storer_path
)
storer_thread.start()
elif storer_failed_count == max_fail_count:
                print((
                    "Storer failed {} times in a row, I'll try to run again {}s later"
).format(str(max_fail_count), str(wait_time_after_max_fail)))
storer_failed_count += 1
elif ((
datetime.datetime.now() - storer_last_failed
).seconds > wait_time_after_max_fail):
storer_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not storer_thread.isAlive():
if storer_thread.mongo_error:
raise MongoPermissionsError()
storer_thread.join()
storer_thread = None
ftrack_accessible = False
mongo_accessible = False
_storer_last_failed = datetime.datetime.now()
delta_time = (_storer_last_failed - storer_last_failed).seconds
if delta_time < min_fail_seconds:
storer_failed_count += 1
else:
storer_failed_count = 0
storer_last_failed = _storer_last_failed
# ====== PROCESSOR =======
if processor_thread is None:
if processor_failed_count < max_fail_count:
processor_thread = socket_thread.SocketThread(
processor_name, processor_port, processor_path
)
processor_thread.start()
elif processor_failed_count == max_fail_count:
                print((
                    "Processor failed {} times in a row"
" I'll try to run again {}s later"
).format(str(max_fail_count), str(wait_time_after_max_fail)))
processor_failed_count += 1
elif ((
datetime.datetime.now() - processor_last_failed
).seconds > wait_time_after_max_fail):
processor_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not processor_thread.isAlive():
if processor_thread.mongo_error:
                raise Exception(
                    "Exiting because of an issue with access to MongoDB"
)
processor_thread.join()
processor_thread = None
ftrack_accessible = False
mongo_accessible = False
_processor_last_failed = datetime.datetime.now()
delta_time = (
_processor_last_failed - processor_last_failed
).seconds
if delta_time < min_fail_seconds:
processor_failed_count += 1
else:
processor_failed_count = 0
processor_last_failed = _processor_last_failed
if statuser_thread is not None:
statuser_thread.set_process("storer", storer_thread)
statuser_thread.set_process("processor", processor_thread)
time.sleep(1)
def run_event_server(
ftrack_url,
ftrack_user,
ftrack_api_key,
legacy,
clockify_api_key,
clockify_workspace
):
if not ftrack_user or not ftrack_api_key:
print((
"Ftrack user/api key were not passed."
" Trying to use credentials from user keyring."
))
cred = credentials.get_credentials(ftrack_url)
ftrack_user = cred.get("username")
ftrack_api_key = cred.get("api_key")
if clockify_workspace and clockify_api_key:
os.environ["CLOCKIFY_WORKSPACE"] = clockify_workspace
os.environ["CLOCKIFY_API_KEY"] = clockify_api_key
# Check url regex and accessibility
ftrack_url = check_ftrack_url(ftrack_url)
if not ftrack_url:
print('Exiting! < Please enter Ftrack server url >')
return 1
# Validate entered credentials
if not validate_credentials(ftrack_url, ftrack_user, ftrack_api_key):
print('Exiting! < Please enter valid credentials >')
return 1
# Set Ftrack environments
os.environ["FTRACK_SERVER"] = ftrack_url
os.environ["FTRACK_API_USER"] = ftrack_user
os.environ["FTRACK_API_KEY"] = ftrack_api_key
if legacy:
return legacy_server(ftrack_url)
return main_loop(ftrack_url)
|
the-stack_0_26513
|
import os
import datetime
import zipfile
import urllib
import sys
from colorama import *
# inits colorama
init()
# returns true for Windows as an OS
def isWindows():
return os.name == "nt"
# Returns true if the directory did not exist before
def make_dir(d):
if not os.path.exists(d):
os.makedirs(d)
return True
return False
# Prints colored text surrounded by square brackets with a time stamp before it
def msg(text, color_in = "cyan", indent = 0):
color_in = color_in.upper()
    # Map the color name onto the matching colorama constant; unknown names fall back to no color
    colors = {
        "BLACK": Fore.BLACK, "RED": Fore.RED, "GREEN": Fore.GREEN,
        "YELLOW": Fore.YELLOW, "BLUE": Fore.BLUE, "MAGENTA": Fore.MAGENTA,
        "CYAN": Fore.CYAN, "WHITE": Fore.WHITE,
    }
    color = colors.get(color_in, Fore.RESET)
now = datetime.datetime.now()
text = "[" + str(datetime.time(now.hour, now.minute, now.second)) + "] [" + text + "]"
print(color + '\t' * indent + "%s" % str(text) + Fore.RESET + Back.RESET + Style.RESET_ALL)
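# Example: msg("download complete", "green") prints "[12:34:56] [download complete]" in green.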
# Used for printing messages if the env var is set
def trace(text):
envVar = os.environ.get('TRACE_MSG')
if envVar and int(envVar) == 1:
msg("TRACE] [" + str(text), "RED")
# Extracts a zip file and prints a percentage of the progress
def extractZip(zip, outdir = "./"):
z = zipfile.ZipFile(zip, "r")
uncompress_size = sum((file.file_size for file in z.infolist()))
extracted_size = 0
for file in z.infolist():
extracted_size += file.file_size
        sys.stdout.write("%d %%\r" % (extracted_size * 100 / uncompress_size))
        sys.stdout.flush()
z.extract(file, outdir)
print("100")
# Downloads a file
def downloadFile(url, dest):
msg("downloading " + dest)
try:
if sys.version_info < (3,0):
file = urllib.FancyURLopener()
file.retrieve(url, dest)
else:
from urllib import request
file = request.FancyURLopener()
file.retrieve(url, dest)
except Exception as e:
msg('could not download "' + dest + '" because of this: ' + str(e), "RED")
sys.exit(1)
# using a .zip because python 2.7 can't extract from .7z
def downloadAndExtractZip(url, zip_name, extract_dir):
downloadFile(url, zip_name)
msg("extracting " + zip_name)
extractZip(zip_name, extract_dir)
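# Minimal illustrative usage (the URL and paths below are placeholders, not part
# of this script):
#
#   downloadAndExtractZip("https://example.com/deps.zip", "deps.zip", "./deps")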
|
the-stack_0_26515
|
import logging
import re
import subprocess
import sys
from pathlib import Path
from typing import Optional
from github import Github
from jinja2 import Template
from pydantic import BaseModel, BaseSettings, SecretStr
class Settings(BaseSettings):
github_repository: str
github_event_path: Path
github_event_name: Optional[str] = None
input_token: SecretStr
input_latest_changes_file: Path = Path("README.md")
input_latest_changes_header: str = "### Latest Changes\n\n"
input_template_file: Path = Path(__file__).parent / "latest-changes.jinja2"
input_debug_logs: Optional[bool] = False
class PartialGitHubEventInputs(BaseModel):
number: int
class PartialGitHubEvent(BaseModel):
number: Optional[int] = None
inputs: Optional[PartialGitHubEventInputs] = None
logging.basicConfig(level=logging.INFO)
settings = Settings()
if settings.input_debug_logs:
logging.info(f"Using config: {settings.json()}")
g = Github(settings.input_token.get_secret_value())
repo = g.get_repo(settings.github_repository)
if not settings.github_event_path.is_file():
logging.error(f"No event file was found at: {settings.github_event_path}")
sys.exit(1)
contents = settings.github_event_path.read_text()
event = PartialGitHubEvent.parse_raw(contents)
if event.number is not None:
number = event.number
elif event.inputs and event.inputs.number:
number = event.inputs.number
else:
logging.error(
f"No PR number was found (PR number or workflow input) in the event file at: {settings.github_event_path}"
)
sys.exit(1)
pr = repo.get_pull(number)
if not pr.merged:
logging.info("The PR was not merged, nothing else to do.")
sys.exit(0)
if not settings.input_latest_changes_file.is_file():
logging.error(
f"The latest changes files doesn't seem to exist: {settings.input_latest_changes_file}"
)
sys.exit(1)
logging.info("Setting up GitHub Actions git user")
subprocess.run(["git", "config", "user.name", "github-actions"], check=True)
subprocess.run(["git", "config", "user.email", "[email protected]"], check=True)
logging.info("Pulling the latest changes, including the latest merged PR (this one)")
subprocess.run(["git", "pull"], check=True)
content = settings.input_latest_changes_file.read_text()
match = re.search(settings.input_latest_changes_header, content)
if not match:
logging.error(
f"The latest changes file at: {settings.input_latest_changes_file} doesn't seem to contain the header RegEx: {settings.input_latest_changes_header}"
)
sys.exit(1)
template_content = settings.input_template_file.read_text("utf-8")
template = Template(template_content)
message = template.render(pr=pr)
if message in content:
logging.error(f"It seems these PR's latest changes were already added: {number}")
sys.exit(1)
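# Splice the rendered entry in immediately after the matched header, leaving
# everything before and after it untouched.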
pre_content = content[: match.end()]
post_content = content[match.end() :]
new_content = pre_content + message + post_content
settings.input_latest_changes_file.write_text(new_content)
logging.info(f"Committing changes to: {settings.input_latest_changes_file}")
subprocess.run(["git", "add", str(settings.input_latest_changes_file)], check=True)
subprocess.run(["git", "commit", "-m", "📝 Update release notes"], check=True)
logging.info(f"Pushing changes: {settings.input_latest_changes_file}")
subprocess.run(["git", "push"], check=True)
logging.info("Finished")
|
the-stack_0_26516
|
import tenable
import security_center
import report_generation
import logging
from typing import List, Tuple
import concurrent.futures
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-15s [%(levelname)-8s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# Fetch vulnerabilities from the scanner that matches the requested environment
def fetch(env: str) -> Tuple[List, str]:
vulnerabilities = {
'prod': security_center.fetch_vulnerabilities,
'gcp': tenable.fetch_gcp_vulnerabilities,
'corp': tenable.fetch_workstation_vulnerabilities
}
result = vulnerabilities[env]()
return result, env
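# fetch() returns the environment name alongside the result so that the
# as_completed() loop in main() can tell which report generator to call.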
def main():
tenable_environment = ['gcp', 'corp']
with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
fs = [executor.submit(fetch, env) for env in tenable_environment]
for future in concurrent.futures.as_completed(fs):
if future.result():
result, env = future.result()
if env == 'prod' and result:
report_generation.generate_prod_reports(result)
elif env == 'gcp' and result:
report_generation.generate_prod_reports(result, gcp=True)
elif env == 'corp' and result:
report_generation.generate_corp_reports(result)
nessus_vulns, env = fetch('prod')
report_generation.generate_prod_reports(nessus_vulns)
if __name__ == '__main__':
main()
|